file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
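Each row below is one sample: a source file split into a `prefix`, a held-out `middle`, and a `suffix`, with `fim_type` naming how the hole was cut (the four classes that appear in the rows are `random_line_split`, `conditional_block`, `identifier_body`, and `identifier_name`). As a minimal sketch of how a row recombines, assuming the columns simply concatenate in source order (which the rows below are consistent with), a hypothetical helper might look like this:

```python
# Minimal sketch, not a dataset row: rebuild the original source text
# from one sample's columns. Field names follow the schema above; the
# example values are abbreviated stand-ins, not actual row contents.
def reconstruct(row: dict) -> str:
    # For every fim_type the original file is prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

row = {
    "file_name": "uid_filter.rs",
    "prefix": "fn ",          # text before the held-out span
    "middle": "test",         # the span a FIM model must fill in
    "suffix": "(&self, dir_entry: &DirEntry) -> bool { /* ... */ }",
    "fim_type": "identifier_name",
}
assert reconstruct(row) == "fn test(&self, dir_entry: &DirEntry) -> bool { /* ... */ }"
```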
ChildNodeInterface.js | /*
* Copyright 2013 The Polymer Authors. All rights reserved.
* Use of this source code is governed by a BSD-style
* license that can be found in the LICENSE file.
*/
suite('ChildNodeInterface', function() {
function getTree() {
var tree = {};
var div = tree.div = document.createElement('div');
div.innerHTML = 'a<b></b>c<d></d>e';
var a = tree.a = div.firstChild;
var b = tree.b = a.nextSibling;
var c = tree.c = b.nextSibling;
var d = tree.d = c.nextSibling;
var e = tree.e = d.nextSibling;
var sr = tree.sr = div.createShadowRoot();
sr.innerHTML = 'f<g></g>h<content></content>i<j></j>k';
var f = tree.f = sr.firstChild;
var g = tree.g = f.nextSibling;
var h = tree.h = g.nextSibling;
var content = tree.content = h.nextSibling;
var i = tree.i = content.nextSibling;
var j = tree.j = i.nextSibling;
var k = tree.k = j.nextSibling;
div.offsetHeight; // trigger rendering
return tree;
}
test('nextElementSibling', function() {
var tree = getTree();
assert.equal(tree.b.nextElementSibling, tree.d);
assert.equal(tree.d.nextElementSibling, null);
assert.equal(tree.g.nextElementSibling, tree.content);
assert.equal(tree.content.nextElementSibling, tree.j);
assert.equal(tree.j.nextElementSibling, null);
});
test('previousElementSibling', function() {
var tree = getTree();
assert.equal(tree.b.previousElementSibling, null);
assert.equal(tree.d.previousElementSibling, tree.b);
assert.equal(tree.g.previousElementSibling, null);
assert.equal(tree.content.previousElementSibling, tree.g);
assert.equal(tree.j.previousElementSibling, tree.content); | div.innerHTML = '<a></a>';
var a = div.firstChild;
a.remove();
assert.equal(div.firstChild, null);
assert.equal(a.parentNode, null);
// no op.
div.remove();
});
}); | });
test('remove', function() {
var div = document.createElement('div'); | random_line_split |
111815001.py | import random
import math
import sympy
from sympy import latex, fraction, Symbol, Rational
localid =11181500100000
letter=["a","b","c","d"]
n=[0,0,0,0,0,0]
m=[0,0,0,0,0]
f = open("111815001.tex","w") #opens file with name of "test.txt"
for x in range(0, 1000):
localid = localid +1
writewrong=["\correctchoice{\(","\wrongchoice{\(","\wrongchoice{\(","\wrongchoice{\("]
for count in range (0,5):
n[count]=random.randint(-20, 20)
m[1]=n[4]-n[2]
m[2]=n[3]-n[1]
m[3]=n[2]-n[1]
m[4]=n[4]-n[3]
if n[2]==n[4]:
letter[0]='undefined'
letter[2]=latex(Rational(-m[3],m[2]))
letter[3]=latex(Rational(-m[4],m[3]))
letter[1]=latex(Rational(m[4],m[3]))
else:
letter[0]=latex(Rational(m[1],m[2]))
letter[1]=latex(Rational(-m[1],m[2]))
letter[2]=latex(Rational(-m[2],m[1]))
letter[3]=latex(Rational(m[2],m[1]))
zz=random.randint(1,6)
if zz==1:
letter[1]=latex(Rational(m[4],m[3]))
elif zz==2:
letter[2]=latex(Rational(m[4],m[3]))
elif zz==3:
letter[3]=latex(Rational(m[4],m[3]))
n[5]=random.randint(0,10)
if n[2]==n[4]:
letter[0]='undefined'
elif n[5]==8:
zz=random.randint(1,3)
letter[zz]='undefined'
if(len(letter)==4):
for z in range (0, 4): | writewrong[z]=writewrong[z]+str(letter[z])
random.shuffle(writewrong)
f.write("\n\n\n")
f.write("\\element{slope}{")
f.write("\n")
f.write("\\begin{question}{")
f.write(str(localid))
f.write("}")
f.write("\n")
f.write("Find the slope using points: (")
f.write(str(n[1]))
f.write(",")
f.write(str(n[2]))
f.write(") and (")
f.write(str(n[3]))
f.write(",")
f.write(str(n[4]))
f.write("):")
f.write("\n")
f.write("\\begin{choiceshoriz}")
f.write("\n")
for y in range(0, 4):
f.write("\n")
f.write(writewrong[y])
f.write("\)}")
f.write("\n")
f.write("\\end{choiceshoriz}")
f.write("\n")
f.write("\\end{question}")
f.write("\n")
f.write("}")
f.close() | random_line_split |
|
111815001.py | import random
import math
import sympy
from sympy import latex, fraction, Symbol, Rational
localid =11181500100000
letter=["a","b","c","d"]
n=[0,0,0,0,0,0]
m=[0,0,0,0,0]
f = open("111815001.tex","w") #opens file with name of "test.txt"
for x in range(0, 1000):
localid = localid +1
writewrong=["\correctchoice{\(","\wrongchoice{\(","\wrongchoice{\(","\wrongchoice{\("]
for count in range (0,5):
n[count]=random.randint(-20, 20)
m[1]=n[4]-n[2]
m[2]=n[3]-n[1]
m[3]=n[2]-n[1]
m[4]=n[4]-n[3]
if n[2]==n[4]:
letter[0]='undefined'
letter[2]=latex(Rational(-m[3],m[2]))
letter[3]=latex(Rational(-m[4],m[3]))
letter[1]=latex(Rational(m[4],m[3]))
else:
letter[0]=latex(Rational(m[1],m[2]))
letter[1]=latex(Rational(-m[1],m[2]))
letter[2]=latex(Rational(-m[2],m[1]))
letter[3]=latex(Rational(m[2],m[1]))
zz=random.randint(1,6)
if zz==1:
letter[1]=latex(Rational(m[4],m[3]))
elif zz==2:
letter[2]=latex(Rational(m[4],m[3]))
elif zz==3:
letter[3]=latex(Rational(m[4],m[3]))
n[5]=random.randint(0,10)
if n[2]==n[4]:
letter[0]='undefined'
elif n[5]==8:
zz=random.randint(1,3)
letter[zz]='undefined'
if(len(letter)==4):
for z in range (0, 4):
|
random.shuffle(writewrong)
f.write("\n\n\n")
f.write("\\element{slope}{")
f.write("\n")
f.write("\\begin{question}{")
f.write(str(localid))
f.write("}")
f.write("\n")
f.write("Find the slope using points: (")
f.write(str(n[1]))
f.write(",")
f.write(str(n[2]))
f.write(") and (")
f.write(str(n[3]))
f.write(",")
f.write(str(n[4]))
f.write("):")
f.write("\n")
f.write("\\begin{choiceshoriz}")
f.write("\n")
for y in range(0, 4):
f.write("\n")
f.write(writewrong[y])
f.write("\)}")
f.write("\n")
f.write("\\end{choiceshoriz}")
f.write("\n")
f.write("\\end{question}")
f.write("\n")
f.write("}")
f.close()
| writewrong[z]=writewrong[z]+str(letter[z]) | conditional_block |
Entity.ts | /**
* @fileOverview Abstract interface for entities
* @author <a href="mailto:[email protected]">Tim Hollies</a>
* @version 0.0.1
*/
import { Serializable } from './Serializable';
export class Entity implements Serializable {
public uid: number | null;
public entityType: number; // TODO: ideally this should be readonly
public parent: number | null;
public readonly readonly: boolean;
public label: string;
public creator: number;
public creationTimestamp: string;
public lastmodifiedTimestamp: string;
public deserialize(data: any) : Entity |
public serialize() : any {
return this;
}
} | {
this.uid = data.uid;
this.entityType = data.entityType;
this.label = data.label;
this.parent = data.parent;
this.creator = data.creator;
this.creationTimestamp = data.creationTimestamp;
this.lastmodifiedTimestamp = data.lastmodifiedTimestamp;
return this;
} | identifier_body |
Entity.ts | /**
* @fileOverview Abstract interface for entities
* @author <a href="mailto:[email protected]">Tim Hollies</a>
* @version 0.0.1
*/
import { Serializable } from './Serializable';
export class Entity implements Serializable {
public uid: number | null;
public entityType: number; // TODO: ideally this should be readonly
public parent: number | null;
public readonly readonly: boolean;
public label: string;
public creator: number;
public creationTimestamp: string;
public lastmodifiedTimestamp: string;
public | (data: any) : Entity {
this.uid = data.uid;
this.entityType = data.entityType;
this.label = data.label;
this.parent = data.parent;
this.creator = data.creator;
this.creationTimestamp = data.creationTimestamp;
this.lastmodifiedTimestamp = data.lastmodifiedTimestamp;
return this;
}
public serialize() : any {
return this;
}
} | deserialize | identifier_name |
Entity.ts | /**
* @fileOverview Abstract interface for entities
* @author <a href="mailto:[email protected]">Tim Hollies</a>
* @version 0.0.1
*/
import { Serializable } from './Serializable';
export class Entity implements Serializable {
public uid: number | null;
public entityType: number; // TODO: ideally this should be readonly
public parent: number | null;
public readonly readonly: boolean;
public label: string;
public creator: number;
public creationTimestamp: string;
public lastmodifiedTimestamp: string;
public deserialize(data: any) : Entity {
this.uid = data.uid;
this.entityType = data.entityType;
this.label = data.label;
this.parent = data.parent; |
this.creationTimestamp = data.creationTimestamp;
this.lastmodifiedTimestamp = data.lastmodifiedTimestamp;
return this;
}
public serialize() : any {
return this;
}
} | this.creator = data.creator; | random_line_split |
pyrenamer_menu_cb.py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2006-2008 Adolfo González Blázquez <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
If you find any bugs or have any suggestions email: [email protected]
"""
class PyrenamerMenuCB:
def __init__(self, main):
self.main = main
def on_menu_undo_activate(self, widget):
self.main.undo_manager.undo()
self.main.dir_reload_current()
self.main.menu_undo.set_sensitive(False)
self.main.menu_redo.set_sensitive(True)
def on_menu_redo_activate(self, widget):
self.main.undo_manager.redo()
self.main.dir_reload_current()
self.main.menu_undo.set_sensitive(True)
self.main.menu_redo.set_sensitive(False)
def on_menu_refresh_activate(self, widget):
se | def on_menu_patterns_activate(self, widget):
self.main.notebook.set_current_page(0)
def on_menu_substitutions_activate(self, widget):
self.main.notebook.set_current_page(1)
def on_menu_insert_activate(self, widget):
self.main.notebook.set_current_page(2)
def on_menu_manual_activate(self, widget):
self.main.notebook.set_current_page(3)
def on_menu_images_activate(self, widget):
self.main.notebook.set_current_page(4)
def on_menu_music_activate(self, widget):
self.main.notebook.set_current_page(5)
def on_menu_show_options_activate(self, widget):
self.main.options_panel_state(widget.get_active()) | lf.main.file_browser.create_new()
self.main.file_browser.set_active_dir(self.main.active_dir)
| identifier_body |
pyrenamer_menu_cb.py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2006-2008 Adolfo González Blázquez <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
If you find any bugs or have any suggestions email: [email protected]
"""
class Py |
def __init__(self, main):
self.main = main
def on_menu_undo_activate(self, widget):
self.main.undo_manager.undo()
self.main.dir_reload_current()
self.main.menu_undo.set_sensitive(False)
self.main.menu_redo.set_sensitive(True)
def on_menu_redo_activate(self, widget):
self.main.undo_manager.redo()
self.main.dir_reload_current()
self.main.menu_undo.set_sensitive(True)
self.main.menu_redo.set_sensitive(False)
def on_menu_refresh_activate(self, widget):
self.main.file_browser.create_new()
self.main.file_browser.set_active_dir(self.main.active_dir)
def on_menu_patterns_activate(self, widget):
self.main.notebook.set_current_page(0)
def on_menu_substitutions_activate(self, widget):
self.main.notebook.set_current_page(1)
def on_menu_insert_activate(self, widget):
self.main.notebook.set_current_page(2)
def on_menu_manual_activate(self, widget):
self.main.notebook.set_current_page(3)
def on_menu_images_activate(self, widget):
self.main.notebook.set_current_page(4)
def on_menu_music_activate(self, widget):
self.main.notebook.set_current_page(5)
def on_menu_show_options_activate(self, widget):
self.main.options_panel_state(widget.get_active()) | renamerMenuCB:
| identifier_name |
pyrenamer_menu_cb.py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2006-2008 Adolfo González Blázquez <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
If you find any bugs or have any suggestions email: [email protected]
"""
class PyrenamerMenuCB:
def __init__(self, main):
self.main = main
def on_menu_undo_activate(self, widget):
self.main.undo_manager.undo()
self.main.dir_reload_current()
self.main.menu_undo.set_sensitive(False)
self.main.menu_redo.set_sensitive(True)
def on_menu_redo_activate(self, widget):
self.main.undo_manager.redo()
self.main.dir_reload_current()
self.main.menu_undo.set_sensitive(True)
self.main.menu_redo.set_sensitive(False)
def on_menu_refresh_activate(self, widget):
self.main.file_browser.create_new()
self.main.file_browser.set_active_dir(self.main.active_dir)
def on_menu_patterns_activate(self, widget):
self.main.notebook.set_current_page(0)
def on_menu_substitutions_activate(self, widget):
self.main.notebook.set_current_page(1)
def on_menu_insert_activate(self, widget):
self.main.notebook.set_current_page(2)
def on_menu_manual_activate(self, widget):
self.main.notebook.set_current_page(3)
def on_menu_images_activate(self, widget):
self.main.notebook.set_current_page(4)
def on_menu_music_activate(self, widget):
self.main.notebook.set_current_page(5)
def on_menu_show_options_activate(self, widget):
self.main.options_panel_state(widget.get_active()) | random_line_split |
|
video.rs | const VI_V_CURRENT_REG: u32 = 0x10;
const VI_INTR_REG: u32 = 0x0c;
const VI_H_START_REG: u32 = 0x24;
#[derive(Default, Debug)]
pub struct Video {
intr_half_line: u32,
horizontal_video_start: u16,
horizontal_video_end: u16,
current_vertical_line: u16,
}
impl Video {
pub fn read(&self, addr: u32) -> u32 {
match addr {
VI_INTR_REG => self.read_halfline(),
VI_H_START_REG => self.read_h_video(),
VI_V_CURRENT_REG => self.read_current_vertical_line() as u32,
_ => panic!("Unknown address in Video {:#x}", addr),
}
}
pub fn write(&mut self, addr: u32, value: u32) {
match addr {
VI_INTR_REG => self.write_halfline(value),
VI_H_START_REG => self.write_h_video(value),
VI_V_CURRENT_REG => self.write_current_vertical_line(value),
_ => {
panic!("Cannot write to register in Video {:#x} <- {:#x}",
addr,
value)
}
}
}
fn read_halfline(&self) -> u32 {
self.intr_half_line
}
fn write_halfline(&mut self, value: u32) {
self.intr_half_line = value & 0x3ff;
}
fn read_h_video(&self) -> u32 {
(self.horizontal_video_start as u32) << 16 | (self.horizontal_video_end as u32)
}
fn write_h_video(&mut self, value: u32) {
self.horizontal_video_start = (value >> 16 & 0x3ff) as u16;
self.horizontal_video_end = (value & 0x3ff) as u16;
}
fn read_current_vertical_line(&self) -> u16 { | self.current_vertical_line = (value & 0x3ff) as u16;
// TODO clear interrupt line
}
} | self.current_vertical_line & 0x3ff
}
fn write_current_vertical_line(&mut self, value: u32) { | random_line_split |
video.rs | const VI_V_CURRENT_REG: u32 = 0x10;
const VI_INTR_REG: u32 = 0x0c;
const VI_H_START_REG: u32 = 0x24;
#[derive(Default, Debug)]
pub struct Video {
intr_half_line: u32,
horizontal_video_start: u16,
horizontal_video_end: u16,
current_vertical_line: u16,
}
impl Video {
pub fn read(&self, addr: u32) -> u32 {
match addr {
VI_INTR_REG => self.read_halfline(),
VI_H_START_REG => self.read_h_video(),
VI_V_CURRENT_REG => self.read_current_vertical_line() as u32,
_ => panic!("Unknown address in Video {:#x}", addr),
}
}
pub fn | (&mut self, addr: u32, value: u32) {
match addr {
VI_INTR_REG => self.write_halfline(value),
VI_H_START_REG => self.write_h_video(value),
VI_V_CURRENT_REG => self.write_current_vertical_line(value),
_ => {
panic!("Cannot write to register in Video {:#x} <- {:#x}",
addr,
value)
}
}
}
fn read_halfline(&self) -> u32 {
self.intr_half_line
}
fn write_halfline(&mut self, value: u32) {
self.intr_half_line = value & 0x3ff;
}
fn read_h_video(&self) -> u32 {
(self.horizontal_video_start as u32) << 16 | (self.horizontal_video_end as u32)
}
fn write_h_video(&mut self, value: u32) {
self.horizontal_video_start = (value >> 16 & 0x3ff) as u16;
self.horizontal_video_end = (value & 0x3ff) as u16;
}
fn read_current_vertical_line(&self) -> u16 {
self.current_vertical_line & 0x3ff
}
fn write_current_vertical_line(&mut self, value: u32) {
self.current_vertical_line = (value & 0x3ff) as u16;
// TODO clear interrupt line
}
}
| write | identifier_name |
video.rs | const VI_V_CURRENT_REG: u32 = 0x10;
const VI_INTR_REG: u32 = 0x0c;
const VI_H_START_REG: u32 = 0x24;
#[derive(Default, Debug)]
pub struct Video {
intr_half_line: u32,
horizontal_video_start: u16,
horizontal_video_end: u16,
current_vertical_line: u16,
}
impl Video {
pub fn read(&self, addr: u32) -> u32 {
match addr {
VI_INTR_REG => self.read_halfline(),
VI_H_START_REG => self.read_h_video(),
VI_V_CURRENT_REG => self.read_current_vertical_line() as u32,
_ => panic!("Unknown address in Video {:#x}", addr),
}
}
pub fn write(&mut self, addr: u32, value: u32) {
match addr {
VI_INTR_REG => self.write_halfline(value),
VI_H_START_REG => self.write_h_video(value),
VI_V_CURRENT_REG => self.write_current_vertical_line(value),
_ => {
panic!("Cannot write to register in Video {:#x} <- {:#x}",
addr,
value)
}
}
}
fn read_halfline(&self) -> u32 |
fn write_halfline(&mut self, value: u32) {
self.intr_half_line = value & 0x3ff;
}
fn read_h_video(&self) -> u32 {
(self.horizontal_video_start as u32) << 16 | (self.horizontal_video_end as u32)
}
fn write_h_video(&mut self, value: u32) {
self.horizontal_video_start = (value >> 16 & 0x3ff) as u16;
self.horizontal_video_end = (value & 0x3ff) as u16;
}
fn read_current_vertical_line(&self) -> u16 {
self.current_vertical_line & 0x3ff
}
fn write_current_vertical_line(&mut self, value: u32) {
self.current_vertical_line = (value & 0x3ff) as u16;
// TODO clear interrupt line
}
}
| {
self.intr_half_line
} | identifier_body |
base.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v9.resources.types import geo_target_constant
from google.ads.googleads.v9.services.types import geo_target_constant_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class GeoTargetConstantServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for GeoTargetConstantService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
|
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_geo_target_constant: gapic_v1.method.wrap_method(
self.get_geo_target_constant,
default_timeout=None,
client_info=client_info,
),
self.suggest_geo_target_constants: gapic_v1.method.wrap_method(
self.suggest_geo_target_constants,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def get_geo_target_constant(
self,
) -> typing.Callable[
[geo_target_constant_service.GetGeoTargetConstantRequest],
geo_target_constant.GeoTargetConstant,
]:
raise NotImplementedError
@property
def suggest_geo_target_constants(
self,
) -> typing.Callable[
[geo_target_constant_service.SuggestGeoTargetConstantsRequest],
geo_target_constant_service.SuggestGeoTargetConstantsResponse,
]:
raise NotImplementedError
__all__ = ("GeoTargetConstantServiceTransport",)
| credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES) | conditional_block |
base.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v9.resources.types import geo_target_constant
from google.ads.googleads.v9.services.types import geo_target_constant_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class GeoTargetConstantServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for GeoTargetConstantService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_geo_target_constant: gapic_v1.method.wrap_method(
self.get_geo_target_constant,
default_timeout=None,
client_info=client_info,
),
self.suggest_geo_target_constants: gapic_v1.method.wrap_method(
self.suggest_geo_target_constants,
default_timeout=None,
client_info=client_info,
),
}
def | (self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def get_geo_target_constant(
self,
) -> typing.Callable[
[geo_target_constant_service.GetGeoTargetConstantRequest],
geo_target_constant.GeoTargetConstant,
]:
raise NotImplementedError
@property
def suggest_geo_target_constants(
self,
) -> typing.Callable[
[geo_target_constant_service.SuggestGeoTargetConstantsRequest],
geo_target_constant_service.SuggestGeoTargetConstantsResponse,
]:
raise NotImplementedError
__all__ = ("GeoTargetConstantServiceTransport",)
| close | identifier_name |
base.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v9.resources.types import geo_target_constant
from google.ads.googleads.v9.services.types import geo_target_constant_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class GeoTargetConstantServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for GeoTargetConstantService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
|
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_geo_target_constant: gapic_v1.method.wrap_method(
self.get_geo_target_constant,
default_timeout=None,
client_info=client_info,
),
self.suggest_geo_target_constants: gapic_v1.method.wrap_method(
self.suggest_geo_target_constants,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def get_geo_target_constant(
self,
) -> typing.Callable[
[geo_target_constant_service.GetGeoTargetConstantRequest],
geo_target_constant.GeoTargetConstant,
]:
raise NotImplementedError
@property
def suggest_geo_target_constants(
self,
) -> typing.Callable[
[geo_target_constant_service.SuggestGeoTargetConstantsRequest],
geo_target_constant_service.SuggestGeoTargetConstantsResponse,
]:
raise NotImplementedError
__all__ = ("GeoTargetConstantServiceTransport",)
| """Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info) | identifier_body |
base.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources |
from google.ads.googleads.v9.resources.types import geo_target_constant
from google.ads.googleads.v9.services.types import geo_target_constant_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class GeoTargetConstantServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for GeoTargetConstantService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_geo_target_constant: gapic_v1.method.wrap_method(
self.get_geo_target_constant,
default_timeout=None,
client_info=client_info,
),
self.suggest_geo_target_constants: gapic_v1.method.wrap_method(
self.suggest_geo_target_constants,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def get_geo_target_constant(
self,
) -> typing.Callable[
[geo_target_constant_service.GetGeoTargetConstantRequest],
geo_target_constant.GeoTargetConstant,
]:
raise NotImplementedError
@property
def suggest_geo_target_constants(
self,
) -> typing.Callable[
[geo_target_constant_service.SuggestGeoTargetConstantsRequest],
geo_target_constant_service.SuggestGeoTargetConstantsResponse,
]:
raise NotImplementedError
__all__ = ("GeoTargetConstantServiceTransport",) |
import google.auth # type: ignore
from google.api_core import gapic_v1
from google.auth import credentials as ga_credentials # type: ignore | random_line_split |
uid_filter.rs | use filter;
use filter::Filter;
use walkdir::DirEntry;
use std::os::unix::fs::MetadataExt;
use std::process;
pub struct UidFilter {
uid: u32,
comp_op: filter::CompOp,
}
impl UidFilter {
pub fn new(comp_op: filter::CompOp, uid: u32) -> UidFilter {
UidFilter{comp_op: comp_op, uid: uid}
}
}
impl Filter for UidFilter {
fn test(&self, dir_entry: &DirEntry) -> bool {
match self.comp_op {
filter::CompOp::Equal => self.uid == dir_entry.metadata().unwrap().uid(), | _ => {
eprintln!("Operator {:?} not covered for attribute uid!", self.comp_op);
process::exit(1);
},
}
}
} | filter::CompOp::Unequal => self.uid != dir_entry.metadata().unwrap().uid(), | random_line_split |
uid_filter.rs | use filter;
use filter::Filter;
use walkdir::DirEntry;
use std::os::unix::fs::MetadataExt;
use std::process;
pub struct UidFilter {
uid: u32,
comp_op: filter::CompOp,
}
impl UidFilter {
pub fn new(comp_op: filter::CompOp, uid: u32) -> UidFilter {
UidFilter{comp_op: comp_op, uid: uid}
}
}
impl Filter for UidFilter {
fn | (&self, dir_entry: &DirEntry) -> bool {
match self.comp_op {
filter::CompOp::Equal => self.uid == dir_entry.metadata().unwrap().uid(),
filter::CompOp::Unequal => self.uid != dir_entry.metadata().unwrap().uid(),
_ => {
eprintln!("Operator {:?} not covered for attribute uid!", self.comp_op);
process::exit(1);
},
}
}
}
| test | identifier_name |
tail.rs | #![crate_name = "tail"]
#![feature(collections, core, old_io, old_path, rustc_private, std_misc)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Morten Olsen Lysgaard <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*
*/
extern crate getopts;
use std::char::CharExt;
use std::old_io::{stdin, stdout};
use std::old_io::{BufferedReader, BytesReader};
use std::old_io::fs::File;
use std::old_path::Path;
use std::str::from_utf8;
use getopts::{optopt, optflag, getopts, usage};
use std::collections::VecDeque;
use std::old_io::timer::sleep;
use std::time::duration::Duration;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "tail";
static VERSION: &'static str = "0.0.1";
pub fn uumain(args: Vec<String>) -> i32 {
let mut beginning = false;
let mut lines = true;
let mut byte_count = 0usize;
let mut line_count = 10usize;
let mut sleep_msec = 1000u64;
// handle obsolete -number syntax
let options = match obsolete(args.tail()) {
(args, Some(n)) => { line_count = n; args },
(args, None) => args
};
let args = options;
let possible_options = [
optopt("c", "bytes", "Number of bytes to print", "k"),
optopt("n", "lines", "Number of lines to print", "k"),
optflag("f", "follow", "Print the file as it grows"),
optopt("s", "sleep-interval", "Number or seconds to sleep between polling the file when running with -f", "n"),
optflag("h", "help", "help"),
optflag("V", "version", "version"),
];
let given_options = match getopts(args.as_slice(), &possible_options) {
Ok (m) => { m }
Err(_) => {
println!("{}", usage(NAME, &possible_options));
return 1;
}
};
if given_options.opt_present("h") {
println!("{}", usage(NAME, &possible_options));
return 0;
}
if given_options.opt_present("V") { version(); return 0 }
let follow = given_options.opt_present("f");
if follow {
match given_options.opt_str("s") {
Some(n) => {
let parsed: Option<u64> = n.parse().ok();
match parsed {
Some(m) => { sleep_msec = m * 1000 }
None => {}
}
}
None => {}
};
}
match given_options.opt_str("n") {
Some(n) => {
let mut slice = n.as_slice();
if slice.len() > 0 && slice.char_at(0) == '+' {
beginning = true;
slice = &slice[1..];
}
line_count = match parse_size(slice) {
Some(m) => m,
None => {
show_error!("invalid number of lines ({})", slice);
return 1;
}
};
}
None => match given_options.opt_str("c") {
Some(n) => {
let mut slice = n.as_slice();
if slice.len() > 0 && slice.char_at(0) == '+' {
beginning = true;
slice = &slice[1..];
}
byte_count = match parse_size(slice) {
Some(m) => m,
None => {
show_error!("invalid number of bytes ({})", slice);
return 1;
}
};
lines = false;
}
None => { }
}
};
let files = given_options.free;
if files.is_empty() {
let mut buffer = BufferedReader::new(stdin());
tail(&mut buffer, line_count, byte_count, beginning, lines, follow, sleep_msec);
} else {
let mut multiple = false;
let mut firstime = true;
if files.len() > 1 {
multiple = true;
}
for file in files.iter() {
if multiple {
if !firstime { println!(""); }
println!("==> {} <==", file.as_slice());
}
firstime = false;
let path = Path::new(file.as_slice());
let reader = File::open(&path).unwrap();
let mut buffer = BufferedReader::new(reader);
tail(&mut buffer, line_count, byte_count, beginning, lines, follow, sleep_msec);
}
}
0
}
fn parse_size(mut size_slice: &str) -> Option<usize> {
let mut base =
if size_slice.len() > 0 && size_slice.char_at(size_slice.len() - 1) == 'B' {
size_slice = &size_slice[..size_slice.len() - 1];
1000usize
} else {
1024usize
};
let exponent =
if size_slice.len() > 0 {
let mut has_suffix = true;
let exp = match size_slice.char_at(size_slice.len() - 1) {
'K' => 1usize,
'M' => 2usize,
'G' => 3usize,
'T' => 4usize,
'P' => 5usize,
'E' => 6usize,
'Z' => 7usize,
'Y' => 8usize,
'b' => {
base = 512usize;
1usize
}
_ => {
has_suffix = false;
0usize
}
};
if has_suffix {
size_slice = &size_slice[..size_slice.len() - 1];
}
exp
} else {
0usize
};
let mut multiplier = 1usize;
for _ in range(0usize, exponent) {
multiplier *= base;
}
if base == 1000usize && exponent == 0usize {
// sole B is not a valid suffix
None
} else {
let value = size_slice.parse();
match value {
Ok(v) => Some(multiplier * v),
_ => None
}
}
}
// It searches for an option in the form of -123123
//
// In case is found, the options vector will get rid of that object so that
// getopts works correctly.
fn obsolete(options: &[String]) -> (Vec<String>, Option<usize>) {
let mut options: Vec<String> = options.to_vec();
let mut a = 0;
let b = options.len();
while a < b {
let current = options[a].clone();
let current = current.as_bytes();
if current.len() > 1 && current[0] == '-' as u8 {
let len = current.len();
for pos in range(1, len) {
// Ensure that the argument is only made out of digits
if !(current[pos] as char).is_numeric() { break; }
// If this is the last number
if pos == len - 1 {
options.remove(a);
let number: Option<usize> = from_utf8(&current[1..len]).unwrap().parse().ok();
return (options, Some(number.unwrap()));
}
}
}
a += 1;
};
(options, None)
}
macro_rules! tail_impl (
($kind:ty, $kindfn:ident, $kindprint:ident, $reader:ident, $count:ident, $beginning:ident) => ({
// read through each line and store them in a ringbuffer that always contains
// count lines/chars. When reaching the end of file, output the data in the
// ringbuf.
let mut ringbuf: VecDeque<$kind> = VecDeque::new();
let data = $reader.$kindfn().skip(
if $beginning {
let temp = $count;
$count = ::std::usize::MAX;
temp - 1
} else {
0
}
);
for io_datum in data {
match io_datum {
Ok(datum) => {
if $count <= ringbuf.len() {
ringbuf.pop_front();
}
ringbuf.push_back(datum);
}
Err(err) => panic!(err)
}
}
let mut stdout = stdout();
for datum in ringbuf.iter() {
$kindprint(&mut stdout, datum);
}
})
);
fn tail<T: Reader>(reader: &mut BufferedReader<T>, mut line_count: usize, mut byte_count: usize, beginning: bool, lines: bool, follow: bool, sleep_msec: u64) |
#[inline]
fn print_byte<T: Writer>(stdout: &mut T, ch: &u8) {
if let Err(err) = stdout.write_u8(*ch) {
crash!(1, "{}", err);
}
}
#[inline]
fn print_string<T: Writer>(_: &mut T, s: &String) {
print!("{}", s);
}
fn version () {
println!("{} v{}", NAME, VERSION);
}
| {
if lines {
tail_impl!(String, lines, print_string, reader, line_count, beginning);
} else {
tail_impl!(u8, bytes, print_byte, reader, byte_count, beginning);
}
// if we follow the file, sleep a bit and print the rest if the file has grown.
while follow {
sleep(Duration::milliseconds(sleep_msec as i64));
for io_line in reader.lines() {
match io_line {
Ok(line) => print!("{}", line),
Err(err) => panic!(err)
}
}
}
} | identifier_body |
tail.rs | #![crate_name = "tail"]
#![feature(collections, core, old_io, old_path, rustc_private, std_misc)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Morten Olsen Lysgaard <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*
*/
extern crate getopts;
use std::char::CharExt;
use std::old_io::{stdin, stdout};
use std::old_io::{BufferedReader, BytesReader};
use std::old_io::fs::File;
use std::old_path::Path;
use std::str::from_utf8;
use getopts::{optopt, optflag, getopts, usage};
use std::collections::VecDeque;
use std::old_io::timer::sleep;
use std::time::duration::Duration;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "tail";
static VERSION: &'static str = "0.0.1";
pub fn uumain(args: Vec<String>) -> i32 {
let mut beginning = false;
let mut lines = true;
let mut byte_count = 0usize;
let mut line_count = 10usize;
let mut sleep_msec = 1000u64;
// handle obsolete -number syntax
let options = match obsolete(args.tail()) {
(args, Some(n)) => { line_count = n; args },
(args, None) => args
};
let args = options;
let possible_options = [
optopt("c", "bytes", "Number of bytes to print", "k"),
optopt("n", "lines", "Number of lines to print", "k"),
optflag("f", "follow", "Print the file as it grows"),
optopt("s", "sleep-interval", "Number or seconds to sleep between polling the file when running with -f", "n"),
optflag("h", "help", "help"),
optflag("V", "version", "version"),
];
let given_options = match getopts(args.as_slice(), &possible_options) {
Ok (m) => { m }
Err(_) => {
println!("{}", usage(NAME, &possible_options));
return 1;
}
};
if given_options.opt_present("h") {
println!("{}", usage(NAME, &possible_options));
return 0;
}
if given_options.opt_present("V") { version(); return 0 }
let follow = given_options.opt_present("f");
if follow {
match given_options.opt_str("s") {
Some(n) => {
let parsed: Option<u64> = n.parse().ok();
match parsed {
Some(m) => { sleep_msec = m * 1000 }
None => {}
}
}
None => {}
};
}
match given_options.opt_str("n") {
Some(n) => |
None => match given_options.opt_str("c") {
Some(n) => {
let mut slice = n.as_slice();
if slice.len() > 0 && slice.char_at(0) == '+' {
beginning = true;
slice = &slice[1..];
}
byte_count = match parse_size(slice) {
Some(m) => m,
None => {
show_error!("invalid number of bytes ({})", slice);
return 1;
}
};
lines = false;
}
None => { }
}
};
let files = given_options.free;
if files.is_empty() {
let mut buffer = BufferedReader::new(stdin());
tail(&mut buffer, line_count, byte_count, beginning, lines, follow, sleep_msec);
} else {
let mut multiple = false;
let mut firstime = true;
if files.len() > 1 {
multiple = true;
}
for file in files.iter() {
if multiple {
if !firstime { println!(""); }
println!("==> {} <==", file.as_slice());
}
firstime = false;
let path = Path::new(file.as_slice());
let reader = File::open(&path).unwrap();
let mut buffer = BufferedReader::new(reader);
tail(&mut buffer, line_count, byte_count, beginning, lines, follow, sleep_msec);
}
}
0
}
fn parse_size(mut size_slice: &str) -> Option<usize> {
let mut base =
if size_slice.len() > 0 && size_slice.char_at(size_slice.len() - 1) == 'B' {
size_slice = &size_slice[..size_slice.len() - 1];
1000usize
} else {
1024usize
};
let exponent =
if size_slice.len() > 0 {
let mut has_suffix = true;
let exp = match size_slice.char_at(size_slice.len() - 1) {
'K' => 1usize,
'M' => 2usize,
'G' => 3usize,
'T' => 4usize,
'P' => 5usize,
'E' => 6usize,
'Z' => 7usize,
'Y' => 8usize,
'b' => {
base = 512usize;
1usize
}
_ => {
has_suffix = false;
0usize
}
};
if has_suffix {
size_slice = &size_slice[..size_slice.len() - 1];
}
exp
} else {
0usize
};
let mut multiplier = 1usize;
for _ in range(0usize, exponent) {
multiplier *= base;
}
if base == 1000usize && exponent == 0usize {
// sole B is not a valid suffix
None
} else {
let value = size_slice.parse();
match value {
Ok(v) => Some(multiplier * v),
_ => None
}
}
}
// It searches for an option in the form of -123123
//
// In case is found, the options vector will get rid of that object so that
// getopts works correctly.
fn obsolete(options: &[String]) -> (Vec<String>, Option<usize>) {
let mut options: Vec<String> = options.to_vec();
let mut a = 0;
let b = options.len();
while a < b {
let current = options[a].clone();
let current = current.as_bytes();
if current.len() > 1 && current[0] == '-' as u8 {
let len = current.len();
for pos in range(1, len) {
// Ensure that the argument is only made out of digits
if !(current[pos] as char).is_numeric() { break; }
// If this is the last number
if pos == len - 1 {
options.remove(a);
let number: Option<usize> = from_utf8(&current[1..len]).unwrap().parse().ok();
return (options, Some(number.unwrap()));
}
}
}
a += 1;
};
(options, None)
}
macro_rules! tail_impl (
($kind:ty, $kindfn:ident, $kindprint:ident, $reader:ident, $count:ident, $beginning:ident) => ({
// read through each line and store them in a ringbuffer that always contains
// count lines/chars. When reaching the end of file, output the data in the
// ringbuf.
let mut ringbuf: VecDeque<$kind> = VecDeque::new();
let data = $reader.$kindfn().skip(
if $beginning {
let temp = $count;
$count = ::std::usize::MAX;
temp - 1
} else {
0
}
);
for io_datum in data {
match io_datum {
Ok(datum) => {
if $count <= ringbuf.len() {
ringbuf.pop_front();
}
ringbuf.push_back(datum);
}
Err(err) => panic!(err)
}
}
let mut stdout = stdout();
for datum in ringbuf.iter() {
$kindprint(&mut stdout, datum);
}
})
);
fn tail<T: Reader>(reader: &mut BufferedReader<T>, mut line_count: usize, mut byte_count: usize, beginning: bool, lines: bool, follow: bool, sleep_msec: u64) {
if lines {
tail_impl!(String, lines, print_string, reader, line_count, beginning);
} else {
tail_impl!(u8, bytes, print_byte, reader, byte_count, beginning);
}
// if we follow the file, sleep a bit and print the rest if the file has grown.
while follow {
sleep(Duration::milliseconds(sleep_msec as i64));
for io_line in reader.lines() {
match io_line {
Ok(line) => print!("{}", line),
Err(err) => panic!(err)
}
}
}
}
#[inline]
fn print_byte<T: Writer>(stdout: &mut T, ch: &u8) {
if let Err(err) = stdout.write_u8(*ch) {
crash!(1, "{}", err);
}
}
#[inline]
fn print_string<T: Writer>(_: &mut T, s: &String) {
print!("{}", s);
}
fn version () {
println!("{} v{}", NAME, VERSION);
}
| {
let mut slice = n.as_slice();
if slice.len() > 0 && slice.char_at(0) == '+' {
beginning = true;
slice = &slice[1..];
}
line_count = match parse_size(slice) {
Some(m) => m,
None => {
show_error!("invalid number of lines ({})", slice);
return 1;
}
};
} | conditional_block |
tail.rs | #![crate_name = "tail"]
#![feature(collections, core, old_io, old_path, rustc_private, std_misc)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Morten Olsen Lysgaard <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*
*/
extern crate getopts;
use std::char::CharExt;
use std::old_io::{stdin, stdout};
use std::old_io::{BufferedReader, BytesReader};
use std::old_io::fs::File;
use std::old_path::Path;
use std::str::from_utf8;
use getopts::{optopt, optflag, getopts, usage};
use std::collections::VecDeque;
use std::old_io::timer::sleep;
use std::time::duration::Duration;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "tail";
static VERSION: &'static str = "0.0.1";
pub fn uumain(args: Vec<String>) -> i32 {
let mut beginning = false;
let mut lines = true;
let mut byte_count = 0usize;
let mut line_count = 10usize;
let mut sleep_msec = 1000u64;
// handle obsolete -number syntax
let options = match obsolete(args.tail()) {
(args, Some(n)) => { line_count = n; args },
(args, None) => args
};
let args = options;
let possible_options = [
optopt("c", "bytes", "Number of bytes to print", "k"),
optopt("n", "lines", "Number of lines to print", "k"),
optflag("f", "follow", "Print the file as it grows"),
optopt("s", "sleep-interval", "Number or seconds to sleep between polling the file when running with -f", "n"),
optflag("h", "help", "help"),
optflag("V", "version", "version"),
];
let given_options = match getopts(args.as_slice(), &possible_options) {
Ok (m) => { m }
Err(_) => {
println!("{}", usage(NAME, &possible_options));
return 1;
}
};
if given_options.opt_present("h") {
println!("{}", usage(NAME, &possible_options));
return 0;
}
if given_options.opt_present("V") { version(); return 0 }
let follow = given_options.opt_present("f");
if follow {
match given_options.opt_str("s") {
Some(n) => {
let parsed: Option<u64> = n.parse().ok();
match parsed {
Some(m) => { sleep_msec = m * 1000 }
None => {}
}
}
None => {}
};
}
match given_options.opt_str("n") {
Some(n) => {
let mut slice = n.as_slice();
if slice.len() > 0 && slice.char_at(0) == '+' {
beginning = true;
slice = &slice[1..];
}
line_count = match parse_size(slice) {
Some(m) => m,
None => {
show_error!("invalid number of lines ({})", slice);
return 1;
}
};
}
None => match given_options.opt_str("c") {
Some(n) => {
let mut slice = n.as_slice();
if slice.len() > 0 && slice.char_at(0) == '+' {
beginning = true;
slice = &slice[1..];
}
byte_count = match parse_size(slice) {
Some(m) => m,
None => {
show_error!("invalid number of bytes ({})", slice);
return 1;
}
};
lines = false;
}
None => { }
}
};
let files = given_options.free;
if files.is_empty() {
let mut buffer = BufferedReader::new(stdin());
tail(&mut buffer, line_count, byte_count, beginning, lines, follow, sleep_msec);
} else {
let mut multiple = false;
let mut firstime = true;
if files.len() > 1 {
multiple = true;
}
for file in files.iter() {
if multiple {
if !firstime { println!(""); }
println!("==> {} <==", file.as_slice());
}
firstime = false;
let path = Path::new(file.as_slice());
let reader = File::open(&path).unwrap();
let mut buffer = BufferedReader::new(reader);
tail(&mut buffer, line_count, byte_count, beginning, lines, follow, sleep_msec);
}
}
0
}
fn parse_size(mut size_slice: &str) -> Option<usize> {
let mut base =
if size_slice.len() > 0 && size_slice.char_at(size_slice.len() - 1) == 'B' {
size_slice = &size_slice[..size_slice.len() - 1];
1000usize
} else {
1024usize
};
let exponent =
if size_slice.len() > 0 {
let mut has_suffix = true;
let exp = match size_slice.char_at(size_slice.len() - 1) {
'K' => 1usize,
'M' => 2usize,
'G' => 3usize,
'T' => 4usize,
'P' => 5usize,
'E' => 6usize,
'Z' => 7usize,
'Y' => 8usize,
'b' => {
base = 512usize;
1usize
}
_ => {
has_suffix = false;
0usize
}
};
if has_suffix {
size_slice = &size_slice[..size_slice.len() - 1];
}
exp
} else {
0usize
};
let mut multiplier = 1usize;
for _ in range(0usize, exponent) {
multiplier *= base;
}
if base == 1000usize && exponent == 0usize {
// sole B is not a valid suffix
None
} else {
let value = size_slice.parse();
match value {
Ok(v) => Some(multiplier * v),
_ => None
}
}
}
// It searches for an option in the form of -123123
//
// In case is found, the options vector will get rid of that object so that
// getopts works correctly.
fn obsolete(options: &[String]) -> (Vec<String>, Option<usize>) {
let mut options: Vec<String> = options.to_vec();
let mut a = 0;
let b = options.len();
while a < b {
let current = options[a].clone();
let current = current.as_bytes();
if current.len() > 1 && current[0] == '-' as u8 {
let len = current.len();
for pos in range(1, len) {
// Ensure that the argument is only made out of digits
if !(current[pos] as char).is_numeric() { break; }
// If this is the last number
if pos == len - 1 {
options.remove(a);
let number: Option<usize> = from_utf8(&current[1..len]).unwrap().parse().ok();
return (options, Some(number.unwrap()));
}
}
}
a += 1;
};
(options, None)
}
macro_rules! tail_impl (
($kind:ty, $kindfn:ident, $kindprint:ident, $reader:ident, $count:ident, $beginning:ident) => ({
// read through each line and store them in a ringbuffer that always contains
// count lines/chars. When reaching the end of file, output the data in the
// ringbuf.
let mut ringbuf: VecDeque<$kind> = VecDeque::new();
let data = $reader.$kindfn().skip(
if $beginning {
let temp = $count;
$count = ::std::usize::MAX;
temp - 1
} else {
0
}
);
for io_datum in data {
match io_datum {
Ok(datum) => {
if $count <= ringbuf.len() {
ringbuf.pop_front();
}
ringbuf.push_back(datum);
}
Err(err) => panic!(err)
}
}
let mut stdout = stdout();
for datum in ringbuf.iter() {
$kindprint(&mut stdout, datum);
}
})
);
fn tail<T: Reader>(reader: &mut BufferedReader<T>, mut line_count: usize, mut byte_count: usize, beginning: bool, lines: bool, follow: bool, sleep_msec: u64) {
if lines {
tail_impl!(String, lines, print_string, reader, line_count, beginning); |
// if we follow the file, sleep a bit and print the rest if the file has grown.
while follow {
sleep(Duration::milliseconds(sleep_msec as i64));
for io_line in reader.lines() {
match io_line {
Ok(line) => print!("{}", line),
Err(err) => panic!(err)
}
}
}
}
#[inline]
fn print_byte<T: Writer>(stdout: &mut T, ch: &u8) {
if let Err(err) = stdout.write_u8(*ch) {
crash!(1, "{}", err);
}
}
#[inline]
fn print_string<T: Writer>(_: &mut T, s: &String) {
print!("{}", s);
}
fn version () {
println!("{} v{}", NAME, VERSION);
} | } else {
tail_impl!(u8, bytes, print_byte, reader, byte_count, beginning);
} | random_line_split |
tail.rs | #![crate_name = "tail"]
#![feature(collections, core, old_io, old_path, rustc_private, std_misc)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Morten Olsen Lysgaard <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*
*/
extern crate getopts;
use std::char::CharExt;
use std::old_io::{stdin, stdout};
use std::old_io::{BufferedReader, BytesReader};
use std::old_io::fs::File;
use std::old_path::Path;
use std::str::from_utf8;
use getopts::{optopt, optflag, getopts, usage};
use std::collections::VecDeque;
use std::old_io::timer::sleep;
use std::time::duration::Duration;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "tail";
static VERSION: &'static str = "0.0.1";
pub fn uumain(args: Vec<String>) -> i32 {
let mut beginning = false;
let mut lines = true;
let mut byte_count = 0usize;
let mut line_count = 10usize;
let mut sleep_msec = 1000u64;
// handle obsolete -number syntax
let options = match obsolete(args.tail()) {
(args, Some(n)) => { line_count = n; args },
(args, None) => args
};
let args = options;
let possible_options = [
optopt("c", "bytes", "Number of bytes to print", "k"),
optopt("n", "lines", "Number of lines to print", "k"),
optflag("f", "follow", "Print the file as it grows"),
optopt("s", "sleep-interval", "Number or seconds to sleep between polling the file when running with -f", "n"),
optflag("h", "help", "help"),
optflag("V", "version", "version"),
];
let given_options = match getopts(args.as_slice(), &possible_options) {
Ok (m) => { m }
Err(_) => {
println!("{}", usage(NAME, &possible_options));
return 1;
}
};
if given_options.opt_present("h") {
println!("{}", usage(NAME, &possible_options));
return 0;
}
if given_options.opt_present("V") { version(); return 0 }
let follow = given_options.opt_present("f");
if follow {
match given_options.opt_str("s") {
Some(n) => {
let parsed: Option<u64> = n.parse().ok();
match parsed {
Some(m) => { sleep_msec = m * 1000 }
None => {}
}
}
None => {}
};
}
match given_options.opt_str("n") {
Some(n) => {
let mut slice = n.as_slice();
if slice.len() > 0 && slice.char_at(0) == '+' {
beginning = true;
slice = &slice[1..];
}
line_count = match parse_size(slice) {
Some(m) => m,
None => {
show_error!("invalid number of lines ({})", slice);
return 1;
}
};
}
None => match given_options.opt_str("c") {
Some(n) => {
let mut slice = n.as_slice();
if slice.len() > 0 && slice.char_at(0) == '+' {
beginning = true;
slice = &slice[1..];
}
byte_count = match parse_size(slice) {
Some(m) => m,
None => {
show_error!("invalid number of bytes ({})", slice);
return 1;
}
};
lines = false;
}
None => { }
}
};
let files = given_options.free;
if files.is_empty() {
let mut buffer = BufferedReader::new(stdin());
tail(&mut buffer, line_count, byte_count, beginning, lines, follow, sleep_msec);
} else {
let mut multiple = false;
let mut firstime = true;
if files.len() > 1 {
multiple = true;
}
for file in files.iter() {
if multiple {
if !firstime { println!(""); }
println!("==> {} <==", file.as_slice());
}
firstime = false;
let path = Path::new(file.as_slice());
let reader = File::open(&path).unwrap();
let mut buffer = BufferedReader::new(reader);
tail(&mut buffer, line_count, byte_count, beginning, lines, follow, sleep_msec);
}
}
0
}
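// Sketch of the size grammar accepted below (derived from this code, not from
// GNU docs): "10" -> 10, "10K" -> 10 * 1024, "10KB" -> 10 * 1000, and
// "10b" -> 10 * 512; a sole "B" suffix is rejected as invalid.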
fn parse_size(mut size_slice: &str) -> Option<usize> {
let mut base =
if size_slice.len() > 0 && size_slice.char_at(size_slice.len() - 1) == 'B' {
size_slice = &size_slice[..size_slice.len() - 1];
1000usize
} else {
1024usize
};
let exponent =
if size_slice.len() > 0 {
let mut has_suffix = true;
let exp = match size_slice.char_at(size_slice.len() - 1) {
'K' => 1usize,
'M' => 2usize,
'G' => 3usize,
'T' => 4usize,
'P' => 5usize,
'E' => 6usize,
'Z' => 7usize,
'Y' => 8usize,
'b' => {
base = 512usize;
1usize
}
_ => {
has_suffix = false;
0usize
}
};
if has_suffix {
size_slice = &size_slice[..size_slice.len() - 1];
}
exp
} else {
0usize
};
let mut multiplier = 1usize;
for _ in range(0usize, exponent) {
multiplier *= base;
}
if base == 1000usize && exponent == 0usize {
// sole B is not a valid suffix
None
} else {
let value = size_slice.parse();
match value {
Ok(v) => Some(multiplier * v),
_ => None
}
}
}
// It searches for an option in the form of -123123
//
// In case is found, the options vector will get rid of that object so that
// getopts works correctly.
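// For example, `tail -20 file.txt` has "-20" consumed here and returned as
// Some(20), while non-numeric flags such as "-f" are left for getopts.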
fn obsolete(options: &[String]) -> (Vec<String>, Option<usize>) {
let mut options: Vec<String> = options.to_vec();
let mut a = 0;
let b = options.len();
while a < b {
let current = options[a].clone();
let current = current.as_bytes();
if current.len() > 1 && current[0] == '-' as u8 {
let len = current.len();
for pos in range(1, len) {
// Ensure that the argument is only made out of digits
if !(current[pos] as char).is_numeric() { break; }
// If this is the last number
if pos == len - 1 {
options.remove(a);
                    let number: Option<usize> = from_utf8(&current[1..len]).unwrap().parse().ok();
return (options, Some(number.unwrap()));
}
}
}
a += 1;
};
(options, None)
}
macro_rules! tail_impl (
($kind:ty, $kindfn:ident, $kindprint:ident, $reader:ident, $count:ident, $beginning:ident) => ({
// read through each line and store them in a ringbuffer that always contains
// count lines/chars. When reaching the end of file, output the data in the
// ringbuf.
let mut ringbuf: VecDeque<$kind> = VecDeque::new();
let data = $reader.$kindfn().skip(
if $beginning {
let temp = $count;
$count = ::std::usize::MAX;
temp - 1
} else {
0
}
);
for io_datum in data {
match io_datum {
Ok(datum) => {
if $count <= ringbuf.len() {
ringbuf.pop_front();
}
ringbuf.push_back(datum);
}
Err(err) => panic!(err)
}
}
let mut stdout = stdout();
for datum in ringbuf.iter() {
$kindprint(&mut stdout, datum);
}
})
);
fn | <T: Reader>(reader: &mut BufferedReader<T>, mut line_count: usize, mut byte_count: usize, beginning: bool, lines: bool, follow: bool, sleep_msec: u64) {
if lines {
tail_impl!(String, lines, print_string, reader, line_count, beginning);
} else {
tail_impl!(u8, bytes, print_byte, reader, byte_count, beginning);
}
// if we follow the file, sleep a bit and print the rest if the file has grown.
while follow {
sleep(Duration::milliseconds(sleep_msec as i64));
for io_line in reader.lines() {
match io_line {
Ok(line) => print!("{}", line),
Err(err) => panic!(err)
}
}
}
}
#[inline]
fn print_byte<T: Writer>(stdout: &mut T, ch: &u8) {
if let Err(err) = stdout.write_u8(*ch) {
crash!(1, "{}", err);
}
}
#[inline]
fn print_string<T: Writer>(_: &mut T, s: &String) {
print!("{}", s);
}
fn version () {
println!("{} v{}", NAME, VERSION);
}
| tail | identifier_name |
title.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Inject, Injectable} from '@angular/core';
import {getDOM} from '../dom/dom_adapter';
import {DOCUMENT} from '../dom/dom_tokens'; |
/**
* A service that can be used to get and set the title of a current HTML document.
*
* Since an Angular application can't be bootstrapped on the entire HTML document (`<html>` tag)
* it is not possible to bind to the `text` property of the `HTMLTitleElement` elements
* (representing the `<title>` tag). Instead, this service can be used to set and get the current
* title value.
*
* @experimental
*/
@Injectable()
export class Title {
constructor(@Inject(DOCUMENT) private _doc: any) {}
/**
* Get the title of the current HTML document.
* @returns {string}
*/
getTitle(): string { return getDOM().getTitle(this._doc); }
/**
* Set the title of the current HTML document.
* @param newTitle
*/
setTitle(newTitle: string) { getDOM().setTitle(this._doc, newTitle); }
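  // Illustrative usage from a component (assumes the service is injected):
  //   constructor(private titleService: Title) {}
  //   this.titleService.setTitle('My page');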
} | random_line_split |
|
title.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Inject, Injectable} from '@angular/core';
import {getDOM} from '../dom/dom_adapter';
import {DOCUMENT} from '../dom/dom_tokens';
/**
* A service that can be used to get and set the title of a current HTML document.
*
* Since an Angular application can't be bootstrapped on the entire HTML document (`<html>` tag)
* it is not possible to bind to the `text` property of the `HTMLTitleElement` elements
* (representing the `<title>` tag). Instead, this service can be used to set and get the current
* title value.
*
* @experimental
*/
@Injectable()
export class Title {
constructor(@Inject(DOCUMENT) private _doc: any) {}
/**
* Get the title of the current HTML document.
* @returns {string}
*/
getTitle(): string { return getDOM().getTitle(this._doc); }
/**
* Set the title of the current HTML document.
* @param newTitle
*/
| (newTitle: string) { getDOM().setTitle(this._doc, newTitle); }
}
| setTitle | identifier_name |
title.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Inject, Injectable} from '@angular/core';
import {getDOM} from '../dom/dom_adapter';
import {DOCUMENT} from '../dom/dom_tokens';
/**
* A service that can be used to get and set the title of a current HTML document.
*
* Since an Angular application can't be bootstrapped on the entire HTML document (`<html>` tag)
* it is not possible to bind to the `text` property of the `HTMLTitleElement` elements
* (representing the `<title>` tag). Instead, this service can be used to set and get the current
* title value.
*
* @experimental
*/
@Injectable()
export class Title {
constructor(@Inject(DOCUMENT) private _doc: any) {}
/**
* Get the title of the current HTML document.
* @returns {string}
*/
getTitle(): string |
/**
* Set the title of the current HTML document.
* @param newTitle
*/
setTitle(newTitle: string) { getDOM().setTitle(this._doc, newTitle); }
}
| { return getDOM().getTitle(this._doc); } | identifier_body |
hydrogen.py | # -*- coding: utf-8 -*-
"""
hydrogen
~~~~~~~~
Hydrogen is an extremely lightweight workflow enhancement tool for Python
web applications, providing bower/npm-like functionality for both pip and
bower packages.
:author: David Gidwani <[email protected]>
:license: BSD, see LICENSE for details
"""
import atexit
from collections import defaultdict
from functools import update_wrapper
import json
import os
import re
import shutil
import sys
import tempfile
import yaml
import zipfile
import click
import envoy
from pathlib import Path, PurePath
from pathspec import GitIgnorePattern, PathSpec
from pip._vendor import pkg_resources
import requests
import rfc6266
import semver
__version__ = "0.0.1-alpha"
prog_name = "hydrogen"
app_dir = click.get_app_dir(prog_name)
github_api_uri = "https://api.github.com"
debug = True
# borrowed from werkzeug._compat
PY2 = sys.version_info[0] == 2
if PY2:
from urlparse import urlparse
text_type = unicode # noqa: Undefined in py3
else:
from urllib.parse import urlparse
text_type = str
class InvalidRequirementSpecError(Exception):
pass
class InvalidPackageError(Exception):
pass
class PackageNotFoundError(Exception):
pass
class VersionNotFoundError(Exception):
pass
def get_installed_pypackages():
return {p.project_name.lower(): p for p in pkg_resources.working_set}
def success(message, **kwargs):
kwargs["fg"] = kwargs.get("fg", "green")
click.secho(message, **kwargs)
def warning(message, **kwargs):
kwargs["fg"] = kwargs.get("fg", "red")
click.secho(u"warning: {}".format(message), **kwargs)
def error(message, level="error", exit_code=1, **kwargs):
kwargs["fg"] = kwargs.get("fg", "red")
    click.secho(u"{}: {}".format(level, message), **kwargs)
sys.exit(exit_code)
def fatal(message, **kwargs):
error(message, level="fatal", **kwargs)
def secure_filename(filename):
r"""Borrowed from :mod:`werkzeug.utils`, under the BSD 3-clause license.
Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
to :func:`os.path.join`. The filename returned is an ASCII only string
for maximum portability.
On windows systems the function also makes sure that the file is not
named after one of the special device files.
>>> secure_filename("My cool movie.mov")
'My_cool_movie.mov'
>>> secure_filename("../../../etc/passwd")
'etc_passwd'
>>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
'i_contain_cool_umlauts.txt'
The function might return an empty filename. It's your responsibility
to ensure that the filename is unique and that you generate random
filename if the function returned an empty one.
:param filename: the filename to secure
"""
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4',
'LPT1', 'LPT2', 'LPT3', 'PRN', 'NUL')
if isinstance(filename, text_type):
from unicodedata import normalize
filename = normalize('NFKD', filename).encode('ascii', 'ignore')
if not PY2:
filename = filename.decode('ascii')
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, ' ')
filename = str(_filename_ascii_strip_re.sub('', '_'.join(
filename.split()))).strip('._')
# on nt a couple of special files are present in each folder. We
# have to ensure that the target file is not such a filename. In
# this case we prepend an underline
if os.name == 'nt' and filename and \
filename.split('.')[0].upper() in _windows_device_files:
filename = '_' + filename
return filename
def get(url, session=None, silent=not debug, **kwargs):
"""Retrieve a given URL and log response.
:param session: a :class:`requests.Session` object.
:param silent: if **True**, response status and URL will not be printed.
"""
session = session or requests
kwargs["verify"] = kwargs.get("verify", True)
r = session.get(url, **kwargs)
if not silent:
status_code = click.style(
str(r.status_code),
fg="green" if r.status_code in (200, 304) else "red")
click.echo(status_code + " " + url)
if r.status_code == 404:
raise PackageNotFoundError
return r
def download_file(url, dest=None, chunk_size=1024, replace="ask",
label="Downloading {dest_basename} ({size:.2f}MB)",
expected_extension=None):
"""Download a file from a given URL and display progress.
:param dest: If the destination exists and is a directory, the filename
will be guessed from the Content-Disposition header. If the destination
is an existing file, the user will either be prompted to overwrite, or
the file will be replaced (depending on the value of **replace**). If
the destination does not exist, it will be used as the filename.
:param int chunk_size: bytes read in at a time.
:param replace: If `False`, an existing destination file will not be
overwritten.
:param label: a string which is formatted and displayed as the progress bar
label. Variables provided include *dest_basename*, *dest*, and *size*.
:param expected_extension: if set, the filename will be sanitized to ensure
it has the given extension. The extension should not start with a dot
(`.`).
"""
dest = Path(dest or url.split("/")[-1])
response = get(url, stream=True)
if (dest.exists()
and dest.is_dir()
and "Content-Disposition" in response.headers):
content_disposition = rfc6266.parse_requests_response(response)
if expected_extension is not None:
filename = content_disposition.filename_sanitized(
expected_extension)
filename = secure_filename(filename)
dest = dest / filename
if dest.exists() and not dest.is_dir():
if (replace is False
or replace == "ask"
and not click.confirm("Replace {}?".format(dest))):
return str(dest)
size = int(response.headers.get("content-length", 0))
label = label.format(dest=dest, dest_basename=dest.name,
size=size/1024.0/1024)
with click.open_file(str(dest), "wb") as f:
content_iter = response.iter_content(chunk_size=chunk_size)
        with click.progressbar(content_iter, length=size // chunk_size,
label=label) as bar:
for chunk in bar:
if chunk:
f.write(chunk)
f.flush()
return str(dest)
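# Illustrative call (URL and destination are placeholders, not real endpoints):
#   download_file("https://example.com/pkg.zip", dest="downloads",
#                 replace=False, expected_extension="zip")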
def get_dir_from_zipfile(zip_file, fallback=None):
"""Return the name of the root folder in a zip file.
:param zip_file: a :class:`zipfile.ZipFile` instance.
:param fallback: if `None`, the name of the zip file is used. This is
returned if the zip file contains more than one top-level directory,
or none at all.
"""
fallback = fallback or zip_file.filename
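    # e.g. an archive whose entries all live under "pkg-1.0/" yields "pkg-1.0/".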
directories = [name for name in zip_file.namelist() if name.endswith("/")
and len(PurePath(name).parts) == 1]
    # per the docstring, fall back when there are several top-level
    # directories *or none at all* (the old expression raised IndexError)
    return directories[0] if len(directories) == 1 else fallback
def mkdtemp(suffix="", prefix=__name__ + "_", dir=None, cleanup=True,
on_cleanup_error=None):
"""Create a temporary directory and register a handler to cleanup on exit.
:param suffix: suffix of the temporary directory, defaults to empty.
:param prefix: prefix of the temporary directory, defaults to `__name__`
and an underscore.
:param dir: if provided, the directory will be created in `dir` rather than
the system default temp directory.
:param cleanup: if `True`, an atexit handler will be registered to remove
the temp directory on exit.
:param on_cleanup_error: a callback which is called if the atexit handler
encounters an exception. It is passed three parameters: *function*,
*path*, and *excinfo*. For more information, see the :mod:`atexit`
documentation.
"""
path = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
if cleanup:
if on_cleanup_error is None:
def on_cleanup_error(function, path, excinfo):
click.secho("warning: failed to remove file or directory: {}\n"
"please delete it manually.".format(path),
fg="red")
atexit.register(shutil.rmtree, path=path, onerror=on_cleanup_error)
return path
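# Sketch of typical use: `workdir = mkdtemp()` yields a throwaway directory
# that the registered atexit handler deletes on exit (unless cleanup=False).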
class Requirement(object):
"""Represents a single package requirement.
.. note::
This class overrides `__hash__` in order to ensure that package
names remain unique when in a set.
.. todo::
Extend :class:`pkg_resources.Requirement` for Python requirements.
"""
# TODO: support multiple version specs (e.g. >=1.0,<=2.0)
spec_regex = r"(.+?)\s*(?:([<>~=]?=)\s*(.+?))?$"
def __init__(self, package, version):
"""Construct a new requirement.
:param package: the package name.
:param version: a semver compatible version specification.
"""
self.package = package
self.version = version
if self.version and not re.match(r"[<=>~]", version[:2]):
self.version = "=={}".format(self.version)
@classmethod
def coerce(cls, string):
"""Create a :class:`Requirement` object from a given package spec."""
match = re.match(cls.spec_regex, string)
if not match:
raise InvalidRequirementSpecError("could not parse requirement")
package = match.group(1)
if all(match.group(2, 3)):
version = "".join(match.group(2, 3))
else:
version = None
return cls(package, version)
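    # Illustrative: coerce("flask>=0.10") -> package "flask", version ">=0.10";
    # coerce("flask") leaves version as None until resolved.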
def load_installed_version(self):
installed_packages = get_installed_pypackages()
if self.package in installed_packages:
self.version = "=={}".format(
installed_packages[self.package].version)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
other.package == self.package)
def __hash__(self):
return hash(self.package)
def __str__(self):
return "".join([self.package, self.version or ""])
def __repr__(self):
return "<Requirement(package={package}, version='{version}')>".format(
package=self.package, version=self.version)
class Requirements(set):
"""Represents a set of requirements."""
def __init__(self, filename=None):
self.filename = None
if filename:
self.load(filename)
def add(self, elem, replace=False):
"""Add a requirement.
:param elem: a string or :class:`Requirement` instance.
:param replace: if `True`, packages in the set with the same name will
be removed first.
"""
if isinstance(elem, text_type):
elem = Requirement.coerce(elem)
if replace and elem in self:
self.remove(elem)
super(Requirements, self).add(elem)
def load(self, requirements_file=None):
"""Load or reload requirements from a requirements.txt file.
:param requirements_file: if not given, the filename used from
initialization will be read again.
"""
if requirements_file is None:
requirements_file = self.filename
if requirements_file is None:
raise ValueError("no filename provided")
elif isinstance(requirements_file, text_type):
requirements_file = Path(requirements_file)
self.clear()
with requirements_file.open() as f:
self.loads(f.read())
if isinstance(requirements_file, (text_type, Path)):
self.filename = requirements_file
def loads(self, requirements_text):
lines = re.findall(Requirement.spec_regex,
requirements_text,
re.MULTILINE)
for line in lines:
self.add(Requirement(line[0], "".join(line[1:])))
def remove(self, elem):
"""Remove a requirement.
:param elem: a string or :class:`Requirement` instance.
"""
if isinstance(elem, text_type):
for requirement in self:
if requirement.package == elem:
return super(Requirements, self).remove(requirement)
return super(Requirements, self).remove(elem)
def __str__(self):
return "\n".join([str(x) for x in self])
def __repr__(self):
return "<Requirements({})>".format(self.filename.name or "")
class NamedRequirements(Requirements):
def __init__(self, name, filename=None):
self.name = name
super(NamedRequirements, self).__init__(filename=filename)
def __repr__(self):
return "<NamedRequirements({}{})>".format(
self.name,
", filename='{}'".format(self.filename.name) if self.filename
else "")
class GroupedRequirements(defaultdict):
default_groups = ["all", "dev", "bower", "bower-dev"]
default_pip_files = {
"all": "requirements.txt",
"dev": "dev-requirements.txt"
}
def __init__(self, groups=None):
super(GroupedRequirements, self).__init__(NamedRequirements)
self.groups = groups or self.default_groups
self.filename = None
self.create_default_groups()
def clear(self):
super(GroupedRequirements, self).clear()
self.create_default_groups()
def create_default_groups(self):
for group in self.groups:
group = group.replace(" ", "_").lower()
self[group] = NamedRequirements(group)
def load_pip_requirements(self, files_map=None, freeze=True):
if files_map is None:
files_map = self.default_pip_files
for group, requirements_txt in files_map.items():
path = Path(requirements_txt)
if not path.exists() and group.lower() == "all" and freeze:
cmd = envoy.run("pip freeze")
self[group].loads(cmd.std_out)
elif path.exists():
self[group].load(path)
def load(self, filename, create_if_missing=True):
filename = Path(filename)
if not filename.exists() and create_if_missing:
self.load_pip_requirements()
with filename.open("w") as f:
f.write(yaml.dump(self.serialized, default_flow_style=False,
encoding=None))
self.filename = filename
return self.save(filename)
with filename.open() as f:
            for group, requirements in yaml.safe_load(f.read()).items():
for requirement in requirements:
self[group].add(Requirement.coerce(requirement))
self.filename = filename
def save(self, filename=None):
filename = Path(filename) if filename is not None else self.filename
with filename.open("w") as f:
f.write(self.yaml)
@property
def serialized(self):
to_ret = {}
for group, requirements in self.items():
to_ret[group] = [str(requirement) for requirement in requirements]
return to_ret
@property
def yaml(self):
return yaml.dump(self.serialized, default_flow_style=False,
encoding=None)
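    # With default_flow_style=False the emitted YAML is block-style, e.g.
    # (package pins are illustrative):
    #   all:
    #   - Flask==0.10.1
    #   bower:
    #   - jquery>=2.1.1
    #   dev: []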
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
else:
ret = self[key] = self.default_factory(name=key)
return ret
class Bower(object):
bower_base_uri = "https://bower.herokuapp.com"
@classmethod
def get_package_url(cls, package, session=None, silent=False):
response = get("{}/packages/{}".format(cls.bower_base_uri, package))
return response.json().get("url", None)
@classmethod
def clean_semver(cls, version_spec):
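        # e.g. clean_semver(">= v1.2.3") -> ">=1.2.3" (illustrative input)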
        # `flags` must be passed by keyword: the fourth positional argument
        # of re.sub is `count`, not `flags`
        return re.sub(r"([<>=~])\s+?v?", "\\1", version_spec,
                      flags=re.IGNORECASE)
class Hydrogen(object):
def __init__(self, assets_dir=None, requirements_file="requirements.yml"):
self.assets_dir = assets_dir or Path(".") / "assets"
self.requirements = GroupedRequirements()
self.requirements.load(requirements_file)
self.temp_dir = mkdtemp()
def | (self, zip_file, dest, expected_version=None):
bower_json = None
root = None
deps_installed = []
for info in zip_file.infolist():
if PurePath(info.filename).name == "bower.json":
with zip_file.open(info) as f:
bower_json = json.load(f)
root = str(PurePath(info.filename).parent)
break
        if bower_json is None:
            # no bower.json found anywhere in the archive
            raise InvalidPackageError
        version = bower_json["version"]
if expected_version is not None:
expected_version = Bower.clean_semver(expected_version)
if not semver.match(version, expected_version):
click.secho("error: versions do not match ({} =/= {})".format(
version, expected_version))
raise InvalidPackageError
if "dependencies" in bower_json:
for package, version in bower_json["dependencies"].items():
url = Bower.get_package_url(package)
deps_installed.extend(self.get_bower_package(
url, dest=dest, version=version))
        # "ignore" is optional in bower.json, so default to an empty list
        ignore_patterns = [GitIgnorePattern(ig)
                           for ig in bower_json.get("ignore", [])]
path_spec = PathSpec(ignore_patterns)
namelist = [path for path in zip_file.namelist()
if PurePath(path).parts[0] == root]
ignored = list(path_spec.match_files(namelist))
for path in namelist:
dest_path = PurePath(
bower_json["name"],
*PurePath(path).parts[1:])
if path in ignored:
continue
            # also skip entries nested under an ignored directory (the old
            # loop shadowed `path` and iterated a non-iterable PurePath)
            if any(str(parent) + "/" in ignored
                   for parent in PurePath(path).parents):
                continue
            if path.endswith("/"):
                if list(path_spec.match_files([str(dest_path)])):
                    ignored.append(path)
elif not (dest / dest_path).is_dir():
(dest / dest_path).mkdir(parents=True)
else:
target_path = dest / dest_path.parent / dest_path.name
source = zip_file.open(path)
target = target_path.open("wb")
with source, target:
shutil.copyfileobj(source, target)
deps_installed.append((bower_json["name"], bower_json["version"]))
return deps_installed
def get_bower_package(self, url, dest=None, version=None,
process_deps=True):
dest = dest or Path(".") / "assets"
parsed_url = urlparse(url)
if parsed_url.scheme == "git" or parsed_url.path.endswith(".git"):
if parsed_url.netloc == "github.com":
user, repo = parsed_url.path[1:-4].split("/")
response = get(github_api_uri +
"/repos/{}/{}/tags".format(user, repo))
tags = response.json()
target = None
if not len(tags):
click.secho("fatal: no tags exist for {}/{}".format(
user, repo), fg="red")
raise InvalidPackageError
if version is None:
target = tags[0]
else:
for tag in tags:
if semver.match(tag["name"],
Bower.clean_semver(version)):
target = tag
break
if not target:
                    click.secho(
                        "fatal: failed to find matching tag for "
                        "{user}/{repo} {version}".format(
                            user=user, repo=repo, version=version),
fg="red")
raise VersionNotFoundError
click.secho("installing {}/{}#{}".format(
                    user, repo, target["name"]), fg="green")
return self.get_bower_package(
url=target["zipball_url"],
dest=dest,
version=version)
raise NotImplementedError
click.echo("git clone {url}".format(url=url))
cmd = envoy.run('git clone {url} "{dest}"'.format(
url=url, dest=dest))
elif parsed_url.scheme in ("http", "https"):
zip_dest = download_file(url, dest=self.temp_dir,
label="{dest_basename}",
expected_extension="zip")
with zipfile.ZipFile(zip_dest, "r") as pkg:
return self.extract_bower_zipfile(pkg, dest,
expected_version=version)
# pkg.extractall(str(dest))
else:
click.secho("protocol currently unsupported :(")
sys.exit(1)
def install_bower(self, package, save=True, save_dev=False):
"""Installs a bower package.
:param save: if `True`, pins the package to the Hydrogen requirements
YAML file.
:param save_dev: if `True`, pins the package as a development
dependency to the Hydrogen requirements YAML file.
        :returns: a list of tuples, containing all installed package names
and versions, including any dependencies.
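        Illustrative: ``install_bower("jquery>=2.1.1")`` resolves the registry
        URL and pins "jquery>=2.1.1" under the "bower" group (package name
        assumed).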
"""
requirement = Requirement.coerce(package)
url = Bower.get_package_url(requirement.package)
installed = []
for name, _ in self.get_bower_package(url):
installed.append(Requirement(name, requirement.version))
for requirement in installed:
if save:
self.requirements["bower"].add(requirement, replace=True)
if save_dev:
self.requirements["bower-dev"].add(requirement, replace=True)
success("installed {}".format(str(requirement)))
if save or save_dev:
self.requirements.save()
return installed
def install_pip(self, package, save=True, save_dev=False):
"""Installs a pip package.
:param save: if `True`, pins the package to the Hydrogen requirements
YAML file.
:param save_dev: if `True`, pins the package as a development
dependency to the Hydrogen requirements YAML file.
        :returns: a **single** :class:`Requirement` object, representing
the installed version of the given package.
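        Illustrative: ``install_pip("requests")`` shells out to
        ``pip install requests`` and pins the resolved version (package name
        assumed).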
"""
requirement = Requirement.coerce(package)
click.echo("pip install " + requirement.package)
cmd = envoy.run("pip install {}".format(str(requirement)))
if cmd.status_code == 0:
installed_packages = get_installed_pypackages()
package = installed_packages[requirement.package]
requirement.version = "=={}".format(package.version)
if save:
self.requirements["all"].add(requirement)
if save_dev:
self.requirements["dev"].add(requirement)
if save or save_dev:
self.requirements.save()
return requirement
else:
fatal(cmd.std_err)
def groups_option(f):
new_func = click.option("-g", "--groups",
help="Comma-separated list of requirement groups "
"to include.")(f)
return update_wrapper(new_func, f)
@click.group()
@click.version_option(prog_name=prog_name)
@click.pass_context
def main(ctx):
which = "where" if sys.platform == "win32" else "which"
if envoy.run(which + " git").status_code != 0:
click.secho("fatal: git not found in PATH", fg="red")
sys.exit(1)
ctx.obj = Hydrogen()
@main.command()
@click.pass_obj
@click.option("output_yaml", "--yaml", "-y", is_flag=True,
help="Show requirements in YAML format.")
@click.option("--resolve", "-r", is_flag=True,
help="Resolve version numbers for ambiguous packages.")
@groups_option
def freeze(h, output_yaml, resolve, groups):
"""Output installed packages."""
if not groups:
groups = filter(lambda group: not group.lower().startswith("bower"),
h.requirements.keys())
else:
groups = [text_type.strip(group) for group in groups.split(",")]
if output_yaml:
for requirements in h.requirements.values():
for requirement in requirements:
if resolve and not requirement.version:
requirement.load_installed_version()
click.echo(h.requirements.yaml)
else:
for group in groups:
if not h.requirements[group]:
continue
click.echo("# {}".format(group))
for requirement in h.requirements[group]:
if resolve and not requirement.version:
requirement.load_installed_version()
click.echo(str(requirement))
@main.command()
@click.pass_obj
@click.option("--pip/--bower", default=True)
@groups_option
@click.option("--save", is_flag=True)
@click.option("--save-dev", is_flag=True)
@click.argument("packages", nargs=-1)
def install(h, pip, groups, save, save_dev, packages):
"""Install a pip or bower package."""
if groups:
groups = [text_type.strip(group) for group in groups.split(",")]
else:
groups = h.requirements.keys()
if not packages:
for group in groups:
if group not in h.requirements:
warning("{} not in requirements".format(group))
continue
            # use a local name so we don't shadow the `install` command
            installer = (h.install_bower if group.startswith("bower")
                         else h.install_pip)
            for requirement in h.requirements[group]:
                installer(str(requirement), save=False, save_dev=False)
if pip:
for package in packages:
h.install_pip(package, save=save, save_dev=save_dev)
else:
for package in packages:
h.install_bower(package, save=save, save_dev=save_dev)
if __name__ == "__main__":
main()
| extract_bower_zipfile | identifier_name |
hydrogen.py | # -*- coding: utf-8 -*-
"""
hydrogen
~~~~~~~~
Hydrogen is an extremely lightweight workflow enhancement tool for Python
web applications, providing bower/npm-like functionality for both pip and
bower packages.
:author: David Gidwani <[email protected]>
:license: BSD, see LICENSE for details
"""
import atexit
from collections import defaultdict
from functools import update_wrapper
import json
import os
import re
import shutil
import sys
import tempfile
import yaml
import zipfile
import click
import envoy
from pathlib import Path, PurePath
from pathspec import GitIgnorePattern, PathSpec
from pip._vendor import pkg_resources
import requests
import rfc6266
import semver
__version__ = "0.0.1-alpha"
prog_name = "hydrogen"
app_dir = click.get_app_dir(prog_name)
github_api_uri = "https://api.github.com"
debug = True
# borrowed from werkzeug._compat
PY2 = sys.version_info[0] == 2
if PY2:
from urlparse import urlparse
text_type = unicode # noqa: Undefined in py3
else:
from urllib.parse import urlparse
text_type = str
class InvalidRequirementSpecError(Exception):
pass
class InvalidPackageError(Exception):
pass
class PackageNotFoundError(Exception):
pass
class VersionNotFoundError(Exception):
pass
def get_installed_pypackages():
return {p.project_name.lower(): p for p in pkg_resources.working_set}
def success(message, **kwargs):
kwargs["fg"] = kwargs.get("fg", "green")
click.secho(message, **kwargs)
def warning(message, **kwargs):
kwargs["fg"] = kwargs.get("fg", "red")
click.secho(u"warning: {}".format(message), **kwargs)
def error(message, level="error", exit_code=1, **kwargs):
kwargs["fg"] = kwargs.get("fg", "red")
    click.secho(u"{}: {}".format(level, message), **kwargs)
sys.exit(exit_code)
def fatal(message, **kwargs):
error(message, level="fatal", **kwargs)
def secure_filename(filename):
r"""Borrowed from :mod:`werkzeug.utils`, under the BSD 3-clause license.
Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
to :func:`os.path.join`. The filename returned is an ASCII only string
for maximum portability.
On windows systems the function also makes sure that the file is not
named after one of the special device files.
>>> secure_filename("My cool movie.mov")
'My_cool_movie.mov'
>>> secure_filename("../../../etc/passwd")
'etc_passwd'
>>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
'i_contain_cool_umlauts.txt'
The function might return an empty filename. It's your responsibility
to ensure that the filename is unique and that you generate random
filename if the function returned an empty one.
:param filename: the filename to secure
"""
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4',
'LPT1', 'LPT2', 'LPT3', 'PRN', 'NUL')
if isinstance(filename, text_type):
from unicodedata import normalize
filename = normalize('NFKD', filename).encode('ascii', 'ignore')
if not PY2:
filename = filename.decode('ascii')
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, ' ')
filename = str(_filename_ascii_strip_re.sub('', '_'.join(
filename.split()))).strip('._')
# on nt a couple of special files are present in each folder. We
# have to ensure that the target file is not such a filename. In
# this case we prepend an underline
if os.name == 'nt' and filename and \
filename.split('.')[0].upper() in _windows_device_files:
filename = '_' + filename
return filename
def get(url, session=None, silent=not debug, **kwargs):
"""Retrieve a given URL and log response.
:param session: a :class:`requests.Session` object.
:param silent: if **True**, response status and URL will not be printed.
"""
session = session or requests
kwargs["verify"] = kwargs.get("verify", True)
r = session.get(url, **kwargs)
if not silent:
status_code = click.style(
str(r.status_code),
fg="green" if r.status_code in (200, 304) else "red")
click.echo(status_code + " " + url)
if r.status_code == 404:
raise PackageNotFoundError
return r
def download_file(url, dest=None, chunk_size=1024, replace="ask",
label="Downloading {dest_basename} ({size:.2f}MB)",
expected_extension=None):
"""Download a file from a given URL and display progress.
:param dest: If the destination exists and is a directory, the filename
will be guessed from the Content-Disposition header. If the destination
is an existing file, the user will either be prompted to overwrite, or
the file will be replaced (depending on the value of **replace**). If
the destination does not exist, it will be used as the filename.
:param int chunk_size: bytes read in at a time.
:param replace: If `False`, an existing destination file will not be
overwritten.
:param label: a string which is formatted and displayed as the progress bar
label. Variables provided include *dest_basename*, *dest*, and *size*.
:param expected_extension: if set, the filename will be sanitized to ensure
it has the given extension. The extension should not start with a dot
(`.`).
"""
dest = Path(dest or url.split("/")[-1])
response = get(url, stream=True)
if (dest.exists()
and dest.is_dir()
and "Content-Disposition" in response.headers):
content_disposition = rfc6266.parse_requests_response(response)
if expected_extension is not None:
filename = content_disposition.filename_sanitized(
expected_extension)
filename = secure_filename(filename)
dest = dest / filename
if dest.exists() and not dest.is_dir():
if (replace is False
or replace == "ask"
and not click.confirm("Replace {}?".format(dest))):
return str(dest)
size = int(response.headers.get("content-length", 0))
label = label.format(dest=dest, dest_basename=dest.name,
size=size/1024.0/1024)
with click.open_file(str(dest), "wb") as f:
content_iter = response.iter_content(chunk_size=chunk_size)
        with click.progressbar(content_iter, length=size // chunk_size,
label=label) as bar:
for chunk in bar:
if chunk:
f.write(chunk)
f.flush()
return str(dest)
def get_dir_from_zipfile(zip_file, fallback=None):
"""Return the name of the root folder in a zip file.
:param zip_file: a :class:`zipfile.ZipFile` instance.
:param fallback: if `None`, the name of the zip file is used. This is
returned if the zip file contains more than one top-level directory,
or none at all.
"""
fallback = fallback or zip_file.filename
directories = [name for name in zip_file.namelist() if name.endswith("/")
and len(PurePath(name).parts) == 1]
    # per the docstring, fall back when there are several top-level
    # directories *or none at all* (the old expression raised IndexError)
    return directories[0] if len(directories) == 1 else fallback
def mkdtemp(suffix="", prefix=__name__ + "_", dir=None, cleanup=True,
on_cleanup_error=None):
"""Create a temporary directory and register a handler to cleanup on exit.
:param suffix: suffix of the temporary directory, defaults to empty.
:param prefix: prefix of the temporary directory, defaults to `__name__`
and an underscore.
:param dir: if provided, the directory will be created in `dir` rather than
the system default temp directory.
:param cleanup: if `True`, an atexit handler will be registered to remove
the temp directory on exit.
:param on_cleanup_error: a callback which is called if the atexit handler
encounters an exception. It is passed three parameters: *function*,
*path*, and *excinfo*. For more information, see the :mod:`atexit`
documentation.
"""
path = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
if cleanup:
if on_cleanup_error is None:
def on_cleanup_error(function, path, excinfo):
click.secho("warning: failed to remove file or directory: {}\n"
"please delete it manually.".format(path),
fg="red")
atexit.register(shutil.rmtree, path=path, onerror=on_cleanup_error)
return path
class Requirement(object):
"""Represents a single package requirement.
.. note::
This class overrides `__hash__` in order to ensure that package
names remain unique when in a set.
.. todo::
Extend :class:`pkg_resources.Requirement` for Python requirements.
"""
# TODO: support multiple version specs (e.g. >=1.0,<=2.0)
spec_regex = r"(.+?)\s*(?:([<>~=]?=)\s*(.+?))?$"
def __init__(self, package, version):
"""Construct a new requirement.
:param package: the package name.
:param version: a semver compatible version specification.
"""
self.package = package
self.version = version
if self.version and not re.match(r"[<=>~]", version[:2]):
self.version = "=={}".format(self.version)
@classmethod
def coerce(cls, string):
"""Create a :class:`Requirement` object from a given package spec."""
match = re.match(cls.spec_regex, string)
if not match:
raise InvalidRequirementSpecError("could not parse requirement")
package = match.group(1)
if all(match.group(2, 3)):
version = "".join(match.group(2, 3))
else:
version = None
return cls(package, version)
def load_installed_version(self):
installed_packages = get_installed_pypackages()
if self.package in installed_packages:
self.version = "=={}".format(
installed_packages[self.package].version)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
other.package == self.package)
def __hash__(self):
return hash(self.package)
def __str__(self):
return "".join([self.package, self.version or ""])
def __repr__(self):
return "<Requirement(package={package}, version='{version}')>".format(
package=self.package, version=self.version)
class Requirements(set):
"""Represents a set of requirements."""
def __init__(self, filename=None):
self.filename = None
if filename:
self.load(filename)
def add(self, elem, replace=False):
"""Add a requirement.
:param elem: a string or :class:`Requirement` instance.
:param replace: if `True`, packages in the set with the same name will
be removed first.
"""
if isinstance(elem, text_type):
elem = Requirement.coerce(elem)
if replace and elem in self:
self.remove(elem)
super(Requirements, self).add(elem)
def load(self, requirements_file=None):
"""Load or reload requirements from a requirements.txt file.
:param requirements_file: if not given, the filename used from
initialization will be read again.
"""
if requirements_file is None:
requirements_file = self.filename
if requirements_file is None:
raise ValueError("no filename provided")
elif isinstance(requirements_file, text_type):
requirements_file = Path(requirements_file)
self.clear()
with requirements_file.open() as f:
self.loads(f.read())
if isinstance(requirements_file, (text_type, Path)):
self.filename = requirements_file
def loads(self, requirements_text):
lines = re.findall(Requirement.spec_regex,
requirements_text,
re.MULTILINE)
for line in lines:
self.add(Requirement(line[0], "".join(line[1:])))
def remove(self, elem):
"""Remove a requirement.
:param elem: a string or :class:`Requirement` instance.
"""
if isinstance(elem, text_type):
for requirement in self:
if requirement.package == elem:
return super(Requirements, self).remove(requirement)
return super(Requirements, self).remove(elem)
def __str__(self):
return "\n".join([str(x) for x in self])
def __repr__(self):
return "<Requirements({})>".format(self.filename.name or "")
class NamedRequirements(Requirements):
def __init__(self, name, filename=None):
self.name = name
super(NamedRequirements, self).__init__(filename=filename)
def __repr__(self):
return "<NamedRequirements({}{})>".format(
self.name,
", filename='{}'".format(self.filename.name) if self.filename
else "")
class GroupedRequirements(defaultdict):
default_groups = ["all", "dev", "bower", "bower-dev"]
default_pip_files = {
"all": "requirements.txt",
"dev": "dev-requirements.txt"
}
def __init__(self, groups=None):
super(GroupedRequirements, self).__init__(NamedRequirements)
self.groups = groups or self.default_groups
self.filename = None
self.create_default_groups()
def clear(self):
super(GroupedRequirements, self).clear()
self.create_default_groups()
def create_default_groups(self):
for group in self.groups:
group = group.replace(" ", "_").lower()
self[group] = NamedRequirements(group)
def load_pip_requirements(self, files_map=None, freeze=True):
if files_map is None:
files_map = self.default_pip_files
for group, requirements_txt in files_map.items():
path = Path(requirements_txt)
if not path.exists() and group.lower() == "all" and freeze:
cmd = envoy.run("pip freeze")
self[group].loads(cmd.std_out)
elif path.exists():
self[group].load(path)
def load(self, filename, create_if_missing=True):
filename = Path(filename)
if not filename.exists() and create_if_missing:
self.load_pip_requirements()
with filename.open("w") as f:
f.write(yaml.dump(self.serialized, default_flow_style=False,
encoding=None))
self.filename = filename
return self.save(filename)
with filename.open() as f:
            for group, requirements in yaml.safe_load(f.read()).items():
for requirement in requirements:
self[group].add(Requirement.coerce(requirement))
self.filename = filename
def save(self, filename=None):
filename = Path(filename) if filename is not None else self.filename
with filename.open("w") as f:
f.write(self.yaml)
@property
def serialized(self):
to_ret = {}
for group, requirements in self.items():
to_ret[group] = [str(requirement) for requirement in requirements]
return to_ret
@property
def yaml(self):
return yaml.dump(self.serialized, default_flow_style=False,
encoding=None)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
else:
ret = self[key] = self.default_factory(name=key)
return ret
class Bower(object):
bower_base_uri = "https://bower.herokuapp.com"
@classmethod
def get_package_url(cls, package, session=None, silent=False):
response = get("{}/packages/{}".format(cls.bower_base_uri, package))
return response.json().get("url", None)
@classmethod
def clean_semver(cls, version_spec):
        # `flags` must be passed by keyword: the fourth positional argument
        # of re.sub is `count`, not `flags`
        return re.sub(r"([<>=~])\s+?v?", "\\1", version_spec,
                      flags=re.IGNORECASE)
class Hydrogen(object):
def __init__(self, assets_dir=None, requirements_file="requirements.yml"):
self.assets_dir = assets_dir or Path(".") / "assets"
self.requirements = GroupedRequirements()
self.requirements.load(requirements_file)
self.temp_dir = mkdtemp()
def extract_bower_zipfile(self, zip_file, dest, expected_version=None):
bower_json = None
root = None
deps_installed = []
for info in zip_file.infolist():
if PurePath(info.filename).name == "bower.json":
with zip_file.open(info) as f:
bower_json = json.load(f)
root = str(PurePath(info.filename).parent)
break
        if bower_json is None:
            # no bower.json found anywhere in the archive
            raise InvalidPackageError
        version = bower_json["version"]
if expected_version is not None:
expected_version = Bower.clean_semver(expected_version)
if not semver.match(version, expected_version):
click.secho("error: versions do not match ({} =/= {})".format(
version, expected_version))
raise InvalidPackageError
if "dependencies" in bower_json:
for package, version in bower_json["dependencies"].items():
url = Bower.get_package_url(package)
deps_installed.extend(self.get_bower_package(
url, dest=dest, version=version))
        # "ignore" is optional in bower.json, so default to an empty list
        ignore_patterns = [GitIgnorePattern(ig)
                           for ig in bower_json.get("ignore", [])]
path_spec = PathSpec(ignore_patterns)
namelist = [path for path in zip_file.namelist()
if PurePath(path).parts[0] == root]
ignored = list(path_spec.match_files(namelist))
for path in namelist:
dest_path = PurePath(
bower_json["name"],
*PurePath(path).parts[1:])
if path in ignored:
continue
            # also skip entries nested under an ignored directory (the old
            # loop shadowed `path` and iterated a non-iterable PurePath)
            if any(str(parent) + "/" in ignored
                   for parent in PurePath(path).parents):
                continue
            if path.endswith("/"):
                if list(path_spec.match_files([str(dest_path)])):
                    ignored.append(path)
elif not (dest / dest_path).is_dir():
(dest / dest_path).mkdir(parents=True)
else:
target_path = dest / dest_path.parent / dest_path.name
source = zip_file.open(path)
target = target_path.open("wb")
with source, target:
shutil.copyfileobj(source, target)
deps_installed.append((bower_json["name"], bower_json["version"]))
return deps_installed
def get_bower_package(self, url, dest=None, version=None,
process_deps=True):
dest = dest or Path(".") / "assets"
parsed_url = urlparse(url)
if parsed_url.scheme == "git" or parsed_url.path.endswith(".git"):
if parsed_url.netloc == "github.com":
user, repo = parsed_url.path[1:-4].split("/")
response = get(github_api_uri +
"/repos/{}/{}/tags".format(user, repo))
tags = response.json()
target = None
if not len(tags):
click.secho("fatal: no tags exist for {}/{}".format(
user, repo), fg="red")
raise InvalidPackageError
if version is None:
target = tags[0]
else:
for tag in tags:
if semver.match(tag["name"],
Bower.clean_semver(version)):
target = tag
break
if not target:
                    click.secho(
                        "fatal: failed to find matching tag for "
                        "{user}/{repo} {version}".format(
                            user=user, repo=repo, version=version),
fg="red")
raise VersionNotFoundError
click.secho("installing {}/{}#{}".format(
                    user, repo, target["name"]), fg="green")
return self.get_bower_package(
url=target["zipball_url"],
dest=dest,
version=version)
raise NotImplementedError
click.echo("git clone {url}".format(url=url))
cmd = envoy.run('git clone {url} "{dest}"'.format(
url=url, dest=dest))
elif parsed_url.scheme in ("http", "https"):
zip_dest = download_file(url, dest=self.temp_dir,
label="{dest_basename}",
expected_extension="zip")
with zipfile.ZipFile(zip_dest, "r") as pkg:
return self.extract_bower_zipfile(pkg, dest,
expected_version=version)
# pkg.extractall(str(dest))
else:
click.secho("protocol currently unsupported :(")
sys.exit(1)
def install_bower(self, package, save=True, save_dev=False):
"""Installs a bower package.
:param save: if `True`, pins the package to the Hydrogen requirements
YAML file.
:param save_dev: if `True`, pins the package as a development
dependency to the Hydrogen requirements YAML file.
        :returns: a list of tuples, containing all installed package names
and versions, including any dependencies.
"""
requirement = Requirement.coerce(package)
url = Bower.get_package_url(requirement.package)
installed = []
for name, _ in self.get_bower_package(url):
installed.append(Requirement(name, requirement.version))
for requirement in installed:
if save:
self.requirements["bower"].add(requirement, replace=True)
if save_dev:
self.requirements["bower-dev"].add(requirement, replace=True)
success("installed {}".format(str(requirement)))
if save or save_dev:
self.requirements.save()
return installed
def install_pip(self, package, save=True, save_dev=False):
"""Installs a pip package.
:param save: if `True`, pins the package to the Hydrogen requirements
YAML file.
:param save_dev: if `True`, pins the package as a development
dependency to the Hydrogen requirements YAML file.
        :returns: a **single** :class:`Requirement` object, representing
the installed version of the given package.
"""
requirement = Requirement.coerce(package)
click.echo("pip install " + requirement.package)
cmd = envoy.run("pip install {}".format(str(requirement)))
if cmd.status_code == 0:
installed_packages = get_installed_pypackages()
package = installed_packages[requirement.package]
requirement.version = "=={}".format(package.version)
if save:
self.requirements["all"].add(requirement)
if save_dev:
self.requirements["dev"].add(requirement)
if save or save_dev:
self.requirements.save()
return requirement
else:
fatal(cmd.std_err)
def groups_option(f):
new_func = click.option("-g", "--groups",
help="Comma-separated list of requirement groups "
"to include.")(f)
return update_wrapper(new_func, f)
@click.group()
@click.version_option(prog_name=prog_name)
@click.pass_context
def main(ctx):
which = "where" if sys.platform == "win32" else "which"
if envoy.run(which + " git").status_code != 0:
click.secho("fatal: git not found in PATH", fg="red")
sys.exit(1)
ctx.obj = Hydrogen()
@main.command()
@click.pass_obj
@click.option("output_yaml", "--yaml", "-y", is_flag=True,
help="Show requirements in YAML format.")
@click.option("--resolve", "-r", is_flag=True,
help="Resolve version numbers for ambiguous packages.")
@groups_option
def freeze(h, output_yaml, resolve, groups):
"""Output installed packages."""
if not groups:
groups = filter(lambda group: not group.lower().startswith("bower"),
h.requirements.keys())
else:
groups = [text_type.strip(group) for group in groups.split(",")]
if output_yaml:
for requirements in h.requirements.values():
for requirement in requirements:
if resolve and not requirement.version:
requirement.load_installed_version()
click.echo(h.requirements.yaml)
else:
for group in groups:
if not h.requirements[group]:
continue
click.echo("# {}".format(group))
for requirement in h.requirements[group]:
if resolve and not requirement.version:
requirement.load_installed_version()
click.echo(str(requirement))
@main.command()
@click.pass_obj
@click.option("--pip/--bower", default=True)
@groups_option
@click.option("--save", is_flag=True)
@click.option("--save-dev", is_flag=True)
@click.argument("packages", nargs=-1)
def install(h, pip, groups, save, save_dev, packages):
"""Install a pip or bower package."""
if groups:
groups = [text_type.strip(group) for group in groups.split(",")]
else:
groups = h.requirements.keys() |
if not packages:
for group in groups:
if group not in h.requirements:
warning("{} not in requirements".format(group))
continue
            # use a local name so we don't shadow the `install` command
            installer = (h.install_bower if group.startswith("bower")
                         else h.install_pip)
            for requirement in h.requirements[group]:
                installer(str(requirement), save=False, save_dev=False)
if pip:
for package in packages:
h.install_pip(package, save=save, save_dev=save_dev)
else:
for package in packages:
h.install_bower(package, save=save, save_dev=save_dev)
if __name__ == "__main__":
main() | random_line_split |
|
hydrogen.py | # -*- coding: utf-8 -*-
"""
hydrogen
~~~~~~~~
Hydrogen is an extremely lightweight workflow enhancement tool for Python
web applications, providing bower/npm-like functionality for both pip and
bower packages.
:author: David Gidwani <[email protected]>
:license: BSD, see LICENSE for details
"""
import atexit
from collections import defaultdict
from functools import update_wrapper
import json
import os
import re
import shutil
import sys
import tempfile
import yaml
import zipfile
import click
import envoy
from pathlib import Path, PurePath
from pathspec import GitIgnorePattern, PathSpec
from pip._vendor import pkg_resources
import requests
import rfc6266
import semver
__version__ = "0.0.1-alpha"
prog_name = "hydrogen"
app_dir = click.get_app_dir(prog_name)
github_api_uri = "https://api.github.com"
debug = True
# borrowed from werkzeug._compat
PY2 = sys.version_info[0] == 2
if PY2:
from urlparse import urlparse
text_type = unicode # noqa: Undefined in py3
else:
from urllib.parse import urlparse
text_type = str
class InvalidRequirementSpecError(Exception):
pass
class InvalidPackageError(Exception):
pass
class PackageNotFoundError(Exception):
pass
class VersionNotFoundError(Exception):
pass
def get_installed_pypackages():
return {p.project_name.lower(): p for p in pkg_resources.working_set}
def success(message, **kwargs):
kwargs["fg"] = kwargs.get("fg", "green")
click.secho(message, **kwargs)
def warning(message, **kwargs):
kwargs["fg"] = kwargs.get("fg", "red")
click.secho(u"warning: {}".format(message), **kwargs)
def error(message, level="error", exit_code=1, **kwargs):
kwargs["fg"] = kwargs.get("fg", "red")
    click.secho(u"{}: {}".format(level, message), **kwargs)
sys.exit(exit_code)
def fatal(message, **kwargs):
error(message, level="fatal", **kwargs)
def secure_filename(filename):
r"""Borrowed from :mod:`werkzeug.utils`, under the BSD 3-clause license.
Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
to :func:`os.path.join`. The filename returned is an ASCII only string
for maximum portability.
On windows systems the function also makes sure that the file is not
named after one of the special device files.
>>> secure_filename("My cool movie.mov")
'My_cool_movie.mov'
>>> secure_filename("../../../etc/passwd")
'etc_passwd'
>>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
'i_contain_cool_umlauts.txt'
The function might return an empty filename. It's your responsibility
to ensure that the filename is unique and that you generate random
filename if the function returned an empty one.
:param filename: the filename to secure
"""
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4',
'LPT1', 'LPT2', 'LPT3', 'PRN', 'NUL')
if isinstance(filename, text_type):
from unicodedata import normalize
filename = normalize('NFKD', filename).encode('ascii', 'ignore')
if not PY2:
filename = filename.decode('ascii')
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, ' ')
filename = str(_filename_ascii_strip_re.sub('', '_'.join(
filename.split()))).strip('._')
# on nt a couple of special files are present in each folder. We
# have to ensure that the target file is not such a filename. In
# this case we prepend an underline
if os.name == 'nt' and filename and \
filename.split('.')[0].upper() in _windows_device_files:
filename = '_' + filename
return filename
def get(url, session=None, silent=not debug, **kwargs):
"""Retrieve a given URL and log response.
:param session: a :class:`requests.Session` object.
:param silent: if **True**, response status and URL will not be printed.
"""
session = session or requests
kwargs["verify"] = kwargs.get("verify", True)
r = session.get(url, **kwargs)
if not silent:
status_code = click.style(
str(r.status_code),
fg="green" if r.status_code in (200, 304) else "red")
click.echo(status_code + " " + url)
if r.status_code == 404:
raise PackageNotFoundError
return r
def download_file(url, dest=None, chunk_size=1024, replace="ask",
label="Downloading {dest_basename} ({size:.2f}MB)",
expected_extension=None):
"""Download a file from a given URL and display progress.
:param dest: If the destination exists and is a directory, the filename
will be guessed from the Content-Disposition header. If the destination
is an existing file, the user will either be prompted to overwrite, or
the file will be replaced (depending on the value of **replace**). If
the destination does not exist, it will be used as the filename.
:param int chunk_size: bytes read in at a time.
:param replace: If `False`, an existing destination file will not be
overwritten.
:param label: a string which is formatted and displayed as the progress bar
label. Variables provided include *dest_basename*, *dest*, and *size*.
:param expected_extension: if set, the filename will be sanitized to ensure
it has the given extension. The extension should not start with a dot
(`.`).
"""
dest = Path(dest or url.split("/")[-1])
response = get(url, stream=True)
if (dest.exists()
and dest.is_dir()
and "Content-Disposition" in response.headers):
content_disposition = rfc6266.parse_requests_response(response)
if expected_extension is not None:
filename = content_disposition.filename_sanitized(
expected_extension)
filename = secure_filename(filename)
dest = dest / filename
if dest.exists() and not dest.is_dir():
if (replace is False
or replace == "ask"
and not click.confirm("Replace {}?".format(dest))):
return str(dest)
size = int(response.headers.get("content-length", 0))
label = label.format(dest=dest, dest_basename=dest.name,
size=size/1024.0/1024)
with click.open_file(str(dest), "wb") as f:
content_iter = response.iter_content(chunk_size=chunk_size)
        with click.progressbar(content_iter, length=size // chunk_size,
label=label) as bar:
for chunk in bar:
if chunk:
f.write(chunk)
f.flush()
return str(dest)
def get_dir_from_zipfile(zip_file, fallback=None):
"""Return the name of the root folder in a zip file.
:param zip_file: a :class:`zipfile.ZipFile` instance.
:param fallback: if `None`, the name of the zip file is used. This is
returned if the zip file contains more than one top-level directory,
or none at all.
"""
fallback = fallback or zip_file.filename
directories = [name for name in zip_file.namelist() if name.endswith("/")
and len(PurePath(name).parts) == 1]
return fallback if len(directories) > 1 else directories[0]
def mkdtemp(suffix="", prefix=__name__ + "_", dir=None, cleanup=True,
on_cleanup_error=None):
"""Create a temporary directory and register a handler to cleanup on exit.
:param suffix: suffix of the temporary directory, defaults to empty.
:param prefix: prefix of the temporary directory, defaults to `__name__`
and an underscore.
:param dir: if provided, the directory will be created in `dir` rather than
the system default temp directory.
:param cleanup: if `True`, an atexit handler will be registered to remove
the temp directory on exit.
:param on_cleanup_error: a callback which is called if the atexit handler
encounters an exception. It is passed three parameters: *function*,
*path*, and *excinfo*. For more information, see the :mod:`atexit`
documentation.
"""
path = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
if cleanup:
if on_cleanup_error is None:
def on_cleanup_error(function, path, excinfo):
click.secho("warning: failed to remove file or directory: {}\n"
"please delete it manually.".format(path),
fg="red")
atexit.register(shutil.rmtree, path=path, onerror=on_cleanup_error)
return path
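# Minimal usage sketch: the returned directory is removed automatically at
# interpreter exit because cleanup defaults to True.
def _example_mkdtemp_usage():
    return mkdtemp(suffix="_assets")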
class Requirement(object):
"""Represents a single package requirement.
.. note::
This class overrides `__hash__` in order to ensure that package
names remain unique when in a set.
.. todo::
Extend :class:`pkg_resources.Requirement` for Python requirements.
"""
# TODO: support multiple version specs (e.g. >=1.0,<=2.0)
spec_regex = r"(.+?)\s*(?:([<>~=]?=)\s*(.+?))?$"
def __init__(self, package, version):
"""Construct a new requirement.
:param package: the package name.
:param version: a semver compatible version specification.
"""
self.package = package
self.version = version
if self.version and not re.match(r"[<=>~]", version[:2]):
self.version = "=={}".format(self.version)
@classmethod
def coerce(cls, string):
"""Create a :class:`Requirement` object from a given package spec."""
match = re.match(cls.spec_regex, string)
if not match:
raise InvalidRequirementSpecError("could not parse requirement")
package = match.group(1)
if all(match.group(2, 3)):
version = "".join(match.group(2, 3))
else:
version = None
return cls(package, version)
def load_installed_version(self):
installed_packages = get_installed_pypackages()
if self.package in installed_packages:
self.version = "=={}".format(
installed_packages[self.package].version)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
other.package == self.package)
def __hash__(self):
return hash(self.package)
def __str__(self):
return "".join([self.package, self.version or ""])
def __repr__(self):
return "<Requirement(package={package}, version='{version}')>".format(
package=self.package, version=self.version)
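# Parsing sketch (the example specs are assumptions): coerce() splits a spec
# into name and version, and a bare version is normalized to an "==" pin.
def _example_requirement_usage():
    req = Requirement.coerce("flask>=0.10")  # package "flask", version ">=0.10"
    pinned = Requirement("flask", "0.10")    # version becomes "==0.10"
    return str(req), str(pinned)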
class Requirements(set):
"""Represents a set of requirements."""
def __init__(self, filename=None):
self.filename = None
if filename:
self.load(filename)
def add(self, elem, replace=False):
"""Add a requirement.
:param elem: a string or :class:`Requirement` instance.
:param replace: if `True`, packages in the set with the same name will
be removed first.
"""
if isinstance(elem, text_type):
elem = Requirement.coerce(elem)
if replace and elem in self:
self.remove(elem)
super(Requirements, self).add(elem)
def load(self, requirements_file=None):
"""Load or reload requirements from a requirements.txt file.
:param requirements_file: if not given, the filename used from
initialization will be read again.
"""
if requirements_file is None:
requirements_file = self.filename
if requirements_file is None:
raise ValueError("no filename provided")
elif isinstance(requirements_file, text_type):
requirements_file = Path(requirements_file)
self.clear()
with requirements_file.open() as f:
self.loads(f.read())
if isinstance(requirements_file, (text_type, Path)):
self.filename = requirements_file
def loads(self, requirements_text):
lines = re.findall(Requirement.spec_regex,
requirements_text,
re.MULTILINE)
for line in lines:
self.add(Requirement(line[0], "".join(line[1:])))
def remove(self, elem):
"""Remove a requirement.
:param elem: a string or :class:`Requirement` instance.
"""
if isinstance(elem, text_type):
for requirement in self:
if requirement.package == elem:
return super(Requirements, self).remove(requirement)
return super(Requirements, self).remove(elem)
def __str__(self):
return "\n".join([str(x) for x in self])
def __repr__(self):
return "<Requirements({})>".format(self.filename.name or "")
class NamedRequirements(Requirements):
def __init__(self, name, filename=None):
self.name = name
super(NamedRequirements, self).__init__(filename=filename)
def __repr__(self):
return "<NamedRequirements({}{})>".format(
self.name,
", filename='{}'".format(self.filename.name) if self.filename
else "")
class GroupedRequirements(defaultdict):
default_groups = ["all", "dev", "bower", "bower-dev"]
default_pip_files = {
"all": "requirements.txt",
"dev": "dev-requirements.txt"
}
def __init__(self, groups=None):
super(GroupedRequirements, self).__init__(NamedRequirements)
self.groups = groups or self.default_groups
self.filename = None
self.create_default_groups()
def clear(self):
super(GroupedRequirements, self).clear()
self.create_default_groups()
def create_default_groups(self):
for group in self.groups:
group = group.replace(" ", "_").lower()
self[group] = NamedRequirements(group)
def load_pip_requirements(self, files_map=None, freeze=True):
if files_map is None:
files_map = self.default_pip_files
for group, requirements_txt in files_map.items():
path = Path(requirements_txt)
if not path.exists() and group.lower() == "all" and freeze:
cmd = envoy.run("pip freeze")
self[group].loads(cmd.std_out)
elif path.exists():
self[group].load(path)
def load(self, filename, create_if_missing=True):
filename = Path(filename)
if not filename.exists() and create_if_missing:
self.load_pip_requirements()
with filename.open("w") as f:
f.write(yaml.dump(self.serialized, default_flow_style=False,
encoding=None))
self.filename = filename
return self.save(filename)
with filename.open() as f:
            for group, requirements in yaml.safe_load(f.read()).items():
for requirement in requirements:
self[group].add(Requirement.coerce(requirement))
self.filename = filename
def save(self, filename=None):
filename = Path(filename) if filename is not None else self.filename
with filename.open("w") as f:
f.write(self.yaml)
@property
def serialized(self):
|
@property
def yaml(self):
return yaml.dump(self.serialized, default_flow_style=False,
encoding=None)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
else:
ret = self[key] = self.default_factory(name=key)
return ret
class Bower(object):
bower_base_uri = "https://bower.herokuapp.com"
@classmethod
def get_package_url(cls, package, session=None, silent=False):
response = get("{}/packages/{}".format(cls.bower_base_uri, package))
return response.json().get("url", None)
@classmethod
def clean_semver(cls, version_spec):
return re.sub(r"([<>=~])\s+?v?", "\\1", version_spec, re.IGNORECASE)
class Hydrogen(object):
def __init__(self, assets_dir=None, requirements_file="requirements.yml"):
self.assets_dir = assets_dir or Path(".") / "assets"
self.requirements = GroupedRequirements()
self.requirements.load(requirements_file)
self.temp_dir = mkdtemp()
def extract_bower_zipfile(self, zip_file, dest, expected_version=None):
bower_json = None
root = None
deps_installed = []
for info in zip_file.infolist():
if PurePath(info.filename).name == "bower.json":
with zip_file.open(info) as f:
bower_json = json.load(f)
root = str(PurePath(info.filename).parent)
break
version = bower_json["version"]
if expected_version is not None:
expected_version = Bower.clean_semver(expected_version)
if not semver.match(version, expected_version):
click.secho("error: versions do not match ({} =/= {})".format(
version, expected_version))
raise InvalidPackageError
if "dependencies" in bower_json:
for package, version in bower_json["dependencies"].items():
url = Bower.get_package_url(package)
deps_installed.extend(self.get_bower_package(
url, dest=dest, version=version))
ignore_patterns = [GitIgnorePattern(ig) for ig in bower_json["ignore"]]
path_spec = PathSpec(ignore_patterns)
namelist = [path for path in zip_file.namelist()
if PurePath(path).parts[0] == root]
ignored = list(path_spec.match_files(namelist))
for path in namelist:
dest_path = PurePath(
bower_json["name"],
*PurePath(path).parts[1:])
if path in ignored:
continue
for path in ignored:
            for parent in PurePath(path).parents:
if parent in ignored:
continue
if path.endswith("/"):
if list(path_spec.match_files([str(dest_path)])):
ignored.append(PurePath(path))
elif not (dest / dest_path).is_dir():
(dest / dest_path).mkdir(parents=True)
else:
target_path = dest / dest_path.parent / dest_path.name
source = zip_file.open(path)
target = target_path.open("wb")
with source, target:
shutil.copyfileobj(source, target)
deps_installed.append((bower_json["name"], bower_json["version"]))
return deps_installed
def get_bower_package(self, url, dest=None, version=None,
process_deps=True):
dest = dest or Path(".") / "assets"
parsed_url = urlparse(url)
if parsed_url.scheme == "git" or parsed_url.path.endswith(".git"):
if parsed_url.netloc == "github.com":
user, repo = parsed_url.path[1:-4].split("/")
response = get(github_api_uri +
"/repos/{}/{}/tags".format(user, repo))
tags = response.json()
target = None
if not len(tags):
click.secho("fatal: no tags exist for {}/{}".format(
user, repo), fg="red")
raise InvalidPackageError
if version is None:
target = tags[0]
else:
for tag in tags:
if semver.match(tag["name"],
Bower.clean_semver(version)):
target = tag
break
if not target:
click.secho(
"fatal: failed to find matching tag for "
"{user}/{repo} {version}".format(user, repo, version),
fg="red")
raise VersionNotFoundError
click.secho("installing {}/{}#{}".format(
user, repo, tags[0]["name"]), fg="green")
return self.get_bower_package(
url=target["zipball_url"],
dest=dest,
version=version)
raise NotImplementedError
click.echo("git clone {url}".format(url=url))
cmd = envoy.run('git clone {url} "{dest}"'.format(
url=url, dest=dest))
elif parsed_url.scheme in ("http", "https"):
zip_dest = download_file(url, dest=self.temp_dir,
label="{dest_basename}",
expected_extension="zip")
with zipfile.ZipFile(zip_dest, "r") as pkg:
return self.extract_bower_zipfile(pkg, dest,
expected_version=version)
# pkg.extractall(str(dest))
else:
click.secho("protocol currently unsupported :(")
sys.exit(1)
def install_bower(self, package, save=True, save_dev=False):
"""Installs a bower package.
:param save: if `True`, pins the package to the Hydrogen requirements
YAML file.
:param save_dev: if `True`, pins the package as a development
dependency to the Hydrogen requirements YAML file.
:param return: a list of tuples, containing all installed package names
and versions, including any dependencies.
"""
requirement = Requirement.coerce(package)
url = Bower.get_package_url(requirement.package)
installed = []
for name, _ in self.get_bower_package(url):
installed.append(Requirement(name, requirement.version))
for requirement in installed:
if save:
self.requirements["bower"].add(requirement, replace=True)
if save_dev:
self.requirements["bower-dev"].add(requirement, replace=True)
success("installed {}".format(str(requirement)))
if save or save_dev:
self.requirements.save()
return installed
def install_pip(self, package, save=True, save_dev=False):
"""Installs a pip package.
:param save: if `True`, pins the package to the Hydrogen requirements
YAML file.
:param save_dev: if `True`, pins the package as a development
dependency to the Hydrogen requirements YAML file.
:param return: a **single** :class:`Requirement` object, representing
the installed version of the given package.
"""
requirement = Requirement.coerce(package)
click.echo("pip install " + requirement.package)
cmd = envoy.run("pip install {}".format(str(requirement)))
if cmd.status_code == 0:
installed_packages = get_installed_pypackages()
package = installed_packages[requirement.package]
requirement.version = "=={}".format(package.version)
if save:
self.requirements["all"].add(requirement)
if save_dev:
self.requirements["dev"].add(requirement)
if save or save_dev:
self.requirements.save()
return requirement
else:
fatal(cmd.std_err)
def groups_option(f):
new_func = click.option("-g", "--groups",
help="Comma-separated list of requirement groups "
"to include.")(f)
return update_wrapper(new_func, f)
@click.group()
@click.version_option(prog_name=prog_name)
@click.pass_context
def main(ctx):
which = "where" if sys.platform == "win32" else "which"
if envoy.run(which + " git").status_code != 0:
click.secho("fatal: git not found in PATH", fg="red")
sys.exit(1)
ctx.obj = Hydrogen()
@main.command()
@click.pass_obj
@click.option("output_yaml", "--yaml", "-y", is_flag=True,
help="Show requirements in YAML format.")
@click.option("--resolve", "-r", is_flag=True,
help="Resolve version numbers for ambiguous packages.")
@groups_option
def freeze(h, output_yaml, resolve, groups):
"""Output installed packages."""
if not groups:
groups = filter(lambda group: not group.lower().startswith("bower"),
h.requirements.keys())
else:
groups = [text_type.strip(group) for group in groups.split(",")]
if output_yaml:
for requirements in h.requirements.values():
for requirement in requirements:
if resolve and not requirement.version:
requirement.load_installed_version()
click.echo(h.requirements.yaml)
else:
for group in groups:
if not h.requirements[group]:
continue
click.echo("# {}".format(group))
for requirement in h.requirements[group]:
if resolve and not requirement.version:
requirement.load_installed_version()
click.echo(str(requirement))
@main.command()
@click.pass_obj
@click.option("--pip/--bower", default=True)
@groups_option
@click.option("--save", is_flag=True)
@click.option("--save-dev", is_flag=True)
@click.argument("packages", nargs=-1)
def install(h, pip, groups, save, save_dev, packages):
"""Install a pip or bower package."""
if groups:
groups = [text_type.strip(group) for group in groups.split(",")]
else:
groups = h.requirements.keys()
if not packages:
for group in groups:
if group not in h.requirements:
warning("{} not in requirements".format(group))
continue
install = (h.install_bower if group.startswith("bower")
else h.install_pip)
for requirement in h.requirements[group]:
install(str(requirement), save=False, save_dev=False)
if pip:
for package in packages:
h.install_pip(package, save=save, save_dev=save_dev)
else:
for package in packages:
h.install_bower(package, save=save, save_dev=save_dev)
if __name__ == "__main__":
main()
| to_ret = {}
for group, requirements in self.items():
to_ret[group] = [str(requirement) for requirement in requirements]
return to_ret | identifier_body |
hydrogen.py | # -*- coding: utf-8 -*-
"""
hydrogen
~~~~~~~~
Hydrogen is an extremely lightweight workflow enhancement tool for Python
web applications, providing bower/npm-like functionality for both pip and
bower packages.
:author: David Gidwani <[email protected]>
:license: BSD, see LICENSE for details
"""
import atexit
from collections import defaultdict
from functools import update_wrapper
import json
import os
import re
import shutil
import sys
import tempfile
import yaml
import zipfile
import click
import envoy
from pathlib import Path, PurePath
from pathspec import GitIgnorePattern, PathSpec
from pip._vendor import pkg_resources
import requests
import rfc6266
import semver
__version__ = "0.0.1-alpha"
prog_name = "hydrogen"
app_dir = click.get_app_dir(prog_name)
github_api_uri = "https://api.github.com"
debug = True
# borrowed from werkzeug._compat
PY2 = sys.version_info[0] == 2
if PY2:
from urlparse import urlparse
text_type = unicode # noqa: Undefined in py3
else:
from urllib.parse import urlparse
text_type = str
class InvalidRequirementSpecError(Exception):
pass
class InvalidPackageError(Exception):
pass
class PackageNotFoundError(Exception):
pass
class VersionNotFoundError(Exception):
pass
def get_installed_pypackages():
return {p.project_name.lower(): p for p in pkg_resources.working_set}
def success(message, **kwargs):
kwargs["fg"] = kwargs.get("fg", "green")
click.secho(message, **kwargs)
def warning(message, **kwargs):
kwargs["fg"] = kwargs.get("fg", "red")
click.secho(u"warning: {}".format(message), **kwargs)
def error(message, level="error", exit_code=1, **kwargs):
kwargs["fg"] = kwargs.get("fg", "red")
click.secho(u"error: {}".format(message), **kwargs)
sys.exit(exit_code)
def fatal(message, **kwargs):
error(message, level="fatal", **kwargs)
def secure_filename(filename):
r"""Borrowed from :mod:`werkzeug.utils`, under the BSD 3-clause license.
Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
to :func:`os.path.join`. The filename returned is an ASCII only string
for maximum portability.
On windows systems the function also makes sure that the file is not
named after one of the special device files.
>>> secure_filename("My cool movie.mov")
'My_cool_movie.mov'
>>> secure_filename("../../../etc/passwd")
'etc_passwd'
>>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
'i_contain_cool_umlauts.txt'
The function might return an empty filename. It's your responsibility
to ensure that the filename is unique and that you generate random
filename if the function returned an empty one.
:param filename: the filename to secure
"""
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4',
'LPT1', 'LPT2', 'LPT3', 'PRN', 'NUL')
if isinstance(filename, text_type):
from unicodedata import normalize
filename = normalize('NFKD', filename).encode('ascii', 'ignore')
if not PY2:
filename = filename.decode('ascii')
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, ' ')
filename = str(_filename_ascii_strip_re.sub('', '_'.join(
filename.split()))).strip('._')
# on nt a couple of special files are present in each folder. We
# have to ensure that the target file is not such a filename. In
# this case we prepend an underline
if os.name == 'nt' and filename and \
filename.split('.')[0].upper() in _windows_device_files:
filename = '_' + filename
return filename
def get(url, session=None, silent=not debug, **kwargs):
"""Retrieve a given URL and log response.
:param session: a :class:`requests.Session` object.
:param silent: if **True**, response status and URL will not be printed.
"""
session = session or requests
kwargs["verify"] = kwargs.get("verify", True)
r = session.get(url, **kwargs)
if not silent:
status_code = click.style(
str(r.status_code),
fg="green" if r.status_code in (200, 304) else "red")
click.echo(status_code + " " + url)
if r.status_code == 404:
raise PackageNotFoundError
return r
def download_file(url, dest=None, chunk_size=1024, replace="ask",
label="Downloading {dest_basename} ({size:.2f}MB)",
expected_extension=None):
"""Download a file from a given URL and display progress.
:param dest: If the destination exists and is a directory, the filename
will be guessed from the Content-Disposition header. If the destination
is an existing file, the user will either be prompted to overwrite, or
the file will be replaced (depending on the value of **replace**). If
the destination does not exist, it will be used as the filename.
:param int chunk_size: bytes read in at a time.
:param replace: If `False`, an existing destination file will not be
overwritten.
:param label: a string which is formatted and displayed as the progress bar
label. Variables provided include *dest_basename*, *dest*, and *size*.
:param expected_extension: if set, the filename will be sanitized to ensure
it has the given extension. The extension should not start with a dot
(`.`).
"""
dest = Path(dest or url.split("/")[-1])
response = get(url, stream=True)
if (dest.exists()
and dest.is_dir()
and "Content-Disposition" in response.headers):
content_disposition = rfc6266.parse_requests_response(response)
if expected_extension is not None:
filename = content_disposition.filename_sanitized(
expected_extension)
filename = secure_filename(filename)
dest = dest / filename
if dest.exists() and not dest.is_dir():
if (replace is False
or replace == "ask"
and not click.confirm("Replace {}?".format(dest))):
return str(dest)
size = int(response.headers.get("content-length", 0))
label = label.format(dest=dest, dest_basename=dest.name,
size=size/1024.0/1024)
with click.open_file(str(dest), "wb") as f:
content_iter = response.iter_content(chunk_size=chunk_size)
        with click.progressbar(content_iter, length=size // chunk_size,
label=label) as bar:
for chunk in bar:
if chunk:
f.write(chunk)
f.flush()
return str(dest)
def get_dir_from_zipfile(zip_file, fallback=None):
"""Return the name of the root folder in a zip file.
:param zip_file: a :class:`zipfile.ZipFile` instance.
:param fallback: if `None`, the name of the zip file is used. This is
returned if the zip file contains more than one top-level directory,
or none at all.
"""
fallback = fallback or zip_file.filename
directories = [name for name in zip_file.namelist() if name.endswith("/")
and len(PurePath(name).parts) == 1]
return fallback if len(directories) > 1 else directories[0]
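# Usage sketch (the archive name is an assumption): find the root folder of a
# downloaded package archive before extracting it.
def _example_zip_root():
    with zipfile.ZipFile("package.zip") as zf:
        return get_dir_from_zipfile(zf, fallback="package")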
def mkdtemp(suffix="", prefix=__name__ + "_", dir=None, cleanup=True,
on_cleanup_error=None):
"""Create a temporary directory and register a handler to cleanup on exit.
:param suffix: suffix of the temporary directory, defaults to empty.
:param prefix: prefix of the temporary directory, defaults to `__name__`
and an underscore.
:param dir: if provided, the directory will be created in `dir` rather than
the system default temp directory.
:param cleanup: if `True`, an atexit handler will be registered to remove
the temp directory on exit.
:param on_cleanup_error: a callback which is called if the atexit handler
encounters an exception. It is passed three parameters: *function*,
*path*, and *excinfo*. For more information, see the :mod:`atexit`
documentation.
"""
path = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
if cleanup:
if on_cleanup_error is None:
def on_cleanup_error(function, path, excinfo):
click.secho("warning: failed to remove file or directory: {}\n"
"please delete it manually.".format(path),
fg="red")
atexit.register(shutil.rmtree, path=path, onerror=on_cleanup_error)
return path
class Requirement(object):
"""Represents a single package requirement.
.. note::
This class overrides `__hash__` in order to ensure that package
names remain unique when in a set.
.. todo::
Extend :class:`pkg_resources.Requirement` for Python requirements.
"""
# TODO: support multiple version specs (e.g. >=1.0,<=2.0)
spec_regex = r"(.+?)\s*(?:([<>~=]?=)\s*(.+?))?$"
def __init__(self, package, version):
"""Construct a new requirement.
:param package: the package name.
:param version: a semver compatible version specification.
"""
self.package = package
self.version = version
if self.version and not re.match(r"[<=>~]", version[:2]):
self.version = "=={}".format(self.version)
@classmethod
def coerce(cls, string):
"""Create a :class:`Requirement` object from a given package spec."""
match = re.match(cls.spec_regex, string)
if not match:
raise InvalidRequirementSpecError("could not parse requirement")
package = match.group(1)
if all(match.group(2, 3)):
version = "".join(match.group(2, 3))
else:
version = None
return cls(package, version)
def load_installed_version(self):
installed_packages = get_installed_pypackages()
if self.package in installed_packages:
self.version = "=={}".format(
installed_packages[self.package].version)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
other.package == self.package)
def __hash__(self):
return hash(self.package)
def __str__(self):
return "".join([self.package, self.version or ""])
def __repr__(self):
return "<Requirement(package={package}, version='{version}')>".format(
package=self.package, version=self.version)
class Requirements(set):
"""Represents a set of requirements."""
def __init__(self, filename=None):
self.filename = None
if filename:
self.load(filename)
def add(self, elem, replace=False):
"""Add a requirement.
:param elem: a string or :class:`Requirement` instance.
:param replace: if `True`, packages in the set with the same name will
be removed first.
"""
if isinstance(elem, text_type):
elem = Requirement.coerce(elem)
if replace and elem in self:
self.remove(elem)
super(Requirements, self).add(elem)
def load(self, requirements_file=None):
"""Load or reload requirements from a requirements.txt file.
:param requirements_file: if not given, the filename used from
initialization will be read again.
"""
if requirements_file is None:
requirements_file = self.filename
if requirements_file is None:
raise ValueError("no filename provided")
elif isinstance(requirements_file, text_type):
requirements_file = Path(requirements_file)
self.clear()
with requirements_file.open() as f:
self.loads(f.read())
if isinstance(requirements_file, (text_type, Path)):
self.filename = requirements_file
def loads(self, requirements_text):
lines = re.findall(Requirement.spec_regex,
requirements_text,
re.MULTILINE)
for line in lines:
self.add(Requirement(line[0], "".join(line[1:])))
def remove(self, elem):
"""Remove a requirement.
:param elem: a string or :class:`Requirement` instance.
"""
if isinstance(elem, text_type):
for requirement in self:
if requirement.package == elem:
return super(Requirements, self).remove(requirement)
return super(Requirements, self).remove(elem)
def __str__(self):
return "\n".join([str(x) for x in self])
def __repr__(self):
return "<Requirements({})>".format(self.filename.name or "")
class NamedRequirements(Requirements):
def __init__(self, name, filename=None):
self.name = name
super(NamedRequirements, self).__init__(filename=filename)
def __repr__(self):
return "<NamedRequirements({}{})>".format(
self.name,
", filename='{}'".format(self.filename.name) if self.filename
else "")
class GroupedRequirements(defaultdict):
default_groups = ["all", "dev", "bower", "bower-dev"]
default_pip_files = {
"all": "requirements.txt",
"dev": "dev-requirements.txt"
}
def __init__(self, groups=None):
super(GroupedRequirements, self).__init__(NamedRequirements)
self.groups = groups or self.default_groups
self.filename = None
self.create_default_groups()
def clear(self):
super(GroupedRequirements, self).clear()
self.create_default_groups()
def create_default_groups(self):
for group in self.groups:
group = group.replace(" ", "_").lower()
self[group] = NamedRequirements(group)
def load_pip_requirements(self, files_map=None, freeze=True):
if files_map is None:
files_map = self.default_pip_files
for group, requirements_txt in files_map.items():
path = Path(requirements_txt)
if not path.exists() and group.lower() == "all" and freeze:
cmd = envoy.run("pip freeze")
self[group].loads(cmd.std_out)
elif path.exists():
self[group].load(path)
def load(self, filename, create_if_missing=True):
filename = Path(filename)
if not filename.exists() and create_if_missing:
self.load_pip_requirements()
with filename.open("w") as f:
f.write(yaml.dump(self.serialized, default_flow_style=False,
encoding=None))
self.filename = filename
return self.save(filename)
with filename.open() as f:
            for group, requirements in yaml.safe_load(f.read()).items():
|
self.filename = filename
def save(self, filename=None):
filename = Path(filename) if filename is not None else self.filename
with filename.open("w") as f:
f.write(self.yaml)
@property
def serialized(self):
to_ret = {}
for group, requirements in self.items():
to_ret[group] = [str(requirement) for requirement in requirements]
return to_ret
@property
def yaml(self):
return yaml.dump(self.serialized, default_flow_style=False,
encoding=None)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
else:
ret = self[key] = self.default_factory(name=key)
return ret
class Bower(object):
bower_base_uri = "https://bower.herokuapp.com"
@classmethod
def get_package_url(cls, package, session=None, silent=False):
response = get("{}/packages/{}".format(cls.bower_base_uri, package))
return response.json().get("url", None)
@classmethod
def clean_semver(cls, version_spec):
return re.sub(r"([<>=~])\s+?v?", "\\1", version_spec, re.IGNORECASE)
class Hydrogen(object):
def __init__(self, assets_dir=None, requirements_file="requirements.yml"):
self.assets_dir = assets_dir or Path(".") / "assets"
self.requirements = GroupedRequirements()
self.requirements.load(requirements_file)
self.temp_dir = mkdtemp()
def extract_bower_zipfile(self, zip_file, dest, expected_version=None):
bower_json = None
root = None
deps_installed = []
for info in zip_file.infolist():
if PurePath(info.filename).name == "bower.json":
with zip_file.open(info) as f:
bower_json = json.load(f)
root = str(PurePath(info.filename).parent)
break
version = bower_json["version"]
if expected_version is not None:
expected_version = Bower.clean_semver(expected_version)
if not semver.match(version, expected_version):
click.secho("error: versions do not match ({} =/= {})".format(
version, expected_version))
raise InvalidPackageError
if "dependencies" in bower_json:
for package, version in bower_json["dependencies"].items():
url = Bower.get_package_url(package)
deps_installed.extend(self.get_bower_package(
url, dest=dest, version=version))
ignore_patterns = [GitIgnorePattern(ig) for ig in bower_json["ignore"]]
path_spec = PathSpec(ignore_patterns)
namelist = [path for path in zip_file.namelist()
if PurePath(path).parts[0] == root]
ignored = list(path_spec.match_files(namelist))
for path in namelist:
dest_path = PurePath(
bower_json["name"],
*PurePath(path).parts[1:])
if path in ignored:
continue
for path in ignored:
            for parent in PurePath(path).parents:
if parent in ignored:
continue
if path.endswith("/"):
if list(path_spec.match_files([str(dest_path)])):
ignored.append(PurePath(path))
elif not (dest / dest_path).is_dir():
(dest / dest_path).mkdir(parents=True)
else:
target_path = dest / dest_path.parent / dest_path.name
source = zip_file.open(path)
target = target_path.open("wb")
with source, target:
shutil.copyfileobj(source, target)
deps_installed.append((bower_json["name"], bower_json["version"]))
return deps_installed
def get_bower_package(self, url, dest=None, version=None,
process_deps=True):
dest = dest or Path(".") / "assets"
parsed_url = urlparse(url)
if parsed_url.scheme == "git" or parsed_url.path.endswith(".git"):
if parsed_url.netloc == "github.com":
user, repo = parsed_url.path[1:-4].split("/")
response = get(github_api_uri +
"/repos/{}/{}/tags".format(user, repo))
tags = response.json()
target = None
if not len(tags):
click.secho("fatal: no tags exist for {}/{}".format(
user, repo), fg="red")
raise InvalidPackageError
if version is None:
target = tags[0]
else:
for tag in tags:
if semver.match(tag["name"],
Bower.clean_semver(version)):
target = tag
break
if not target:
click.secho(
"fatal: failed to find matching tag for "
"{user}/{repo} {version}".format(user, repo, version),
fg="red")
raise VersionNotFoundError
click.secho("installing {}/{}#{}".format(
user, repo, tags[0]["name"]), fg="green")
return self.get_bower_package(
url=target["zipball_url"],
dest=dest,
version=version)
raise NotImplementedError
click.echo("git clone {url}".format(url=url))
cmd = envoy.run('git clone {url} "{dest}"'.format(
url=url, dest=dest))
elif parsed_url.scheme in ("http", "https"):
zip_dest = download_file(url, dest=self.temp_dir,
label="{dest_basename}",
expected_extension="zip")
with zipfile.ZipFile(zip_dest, "r") as pkg:
return self.extract_bower_zipfile(pkg, dest,
expected_version=version)
# pkg.extractall(str(dest))
else:
click.secho("protocol currently unsupported :(")
sys.exit(1)
def install_bower(self, package, save=True, save_dev=False):
"""Installs a bower package.
:param save: if `True`, pins the package to the Hydrogen requirements
YAML file.
:param save_dev: if `True`, pins the package as a development
dependency to the Hydrogen requirements YAML file.
:param return: a list of tuples, containing all installed package names
and versions, including any dependencies.
"""
requirement = Requirement.coerce(package)
url = Bower.get_package_url(requirement.package)
installed = []
for name, _ in self.get_bower_package(url):
installed.append(Requirement(name, requirement.version))
for requirement in installed:
if save:
self.requirements["bower"].add(requirement, replace=True)
if save_dev:
self.requirements["bower-dev"].add(requirement, replace=True)
success("installed {}".format(str(requirement)))
if save or save_dev:
self.requirements.save()
return installed
def install_pip(self, package, save=True, save_dev=False):
"""Installs a pip package.
:param save: if `True`, pins the package to the Hydrogen requirements
YAML file.
:param save_dev: if `True`, pins the package as a development
dependency to the Hydrogen requirements YAML file.
:param return: a **single** :class:`Requirement` object, representing
the installed version of the given package.
"""
requirement = Requirement.coerce(package)
click.echo("pip install " + requirement.package)
cmd = envoy.run("pip install {}".format(str(requirement)))
if cmd.status_code == 0:
installed_packages = get_installed_pypackages()
package = installed_packages[requirement.package]
requirement.version = "=={}".format(package.version)
if save:
self.requirements["all"].add(requirement)
if save_dev:
self.requirements["dev"].add(requirement)
if save or save_dev:
self.requirements.save()
return requirement
else:
fatal(cmd.std_err)
def groups_option(f):
new_func = click.option("-g", "--groups",
help="Comma-separated list of requirement groups "
"to include.")(f)
return update_wrapper(new_func, f)
@click.group()
@click.version_option(prog_name=prog_name)
@click.pass_context
def main(ctx):
which = "where" if sys.platform == "win32" else "which"
if envoy.run(which + " git").status_code != 0:
click.secho("fatal: git not found in PATH", fg="red")
sys.exit(1)
ctx.obj = Hydrogen()
@main.command()
@click.pass_obj
@click.option("output_yaml", "--yaml", "-y", is_flag=True,
help="Show requirements in YAML format.")
@click.option("--resolve", "-r", is_flag=True,
help="Resolve version numbers for ambiguous packages.")
@groups_option
def freeze(h, output_yaml, resolve, groups):
"""Output installed packages."""
if not groups:
groups = filter(lambda group: not group.lower().startswith("bower"),
h.requirements.keys())
else:
groups = [text_type.strip(group) for group in groups.split(",")]
if output_yaml:
for requirements in h.requirements.values():
for requirement in requirements:
if resolve and not requirement.version:
requirement.load_installed_version()
click.echo(h.requirements.yaml)
else:
for group in groups:
if not h.requirements[group]:
continue
click.echo("# {}".format(group))
for requirement in h.requirements[group]:
if resolve and not requirement.version:
requirement.load_installed_version()
click.echo(str(requirement))
@main.command()
@click.pass_obj
@click.option("--pip/--bower", default=True)
@groups_option
@click.option("--save", is_flag=True)
@click.option("--save-dev", is_flag=True)
@click.argument("packages", nargs=-1)
def install(h, pip, groups, save, save_dev, packages):
"""Install a pip or bower package."""
if groups:
groups = [text_type.strip(group) for group in groups.split(",")]
else:
groups = h.requirements.keys()
if not packages:
for group in groups:
if group not in h.requirements:
warning("{} not in requirements".format(group))
continue
install = (h.install_bower if group.startswith("bower")
else h.install_pip)
for requirement in h.requirements[group]:
install(str(requirement), save=False, save_dev=False)
if pip:
for package in packages:
h.install_pip(package, save=save, save_dev=save_dev)
else:
for package in packages:
h.install_bower(package, save=save, save_dev=save_dev)
if __name__ == "__main__":
main()
| for requirement in requirements:
self[group].add(Requirement.coerce(requirement)) | conditional_block |
add.tsx | import * as React from 'react'
import * as Kb from '../../../../common-adapters'
import * as Styles from '../../../../styles'
import * as Types from '../../../../constants/types/teams'
import * as Teams from '../../../../constants/teams'
import * as Container from '../../../../util/container'
import * as ChatTypes from '../../../../constants/types/chat2'
type OwnProps = {
teamID: Types.TeamID
convID: ChatTypes.ConversationIDKey
filter: string
reloadEmojis: () => void
setFilter: (filter: string) => void
}
const AddEmoji = ({teamID, convID, filter, reloadEmojis, setFilter}: OwnProps) => {
const nav = Container.useSafeNavigation()
const dispatch = Container.useDispatch()
const canManageEmoji = Container.useSelector(s => Teams.getCanPerformByID(s, teamID).manageEmojis)
const onAddEmoji = () =>
dispatch(
nav.safeNavigateAppendPayload({
path: [
{
props: {conversationIDKey: convID, onChange: reloadEmojis, teamID},
selected: 'teamAddEmoji',
},
],
})
)
const onAddAlias = () =>
dispatch(
nav.safeNavigateAppendPayload({
path: [
{
props: {conversationIDKey: convID, onChange: reloadEmojis},
selected: 'teamAddEmojiAlias',
},
],
})
)
// clear filter on unmount
return !canManageEmoji ? null : (
<Kb.Box2 direction="horizontal" fullWidth={true} alignItems="center" style={styles.containerNew}>
<Kb.Box2 direction="horizontal" gap="tiny">
<Kb.Button
mode="Secondary"
label="Add emoji"
onClick={onAddEmoji}
small={true}
style={styles.headerButton}
/>
<Kb.Button
mode="Secondary"
label="Add alias"
onClick={onAddAlias}
small={true}
style={styles.headerButton}
/>
</Kb.Box2>
{!Styles.isMobile && (
<Kb.SearchFilter
size="small"
placeholderText="Filter"
onChange={setFilter}
hotkey="k"
value={filter}
valueControlled={true}
style={styles.filterInput}
/>
)}
</Kb.Box2>
)
}
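// Illustrative usage sketch (prop values are assumptions); the parent screen
// is expected to own the filter state and the emoji reload callback:
// <AddEmoji teamID={teamID} convID={conversationIDKey} filter={filter}
//   reloadEmojis={reloadEmojis} setFilter={setFilter} />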
| ...Styles.padding(6, Styles.globalMargins.small),
backgroundColor: Styles.globalColors.blueGrey,
justifyContent: 'space-between',
},
filterInput: {
marginRight: Styles.globalMargins.tiny,
maxWidth: 148,
},
headerButton: Styles.platformStyles({
isMobile: {
flexGrow: 1,
},
}),
text: {padding: Styles.globalMargins.xtiny},
}))
export default AddEmoji | const styles = Styles.styleSheetCreate(() => ({
containerNew: { | random_line_split |
Nodes.tsx | import React from 'react';
import cx from 'classnames';
import { Group } from '@visx/group';
import DefaultNode from './DefaultNode';
import { NodeProvidedProps } from './types';
export type NodeProps<Node> = {
/** Array of nodes to render. */
nodes?: Node[];
/** Component for rendering a single node. */
nodeComponent:
| React.FunctionComponent<NodeProvidedProps<Node>>
| React.ComponentClass<NodeProvidedProps<Node>>;
/** Classname to add to each node parent g element. */
className?: string;
/** Returns the center x coordinate of a node. */ |
export default function Nodes<Node>({
nodes = [],
nodeComponent = DefaultNode,
className,
x = (d: any) => (d && d.x) || 0,
y = (d: any) => (d && d.y) || 0,
}: NodeProps<Node>) {
return (
<>
{nodes.map((node, i) => (
<Group
key={`network-node-${i}`}
className={cx('visx-network-node', className)}
left={x(node)}
top={y(node)}
>
{React.createElement(nodeComponent, { node })}
</Group>
))}
</>
);
} | x?: (d: Node) => number;
/** Returns the center y coordinate of a node. */
y?: (d: Node) => number;
}; | random_line_split |
Owasp.CsrfGuard.js | /**
* The OWASP CSRFGuard Project, BSD License
* Eric Sheridan ([email protected]), Copyright (c) 2011
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of OWASP nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
(function() {
/** string utility functions * */
String.prototype.startsWith = function(prefix) {
return this.indexOf(prefix) === 0;
}
String.prototype.endsWith = function(suffix) {
return this.match(suffix+"$") == suffix;
};
/** hook using standards based prototype * */
function hijackStandard() {
XMLHttpRequest.prototype._open = XMLHttpRequest.prototype.open;
XMLHttpRequest.prototype.open = function(method, url, async, user, pass) {
this.url = url;
this._open.apply(this, arguments);
}
XMLHttpRequest.prototype._send = XMLHttpRequest.prototype.send;
XMLHttpRequest.prototype.send = function(data) {
if(this.onsend != null) |
this._send.apply(this, arguments);
}
}
/** ie does not properly support prototype - wrap completely * */
function hijackExplorer() {
var _XMLHttpRequest = window.XMLHttpRequest;
function alloc_XMLHttpRequest() {
this.base = _XMLHttpRequest ? new _XMLHttpRequest : new window.ActiveXObject("Microsoft.XMLHTTP");
}
function init_XMLHttpRequest() {
return new alloc_XMLHttpRequest;
}
init_XMLHttpRequest.prototype = alloc_XMLHttpRequest.prototype;
/** constants * */
init_XMLHttpRequest.UNSENT = 0;
init_XMLHttpRequest.OPENED = 1;
init_XMLHttpRequest.HEADERS_RECEIVED = 2;
init_XMLHttpRequest.LOADING = 3;
init_XMLHttpRequest.DONE = 4;
/** properties * */
init_XMLHttpRequest.prototype.status = 0;
init_XMLHttpRequest.prototype.statusText = "";
init_XMLHttpRequest.prototype.readyState = init_XMLHttpRequest.UNSENT;
init_XMLHttpRequest.prototype.responseText = "";
init_XMLHttpRequest.prototype.responseXML = null;
init_XMLHttpRequest.prototype.onsend = null;
init_XMLHttpRequest.prototype.url = null;
init_XMLHttpRequest.prototype.onreadystatechange = null;
/** methods * */
init_XMLHttpRequest.prototype.open = function(method, url, async, user, pass) {
var self = this;
this.url = url;
this.base.open(method, url, async, user, pass);
this.base.onreadystatechange = function() {
try { self.status = self.base.status; } catch (e) { }
try { self.statusText = self.base.statusText; } catch (e) { }
try { self.readyState = self.base.readyState; } catch (e) { }
try { self.responseText = self.base.responseText; } catch(e) { }
try { self.responseXML = self.base.responseXML; } catch(e) { }
if(self.onreadystatechange != null) {
self.onreadystatechange.apply(this, arguments);
}
}
}
init_XMLHttpRequest.prototype.send = function(data) {
if(this.onsend != null) {
this.onsend.apply(this, arguments);
}
this.base.send(data);
}
init_XMLHttpRequest.prototype.abort = function() {
this.base.abort();
}
init_XMLHttpRequest.prototype.getAllResponseHeaders = function() {
return this.base.getAllResponseHeaders();
}
init_XMLHttpRequest.prototype.getResponseHeader = function(name) {
return this.base.getResponseHeader(name);
}
init_XMLHttpRequest.prototype.setRequestHeader = function(name, value) {
return this.base.setRequestHeader(name, value);
}
/** hook * */
window.XMLHttpRequest = init_XMLHttpRequest;
}
/** check if valid domain based on domainStrict * */
function isValidDomain(current, target) {
var result = false;
/** check exact or subdomain match * */
if(current == target) {
result = true;
} else if(%DOMAIN_STRICT% == false) {
if(target.charAt(0) == '.') {
result = current.endsWith(target);
} else {
result = current.endsWith('.' + target);
}
}
return result;
}
/** determine if uri/url points to valid domain * */
function isValidUrl(src) {
var result = false;
/** parse out domain to make sure it points to our own * */
if(src.substring(0, 7) == "http://" || src.substring(0, 8) == "https://") {
var token = "://";
var index = src.indexOf(token);
var part = src.substring(index + token.length);
var domain = "";
/** parse up to end, first slash, or anchor * */
for(var i=0; i<part.length; i++) {
var character = part.charAt(i);
if(character == '/' || character == ':' || character == '#') {
break;
} else {
domain += character;
}
}
result = isValidDomain(document.domain, domain);
/** explicitly skip anchors * */
} else if(src.charAt(0) == '#') {
result = false;
/** ensure it is a local resource without a protocol * */
} else if(!src.startsWith("//") && (src.charAt(0) == '/' || src.indexOf(':') == -1)) {
result = true;
}
return result;
}
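/** Usage sketch added for illustration (never called); results assume
 * document.domain is "example.com". */
function exampleIsValidUrl() {
	return [
		isValidUrl("https://example.com/app"), // true: same domain
		isValidUrl("//evil.example.org/x"),    // false: protocol-relative urls are rejected
		isValidUrl("#anchor")                  // false: anchors are explicitly skipped
	];
}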
/** parse uri from url * */
function parseUri(url) {
var uri = "";
var token = "://";
var index = url.indexOf(token);
var part = "";
/**
* ensure to skip protocol and prepend context path for non-qualified
* resources (ex: "protect.html" vs
* "/Owasp.CsrfGuard.Test/protect.html").
*/
if(index > 0) {
part = url.substring(index + token.length);
} else if(url.charAt(0) != '/') {
part = "%CONTEXT_PATH%/" + url;
} else {
part = url;
}
/** parse up to end or query string * */
var uriContext = (index == -1);
for(var i=0; i<part.length; i++) {
var character = part.charAt(i);
if(character == '/') {
uriContext = true;
} else if(uriContext == true && (character == '?' || character == '#')) {
uriContext = false;
break;
}
if(uriContext == true) {
uri += character;
}
}
return uri;
}
/** inject tokens as hidden fields into forms * */
function injectTokenForm(form, tokenName, tokenValue, pageTokens) {
var action = form.getAttribute("action");
if(action != null && isValidUrl(action)) {
var uri = parseUri(action);
var hidden = document.createElement("input");
hidden.setAttribute("type", "hidden");
hidden.setAttribute("name", tokenName);
hidden.setAttribute("value", (pageTokens[uri] != null ? pageTokens[uri] : tokenValue));
form.appendChild(hidden);
}
}
/** inject tokens as query string parameters into url * */
function injectTokenAttribute(element, attr, tokenName, tokenValue, pageTokens) {
var location = element.getAttribute(attr);
if(location != null && isValidUrl(location)) {
var uri = parseUri(location);
var value = (pageTokens[uri] != null ? pageTokens[uri] : tokenValue);
// alert("injectTokenAttribute: location=" + location + " uri=" +
// uri + " value=" + value);
if(location.indexOf('?') != -1) {
location = location + '&' + tokenName + '=' + value;
} else {
location = location + '?' + tokenName + '=' + value;
}
try {
element.setAttribute(attr, location);
} catch (e) {
// attempted to set/update unsupported attribute
}
}
}
/** inject csrf prevention tokens throughout dom * */
function injectTokens(tokenName, tokenValue, pageTokens) {
/** iterate over all elements and injection token * */
var all = document.all ? document.all : document.getElementsByTagName('*');
var len = all.length;
for(var i=0; i<len; i++) {
var element = all[i];
/** inject into form * */
if(element.tagName.toLowerCase() == "form") {
if(%INJECT_FORMS% == true) {
injectTokenForm(element, tokenName, tokenValue, pageTokens);
}
/** inject into attribute * */
} else if(%INJECT_ATTRIBUTES% == true) {
injectTokenAttribute(element, "src", tokenName, tokenValue, pageTokens);
injectTokenAttribute(element, "href", tokenName, tokenValue, pageTokens);
}
}
}
/** obtain array of page specific tokens * */
function requestPageTokens() {
var xhr = new XMLHttpRequest();
var pageTokens = {};
xhr.open("POST", "%SERVLET_PATH%", false);
xhr.send(null);
var text = xhr.responseText;
// alert("master: %TOKEN_VALUE%");
// alert("requestPageTokens: " + text);
var name = "";
var value = "";
var nameContext = true;
for(var i=0; i<text.length; i++) {
var character = text.charAt(i);
if(character == ':') {
nameContext = false;
} else if(character != ',') {
if(nameContext == true) {
name += character;
} else {
value += character;
}
}
if(character == ',' || (i + 1) >= text.length) {
pageTokens[name] = value;
name = "";
value = "";
nameContext = true;
}
}
return pageTokens;
}
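/** Wire-format sketch (the sample payload is an assumption): a response body
 * of "/protect.html:abc123,/other.html:def456" parses into
 * { "/protect.html": "abc123", "/other.html": "def456" },
 * one page token per protected URI. */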
/** utility method to register window.onload * */
function addLoadEvent(func) {
var oldonload = window.onload;
if (typeof window.onload != "function") {
window.onload = func;
} else {
window.onload = function() {
oldonload();
func();
}
}
}
/**
* Only inject the tokens if the JavaScript was referenced from HTML that
* was served by us. Otherwise, the code was referenced from malicious HTML
* which may be trying to steal tokens using JavaScript hijacking
* techniques.
*/
if(isValidDomain(document.domain, "%DOMAIN_ORIGIN%")) {
/** optionally include Ajax support * */
if(%INJECT_XHR% == true) {
if(navigator.appName == "Microsoft Internet Explorer") {
hijackExplorer();
} else {
hijackStandard();
}
XMLHttpRequest.prototype.onsend = function(data) {
if(isValidUrl(this.url)) {
this.setRequestHeader("X-Requested-With", "%X_REQUESTED_WITH%")
this.setRequestHeader("%TOKEN_NAME%", "%TOKEN_VALUE%");
}
};
}
/** update nodes in DOM after load * */
addLoadEvent(function() {
injectTokens("%TOKEN_NAME%", "%TOKEN_VALUE%", requestPageTokens());
});
}
})(); | {
this.onsend.apply(this, arguments);
} | conditional_block |
Owasp.CsrfGuard.js | /**
* The OWASP CSRFGuard Project, BSD License
* Eric Sheridan ([email protected]), Copyright (c) 2011
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of OWASP nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
(function() {
/** string utility functions * */
String.prototype.startsWith = function(prefix) {
return this.indexOf(prefix) === 0;
}
String.prototype.endsWith = function(suffix) {
return this.match(suffix+"$") == suffix;
};
/** hook using standards based prototype * */
function hijackStandard() {
XMLHttpRequest.prototype._open = XMLHttpRequest.prototype.open;
XMLHttpRequest.prototype.open = function(method, url, async, user, pass) {
this.url = url;
this._open.apply(this, arguments);
}
XMLHttpRequest.prototype._send = XMLHttpRequest.prototype.send;
XMLHttpRequest.prototype.send = function(data) {
if(this.onsend != null) {
this.onsend.apply(this, arguments);
}
this._send.apply(this, arguments);
}
}
/** ie does not properly support prototype - wrap completely * */
function hijackExplorer() {
var _XMLHttpRequest = window.XMLHttpRequest;
function alloc_XMLHttpRequest() {
this.base = _XMLHttpRequest ? new _XMLHttpRequest : new window.ActiveXObject("Microsoft.XMLHTTP");
| }
function init_XMLHttpRequest() {
return new alloc_XMLHttpRequest;
}
init_XMLHttpRequest.prototype = alloc_XMLHttpRequest.prototype;
/** constants * */
init_XMLHttpRequest.UNSENT = 0;
init_XMLHttpRequest.OPENED = 1;
init_XMLHttpRequest.HEADERS_RECEIVED = 2;
init_XMLHttpRequest.LOADING = 3;
init_XMLHttpRequest.DONE = 4;
/** properties * */
init_XMLHttpRequest.prototype.status = 0;
init_XMLHttpRequest.prototype.statusText = "";
init_XMLHttpRequest.prototype.readyState = init_XMLHttpRequest.UNSENT;
init_XMLHttpRequest.prototype.responseText = "";
init_XMLHttpRequest.prototype.responseXML = null;
init_XMLHttpRequest.prototype.onsend = null;
init_XMLHttpRequest.prototype.url = null;
init_XMLHttpRequest.prototype.onreadystatechange = null;
/** methods * */
init_XMLHttpRequest.prototype.open = function(method, url, async, user, pass) {
var self = this;
this.url = url;
this.base.open(method, url, async, user, pass);
this.base.onreadystatechange = function() {
try { self.status = self.base.status; } catch (e) { }
try { self.statusText = self.base.statusText; } catch (e) { }
try { self.readyState = self.base.readyState; } catch (e) { }
try { self.responseText = self.base.responseText; } catch(e) { }
try { self.responseXML = self.base.responseXML; } catch(e) { }
if(self.onreadystatechange != null) {
self.onreadystatechange.apply(this, arguments);
}
}
}
init_XMLHttpRequest.prototype.send = function(data) {
if(this.onsend != null) {
this.onsend.apply(this, arguments);
}
this.base.send(data);
}
init_XMLHttpRequest.prototype.abort = function() {
this.base.abort();
}
init_XMLHttpRequest.prototype.getAllResponseHeaders = function() {
return this.base.getAllResponseHeaders();
}
init_XMLHttpRequest.prototype.getResponseHeader = function(name) {
return this.base.getResponseHeader(name);
}
init_XMLHttpRequest.prototype.setRequestHeader = function(name, value) {
return this.base.setRequestHeader(name, value);
}
/** hook * */
window.XMLHttpRequest = init_XMLHttpRequest;
}
/** check if valid domain based on domainStrict * */
function isValidDomain(current, target) {
var result = false;
/** check exact or subdomain match * */
if(current == target) {
result = true;
} else if(%DOMAIN_STRICT% == false) {
if(target.charAt(0) == '.') {
result = current.endsWith(target);
} else {
result = current.endsWith('.' + target);
}
}
return result;
}
/** determine if uri/url points to valid domain * */
function isValidUrl(src) {
var result = false;
/** parse out domain to make sure it points to our own * */
if(src.substring(0, 7) == "http://" || src.substring(0, 8) == "https://") {
var token = "://";
var index = src.indexOf(token);
var part = src.substring(index + token.length);
var domain = "";
/** parse up to end, first slash, or anchor * */
for(var i=0; i<part.length; i++) {
var character = part.charAt(i);
if(character == '/' || character == ':' || character == '#') {
break;
} else {
domain += character;
}
}
result = isValidDomain(document.domain, domain);
/** explicitly skip anchors * */
} else if(src.charAt(0) == '#') {
result = false;
/** ensure it is a local resource without a protocol * */
} else if(!src.startsWith("//") && (src.charAt(0) == '/' || src.indexOf(':') == -1)) {
result = true;
}
return result;
}
/** parse uri from url * */
function parseUri(url) {
var uri = "";
var token = "://";
var index = url.indexOf(token);
var part = "";
/**
* ensure to skip protocol and prepend context path for non-qualified
* resources (ex: "protect.html" vs
* "/Owasp.CsrfGuard.Test/protect.html").
*/
if(index > 0) {
part = url.substring(index + token.length);
} else if(url.charAt(0) != '/') {
part = "%CONTEXT_PATH%/" + url;
} else {
part = url;
}
/** parse up to end or query string * */
var uriContext = (index == -1);
for(var i=0; i<part.length; i++) {
var character = part.charAt(i);
if(character == '/') {
uriContext = true;
} else if(uriContext == true && (character == '?' || character == '#')) {
uriContext = false;
break;
}
if(uriContext == true) {
uri += character;
}
}
return uri;
}
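/** Usage sketch added for illustration (the example URL is an assumption):
 * the scheme, host, and query string are stripped. */
function exampleParseUri() {
	return parseUri("https://example.com/app/page?x=1"); // "/app/page"
}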
/** inject tokens as hidden fields into forms * */
function injectTokenForm(form, tokenName, tokenValue, pageTokens) {
var action = form.getAttribute("action");
if(action != null && isValidUrl(action)) {
var uri = parseUri(action);
var hidden = document.createElement("input");
hidden.setAttribute("type", "hidden");
hidden.setAttribute("name", tokenName);
hidden.setAttribute("value", (pageTokens[uri] != null ? pageTokens[uri] : tokenValue));
form.appendChild(hidden);
}
}
/** inject tokens as query string parameters into url * */
function injectTokenAttribute(element, attr, tokenName, tokenValue, pageTokens) {
var location = element.getAttribute(attr);
if(location != null && isValidUrl(location)) {
var uri = parseUri(location);
var value = (pageTokens[uri] != null ? pageTokens[uri] : tokenValue);
// alert("injectTokenAttribute: location=" + location + " uri=" +
// uri + " value=" + value);
if(location.indexOf('?') != -1) {
location = location + '&' + tokenName + '=' + value;
} else {
location = location + '?' + tokenName + '=' + value;
}
try {
element.setAttribute(attr, location);
} catch (e) {
// attempted to set/update unsupported attribute
}
}
}
/** inject csrf prevention tokens throughout dom * */
function injectTokens(tokenName, tokenValue, pageTokens) {
/** iterate over all elements and inject the token * */
var all = document.all ? document.all : document.getElementsByTagName('*');
var len = all.length;
for(var i=0; i<len; i++) {
var element = all[i];
/** inject into form * */
if(element.tagName.toLowerCase() == "form") {
if(%INJECT_FORMS% == true) {
injectTokenForm(element, tokenName, tokenValue, pageTokens);
}
/** inject into attribute * */
} else if(%INJECT_ATTRIBUTES% == true) {
injectTokenAttribute(element, "src", tokenName, tokenValue, pageTokens);
injectTokenAttribute(element, "href", tokenName, tokenValue, pageTokens);
}
}
}
/** obtain array of page specific tokens * */
function requestPageTokens() {
var xhr = new XMLHttpRequest();
var pageTokens = {};
xhr.open("POST", "%SERVLET_PATH%", false);
xhr.send(null);
var text = xhr.responseText;
// alert("master: %TOKEN_VALUE%");
// alert("requestPageTokens: " + text);
var name = "";
var value = "";
var nameContext = true;
for(var i=0; i<text.length; i++) {
var character = text.charAt(i);
if(character == ':') {
nameContext = false;
} else if(character != ',') {
if(nameContext == true) {
name += character;
} else {
value += character;
}
}
if(character == ',' || (i + 1) >= text.length) {
pageTokens[name] = value;
name = "";
value = "";
nameContext = true;
}
}
return pageTokens;
}
/** utility method to register window.onload * */
function addLoadEvent(func) {
var oldonload = window.onload;
if (typeof window.onload != "function") {
window.onload = func;
} else {
window.onload = function() {
oldonload();
func();
}
}
}
/**
* Only inject the tokens if the JavaScript was referenced from HTML that
* was served by us. Otherwise, the code was referenced from malicious HTML
* which may be trying to steal tokens using JavaScript hijacking
* techniques.
*/
if(isValidDomain(document.domain, "%DOMAIN_ORIGIN%")) {
/** optionally include Ajax support * */
if(%INJECT_XHR% == true) {
if(navigator.appName == "Microsoft Internet Explorer") {
hijackExplorer();
} else {
hijackStandard();
}
XMLHttpRequest.prototype.onsend = function(data) {
if(isValidUrl(this.url)) {
this.setRequestHeader("X-Requested-With", "%X_REQUESTED_WITH%")
this.setRequestHeader("%TOKEN_NAME%", "%TOKEN_VALUE%");
}
};
}
/** update nodes in DOM after load * */
addLoadEvent(function() {
injectTokens("%TOKEN_NAME%", "%TOKEN_VALUE%", requestPageTokens());
});
}
})(); | random_line_split |
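
A note on the row above: requestPageTokens in this script parses the servlet
response of the form "uri1:token1,uri2:token2" into a map of per-page tokens.
An illustrative re-expression of that parser in Python (the function name and
the sample response below are made up for illustration):

def parse_page_tokens(text):
    """Parse "uri1:token1,uri2:token2" pairs, mirroring requestPageTokens."""
    page_tokens = {}
    for pair in text.split(','):
        if not pair:
            continue
        parts = pair.split(':')
        # The JS loop drops every ':' and keeps the remainder as the value.
        page_tokens[parts[0]] = ''.join(parts[1:])
    return page_tokens

assert parse_page_tokens("/a.html:t1,/b.html:t2") == {"/a.html": "t1", "/b.html": "t2"}
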
|
Owasp.CsrfGuard.js | /**
* The OWASP CSRFGuard Project, BSD License
* Eric Sheridan ([email protected]), Copyright (c) 2011
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of OWASP nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
(function() {
/** string utility functions * */
String.prototype.startsWith = function(prefix) {
return this.indexOf(prefix) === 0;
}
String.prototype.endsWith = function(suffix) {
return this.indexOf(suffix, this.length - suffix.length) !== -1; // avoid regex so '.' in domain suffixes is not a wildcard
};
/** hook using standards based prototype * */
function hijackStandard() {
XMLHttpRequest.prototype._open = XMLHttpRequest.prototype.open;
XMLHttpRequest.prototype.open = function(method, url, async, user, pass) {
this.url = url;
this._open.apply(this, arguments);
}
XMLHttpRequest.prototype._send = XMLHttpRequest.prototype.send;
XMLHttpRequest.prototype.send = function(data) {
if(this.onsend != null) {
this.onsend.apply(this, arguments);
}
this._send.apply(this, arguments);
}
}
/** ie does not properly support prototype - wrap completely * */
function hijackExplorer() {
var _XMLHttpRequest = window.XMLHttpRequest;
function alloc_XMLHttpRequest() {
this.base = _XMLHttpRequest ? new _XMLHttpRequest : new window.ActiveXObject("Microsoft.XMLHTTP");
}
function init_XMLHttpRequest() {
return new alloc_XMLHttpRequest;
}
init_XMLHttpRequest.prototype = alloc_XMLHttpRequest.prototype;
/** constants * */
init_XMLHttpRequest.UNSENT = 0;
init_XMLHttpRequest.OPENED = 1;
init_XMLHttpRequest.HEADERS_RECEIVED = 2;
init_XMLHttpRequest.LOADING = 3;
init_XMLHttpRequest.DONE = 4;
/** properties * */
init_XMLHttpRequest.prototype.status = 0;
init_XMLHttpRequest.prototype.statusText = "";
init_XMLHttpRequest.prototype.readyState = init_XMLHttpRequest.UNSENT;
init_XMLHttpRequest.prototype.responseText = "";
init_XMLHttpRequest.prototype.responseXML = null;
init_XMLHttpRequest.prototype.onsend = null;
init_XMLHttpRequest.url = null;
init_XMLHttpRequest.onreadystatechange = null;
/** methods * */
init_XMLHttpRequest.prototype.open = function(method, url, async, user, pass) {
var self = this;
this.url = url;
this.base.open(method, url, async, user, pass);
this.base.onreadystatechange = function() {
try { self.status = self.base.status; } catch (e) { }
try { self.statusText = self.base.statusText; } catch (e) { }
try { self.readyState = self.base.readyState; } catch (e) { }
try { self.responseText = self.base.responseText; } catch(e) { }
try { self.responseXML = self.base.responseXML; } catch(e) { }
if(self.onreadystatechange != null) {
self.onreadystatechange.apply(this, arguments);
}
}
}
init_XMLHttpRequest.prototype.send = function(data) {
if(this.onsend != null) {
this.onsend.apply(this, arguments);
}
this.base.send(data);
}
init_XMLHttpRequest.prototype.abort = function() {
this.base.abort();
}
init_XMLHttpRequest.prototype.getAllResponseHeaders = function() {
return this.base.getAllResponseHeaders();
}
init_XMLHttpRequest.prototype.getResponseHeader = function(name) {
return this.base.getResponseHeader(name);
}
init_XMLHttpRequest.prototype.setRequestHeader = function(name, value) {
return this.base.setRequestHeader(name, value);
}
/** hook * */
window.XMLHttpRequest = init_XMLHttpRequest;
}
/** check if valid domain based on domainStrict * */
function isValidDomain(current, target) {
var result = false;
/** check exact or subdomain match * */
if(current == target) {
result = true;
} else if(%DOMAIN_STRICT% == false) {
if(target.charAt(0) == '.') {
result = current.endsWith(target);
} else {
result = current.endsWith('.' + target);
}
}
return result;
}
/** determine if uri/url points to valid domain * */
function isValidUrl(src) {
var result = false;
/** parse out domain to make sure it points to our own * */
if(src.substring(0, 7) == "http://" || src.substring(0, 8) == "https://") {
var token = "://";
var index = src.indexOf(token);
var part = src.substring(index + token.length);
var domain = "";
/** parse up to end, first slash, or anchor * */
for(var i=0; i<part.length; i++) {
var character = part.charAt(i);
if(character == '/' || character == ':' || character == '#') {
break;
} else {
domain += character;
}
}
result = isValidDomain(document.domain, domain);
/** explicitly skip anchors * */
} else if(src.charAt(0) == '#') {
result = false;
/** ensure it is a local resource without a protocol * */
} else if(!src.startsWith("//") && (src.charAt(0) == '/' || src.indexOf(':') == -1)) {
result = true;
}
return result;
}
/** parse uri from url * */
function parseUri(url) {
var uri = "";
var token = "://";
var index = url.indexOf(token);
var part = "";
/**
* ensure to skip protocol and prepend context path for non-qualified
* resources (ex: "protect.html" vs
* "/Owasp.CsrfGuard.Test/protect.html").
*/
if(index > 0) {
part = url.substring(index + token.length);
} else if(url.charAt(0) != '/') {
part = "%CONTEXT_PATH%/" + url;
} else {
part = url;
}
/** parse up to end or query string * */
var uriContext = (index == -1);
for(var i=0; i<part.length; i++) {
var character = part.charAt(i);
if(character == '/') {
uriContext = true;
} else if(uriContext == true && (character == '?' || character == '#')) {
uriContext = false;
break;
}
if(uriContext == true) {
uri += character;
}
}
return uri;
}
/** inject tokens as hidden fields into forms * */
function | (form, tokenName, tokenValue, pageTokens) {
var action = form.getAttribute("action");
if(action != null && isValidUrl(action)) {
var uri = parseUri(action);
var hidden = document.createElement("input");
hidden.setAttribute("type", "hidden");
hidden.setAttribute("name", tokenName);
hidden.setAttribute("value", (pageTokens[uri] != null ? pageTokens[uri] : tokenValue));
form.appendChild(hidden);
}
}
/** inject tokens as query string parameters into url * */
function injectTokenAttribute(element, attr, tokenName, tokenValue, pageTokens) {
var location = element.getAttribute(attr);
if(location != null && isValidUrl(location)) {
var uri = parseUri(location);
var value = (pageTokens[uri] != null ? pageTokens[uri] : tokenValue);
// alert("injectTokenAttribute: location=" + location + " uri=" +
// uri + " value=" + value);
if(location.indexOf('?') != -1) {
location = location + '&' + tokenName + '=' + value;
} else {
location = location + '?' + tokenName + '=' + value;
}
try {
element.setAttribute(attr, location);
} catch (e) {
// attempted to set/update unsupported attribute
}
}
}
/** inject csrf prevention tokens throughout dom * */
function injectTokens(tokenName, tokenValue, pageTokens) {
/** iterate over all elements and inject the token * */
var all = document.all ? document.all : document.getElementsByTagName('*');
var len = all.length;
for(var i=0; i<len; i++) {
var element = all[i];
/** inject into form * */
if(element.tagName.toLowerCase() == "form") {
if(%INJECT_FORMS% == true) {
injectTokenForm(element, tokenName, tokenValue, pageTokens);
}
/** inject into attribute * */
} else if(%INJECT_ATTRIBUTES% == true) {
injectTokenAttribute(element, "src", tokenName, tokenValue, pageTokens);
injectTokenAttribute(element, "href", tokenName, tokenValue, pageTokens);
}
}
}
/** obtain array of page specific tokens * */
function requestPageTokens() {
var xhr = new XMLHttpRequest();
var pageTokens = {};
xhr.open("POST", "%SERVLET_PATH%", false);
xhr.send(null);
var text = xhr.responseText;
// alert("master: %TOKEN_VALUE%");
// alert("requestPageTokens: " + text);
var name = "";
var value = "";
var nameContext = true;
for(var i=0; i<text.length; i++) {
var character = text.charAt(i);
if(character == ':') {
nameContext = false;
} else if(character != ',') {
if(nameContext == true) {
name += character;
} else {
value += character;
}
}
if(character == ',' || (i + 1) >= text.length) {
pageTokens[name] = value;
name = "";
value = "";
nameContext = true;
}
}
return pageTokens;
}
/** utility method to register window.onload * */
function addLoadEvent(func) {
var oldonload = window.onload;
if (typeof window.onload != "function") {
window.onload = func;
} else {
window.onload = function() {
oldonload();
func();
}
}
}
/**
* Only inject the tokens if the JavaScript was referenced from HTML that
* was served by us. Otherwise, the code was referenced from malicious HTML
* which may be trying to steal tokens using JavaScript hijacking
* techniques.
*/
if(isValidDomain(document.domain, "%DOMAIN_ORIGIN%")) {
/** optionally include Ajax support * */
if(%INJECT_XHR% == true) {
if(navigator.appName == "Microsoft Internet Explorer") {
hijackExplorer();
} else {
hijackStandard();
}
XMLHttpRequest.prototype.onsend = function(data) {
if(isValidUrl(this.url)) {
this.setRequestHeader("X-Requested-With", "%X_REQUESTED_WITH%")
this.setRequestHeader("%TOKEN_NAME%", "%TOKEN_VALUE%");
}
};
}
/** update nodes in DOM after load * */
addLoadEvent(function() {
injectTokens("%TOKEN_NAME%", "%TOKEN_VALUE%", requestPageTokens());
});
}
})(); | injectTokenForm | identifier_name |
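
The domain check used throughout the script above accepts an exact match, or a
subdomain of the configured target when strict mode is off. A hedged Python
sketch of the same rule (the %DOMAIN_STRICT% placeholder becomes an ordinary
boolean parameter here; the function name is ours):

def is_valid_domain(current, target, domain_strict=False):
    """Exact match always passes; subdomains pass only when strict mode is off."""
    if current == target:
        return True
    if not domain_strict:
        suffix = target if target.startswith('.') else '.' + target
        return current.endswith(suffix)
    return False

assert is_valid_domain("app.example.com", "example.com")
assert not is_valid_domain("app.example.com", "example.com", domain_strict=True)
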
Owasp.CsrfGuard.js | /**
* The OWASP CSRFGuard Project, BSD License
* Eric Sheridan ([email protected]), Copyright (c) 2011
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of OWASP nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
(function() {
/** string utility functions * */
String.prototype.startsWith = function(prefix) {
return this.indexOf(prefix) === 0;
}
String.prototype.endsWith = function(suffix) {
return this.indexOf(suffix, this.length - suffix.length) !== -1; // avoid regex so '.' in domain suffixes is not a wildcard
};
/** hook using standards based prototype * */
function hijackStandard() {
XMLHttpRequest.prototype._open = XMLHttpRequest.prototype.open;
XMLHttpRequest.prototype.open = function(method, url, async, user, pass) {
this.url = url;
this._open.apply(this, arguments);
}
XMLHttpRequest.prototype._send = XMLHttpRequest.prototype.send;
XMLHttpRequest.prototype.send = function(data) {
if(this.onsend != null) {
this.onsend.apply(this, arguments);
}
this._send.apply(this, arguments);
}
}
/** ie does not properly support prototype - wrap completely * */
function hijackExplorer() {
var _XMLHttpRequest = window.XMLHttpRequest;
function alloc_XMLHttpRequest() {
this.base = _XMLHttpRequest ? new _XMLHttpRequest : new window.ActiveXObject("Microsoft.XMLHTTP");
}
function init_XMLHttpRequest() {
return new alloc_XMLHttpRequest;
}
init_XMLHttpRequest.prototype = alloc_XMLHttpRequest.prototype;
/** constants * */
init_XMLHttpRequest.UNSENT = 0;
init_XMLHttpRequest.OPENED = 1;
init_XMLHttpRequest.HEADERS_RECEIVED = 2;
init_XMLHttpRequest.LOADING = 3;
init_XMLHttpRequest.DONE = 4;
/** properties * */
init_XMLHttpRequest.prototype.status = 0;
init_XMLHttpRequest.prototype.statusText = "";
init_XMLHttpRequest.prototype.readyState = init_XMLHttpRequest.UNSENT;
init_XMLHttpRequest.prototype.responseText = "";
init_XMLHttpRequest.prototype.responseXML = null;
init_XMLHttpRequest.prototype.onsend = null;
init_XMLHttpRequest.url = null;
init_XMLHttpRequest.onreadystatechange = null;
/** methods * */
init_XMLHttpRequest.prototype.open = function(method, url, async, user, pass) {
var self = this;
this.url = url;
this.base.open(method, url, async, user, pass);
this.base.onreadystatechange = function() {
try { self.status = self.base.status; } catch (e) { }
try { self.statusText = self.base.statusText; } catch (e) { }
try { self.readyState = self.base.readyState; } catch (e) { }
try { self.responseText = self.base.responseText; } catch(e) { }
try { self.responseXML = self.base.responseXML; } catch(e) { }
if(self.onreadystatechange != null) {
self.onreadystatechange.apply(this, arguments);
}
}
}
init_XMLHttpRequest.prototype.send = function(data) {
if(this.onsend != null) {
this.onsend.apply(this, arguments);
}
this.base.send(data);
}
init_XMLHttpRequest.prototype.abort = function() {
this.base.abort();
}
init_XMLHttpRequest.prototype.getAllResponseHeaders = function() {
return this.base.getAllResponseHeaders();
}
init_XMLHttpRequest.prototype.getResponseHeader = function(name) {
return this.base.getResponseHeader(name);
}
init_XMLHttpRequest.prototype.setRequestHeader = function(name, value) {
return this.base.setRequestHeader(name, value);
}
/** hook * */
window.XMLHttpRequest = init_XMLHttpRequest;
}
/** check if valid domain based on domainStrict * */
function isValidDomain(current, target) {
var result = false;
/** check exact or subdomain match * */
if(current == target) {
result = true;
} else if(%DOMAIN_STRICT% == false) {
if(target.charAt(0) == '.') {
result = current.endsWith(target);
} else {
result = current.endsWith('.' + target);
}
}
return result;
}
/** determine if uri/url points to valid domain * */
function isValidUrl(src) {
var result = false;
/** parse out domain to make sure it points to our own * */
if(src.substring(0, 7) == "http://" || src.substring(0, 8) == "https://") {
var token = "://";
var index = src.indexOf(token);
var part = src.substring(index + token.length);
var domain = "";
/** parse up to end, first slash, or anchor * */
for(var i=0; i<part.length; i++) {
var character = part.charAt(i);
if(character == '/' || character == ':' || character == '#') {
break;
} else {
domain += character;
}
}
result = isValidDomain(document.domain, domain);
/** explicitly skip anchors * */
} else if(src.charAt(0) == '#') {
result = false;
/** ensure it is a local resource without a protocol * */
} else if(!src.startsWith("//") && (src.charAt(0) == '/' || src.indexOf(':') == -1)) {
result = true;
}
return result;
}
/** parse uri from url * */
function parseUri(url) |
/** inject tokens as hidden fields into forms * */
function injectTokenForm(form, tokenName, tokenValue, pageTokens) {
var action = form.getAttribute("action");
if(action != null && isValidUrl(action)) {
var uri = parseUri(action);
var hidden = document.createElement("input");
hidden.setAttribute("type", "hidden");
hidden.setAttribute("name", tokenName);
hidden.setAttribute("value", (pageTokens[uri] != null ? pageTokens[uri] : tokenValue));
form.appendChild(hidden);
}
}
/** inject tokens as query string parameters into url * */
function injectTokenAttribute(element, attr, tokenName, tokenValue, pageTokens) {
var location = element.getAttribute(attr);
if(location != null && isValidUrl(location)) {
var uri = parseUri(location);
var value = (pageTokens[uri] != null ? pageTokens[uri] : tokenValue);
// alert("injectTokenAttribute: location=" + location + " uri=" +
// uri + " value=" + value);
if(location.indexOf('?') != -1) {
location = location + '&' + tokenName + '=' + value;
} else {
location = location + '?' + tokenName + '=' + value;
}
try {
element.setAttribute(attr, location);
} catch (e) {
// attempted to set/update unsupported attribute
}
}
}
/** inject csrf prevention tokens throughout dom * */
function injectTokens(tokenName, tokenValue, pageTokens) {
/** iterate over all elements and inject the token * */
var all = document.all ? document.all : document.getElementsByTagName('*');
var len = all.length;
for(var i=0; i<len; i++) {
var element = all[i];
/** inject into form * */
if(element.tagName.toLowerCase() == "form") {
if(%INJECT_FORMS% == true) {
injectTokenForm(element, tokenName, tokenValue, pageTokens);
}
/** inject into attribute * */
} else if(%INJECT_ATTRIBUTES% == true) {
injectTokenAttribute(element, "src", tokenName, tokenValue, pageTokens);
injectTokenAttribute(element, "href", tokenName, tokenValue, pageTokens);
}
}
}
/** obtain array of page specific tokens * */
function requestPageTokens() {
var xhr = new XMLHttpRequest();
var pageTokens = {};
xhr.open("POST", "%SERVLET_PATH%", false);
xhr.send(null);
var text = xhr.responseText;
// alert("master: %TOKEN_VALUE%");
// alert("requestPageTokens: " + text);
var name = "";
var value = "";
var nameContext = true;
for(var i=0; i<text.length; i++) {
var character = text.charAt(i);
if(character == ':') {
nameContext = false;
} else if(character != ',') {
if(nameContext == true) {
name += character;
} else {
value += character;
}
}
if(character == ',' || (i + 1) >= text.length) {
pageTokens[name] = value;
name = "";
value = "";
nameContext = true;
}
}
return pageTokens;
}
/** utility method to register window.onload * */
function addLoadEvent(func) {
var oldonload = window.onload;
if (typeof window.onload != "function") {
window.onload = func;
} else {
window.onload = function() {
oldonload();
func();
}
}
}
/**
* Only inject the tokens if the JavaScript was referenced from HTML that
* was served by us. Otherwise, the code was referenced from malicious HTML
* which may be trying to steal tokens using JavaScript hijacking
* techniques.
*/
if(isValidDomain(document.domain, "%DOMAIN_ORIGIN%")) {
/** optionally include Ajax support * */
if(%INJECT_XHR% == true) {
if(navigator.appName == "Microsoft Internet Explorer") {
hijackExplorer();
} else {
hijackStandard();
}
XMLHttpRequest.prototype.onsend = function(data) {
if(isValidUrl(this.url)) {
this.setRequestHeader("X-Requested-With", "%X_REQUESTED_WITH%")
this.setRequestHeader("%TOKEN_NAME%", "%TOKEN_VALUE%");
}
};
}
/** update nodes in DOM after load * */
addLoadEvent(function() {
injectTokens("%TOKEN_NAME%", "%TOKEN_VALUE%", requestPageTokens());
});
}
})(); | {
var uri = "";
var token = "://";
var index = url.indexOf(token);
var part = "";
/**
* ensure to skip protocol and prepend context path for non-qualified
* resources (ex: "protect.html" vs
* "/Owasp.CsrfGuard.Test/protect.html").
*/
if(index > 0) {
part = url.substring(index + token.length);
} else if(url.charAt(0) != '/') {
part = "%CONTEXT_PATH%/" + url;
} else {
part = url;
}
/** parse up to end or query string * */
var uriContext = (index == -1);
for(var i=0; i<part.length; i++) {
var character = part.charAt(i);
if(character == '/') {
uriContext = true;
} else if(uriContext == true && (character == '?' || character == '#')) {
uriContext = false;
break;
}
if(uriContext == true) {
uri += character;
}
}
return uri;
} | identifier_body |
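
The identifier_body middle above is the body of parseUri. Re-expressed in
Python as an illustrative sketch (the %CONTEXT_PATH% placeholder is treated as
a plain string argument; the function name is ours):

def parse_uri(url, context_path="%CONTEXT_PATH%"):
    """Strip the protocol, prefix bare names with the context path, cut at '?' or '#'."""
    token = "://"
    index = url.find(token)
    if index > 0:
        part = url[index + len(token):]
    elif not url.startswith('/'):
        part = context_path + '/' + url
    else:
        part = url
    uri = ""
    uri_context = (index == -1)  # in a qualified URL the URI starts at the first '/'
    for ch in part:
        if ch == '/':
            uri_context = True
        elif uri_context and ch in '?#':
            break
        if uri_context:
            uri += ch
    return uri

assert parse_uri("http://example.com/app/page.html?x=1") == "/app/page.html"
assert parse_uri("protect.html", "/Owasp.CsrfGuard.Test") == "/Owasp.CsrfGuard.Test/protect.html"
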
ContactClearDataConfirmModal.tsx | import { useState } from 'react';
import { c } from 'ttag';
import { noop } from '@proton/shared/lib/helpers/function';
import { Key } from '@proton/shared/lib/interfaces';
import { Alert, ErrorButton, FormModal, Input, Row } from '../../../components';
import { useModals } from '../../../hooks';
import ContactClearDataExecutionModal from './ContactClearDataExecutionModal';
interface Props {
errorKey: Key;
onClose?: () => void;
}
const ContactClearDataConfirmModal = ({ onClose = noop, errorKey, ...rest }: Props) => {
const { createModal } = useModals();
const [dangerInput, setDangerInput] = useState('');
const dangerWord = 'DANGER';
const handleSubmit = () => {
createModal(<ContactClearDataExecutionModal errorKey={errorKey} />);
onClose?.();
};
const boldDanger = <strong key="danger">{dangerWord}</strong>;
return ( | onSubmit={handleSubmit}
onClose={onClose}
submit={
<ErrorButton disabled={dangerInput !== dangerWord} type="submit">{c('Action')
.t`Clear data`}</ErrorButton>
}
{...rest}
>
<Alert className="mb1" type="info">{c('Warning')
.t`If you don’t remember your password, it is impossible to re-activate your key. We can help you dismiss the alert banner but in the process you will permanently lose access to all the data encrypted with that key.`}</Alert>
<Alert className="mb1" type="error">
{c('Warning')
.jt`This action is irreversible. Please enter the word ${boldDanger} in the field to proceed.`}
</Alert>
<Row>
<Input
value={dangerInput}
placeholder={dangerWord}
onChange={(event) => setDangerInput(event.target.value)}
/>
</Row>
</FormModal>
);
};
export default ContactClearDataConfirmModal; | <FormModal
title={c('Title').t`Warning`} | random_line_split |
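
The modal in the row above only enables its destructive action once the user
types the confirmation word. The gating predicate itself is tiny; a
hypothetical Python equivalent of the check that drives the disabled state:

DANGER_WORD = "DANGER"

def clear_data_enabled(user_input):
    """Mirror of the TSX `dangerInput !== dangerWord` disabled-button check."""
    return user_input == DANGER_WORD

assert not clear_data_enabled("danger")  # case-sensitive, like the TSX comparison
assert clear_data_enabled("DANGER")
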
ceres.py | # Copyright 2011 Chris Davis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Ceres requires Python 2.7 or newer
import itertools
import os
import struct
import json
import errno
from math import isnan
from os.path import isdir, exists, join, dirname, abspath, getsize, getmtime
from glob import glob
from bisect import bisect_left
izip = getattr(itertools, 'izip', zip)
try:
import fcntl
CAN_LOCK = True
except ImportError:
CAN_LOCK = False
LOCK_WRITES = False
TIMESTAMP_FORMAT = "!L"
TIMESTAMP_SIZE = struct.calcsize(TIMESTAMP_FORMAT)
DATAPOINT_FORMAT = "!d"
DATAPOINT_SIZE = struct.calcsize(DATAPOINT_FORMAT)
NAN = float('nan')
PACKED_NAN = struct.pack(DATAPOINT_FORMAT, NAN)
MAX_SLICE_GAP = 80
DEFAULT_TIMESTEP = 60
DEFAULT_NODE_CACHING_BEHAVIOR = 'all'
DEFAULT_SLICE_CACHING_BEHAVIOR = 'none'
SLICE_AGGREGATION_METHODS = ['average', 'sum', 'last', 'max', 'min']
SLICE_PERMS = 0o644
DIR_PERMS = 0o755
class CeresTree(object):
"""Represents a tree of Ceres metrics contained within a single path on disk
This is the primary Ceres API.
:param root: The directory root of the Ceres tree
.. note:: Use :func:`createTree` to initialize and instantiate a new CeresTree
.. seealso:: :func:`setDefaultNodeCachingBehavior` to adjust caching behavior
"""
def __init__(self, root):
if isdir(root):
self.root = abspath(root)
else:
raise ValueError("Invalid root directory '%s'" % root)
self.nodeCache = {}
self.nodeCachingBehavior = DEFAULT_NODE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresTree[0x%x]: %s>" % (id(self), self.root)
__str__ = __repr__
@classmethod
def createTree(cls, root, **props):
"""Create and returns a new Ceres tree with the given properties
:param root: The root directory of the new Ceres tree
:param \*\*props: Arbitrary key-value properties to store as tree metadata
:returns: :class:`CeresTree`
"""
ceresDir = join(root, '.ceres-tree')
if not isdir(ceresDir):
os.makedirs(ceresDir, DIR_PERMS)
for prop, value in props.items():
propFile = join(ceresDir, prop)
with open(propFile, 'w') as fh:
fh.write(str(value))
return cls(root)
def walk(self, **kwargs):
"""Iterate through the nodes contained in this :class:`CeresTree`
:param \*\*kwargs: Options to pass to :func:`os.walk`
:returns: An iterator yielding :class:`CeresNode` objects
"""
for (fsPath, subdirs, filenames) in os.walk(self.root, **kwargs):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
yield CeresNode(self, nodePath, fsPath)
def getFilesystemPath(self, nodePath):
"""Get the on-disk path of a Ceres node given a metric name
:param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
:returns: The Ceres node path on disk"""
return join(self.root, nodePath.replace('.', os.sep))
def getNodePath(self, fsPath):
"""Get the metric name of a Ceres node given the on-disk path
:param fsPath: The filesystem path of a Ceres node
:returns: A metric name
:raises ValueError: When `fsPath` is not a path within the :class:`CeresTree`
"""
fsPath = abspath(fsPath)
if not fsPath.startswith(self.root):
raise ValueError("path '%s' not beneath tree root '%s'" % (fsPath, self.root))
nodePath = fsPath[len(self.root):].strip(os.sep).replace(os.sep, '.')
return nodePath
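# Illustrative round trip (assuming a tree rooted at /opt/ceres):
#   getFilesystemPath('carbon.agents.graphite-a.cpuUsage')
#     -> '/opt/ceres/carbon/agents/graphite-a/cpuUsage'
#   getNodePath('/opt/ceres/carbon/agents/graphite-a/cpuUsage')
#     -> 'carbon.agents.graphite-a.cpuUsage'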
def hasNode(self, nodePath):
"""Returns whether the Ceres tree contains the given metric
:param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
:returns: `True` or `False`"""
return isdir(self.getFilesystemPath(nodePath))
def setNodeCachingBehavior(self, behavior):
"""Set node caching behavior.
:param behavior: See :func:`getNode` for valid behavior values
"""
behavior = behavior.lower()
if behavior not in ('none', 'all'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.nodeCachingBehavior = behavior
self.nodeCache = {}
def getNode(self, nodePath):
"""Returns a Ceres node given a metric name. Because nodes are looked up in
every read and write, a caching mechanism is provided. Cache behavior is set
using :func:`setNodeCachingBehavior` and defaults to the value set in
``DEFAULT_NODE_CACHING_BEHAVIOR``
The following behaviors are available:
* `none` - Node is read from the filesystem at every access.
* `all` (default) - All nodes are cached.
:param nodePath: A metric name
:returns: :class:`CeresNode` or `None`
"""
if self.nodeCachingBehavior == 'all':
if nodePath not in self.nodeCache:
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
self.nodeCache[nodePath] = CeresNode(self, nodePath, fsPath)
else:
return None
return self.nodeCache[nodePath]
elif self.nodeCachingBehavior == 'none':
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
return CeresNode(self, nodePath, fsPath)
else:
return None
else:
raise ValueError("invalid caching behavior configured '%s'" % self.nodeCachingBehavior)
def find(self, nodePattern, fromTime=None, untilTime=None):
"""Find nodes which match a wildcard pattern, optionally filtering on
a time range
:param nodePattern: A glob-style metric wildcard
:param fromTime: Optional interval start time in unix-epoch.
:param untilTime: Optional interval end time in unix-epoch.
:returns: An iterator yielding :class:`CeresNode` objects
"""
for fsPath in glob(self.getFilesystemPath(nodePattern)):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
node = self.getNode(nodePath)
if fromTime is None and untilTime is None:
yield node
elif node.hasDataForInterval(fromTime, untilTime):
yield node
def createNode(self, nodePath, **properties):
"""Creates a new metric given a new metric name and optional per-node metadata
:param nodePath: The new metric name.
:param \*\*properties: Arbitrary key-value properties to store as metric metadata.
:returns: :class:`CeresNode`
"""
return CeresNode.create(self, nodePath, **properties)
def store(self, nodePath, datapoints):
"""Store a list of datapoints associated with a metric
:param nodePath: The metric name to write to e.g. ``carbon.agents.graphite-a.cpuUsage``
:param datapoints: A list of datapoint tuples: ``[(timestamp, value), ...]``
"""
node = self.getNode(nodePath)
if node is None:
raise NodeNotFound("The node '%s' does not exist in this tree" % nodePath)
node.write(datapoints)
def fetch(self, nodePath, fromTime, untilTime):
"""Fetch data within a given interval from the given metric
:param nodePath: The metric name to fetch from
:param fromTime: Requested interval start time in unix-epoch.
:param untilTime: Requested interval end time in unix-epoch.
:returns: :class:`TimeSeriesData`
:raises: :class:`NodeNotFound`, :class:`InvalidRequest`
"""
node = self.getNode(nodePath)
if not node:
raise NodeNotFound("the node '%s' does not exist in this tree" % nodePath)
return node.read(fromTime, untilTime)
class CeresNode(object):
"""A :class:`CeresNode` represents a single time-series metric of a given `timeStep`
(its seconds-per-point resolution) and containing arbitrary key-value metadata.
A :class:`CeresNode` is associated with its most precise `timeStep`. This `timeStep` is the finest
resolution that can be used for writing, though a :class:`CeresNode` can contain and read data with
other, less-precise `timeStep` values in its underlying :class:`CeresSlice` data.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param fsPath: The filesystem path of this metric
.. note:: This class generally should be instantiated through use of :class:`CeresTree`. See
:func:`CeresTree.createNode` and :func:`CeresTree.getNode`
.. seealso:: :func:`setDefaultSliceCachingBehavior` to adjust caching behavior
"""
__slots__ = ('tree', 'nodePath', 'fsPath',
'metadataFile', 'timeStep', 'aggregationMethod',
'sliceCache', 'sliceCachingBehavior')
def __init__(self, tree, nodePath, fsPath):
self.tree = tree
self.nodePath = nodePath
self.fsPath = fsPath
self.metadataFile = join(fsPath, '.ceres-node')
self.timeStep = None
self.aggregationMethod = 'average'
self.sliceCache = None
self.sliceCachingBehavior = DEFAULT_SLICE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresNode[0x%x]: %s>" % (id(self), self.nodePath)
__str__ = __repr__
@classmethod
def create(cls, tree, nodePath, **properties):
"""Create a new :class:`CeresNode` on disk with the specified properties.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param \*\*properties: A set of key-value properties to be associated with this node
A :class:`CeresNode` always has the `timeStep` property which is an integer value representing
the precision of the node in seconds-per-datapoint. E.g. a value of ``60`` represents one datapoint
per minute. If no `timeStep` is specified at creation, the value of ``ceres.DEFAULT_TIMESTEP`` is
used
:returns: :class:`CeresNode`
"""
# Create the node directory
fsPath = tree.getFilesystemPath(nodePath)
os.makedirs(fsPath, DIR_PERMS)
properties['timeStep'] = properties.get('timeStep', DEFAULT_TIMESTEP)
# Create the initial metadata
node = cls(tree, nodePath, fsPath)
node.writeMetadata(properties)
# Create the initial data file
# timeStep = properties['timeStep']
# now = int( time.time() )
# baseTime = now - (now % timeStep)
# slice = CeresSlice.create(node, baseTime, timeStep)
return node
@staticmethod
def isNodeDir(path):
"""Tests whether the given path is a :class:`CeresNode`
:param path: Path to test
:returns: `True` or `False`
"""
return isdir(path) and exists(join(path, '.ceres-node'))
@classmethod
def fromFilesystemPath(cls, fsPath):
"""Instantiate a :class:`CeresNode` from the on-disk path of an existing node
:param fsPath: The filesystem path of an existing node
:returns: :class:`CeresNode`
"""
dirPath = dirname(fsPath)
while True:
ceresDir = join(dirPath, '.ceres-tree')
if isdir(ceresDir):
tree = CeresTree(dirPath)
nodePath = tree.getNodePath(fsPath)
return cls(tree, nodePath, fsPath)
dirPath = dirname(dirPath)
if dirPath == '/':
raise ValueError("the path '%s' is not in a ceres tree" % fsPath)
@property
def slice_info(self):
"""A property providing a list of current information about each slice
| :returns: ``[(startTime, endTime, timeStep), ...]``
"""
return [(slice.startTime, slice.endTime, slice.timeStep) for slice in self.slices]
def readMetadata(self):
"""Update node metadata from disk
:raises: :class:`CorruptNode`
"""
with open(self.metadataFile, 'r') as fh:
try:
metadata = json.load(fh)
self.timeStep = int(metadata['timeStep'])
if metadata.get('aggregationMethod'):
self.aggregationMethod = metadata['aggregationMethod']
return metadata
except (KeyError, IOError, ValueError) as e:
raise CorruptNode(self, "Unable to parse node metadata: %s" % (e.args,))
def writeMetadata(self, metadata):
"""Writes new metadata to disk
:param metadata: a JSON-serializable dict of node metadata
"""
self.timeStep = int(metadata['timeStep'])
with open(self.metadataFile, 'w') as fh:
json.dump(metadata, fh)
@property
def slices(self):
"""A property providing access to information about this node's underlying slices. Because this
information is accessed in every read and write, a caching mechanism is provided. Cache behavior is
set using :func:`setSliceCachingBehavior` and defaults to the value set in
``DEFAULT_SLICE_CACHING_BEHAVIOR``
The following behaviors are available:
* `none` (default) - Slice information is read from the filesystem at every access
* `latest` - The latest slice is served from cache, all others from disk. Reads and writes of recent
data are most likely to be in the latest slice
* `all` - All slices are cached. The cache is only refreshed on new slice creation or deletion
:returns: ``[(startTime, timeStep), ...]``
"""
if self.sliceCache:
if self.sliceCachingBehavior == 'all':
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
yield self.sliceCache
infos = self.readSlices()
for info in infos[1:]:
yield CeresSlice(self, *info)
else:
if self.sliceCachingBehavior == 'all':
self.sliceCache = [CeresSlice(self, *info) for info in self.readSlices()]
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
infos = self.readSlices()
if infos:
self.sliceCache = CeresSlice(self, *infos[0])
yield self.sliceCache
for info in infos[1:]:
yield CeresSlice(self, *info)
elif self.sliceCachingBehavior == 'none':
for info in self.readSlices():
yield CeresSlice(self, *info)
else:
raise ValueError("invalid caching behavior configured '%s'" % self.sliceCachingBehavior)
def readSlices(self):
"""Read slice information from disk
:returns: ``[(startTime, timeStep), ...]``
"""
if not exists(self.fsPath):
raise NodeDeleted()
slice_info = []
for filename in os.listdir(self.fsPath):
if filename.endswith('.slice'):
startTime, timeStep = filename[:-6].split('@')
slice_info.append((int(startTime), int(timeStep)))
slice_info.sort(reverse=True)
return slice_info
def setSliceCachingBehavior(self, behavior):
"""Set slice caching behavior.
:param behavior: See :func:`slices` for valid behavior values
"""
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.sliceCachingBehavior = behavior
self.sliceCache = None
def clearSliceCache(self):
"""Clear slice cache, forcing a refresh from disk at the next access"""
self.sliceCache = None
def hasDataForInterval(self, fromTime, untilTime):
"""Test whether this node has any data in the given time interval. All slices are inspected
which will trigger a read of slice information from disk if slice cache behavior is set to `latest`
or `none` (See :func:`slices`)
:param fromTime: Beginning of interval in unix epoch seconds
:param untilTime: End of interval in unix epoch seconds
:returns: `True` or `False`
"""
slices = list(self.slices)
if not slices:
return False
earliestData = slices[-1].startTime
latestData = slices[0].endTime
return ((fromTime is None) or (fromTime < latestData)) and \
((untilTime is None) or (untilTime > earliestData))
def read(self, fromTime, untilTime):
"""Read data from underlying slices and return as a single time-series
:param fromTime: Beginning of interval in unix epoch seconds
:param untilTime: End of interval in unix epoch seconds
:returns: :class:`TimeSeriesData`
"""
if self.timeStep is None:
self.readMetadata()
# Normalize the timestamps to fit proper intervals
fromTime = int(fromTime - (fromTime % self.timeStep))
untilTime = int(untilTime - (untilTime % self.timeStep))
sliceBoundary = None # to know when to split up queries across slices
resultValues = []
earliestData = None
timeStep = self.timeStep
method = self.aggregationMethod
for slice in self.slices:
# If there was a prior slice covering the requested interval, don't ask for that data again
if (sliceBoundary is not None) and untilTime > sliceBoundary:
requestUntilTime = sliceBoundary
else:
requestUntilTime = untilTime
# if the requested interval starts after the start of this slice
if fromTime >= slice.startTime:
try:
series = slice.read(fromTime, requestUntilTime)
except NoData:
break
if series.timeStep != timeStep:
if len(resultValues) == 0:
# First slice holding series data, this becomes the default timeStep.
timeStep = series.timeStep
elif series.timeStep < timeStep:
# Series is at a different precision, aggregate to fit our current set.
series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
else:
# Normalize current set to fit new series data.
resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
timeStep = series.timeStep
earliestData = series.startTime
rightMissing = (requestUntilTime - series.endTime) // timeStep
rightNulls = [None for i in range(rightMissing)]
resultValues = series.values + rightNulls + resultValues
break
# or if slice contains data for part of the requested interval
elif untilTime >= slice.startTime:
try:
series = slice.read(slice.startTime, requestUntilTime)
except NoData:
continue
if series.timeStep != timeStep:
if len(resultValues) == 0:
# First slice holding series data, this becomes the default timeStep.
timeStep = series.timeStep
elif series.timeStep < timeStep:
# Series is at a different precision, aggregate to fit our current set.
series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
else:
# Normalize current set to fit new series data.
resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
timeStep = series.timeStep
earliestData = series.startTime
rightMissing = (requestUntilTime - series.endTime) // timeStep
rightNulls = [None for i in range(rightMissing)]
resultValues = series.values + rightNulls + resultValues
# this is the right-side boundary on the next iteration
sliceBoundary = slice.startTime
# The end of the requested interval predates all slices
if earliestData is None:
missing = int(untilTime - fromTime) // timeStep
resultValues = [None for i in range(missing)]
# Left pad nulls if the start of the requested interval predates all slices
else:
leftMissing = (earliestData - fromTime) // timeStep
leftNulls = [None for i in range(leftMissing)]
resultValues = leftNulls + resultValues
return TimeSeriesData(fromTime, untilTime, timeStep, resultValues)
def write(self, datapoints):
"""Writes datapoints to underlying slices. Datapoints that round to the same timestamp for the
node's `timeStep` will be treated as duplicates and dropped.
:param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
"""
if self.timeStep is None:
self.readMetadata()
if not datapoints:
return
sequences = self.compact(datapoints)
needsEarlierSlice = [] # keep track of sequences that precede all existing slices
while sequences:
sequence = sequences.pop()
timestamps = [t for t, v in sequence]
beginningTime = timestamps[0]
endingTime = timestamps[-1]
sliceBoundary = None # used to prevent writing sequences across slice boundaries
slicesExist = False
for slice in self.slices:
if slice.timeStep != self.timeStep:
continue
slicesExist = True
# truncate sequence so it doesn't cross the slice boundaries
if beginningTime >= slice.startTime:
if sliceBoundary is None:
sequenceWithinSlice = sequence
else:
# index of highest timestamp that doesn't exceed sliceBoundary
boundaryIndex = bisect_left(timestamps, sliceBoundary)
sequenceWithinSlice = sequence[:boundaryIndex]
try:
slice.write(sequenceWithinSlice)
except SliceGapTooLarge:
newSlice = CeresSlice.create(self, beginningTime, slice.timeStep)
newSlice.write(sequenceWithinSlice)
self.sliceCache = None
except SliceDeleted:
self.sliceCache = None
self.write(datapoints) # recurse to retry
return
sequence = []
break
# sequence straddles the current slice, write the right side
# left side will be taken up in the next slice down
elif endingTime >= slice.startTime:
# index of lowest timestamp that doesn't precede slice.startTime
boundaryIndex = bisect_left(timestamps, slice.startTime)
sequenceWithinSlice = sequence[boundaryIndex:]
# write the leftovers on the next earlier slice
sequence = sequence[:boundaryIndex]
slice.write(sequenceWithinSlice)
if not sequence:
break
sliceBoundary = slice.startTime
else: # slice list exhausted with stuff still to write
needsEarlierSlice.append(sequence)
if not slicesExist:
sequences.append(sequence)
needsEarlierSlice = sequences
break
for sequence in needsEarlierSlice:
slice = CeresSlice.create(self, int(sequence[0][0]), self.timeStep)
slice.write(sequence)
self.clearSliceCache()
def compact(self, datapoints):
"""Compacts datapoints into a list of contiguous, sorted lists of points with duplicate
timestamps and null values removed
:param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
:returns: A list of lists of contiguous sorted datapoint tuples
``[[(timestamp, value), ...], ...]``
"""
datapoints = sorted(((int(timestamp), float(value))
for timestamp, value in datapoints if value is not None),
key=lambda datapoint: datapoint[0])
sequences = []
sequence = []
minimumTimestamp = 0 # used to avoid duplicate intervals
for timestamp, value in datapoints:
timestamp -= timestamp % self.timeStep # round it down to a proper interval
if not sequence:
sequence.append((timestamp, value))
else:
if timestamp == minimumTimestamp: # overwrite duplicate intervals with latest value
sequence[-1] = (timestamp, value)
continue
if timestamp == sequence[-1][0] + self.timeStep: # append contiguous datapoints
sequence.append((timestamp, value))
else: # start a new sequence if not contiguous
sequences.append(sequence)
sequence = [(timestamp, value)]
minimumTimestamp = timestamp
if sequence:
sequences.append(sequence)
return sequences
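# Illustrative compaction (assuming timeStep=60): unsorted input with a
# duplicate interval and a gap becomes two contiguous, sorted sequences:
#   compact([(120, 2), (60, 1), (121, 9), (240, 3)])
#     -> [[(60, 1.0), (120, 9.0)], [(240, 3.0)]]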
class CeresSlice(object):
__slots__ = ('node', 'startTime', 'timeStep', 'fsPath')
def __init__(self, node, startTime, timeStep):
self.node = node
self.startTime = startTime
self.timeStep = timeStep
self.fsPath = join(node.fsPath, '%d@%d.slice' % (startTime, timeStep))
def __repr__(self):
return "<CeresSlice[0x%x]: %s>" % (id(self), self.fsPath)
__str__ = __repr__
@property
def isEmpty(self):
return getsize(self.fsPath) == 0
@property
def endTime(self):
return self.startTime + ((getsize(self.fsPath) // DATAPOINT_SIZE) * self.timeStep)
@property
def mtime(self):
return getmtime(self.fsPath)
@classmethod
def create(cls, node, startTime, timeStep):
slice = cls(node, startTime, timeStep)
fileHandle = open(slice.fsPath, 'wb')
fileHandle.close()
os.chmod(slice.fsPath, SLICE_PERMS)
return slice
def read(self, fromTime, untilTime):
timeOffset = int(fromTime) - self.startTime
if timeOffset < 0:
raise InvalidRequest("requested time range (%d, %d) precedes this slice: %d" % (
fromTime, untilTime, self.startTime))
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
if byteOffset >= getsize(self.fsPath):
raise NoData()
with open(self.fsPath, 'rb') as fileHandle:
fileHandle.seek(byteOffset)
timeRange = int(untilTime - fromTime)
pointRange = timeRange // self.timeStep
byteRange = pointRange * DATAPOINT_SIZE
packedValues = fileHandle.read(byteRange)
pointsReturned = len(packedValues) // DATAPOINT_SIZE
format = '!' + ('d' * pointsReturned)
values = struct.unpack(format, packedValues)
values = [v if not isnan(v) else None for v in values]
endTime = fromTime + (len(values) * self.timeStep)
# print '[DEBUG slice.read] startTime=%s fromTime=%s untilTime=%s' % (
# self.startTime, fromTime, untilTime)
# print '[DEBUG slice.read] timeInfo = (%s, %s, %s)' % (fromTime, endTime, self.timeStep)
# print '[DEBUG slice.read] values = %s' % str(values)
return TimeSeriesData(fromTime, endTime, self.timeStep, values)
def write(self, sequence):
beginningTime = sequence[0][0]
timeOffset = beginningTime - self.startTime
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
values = [v for t, v in sequence]
format = '!' + ('d' * len(values))
packedValues = struct.pack(format, *values)
try:
filesize = getsize(self.fsPath)
except OSError as e:
if e.errno == errno.ENOENT:
raise SliceDeleted()
else:
raise
byteGap = byteOffset - filesize
if byteGap > 0: # pad the allowable gap with nan's
pointGap = byteGap // DATAPOINT_SIZE
if pointGap > MAX_SLICE_GAP:
raise SliceGapTooLarge()
else:
packedGap = PACKED_NAN * pointGap
packedValues = packedGap + packedValues
byteOffset -= byteGap
with open(self.fsPath, 'r+b') as fileHandle:
if LOCK_WRITES:
fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
try:
fileHandle.seek(byteOffset)
except IOError:
# print " IOError: fsPath=%s byteOffset=%d size=%d sequence=%s" % (
# self.fsPath, byteOffset, filesize, sequence)
raise
fileHandle.write(packedValues)
def deleteBefore(self, t):
if not exists(self.fsPath):
raise SliceDeleted()
if t % self.timeStep != 0:
t = t - (t % self.timeStep) + self.timeStep
timeOffset = t - self.startTime
if timeOffset < 0:
return
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
if not byteOffset:
return
self.node.clearSliceCache()
with open(self.fsPath, 'r+b') as fileHandle:
if LOCK_WRITES:
fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
fileHandle.seek(byteOffset)
fileData = fileHandle.read()
if fileData:
fileHandle.seek(0)
fileHandle.write(fileData)
fileHandle.truncate()
fileHandle.close()
newFsPath = join(dirname(self.fsPath), "%d@%d.slice" % (t, self.timeStep))
os.rename(self.fsPath, newFsPath)
else:
os.unlink(self.fsPath)
raise SliceDeleted()
def __lt__(self, other):
return self.startTime < other.startTime
class TimeSeriesData(object):
__slots__ = ('startTime', 'endTime', 'timeStep', 'values')
def __init__(self, startTime, endTime, timeStep, values):
self.startTime = startTime
self.endTime = endTime
self.timeStep = timeStep
self.values = values
@property
def timestamps(self):
return range(self.startTime, self.endTime, self.timeStep)
def __iter__(self):
return izip(self.timestamps, self.values)
def __len__(self):
return len(self.values)
def merge(self, other):
for timestamp, value in other:
if value is None:
continue
timestamp -= timestamp % self.timeStep
if timestamp < self.startTime:
continue
index = int((timestamp - self.startTime) // self.timeStep)
try:
if self.values[index] is None:
self.values[index] = value
except IndexError:
continue
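# Illustrative merge (fills gaps in self, never overwrites known values):
#   self: startTime=0, timeStep=60, values=[1, None]
#   other yielding (60, 5.0)  ->  self.values == [1, 5.0]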
class CorruptNode(Exception):
def __init__(self, node, problem):
Exception.__init__(self, problem)
self.node = node
self.problem = problem
class NoData(Exception):
pass
class NodeNotFound(Exception):
pass
class NodeDeleted(Exception):
pass
class InvalidRequest(Exception):
pass
class InvalidAggregationMethod(Exception):
pass
class SliceGapTooLarge(Exception):
"For internal use only"
class SliceDeleted(Exception):
pass
def aggregate(aggregationMethod, values):
# Filter out None values
knownValues = list(filter(lambda x: x is not None, values))
if len(knownValues) == 0:
return None
# Aggregate based on method
if aggregationMethod == 'average':
return float(sum(knownValues)) / float(len(knownValues))
elif aggregationMethod == 'sum':
return float(sum(knownValues))
elif aggregationMethod == 'last':
return knownValues[-1]
elif aggregationMethod == 'max':
return max(knownValues)
elif aggregationMethod == 'min':
return min(knownValues)
else:
raise InvalidAggregationMethod("Unrecognized aggregation method %s" %
aggregationMethod)
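# Illustrative behaviour (None values are dropped before aggregating):
#   aggregate('average', [1, 2, None, 3]) -> 2.0
#   aggregate('last', [1, None, 5]) -> 5
#   aggregate('sum', [None, None]) -> None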
def aggregateSeries(method, oldTimeStep, newTimeStep, values):
# Aggregate current values to fit newTimeStep.
# Makes the assumption that the caller has already guaranteed
# that newTimeStep is bigger than oldTimeStep.
factor = int(newTimeStep // oldTimeStep)
newValues = []
subArr = []
for val in values:
subArr.append(val)
if len(subArr) == factor:
newValues.append(aggregate(method, subArr))
subArr = []
if len(subArr):
newValues.append(aggregate(method, subArr))
return newValues
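# Illustrative downsampling from a 60s to a 180s step (factor 3); a trailing
# partial bucket is aggregated on its own:
#   aggregateSeries('sum', 60, 180, [1, 2, 3, 4]) -> [6.0, 4.0]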
def getTree(path):
while path not in (os.sep, ''):
if isdir(join(path, '.ceres-tree')):
return CeresTree(path)
path = dirname(path)
def setDefaultNodeCachingBehavior(behavior):
global DEFAULT_NODE_CACHING_BEHAVIOR
behavior = behavior.lower()
if behavior not in ('none', 'all'):
raise ValueError("invalid caching behavior '%s'" % behavior)
DEFAULT_NODE_CACHING_BEHAVIOR = behavior
def setDefaultSliceCachingBehavior(behavior):
global DEFAULT_SLICE_CACHING_BEHAVIOR
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
DEFAULT_SLICE_CACHING_BEHAVIOR = behavior | random_line_split |
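
Putting the documented API in the row above together, a minimal usage sketch.
This is illustrative only: the module name, tree root, and metric name are
assumptions, not taken from the source.

from ceres import CeresTree  # assuming the file above is importable as `ceres`
import time

tree = CeresTree.createTree('/tmp/ceres')  # assumed writable root
tree.createNode('carbon.agents.graphite-a.cpuUsage', timeStep=60)

now = int(time.time())
now -= now % 60  # align to the node's timeStep
tree.store('carbon.agents.graphite-a.cpuUsage', [(now, 42.0)])

series = tree.fetch('carbon.agents.graphite-a.cpuUsage', now - 300, now + 60)
for timestamp, value in series:
    print(timestamp, value)
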
|
ceres.py | # Copyright 2011 Chris Davis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Ceres requires Python 2.7 or newer
import itertools
import os
import struct
import json
import errno
from math import isnan
from os.path import isdir, exists, join, dirname, abspath, getsize, getmtime
from glob import glob
from bisect import bisect_left
izip = getattr(itertools, 'izip', zip)
try:
import fcntl
CAN_LOCK = True
except ImportError:
CAN_LOCK = False
LOCK_WRITES = False
TIMESTAMP_FORMAT = "!L"
TIMESTAMP_SIZE = struct.calcsize(TIMESTAMP_FORMAT)
DATAPOINT_FORMAT = "!d"
DATAPOINT_SIZE = struct.calcsize(DATAPOINT_FORMAT)
NAN = float('nan')
PACKED_NAN = struct.pack(DATAPOINT_FORMAT, NAN)
MAX_SLICE_GAP = 80
DEFAULT_TIMESTEP = 60
DEFAULT_NODE_CACHING_BEHAVIOR = 'all'
DEFAULT_SLICE_CACHING_BEHAVIOR = 'none'
SLICE_AGGREGATION_METHODS = ['average', 'sum', 'last', 'max', 'min']
SLICE_PERMS = 0o644
DIR_PERMS = 0o755
class CeresTree(object):
"""Represents a tree of Ceres metrics contained within a single path on disk
This is the primary Ceres API.
:param root: The directory root of the Ceres tree
.. note:: Use :func:`createTree` to initialize and instantiate a new CeresTree
.. seealso:: :func:`setDefaultNodeCachingBehavior` to adjust caching behavior
"""
def __init__(self, root):
if isdir(root):
self.root = abspath(root)
else:
raise ValueError("Invalid root directory '%s'" % root)
self.nodeCache = {}
self.nodeCachingBehavior = DEFAULT_NODE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresTree[0x%x]: %s>" % (id(self), self.root)
__str__ = __repr__
@classmethod
def createTree(cls, root, **props):
"""Create and returns a new Ceres tree with the given properties
:param root: The root directory of the new Ceres tree
:param \*\*props: Arbitrary key-value properties to store as tree metadata
:returns: :class:`CeresTree`
"""
ceresDir = join(root, '.ceres-tree')
if not isdir(ceresDir):
os.makedirs(ceresDir, DIR_PERMS)
for prop, value in props.items():
propFile = join(ceresDir, prop)
with open(propFile, 'w') as fh:
fh.write(str(value))
return cls(root)
def walk(self, **kwargs):
"""Iterate through the nodes contained in this :class:`CeresTree`
:param \*\*kwargs: Options to pass to :func:`os.walk`
:returns: An iterator yielding :class:`CeresNode` objects
"""
for (fsPath, subdirs, filenames) in os.walk(self.root, **kwargs):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
yield CeresNode(self, nodePath, fsPath)
def getFilesystemPath(self, nodePath):
"""Get the on-disk path of a Ceres node given a metric name
:param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
:returns: The Ceres node path on disk"""
return join(self.root, nodePath.replace('.', os.sep))
def getNodePath(self, fsPath):
"""Get the metric name of a Ceres node given the on-disk path
:param fsPath: The filesystem path of a Ceres node
:returns: A metric name
:raises ValueError: When `fsPath` is not a path within the :class:`CeresTree`
"""
fsPath = abspath(fsPath)
if not fsPath.startswith(self.root):
raise ValueError("path '%s' not beneath tree root '%s'" % (fsPath, self.root))
nodePath = fsPath[len(self.root):].strip(os.sep).replace(os.sep, '.')
return nodePath
def hasNode(self, nodePath):
"""Returns whether the Ceres tree contains the given metric
:param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
:returns: `True` or `False`"""
return isdir(self.getFilesystemPath(nodePath))
def setNodeCachingBehavior(self, behavior):
"""Set node caching behavior.
:param behavior: See :func:`getNode` for valid behavior values
"""
behavior = behavior.lower()
if behavior not in ('none', 'all'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.nodeCachingBehavior = behavior
self.nodeCache = {}
def getNode(self, nodePath):
"""Returns a Ceres node given a metric name. Because nodes are looked up in
every read and write, a caching mechanism is provided. Cache behavior is set
using :func:`setNodeCachingBehavior` and defaults to the value set in
``DEFAULT_NODE_CACHING_BEHAVIOR``
The following behaviors are available:
* `none` - Node is read from the filesystem at every access.
* `all` (default) - All nodes are cached.
:param nodePath: A metric name
:returns: :class:`CeresNode` or `None`
"""
if self.nodeCachingBehavior == 'all':
if nodePath not in self.nodeCache:
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
self.nodeCache[nodePath] = CeresNode(self, nodePath, fsPath)
else:
return None
return self.nodeCache[nodePath]
elif self.nodeCachingBehavior == 'none':
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
return CeresNode(self, nodePath, fsPath)
else:
return None
else:
raise ValueError("invalid caching behavior configured '%s'" % self.nodeCachingBehavior)
def find(self, nodePattern, fromTime=None, untilTime=None):
"""Find nodes which match a wildcard pattern, optionally filtering on
a time range
:param nodePattern: A glob-style metric wildcard
:param fromTime: Optional interval start time in unix-epoch.
:param untilTime: Optional interval end time in unix-epoch.
:returns: An iterator yielding :class:`CeresNode` objects
"""
for fsPath in glob(self.getFilesystemPath(nodePattern)):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
node = self.getNode(nodePath)
if fromTime is None and untilTime is None:
yield node
elif node.hasDataForInterval(fromTime, untilTime):
yield node
def createNode(self, nodePath, **properties):
"""Creates a new metric given a new metric name and optional per-node metadata
:param nodePath: The new metric name.
:param \*\*properties: Arbitrary key-value properties to store as metric metadata.
:returns: :class:`CeresNode`
"""
return CeresNode.create(self, nodePath, **properties)
def store(self, nodePath, datapoints):
"""Store a list of datapoints associated with a metric
:param nodePath: The metric name to write to e.g. ``carbon.agents.graphite-a.cpuUsage``
:param datapoints: A list of datapoint tuples: ``[(timestamp, value), ...]``
"""
node = self.getNode(nodePath)
if node is None:
raise NodeNotFound("The node '%s' does not exist in this tree" % nodePath)
node.write(datapoints)
def fetch(self, nodePath, fromTime, untilTime):
"""Fetch data within a given interval from the given metric
:param nodePath: The metric name to fetch from
:param fromTime: Requested interval start time in unix-epoch.
:param untilTime: Requested interval end time in unix-epoch.
:returns: :class:`TimeSeriesData`
:raises: :class:`NodeNotFound`, :class:`InvalidRequest`
"""
node = self.getNode(nodePath)
if not node:
raise NodeNotFound("the node '%s' does not exist in this tree" % nodePath)
return node.read(fromTime, untilTime)
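# A minimal end-to-end sketch of the CeresTree API defined above; the tree
# path and metric name are hypothetical:
#
#   tree = CeresTree.createTree('/var/lib/ceres')
#   tree.createNode('carbon.agents.graphite-a.cpuUsage', timeStep=60)
#   tree.store('carbon.agents.graphite-a.cpuUsage', [(1380000000, 42.0)])
#   series = tree.fetch('carbon.agents.graphite-a.cpuUsage', 1380000000, 1380003600)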
class CeresNode(object):
"""A :class:`CeresNode` represents a single time-series metric of a given `timeStep`
(its seconds-per-point resolution) and containing arbitrary key-value metadata.
A :class:`CeresNode` is associated with its most precise `timeStep`. This `timeStep` is the finest
resolution that can be used for writing, though a :class:`CeresNode` can contain and read data with
other, less-precise `timeStep` values in its underlying :class:`CeresSlice` data.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param fsPath: The filesystem path of this metric
.. note:: This class generally should be instantiated through use of :class:`CeresTree`. See
:func:`CeresTree.createNode` and :func:`CeresTree.getNode`
.. seealso:: :func:`setDefaultSliceCachingBehavior` to adjust caching behavior
"""
__slots__ = ('tree', 'nodePath', 'fsPath',
'metadataFile', 'timeStep', 'aggregationMethod',
'sliceCache', 'sliceCachingBehavior')
def __init__(self, tree, nodePath, fsPath):
self.tree = tree
self.nodePath = nodePath
self.fsPath = fsPath
self.metadataFile = join(fsPath, '.ceres-node')
self.timeStep = None
self.aggregationMethod = 'average'
self.sliceCache = None
self.sliceCachingBehavior = DEFAULT_SLICE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresNode[0x%x]: %s>" % (id(self), self.nodePath)
__str__ = __repr__
@classmethod
def create(cls, tree, nodePath, **properties):
"""Create a new :class:`CeresNode` on disk with the specified properties.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param \*\*properties: A set of key-value properties to be associated with this node
A :class:`CeresNode` always has the `timeStep` property which is an integer value representing
the precision of the node in seconds-per-datapoint. E.g. a value of ``60`` represents one datapoint
per minute. If no `timeStep` is specified at creation, the value of ``ceres.DEFAULT_TIMESTEP`` is
used
:returns: :class:`CeresNode`
"""
# Create the node directory
fsPath = tree.getFilesystemPath(nodePath)
os.makedirs(fsPath, DIR_PERMS)
properties['timeStep'] = properties.get('timeStep', DEFAULT_TIMESTEP)
# Create the initial metadata
node = cls(tree, nodePath, fsPath)
node.writeMetadata(properties)
# Create the initial data file
# timeStep = properties['timeStep']
# now = int( time.time() )
# baseTime = now - (now % timeStep)
# slice = CeresSlice.create(node, baseTime, timeStep)
return node
@staticmethod
def isNodeDir(path):
"""Tests whether the given path is a :class:`CeresNode`
:param path: Path to test
:returns: `True` or `False`
"""
return isdir(path) and exists(join(path, '.ceres-node'))
@classmethod
def fromFilesystemPath(cls, fsPath):
"""Instantiate a :class:`CeresNode` from the on-disk path of an existing node
:param fsPath: The filesystem path of an existing node
:returns: :class:`CeresNode`
"""
dirPath = dirname(fsPath)
while True:
ceresDir = join(dirPath, '.ceres-tree')
if isdir(ceresDir):
tree = CeresTree(dirPath)
nodePath = tree.getNodePath(fsPath)
return cls(tree, nodePath, fsPath)
dirPath = dirname(dirPath)
if dirPath == '/':
raise ValueError("the path '%s' is not in a ceres tree" % fsPath)
@property
def slice_info(self):
"""A property providing a list of current information about each slice
:returns: ``[(startTime, endTime, timeStep), ...]``
"""
return [(slice.startTime, slice.endTime, slice.timeStep) for slice in self.slices]
def readMetadata(self):
"""Update node metadata from disk
:raises: :class:`CorruptNode`
"""
with open(self.metadataFile, 'r') as fh:
try:
metadata = json.load(fh)
self.timeStep = int(metadata['timeStep'])
if metadata.get('aggregationMethod'):
self.aggregationMethod = metadata['aggregationMethod']
return metadata
except (KeyError, IOError, ValueError) as e:
raise CorruptNode(self, "Unable to parse node metadata: %s" % e.args)
def writeMetadata(self, metadata):
"""Writes new metadata to disk
:param metadata: a JSON-serializable dict of node metadata
"""
self.timeStep = int(metadata['timeStep'])
with open(self.metadataFile, 'w') as fh:
json.dump(metadata, fh)
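# For illustration, the '.ceres-node' file written above is plain JSON; a
# node with 60-second resolution and sum aggregation would contain something
# like (values hypothetical):
#
#   {"timeStep": 60, "aggregationMethod": "sum"}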
@property
def slices(self):
"""A property providing access to information about this node's underlying slices. Because this
information is accessed in every read and write, a caching mechanism is provided. Cache behavior is
set using :func:`setSliceCachingBehavior` and defaults to the value set in
``DEFAULT_SLICE_CACHING_BEHAVIOR``
The following behaviors are available:
* `none` (default) - Slice information is read from the filesystem at every access
* `latest` - The latest slice is served from cache, all others from disk. Reads and writes of recent
data are most likely to be in the latest slice
* `all` - All slices are cached. The cache is only refreshed on new slice creation or deletion
:returns: An iterator yielding :class:`CeresSlice` objects
"""
if self.sliceCache:
if self.sliceCachingBehavior == 'all':
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
yield self.sliceCache
infos = self.readSlices()
for info in infos[1:]:
yield CeresSlice(self, *info)
else:
if self.sliceCachingBehavior == 'all':
self.sliceCache = [CeresSlice(self, *info) for info in self.readSlices()]
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
infos = self.readSlices()
if infos:
self.sliceCache = CeresSlice(self, *infos[0])
yield self.sliceCache
for info in infos[1:]:
yield CeresSlice(self, *info)
elif self.sliceCachingBehavior == 'none':
for info in self.readSlices():
yield CeresSlice(self, *info)
else:
raise ValueError("invalid caching behavior configured '%s'" % self.sliceCachingBehavior)
def readSlices(self):
"""Read slice information from disk
:returns: ``[(startTime, timeStep), ...]``
"""
if not exists(self.fsPath):
raise NodeDeleted()
slice_info = []
for filename in os.listdir(self.fsPath):
if filename.endswith('.slice'):
startTime, timeStep = filename[:-6].split('@')
slice_info.append((int(startTime), int(timeStep)))
slice_info.sort(reverse=True)
return slice_info
def setSliceCachingBehavior(self, behavior):
"""Set slice caching behavior.
:param behavior: See :func:`slices` for valid behavior values
"""
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.sliceCachingBehavior = behavior
self.sliceCache = None
def clearSliceCache(self):
"""Clear slice cache, forcing a refresh from disk at the next access"""
self.sliceCache = None
def hasDataForInterval(self, fromTime, untilTime):
"""Test whether this node has any data in the given time interval. All slices are inspected
which will trigger a read of slice information from disk if slice cache behavior is set to `latest`
or `none` (See :func:`slices`)
:param fromTime: Beginning of interval in unix epoch seconds
:param untilTime: End of interval in unix epoch seconds
:returns: `True` or `False`
"""
slices = list(self.slices)
if not slices:
return False
earliestData = slices[-1].startTime
latestData = slices[0].endTime
return ((fromTime is None) or (fromTime < latestData)) and \
((untilTime is None) or (untilTime > earliestData))
def read(self, fromTime, untilTime):
"""Read data from underlying slices and return as a single time-series
:param fromTime: Beginning of interval in unix epoch seconds
:param untilTime: End of interval in unix epoch seconds
:returns: :class:`TimeSeriesData`
"""
if self.timeStep is None:
self.readMetadata()
# Normalize the timestamps to fit proper intervals
fromTime = int(fromTime - (fromTime % self.timeStep))
untilTime = int(untilTime - (untilTime % self.timeStep))
sliceBoundary = None # to know when to split up queries across slices
resultValues = []
earliestData = None
timeStep = self.timeStep
method = self.aggregationMethod
for slice in self.slices:
# If there was a prior slice covering the requested interval, don't ask for that data again
if (sliceBoundary is not None) and untilTime > sliceBoundary:
requestUntilTime = sliceBoundary
else:
requestUntilTime = untilTime
# if the requested interval starts after the start of this slice
if fromTime >= slice.startTime:
try:
series = slice.read(fromTime, requestUntilTime)
except NoData:
break
if series.timeStep != timeStep:
if len(resultValues) == 0:
# First slice holding series data, this becomes the default timeStep.
timeStep = series.timeStep
elif series.timeStep < timeStep:
# Series is at a different precision, aggregate to fit our current set.
series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
else:
# Normalize current set to fit new series data.
resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
timeStep = series.timeStep
earliestData = series.startTime
rightMissing = (requestUntilTime - series.endTime) // timeStep
rightNulls = [None for i in range(rightMissing)]
resultValues = series.values + rightNulls + resultValues
break
# or if slice contains data for part of the requested interval
elif untilTime >= slice.startTime:
try:
series = slice.read(slice.startTime, requestUntilTime)
except NoData:
continue
if series.timeStep != timeStep:
if len(resultValues) == 0:
# First slice holding series data, this becomes the default timeStep.
timeStep = series.timeStep
elif series.timeStep < timeStep:
# Series is at a different precision, aggregate to fit our current set.
series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
else:
# Normalize current set to fit new series data.
resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
timeStep = series.timeStep
earliestData = series.startTime
rightMissing = (requestUntilTime - series.endTime) // timeStep
rightNulls = [None for i in range(rightMissing)]
resultValues = series.values + rightNulls + resultValues
# this is the right-side boundary on the next iteration
sliceBoundary = slice.startTime
# The end of the requested interval predates all slices
if earliestData is None:
missing = int(untilTime - fromTime) // timeStep
resultValues = [None for i in range(missing)]
# Left pad nulls if the start of the requested interval predates all slices
else:
leftMissing = (earliestData - fromTime) // timeStep
leftNulls = [None for i in range(leftMissing)]
resultValues = leftNulls + resultValues
return TimeSeriesData(fromTime, untilTime, timeStep, resultValues)
def write(self, datapoints):
"""Writes datapoints to underlying slices. Datapoints that round to the same timestamp for the
node's `timeStep` will be treated as duplicates and dropped.
:param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
"""
if self.timeStep is None:
self.readMetadata()
if not datapoints:
return
sequences = self.compact(datapoints)
needsEarlierSlice = [] # keep track of sequences that precede all existing slices
while sequences:
sequence = sequences.pop()
timestamps = [t for t, v in sequence]
beginningTime = timestamps[0]
endingTime = timestamps[-1]
sliceBoundary = None # used to prevent writing sequences across slice boundaries
slicesExist = False
for slice in self.slices:
if slice.timeStep != self.timeStep:
continue
slicesExist = True
# truncate sequence so it doesn't cross the slice boundaries
if beginningTime >= slice.startTime:
if sliceBoundary is None:
sequenceWithinSlice = sequence
else:
# index of highest timestamp that doesn't exceed sliceBoundary
boundaryIndex = bisect_left(timestamps, sliceBoundary)
sequenceWithinSlice = sequence[:boundaryIndex]
try:
slice.write(sequenceWithinSlice)
except SliceGapTooLarge:
newSlice = CeresSlice.create(self, beginningTime, slice.timeStep)
newSlice.write(sequenceWithinSlice)
self.sliceCache = None
except SliceDeleted:
self.sliceCache = None
self.write(datapoints) # recurse to retry
return
sequence = []
break
# sequence straddles the current slice, write the right side
# left side will be taken up in the next slice down
elif endingTime >= slice.startTime:
# index of lowest timestamp that doesn't precede slice.startTime
boundaryIndex = bisect_left(timestamps, slice.startTime)
sequenceWithinSlice = sequence[boundaryIndex:]
# write the leftovers on the next earlier slice
sequence = sequence[:boundaryIndex]
slice.write(sequenceWithinSlice)
if not sequence:
break
sliceBoundary = slice.startTime
else: # slice list exhausted with stuff still to write
needsEarlierSlice.append(sequence)
if not slicesExist:
sequences.append(sequence)
needsEarlierSlice = sequences
break
for sequence in needsEarlierSlice:
slice = CeresSlice.create(self, int(sequence[0][0]), self.timeStep)
slice.write(sequence)
self.clearSliceCache()
def compact(self, datapoints):
"""Compacts datapoints into a list of contiguous, sorted lists of points with duplicate
timestamps and null values removed
:param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
:returns: A list of lists of contiguous sorted datapoint tuples
``[[(timestamp, value), ...], ...]``
"""
datapoints = sorted(((int(timestamp), float(value))
for timestamp, value in datapoints if value is not None),
key=lambda datapoint: datapoint[0])
sequences = []
sequence = []
minimumTimestamp = 0 # used to avoid duplicate intervals
for timestamp, value in datapoints:
timestamp -= timestamp % self.timeStep # round it down to a proper interval
if not sequence:
sequence.append((timestamp, value))
else:
if timestamp == minimumTimestamp: # overwrite duplicate intervals with latest value
sequence[-1] = (timestamp, value)
continue
if timestamp == sequence[-1][0] + self.timeStep: # append contiguous datapoints
sequence.append((timestamp, value))
else: # start a new sequence if not contiguous
sequences.append(sequence)
sequence = [(timestamp, value)]
minimumTimestamp = timestamp
if sequence:
sequences.append(sequence)
return sequences
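# A worked example of compact(), assuming a node with timeStep=60: duplicate
# intervals keep the latest value and a non-contiguous point starts a new
# sequence, so
#
#   node.compact([(120, 1.0), (121, 2.0), (180, 3.0), (360, 4.0)])
#
# returns [[(120, 2.0), (180, 3.0)], [(360, 4.0)]].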
class | (object):
__slots__ = ('node', 'startTime', 'timeStep', 'fsPath')
def __init__(self, node, startTime, timeStep):
self.node = node
self.startTime = startTime
self.timeStep = timeStep
self.fsPath = join(node.fsPath, '%d@%d.slice' % (startTime, timeStep))
def __repr__(self):
return "<CeresSlice[0x%x]: %s>" % (id(self), self.fsPath)
__str__ = __repr__
@property
def isEmpty(self):
return getsize(self.fsPath) == 0
@property
def endTime(self):
return self.startTime + ((getsize(self.fsPath) // DATAPOINT_SIZE) * self.timeStep)
@property
def mtime(self):
return getmtime(self.fsPath)
@classmethod
def create(cls, node, startTime, timeStep):
slice = cls(node, startTime, timeStep)
fileHandle = open(slice.fsPath, 'wb')
fileHandle.close()
os.chmod(slice.fsPath, SLICE_PERMS)
return slice
def read(self, fromTime, untilTime):
timeOffset = int(fromTime) - self.startTime
if timeOffset < 0:
raise InvalidRequest("requested time range (%d, %d) precedes this slice: %d" % (
fromTime, untilTime, self.startTime))
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
if byteOffset >= getsize(self.fsPath):
raise NoData()
with open(self.fsPath, 'rb') as fileHandle:
fileHandle.seek(byteOffset)
timeRange = int(untilTime - fromTime)
pointRange = timeRange // self.timeStep
byteRange = pointRange * DATAPOINT_SIZE
packedValues = fileHandle.read(byteRange)
pointsReturned = len(packedValues) // DATAPOINT_SIZE
format = '!' + ('d' * pointsReturned)
values = struct.unpack(format, packedValues)
values = [v if not isnan(v) else None for v in values]
endTime = fromTime + (len(values) * self.timeStep)
# print '[DEBUG slice.read] startTime=%s fromTime=%s untilTime=%s' % (
# self.startTime, fromTime, untilTime)
# print '[DEBUG slice.read] timeInfo = (%s, %s, %s)' % (fromTime, endTime, self.timeStep)
# print '[DEBUG slice.read] values = %s' % str(values)
return TimeSeriesData(fromTime, endTime, self.timeStep, values)
def write(self, sequence):
beginningTime = sequence[0][0]
timeOffset = beginningTime - self.startTime
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
values = [v for t, v in sequence]
format = '!' + ('d' * len(values))
packedValues = struct.pack(format, *values)
try:
filesize = getsize(self.fsPath)
except OSError as e:
if e.errno == errno.ENOENT:
raise SliceDeleted()
else:
raise
byteGap = byteOffset - filesize
if byteGap > 0: # pad the allowable gap with NaNs
pointGap = byteGap // DATAPOINT_SIZE
if pointGap > MAX_SLICE_GAP:
raise SliceGapTooLarge()
else:
packedGap = PACKED_NAN * pointGap
packedValues = packedGap + packedValues
byteOffset -= byteGap
with open(self.fsPath, 'r+b') as fileHandle:
if LOCK_WRITES:
fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
try:
fileHandle.seek(byteOffset)
except IOError:
# print " IOError: fsPath=%s byteOffset=%d size=%d sequence=%s" % (
# self.fsPath, byteOffset, filesize, sequence)
raise
fileHandle.write(packedValues)
def deleteBefore(self, t):
if not exists(self.fsPath):
raise SliceDeleted()
if t % self.timeStep != 0:
t = t - (t % self.timeStep) + self.timeStep
timeOffset = t - self.startTime
if timeOffset < 0:
return
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
if not byteOffset:
return
self.node.clearSliceCache()
with open(self.fsPath, 'r+b') as fileHandle:
if LOCK_WRITES:
fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
fileHandle.seek(byteOffset)
fileData = fileHandle.read()
if fileData:
fileHandle.seek(0)
fileHandle.write(fileData)
fileHandle.truncate()
fileHandle.close()
newFsPath = join(dirname(self.fsPath), "%d@%d.slice" % (t, self.timeStep))
os.rename(self.fsPath, newFsPath)
else:
os.unlink(self.fsPath)
raise SliceDeleted()
def __lt__(self, other):
return self.startTime < other.startTime
class TimeSeriesData(object):
__slots__ = ('startTime', 'endTime', 'timeStep', 'values')
def __init__(self, startTime, endTime, timeStep, values):
self.startTime = startTime
self.endTime = endTime
self.timeStep = timeStep
self.values = values
@property
def timestamps(self):
return range(self.startTime, self.endTime, self.timeStep)
def __iter__(self):
return izip(self.timestamps, self.values)
def __len__(self):
return len(self.values)
def merge(self, other):
for timestamp, value in other:
if value is None:
continue
timestamp -= timestamp % self.timeStep
if timestamp < self.startTime:
continue
index = int((timestamp - self.startTime) // self.timeStep)
try:
if self.values[index] is None:
self.values[index] = value
except IndexError:
continue
class CorruptNode(Exception):
def __init__(self, node, problem):
Exception.__init__(self, problem)
self.node = node
self.problem = problem
class NoData(Exception):
pass
class NodeNotFound(Exception):
pass
class NodeDeleted(Exception):
pass
class InvalidRequest(Exception):
pass
class InvalidAggregationMethod(Exception):
pass
class SliceGapTooLarge(Exception):
"For internal use only"
class SliceDeleted(Exception):
pass
def aggregate(aggregationMethod, values):
# Filter out None values
knownValues = list(filter(lambda x: x is not None, values))
if not knownValues:
return None
# Aggregate based on method
if aggregationMethod == 'average':
return float(sum(knownValues)) / float(len(knownValues))
elif aggregationMethod == 'sum':
return float(sum(knownValues))
elif aggregationMethod == 'last':
return knownValues[-1]
elif aggregationMethod == 'max':
return max(knownValues)
elif aggregationMethod == 'min':
return min(knownValues)
else:
raise InvalidAggregationMethod("Unrecognized aggregation method %s" %
aggregationMethod)
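# For example, None values are filtered out before aggregating:
#
#   aggregate('average', [1.0, None, 3.0])  ->  2.0
#   aggregate('last', [1.0, None, 3.0])     ->  3.0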
def aggregateSeries(method, oldTimeStep, newTimeStep, values):
# Aggregate current values to fit newTimeStep.
# Makes the assumption that the caller has already guaranteed
# that newTimeStep is bigger than oldTimeStep.
factor = int(newTimeStep // oldTimeStep)
newValues = []
subArr = []
for val in values:
subArr.append(val)
if len(subArr) == factor:
newValues.append(aggregate(method, subArr))
subArr = []
if len(subArr):
newValues.append(aggregate(method, subArr))
return newValues
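# A worked example: downsampling from a 60s step to a 120s step with 'sum'
# groups values in pairs and also aggregates any trailing partial group:
#
#   aggregateSeries('sum', 60, 120, [1, 2, 3, 4, 5])  ->  [3.0, 7.0, 5.0]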
def getTree(path):
while path not in (os.sep, ''):
if isdir(join(path, '.ceres-tree')):
return CeresTree(path)
path = dirname(path)
def setDefaultNodeCachingBehavior(behavior):
global DEFAULT_NODE_CACHING_BEHAVIOR
behavior = behavior.lower()
if behavior not in ('none', 'all'):
raise ValueError("invalid caching behavior '%s'" % behavior)
DEFAULT_NODE_CACHING_BEHAVIOR = behavior
def setDefaultSliceCachingBehavior(behavior):
global DEFAULT_SLICE_CACHING_BEHAVIOR
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
DEFAULT_SLICE_CACHING_BEHAVIOR = behavior
| CeresSlice | identifier_name |
ceres.py | # Copyright 2011 Chris Davis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Ceres requires Python 2.7 or newer
import itertools
import os
import struct
import json
import errno
from math import isnan
from os.path import isdir, exists, join, dirname, abspath, getsize, getmtime
from glob import glob
from bisect import bisect_left
izip = getattr(itertools, 'izip', zip)
try:
import fcntl
CAN_LOCK = True
except ImportError:
CAN_LOCK = False
LOCK_WRITES = False
TIMESTAMP_FORMAT = "!L"
TIMESTAMP_SIZE = struct.calcsize(TIMESTAMP_FORMAT)
DATAPOINT_FORMAT = "!d"
DATAPOINT_SIZE = struct.calcsize(DATAPOINT_FORMAT)
NAN = float('nan')
PACKED_NAN = struct.pack(DATAPOINT_FORMAT, NAN)
MAX_SLICE_GAP = 80
DEFAULT_TIMESTEP = 60
DEFAULT_NODE_CACHING_BEHAVIOR = 'all'
DEFAULT_SLICE_CACHING_BEHAVIOR = 'none'
SLICE_AGGREGATION_METHODS = ['average', 'sum', 'last', 'max', 'min']
SLICE_PERMS = 0o644
DIR_PERMS = 0o755
class CeresTree(object):
"""Represents a tree of Ceres metrics contained within a single path on disk
This is the primary Ceres API.
:param root: The directory root of the Ceres tree
.. note:: Use :func:`createTree` to initialize and instantiate a new CeresTree
.. seealso:: :func:`setDefaultNodeCachingBehavior` to adjust caching behavior
"""
def __init__(self, root):
if isdir(root):
self.root = abspath(root)
else:
raise ValueError("Invalid root directory '%s'" % root)
self.nodeCache = {}
self.nodeCachingBehavior = DEFAULT_NODE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresTree[0x%x]: %s>" % (id(self), self.root)
__str__ = __repr__
@classmethod
def createTree(cls, root, **props):
"""Create and returns a new Ceres tree with the given properties
:param root: The root directory of the new Ceres tree
:param \*\*props: Arbitrary key-value properties to store as tree metadata
:returns: :class:`CeresTree`
"""
ceresDir = join(root, '.ceres-tree')
if not isdir(ceresDir):
os.makedirs(ceresDir, DIR_PERMS)
for prop, value in props.items():
propFile = join(ceresDir, prop)
with open(propFile, 'w') as fh:
fh.write(str(value))
return cls(root)
def walk(self, **kwargs):
"""Iterate through the nodes contained in this :class:`CeresTree`
:param \*\*kwargs: Options to pass to :func:`os.walk`
:returns: An iterator yielding :class:`CeresNode` objects
"""
for (fsPath, subdirs, filenames) in os.walk(self.root, **kwargs):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
yield CeresNode(self, nodePath, fsPath)
def getFilesystemPath(self, nodePath):
"""Get the on-disk path of a Ceres node given a metric name
:param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
:returns: The Ceres node path on disk"""
return join(self.root, nodePath.replace('.', os.sep))
def getNodePath(self, fsPath):
"""Get the metric name of a Ceres node given the on-disk path
:param fsPath: The filesystem path of a Ceres node
:returns: A metric name
:raises ValueError: When `fsPath` is not a path within the :class:`CeresTree`
"""
fsPath = abspath(fsPath)
if not fsPath.startswith(self.root):
raise ValueError("path '%s' not beneath tree root '%s'" % (fsPath, self.root))
nodePath = fsPath[len(self.root):].strip(os.sep).replace(os.sep, '.')
return nodePath
def hasNode(self, nodePath):
"""Returns whether the Ceres tree contains the given metric
:param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
:returns: `True` or `False`"""
return isdir(self.getFilesystemPath(nodePath))
def setNodeCachingBehavior(self, behavior):
"""Set node caching behavior.
:param behavior: See :func:`getNode` for valid behavior values
"""
behavior = behavior.lower()
if behavior not in ('none', 'all'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.nodeCachingBehavior = behavior
self.nodeCache = {}
def getNode(self, nodePath):
"""Returns a Ceres node given a metric name. Because nodes are looked up in
every read and write, a caching mechanism is provided. Cache behavior is set
using :func:`setNodeCachingBehavior` and defaults to the value set in
``DEFAULT_NODE_CACHING_BEHAVIOR``
The following behaviors are available:
* `none` - Node is read from the filesystem at every access.
* `all` (default) - All nodes are cached.
:param nodePath: A metric name
:returns: :class:`CeresNode` or `None`
"""
if self.nodeCachingBehavior == 'all':
if nodePath not in self.nodeCache:
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
self.nodeCache[nodePath] = CeresNode(self, nodePath, fsPath)
else:
return None
return self.nodeCache[nodePath]
elif self.nodeCachingBehavior == 'none':
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
return CeresNode(self, nodePath, fsPath)
else:
return None
else:
raise ValueError("invalid caching behavior configured '%s'" % self.nodeCachingBehavior)
def find(self, nodePattern, fromTime=None, untilTime=None):
"""Find nodes which match a wildcard pattern, optionally filtering on
a time range
:param nodePattern: A glob-style metric wildcard
:param fromTime: Optional interval start time in unix-epoch.
:param untilTime: Optional interval end time in unix-epoch.
:returns: An iterator yielding :class:`CeresNode` objects
"""
for fsPath in glob(self.getFilesystemPath(nodePattern)):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
node = self.getNode(nodePath)
if fromTime is None and untilTime is None:
yield node
elif node.hasDataForInterval(fromTime, untilTime):
yield node
def createNode(self, nodePath, **properties):
"""Creates a new metric given a new metric name and optional per-node metadata
:param nodePath: The new metric name.
:param \*\*properties: Arbitrary key-value properties to store as metric metadata.
:returns: :class:`CeresNode`
"""
return CeresNode.create(self, nodePath, **properties)
def store(self, nodePath, datapoints):
"""Store a list of datapoints associated with a metric
:param nodePath: The metric name to write to e.g. ``carbon.agents.graphite-a.cpuUsage``
:param datapoints: A list of datapoint tuples: ``[(timestamp, value), ...]``
"""
node = self.getNode(nodePath)
if node is None:
raise NodeNotFound("The node '%s' does not exist in this tree" % nodePath)
node.write(datapoints)
def fetch(self, nodePath, fromTime, untilTime):
"""Fetch data within a given interval from the given metric
:param nodePath: The metric name to fetch from
:param fromTime: Requested interval start time in unix-epoch.
:param untilTime: Requested interval end time in unix-epoch.
:returns: :class:`TimeSeriesData`
:raises: :class:`NodeNotFound`, :class:`InvalidRequest`
"""
node = self.getNode(nodePath)
if not node:
raise NodeNotFound("the node '%s' does not exist in this tree" % nodePath)
return node.read(fromTime, untilTime)
class CeresNode(object):
"""A :class:`CeresNode` represents a single time-series metric of a given `timeStep`
(its seconds-per-point resolution) and containing arbitrary key-value metadata.
A :class:`CeresNode` is associated with its most precise `timeStep`. This `timeStep` is the finest
resolution that can be used for writing, though a :class:`CeresNode` can contain and read data with
other, less-precise `timeStep` values in its underlying :class:`CeresSlice` data.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param fsPath: The filesystem path of this metric
.. note:: This class generally should be instantiated through use of :class:`CeresTree`. See
:func:`CeresTree.createNode` and :func:`CeresTree.getNode`
.. seealso:: :func:`setDefaultSliceCachingBehavior` to adjust caching behavior
"""
__slots__ = ('tree', 'nodePath', 'fsPath',
'metadataFile', 'timeStep', 'aggregationMethod',
'sliceCache', 'sliceCachingBehavior')
def __init__(self, tree, nodePath, fsPath):
self.tree = tree
self.nodePath = nodePath
self.fsPath = fsPath
self.metadataFile = join(fsPath, '.ceres-node')
self.timeStep = None
self.aggregationMethod = 'average'
self.sliceCache = None
self.sliceCachingBehavior = DEFAULT_SLICE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresNode[0x%x]: %s>" % (id(self), self.nodePath)
__str__ = __repr__
@classmethod
def create(cls, tree, nodePath, **properties):
"""Create a new :class:`CeresNode` on disk with the specified properties.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param \*\*properties: A set of key-value properties to be associated with this node
A :class:`CeresNode` always has the `timeStep` property which is an integer value representing
the precision of the node in seconds-per-datapoint. E.g. a value of ``60`` represents one datapoint
per minute. If no `timeStep` is specified at creation, the value of ``ceres.DEFAULT_TIMESTEP`` is
used
:returns: :class:`CeresNode`
"""
# Create the node directory
fsPath = tree.getFilesystemPath(nodePath)
os.makedirs(fsPath, DIR_PERMS)
properties['timeStep'] = properties.get('timeStep', DEFAULT_TIMESTEP)
# Create the initial metadata
node = cls(tree, nodePath, fsPath)
node.writeMetadata(properties)
# Create the initial data file
# timeStep = properties['timeStep']
# now = int( time.time() )
# baseTime = now - (now % timeStep)
# slice = CeresSlice.create(node, baseTime, timeStep)
return node
@staticmethod
def isNodeDir(path):
"""Tests whether the given path is a :class:`CeresNode`
:param path: Path to test
:returns: `True` or `False`
"""
return isdir(path) and exists(join(path, '.ceres-node'))
@classmethod
def fromFilesystemPath(cls, fsPath):
"""Instantiate a :class:`CeresNode` from the on-disk path of an existing node
:param fsPath: The filesystem path of an existing node
:returns: :class:`CeresNode`
"""
dirPath = dirname(fsPath)
while True:
ceresDir = join(dirPath, '.ceres-tree')
if isdir(ceresDir):
tree = CeresTree(dirPath)
nodePath = tree.getNodePath(fsPath)
return cls(tree, nodePath, fsPath)
dirPath = dirname(dirPath)
if dirPath == '/':
raise ValueError("the path '%s' is not in a ceres tree" % fsPath)
@property
def slice_info(self):
"""A property providing a list of current information about each slice
:returns: ``[(startTime, endTime, timeStep), ...]``
"""
return [(slice.startTime, slice.endTime, slice.timeStep) for slice in self.slices]
def readMetadata(self):
"""Update node metadata from disk
:raises: :class:`CorruptNode`
"""
with open(self.metadataFile, 'r') as fh:
try:
metadata = json.load(fh)
self.timeStep = int(metadata['timeStep'])
if metadata.get('aggregationMethod'):
self.aggregationMethod = metadata['aggregationMethod']
return metadata
except (KeyError, IOError, ValueError) as e:
raise CorruptNode(self, "Unable to parse node metadata: %s" % e.args)
def writeMetadata(self, metadata):
"""Writes new metadata to disk
:param metadata: a JSON-serializable dict of node metadata
"""
self.timeStep = int(metadata['timeStep'])
with open(self.metadataFile, 'w') as fh:
json.dump(metadata, fh)
@property
def slices(self):
"""A property providing access to information about this node's underlying slices. Because this
information is accessed in every read and write, a caching mechanism is provided. Cache behavior is
set using :func:`setSliceCachingBehavior` and defaults to the value set in
``DEFAULT_SLICE_CACHING_BEHAVIOR``
The following behaviors are available:
* `none` (default) - Slice information is read from the filesystem at every access
* `latest` - The latest slice is served from cache, all others from disk. Reads and writes of recent
data are most likely to be in the latest slice
* `all` - All slices are cached. The cache is only refreshed on new slice creation or deletion
:returns: An iterator yielding :class:`CeresSlice` objects
"""
if self.sliceCache:
if self.sliceCachingBehavior == 'all':
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
yield self.sliceCache
infos = self.readSlices()
for info in infos[1:]:
yield CeresSlice(self, *info)
else:
if self.sliceCachingBehavior == 'all':
self.sliceCache = [CeresSlice(self, *info) for info in self.readSlices()]
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
infos = self.readSlices()
if infos:
self.sliceCache = CeresSlice(self, *infos[0])
yield self.sliceCache
for info in infos[1:]:
yield CeresSlice(self, *info)
elif self.sliceCachingBehavior == 'none':
for info in self.readSlices():
yield CeresSlice(self, *info)
else:
raise ValueError("invalid caching behavior configured '%s'" % self.sliceCachingBehavior)
def readSlices(self):
"""Read slice information from disk
:returns: ``[(startTime, timeStep), ...]``
"""
if not exists(self.fsPath):
raise NodeDeleted()
slice_info = []
for filename in os.listdir(self.fsPath):
if filename.endswith('.slice'):
startTime, timeStep = filename[:-6].split('@')
slice_info.append((int(startTime), int(timeStep)))
slice_info.sort(reverse=True)
return slice_info
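# Slice files are named '<startTime>@<timeStep>.slice', so a directory entry
# such as '1380000000@60.slice' parses to the tuple (1380000000, 60) here.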
def setSliceCachingBehavior(self, behavior):
"""Set slice caching behavior.
:param behavior: See :func:`slices` for valid behavior values
"""
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.sliceCachingBehavior = behavior
self.sliceCache = None
def clearSliceCache(self):
"""Clear slice cache, forcing a refresh from disk at the next access"""
self.sliceCache = None
def hasDataForInterval(self, fromTime, untilTime):
"""Test whether this node has any data in the given time interval. All slices are inspected
which will trigger a read of slice information from disk if slice cache behavior is set to `latest`
or `none` (See :func:`slices`)
:param fromTime: Beginning of interval in unix epoch seconds
:param untilTime: End of interval in unix epoch seconds
:returns: `True` or `False`
"""
slices = list(self.slices)
if not slices:
return False
earliestData = slices[-1].startTime
latestData = slices[0].endTime
return ((fromTime is None) or (fromTime < latestData)) and \
((untilTime is None) or (untilTime > earliestData))
def read(self, fromTime, untilTime):
"""Read data from underlying slices and return as a single time-series
:param fromTime: Beginning of interval in unix epoch seconds
:param untilTime: End of interval in unix epoch seconds
:returns: :class:`TimeSeriesData`
"""
if self.timeStep is None:
self.readMetadata()
# Normalize the timestamps to fit proper intervals
fromTime = int(fromTime - (fromTime % self.timeStep))
untilTime = int(untilTime - (untilTime % self.timeStep))
sliceBoundary = None # to know when to split up queries across slices
resultValues = []
earliestData = None
timeStep = self.timeStep
method = self.aggregationMethod
for slice in self.slices:
# If there was a prior slice covering the requested interval, don't ask for that data again
if (sliceBoundary is not None) and untilTime > sliceBoundary:
requestUntilTime = sliceBoundary
else:
requestUntilTime = untilTime
# if the requested interval starts after the start of this slice
if fromTime >= slice.startTime:
try:
series = slice.read(fromTime, requestUntilTime)
except NoData:
break
if series.timeStep != timeStep:
if len(resultValues) == 0:
# First slice holding series data, this becomes the default timeStep.
timeStep = series.timeStep
elif series.timeStep < timeStep:
# Series is at a different precision, aggregate to fit our current set.
series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
else:
# Normalize current set to fit new series data.
resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
timeStep = series.timeStep
earliestData = series.startTime
rightMissing = (requestUntilTime - series.endTime) // timeStep
rightNulls = [None for i in range(rightMissing)]
resultValues = series.values + rightNulls + resultValues
break
# or if slice contains data for part of the requested interval
elif untilTime >= slice.startTime:
try:
series = slice.read(slice.startTime, requestUntilTime)
except NoData:
continue
if series.timeStep != timeStep:
if len(resultValues) == 0:
# First slice holding series data, this becomes the default timeStep.
timeStep = series.timeStep
elif series.timeStep < timeStep:
# Series is at a different precision, aggregate to fit our current set.
series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
else:
# Normalize current set to fit new series data.
resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
timeStep = series.timeStep
earliestData = series.startTime
rightMissing = (requestUntilTime - series.endTime) // timeStep
rightNulls = [None for i in range(rightMissing)]
resultValues = series.values + rightNulls + resultValues
# this is the right-side boundary on the next iteration
sliceBoundary = slice.startTime
# The end of the requested interval predates all slices
if earliestData is None:
missing = int(untilTime - fromTime) // timeStep
resultValues = [None for i in range(missing)]
# Left pad nulls if the start of the requested interval predates all slices
else:
leftMissing = (earliestData - fromTime) // timeStep
leftNulls = [None for i in range(leftMissing)]
resultValues = leftNulls + resultValues
return TimeSeriesData(fromTime, untilTime, timeStep, resultValues)
def write(self, datapoints):
"""Writes datapoints to underlying slices. Datapoints that round to the same timestamp for the
node's `timeStep` will be treated as duplicates and dropped.
:param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
"""
if self.timeStep is None:
self.readMetadata()
if not datapoints:
return
sequences = self.compact(datapoints)
needsEarlierSlice = [] # keep track of sequences that precede all existing slices
while sequences:
sequence = sequences.pop()
timestamps = [t for t, v in sequence]
beginningTime = timestamps[0]
endingTime = timestamps[-1]
sliceBoundary = None # used to prevent writing sequences across slice boundaries
slicesExist = False
for slice in self.slices:
if slice.timeStep != self.timeStep:
continue
slicesExist = True
# truncate sequence so it doesn't cross the slice boundaries
if beginningTime >= slice.startTime:
if sliceBoundary is None:
sequenceWithinSlice = sequence
else:
# index of highest timestamp that doesn't exceed sliceBoundary
boundaryIndex = bisect_left(timestamps, sliceBoundary)
sequenceWithinSlice = sequence[:boundaryIndex]
try:
slice.write(sequenceWithinSlice)
except SliceGapTooLarge:
newSlice = CeresSlice.create(self, beginningTime, slice.timeStep)
newSlice.write(sequenceWithinSlice)
self.sliceCache = None
except SliceDeleted:
self.sliceCache = None
self.write(datapoints) # recurse to retry
return
sequence = []
break
# sequence straddles the current slice, write the right side
# left side will be taken up in the next slice down
elif endingTime >= slice.startTime:
# index of lowest timestamp that doesn't precede slice.startTime
boundaryIndex = bisect_left(timestamps, slice.startTime)
sequenceWithinSlice = sequence[boundaryIndex:]
# write the leftovers on the next earlier slice
sequence = sequence[:boundaryIndex]
slice.write(sequenceWithinSlice)
if not sequence:
break
sliceBoundary = slice.startTime
else: # slice list exhausted with stuff still to write
needsEarlierSlice.append(sequence)
if not slicesExist:
sequences.append(sequence)
needsEarlierSlice = sequences
break
for sequence in needsEarlierSlice:
slice = CeresSlice.create(self, int(sequence[0][0]), self.timeStep)
slice.write(sequence)
self.clearSliceCache()
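# A small sketch of write(), assuming a node with timeStep=60 already exists;
# out-of-order and duplicate points are normalized by compact() below:
#
#   node.write([(1380000060, 1.0), (1380000000, 0.5), (1380000060, 2.0)])
#
# stores 0.5 at 1380000000 and the later value 2.0 at 1380000060.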
def compact(self, datapoints):
"""Compacts datapoints into a list of contiguous, sorted lists of points with duplicate
timestamps and null values removed
:param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
:returns: A list of lists of contiguous sorted datapoint tuples
``[[(timestamp, value), ...], ...]``
"""
datapoints = sorted(((int(timestamp), float(value))
for timestamp, value in datapoints if value is not None),
key=lambda datapoint: datapoint[0])
sequences = []
sequence = []
minimumTimestamp = 0 # used to avoid duplicate intervals
for timestamp, value in datapoints:
timestamp -= timestamp % self.timeStep # round it down to a proper interval
if not sequence:
sequence.append((timestamp, value))
else:
if timestamp == minimumTimestamp: # overwrite duplicate intervals with latest value
sequence[-1] = (timestamp, value)
continue
if timestamp == sequence[-1][0] + self.timeStep: # append contiguous datapoints
sequence.append((timestamp, value))
else: # start a new sequence if not contiguous
sequences.append(sequence)
sequence = [(timestamp, value)]
minimumTimestamp = timestamp
if sequence:
sequences.append(sequence)
return sequences
class CeresSlice(object):
__slots__ = ('node', 'startTime', 'timeStep', 'fsPath')
def __init__(self, node, startTime, timeStep):
self.node = node
self.startTime = startTime
self.timeStep = timeStep
self.fsPath = join(node.fsPath, '%d@%d.slice' % (startTime, timeStep))
def __repr__(self):
return "<CeresSlice[0x%x]: %s>" % (id(self), self.fsPath)
__str__ = __repr__
@property
def isEmpty(self):
return getsize(self.fsPath) == 0
@property
def endTime(self):
return self.startTime + ((getsize(self.fsPath) // DATAPOINT_SIZE) * self.timeStep)
@property
def mtime(self):
return getmtime(self.fsPath)
@classmethod
def create(cls, node, startTime, timeStep):
slice = cls(node, startTime, timeStep)
fileHandle = open(slice.fsPath, 'wb')
fileHandle.close()
os.chmod(slice.fsPath, SLICE_PERMS)
return slice
def read(self, fromTime, untilTime):
timeOffset = int(fromTime) - self.startTime
if timeOffset < 0:
raise InvalidRequest("requested time range (%d, %d) precedes this slice: %d" % (
fromTime, untilTime, self.startTime))
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
if byteOffset >= getsize(self.fsPath):
raise NoData()
with open(self.fsPath, 'rb') as fileHandle:
fileHandle.seek(byteOffset)
timeRange = int(untilTime - fromTime)
pointRange = timeRange // self.timeStep
byteRange = pointRange * DATAPOINT_SIZE
packedValues = fileHandle.read(byteRange)
pointsReturned = len(packedValues) // DATAPOINT_SIZE
format = '!' + ('d' * pointsReturned)
values = struct.unpack(format, packedValues)
values = [v if not isnan(v) else None for v in values]
endTime = fromTime + (len(values) * self.timeStep)
# print '[DEBUG slice.read] startTime=%s fromTime=%s untilTime=%s' % (
# self.startTime, fromTime, untilTime)
# print '[DEBUG slice.read] timeInfo = (%s, %s, %s)' % (fromTime, endTime, self.timeStep)
# print '[DEBUG slice.read] values = %s' % str(values)
return TimeSeriesData(fromTime, endTime, self.timeStep, values)
def write(self, sequence):
beginningTime = sequence[0][0]
timeOffset = beginningTime - self.startTime
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
values = [v for t, v in sequence]
format = '!' + ('d' * len(values))
packedValues = struct.pack(format, *values)
try:
filesize = getsize(self.fsPath)
except OSError as e:
if e.errno == errno.ENOENT:
raise SliceDeleted()
else:
raise
byteGap = byteOffset - filesize
if byteGap > 0: # pad the allowable gap with NaNs
pointGap = byteGap // DATAPOINT_SIZE
if pointGap > MAX_SLICE_GAP:
raise SliceGapTooLarge()
else:
packedGap = PACKED_NAN * pointGap
packedValues = packedGap + packedValues
byteOffset -= byteGap
with open(self.fsPath, 'r+b') as fileHandle:
if LOCK_WRITES:
fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
try:
fileHandle.seek(byteOffset)
except IOError:
# print " IOError: fsPath=%s byteOffset=%d size=%d sequence=%s" % (
# self.fsPath, byteOffset, filesize, sequence)
raise
fileHandle.write(packedValues)
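# Note on the gap handling above: a write that starts past the current end of
# the slice is padded with packed NaNs, but only up to MAX_SLICE_GAP missing
# points; a larger gap raises SliceGapTooLarge so CeresNode.write can start a
# fresh slice instead.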
def deleteBefore(self, t):
if not exists(self.fsPath):
raise SliceDeleted()
if t % self.timeStep != 0:
t = t - (t % self.timeStep) + self.timeStep
timeOffset = t - self.startTime
if timeOffset < 0:
return
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
if not byteOffset:
return
self.node.clearSliceCache()
with open(self.fsPath, 'r+b') as fileHandle:
if LOCK_WRITES:
fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
fileHandle.seek(byteOffset)
fileData = fileHandle.read()
if fileData:
fileHandle.seek(0)
fileHandle.write(fileData)
fileHandle.truncate()
fileHandle.close()
newFsPath = join(dirname(self.fsPath), "%d@%d.slice" % (t, self.timeStep))
os.rename(self.fsPath, newFsPath)
else:
os.unlink(self.fsPath)
raise SliceDeleted()
def __lt__(self, other):
return self.startTime < other.startTime
class TimeSeriesData(object):
__slots__ = ('startTime', 'endTime', 'timeStep', 'values')
def __init__(self, startTime, endTime, timeStep, values):
self.startTime = startTime
self.endTime = endTime
self.timeStep = timeStep
self.values = values
@property
def timestamps(self):
return range(self.startTime, self.endTime, self.timeStep)
def __iter__(self):
return izip(self.timestamps, self.values)
def __len__(self):
return len(self.values)
def merge(self, other):
for timestamp, value in other:
if value is None:
continue
timestamp -= timestamp % self.timeStep
if timestamp < self.startTime:
continue
index = int((timestamp - self.startTime) // self.timeStep)
try:
if self.values[index] is None:
self.values[index] = value
except IndexError:
continue
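# A worked example of merge(): only gaps (None values) in self are filled and
# existing values are never overwritten. Assuming both series start at t=0
# with a 60s step:
#
#   a = TimeSeriesData(0, 180, 60, [1.0, None, 3.0])
#   b = TimeSeriesData(0, 180, 60, [9.0, 2.0, 9.0])
#   a.merge(b)  # a.values is now [1.0, 2.0, 3.0]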
class CorruptNode(Exception):
def __init__(self, node, problem):
Exception.__init__(self, problem)
self.node = node
self.problem = problem
class NoData(Exception):
pass
class NodeNotFound(Exception):
|
class NodeDeleted(Exception):
pass
class InvalidRequest(Exception):
pass
class InvalidAggregationMethod(Exception):
pass
class SliceGapTooLarge(Exception):
"For internal use only"
class SliceDeleted(Exception):
pass
def aggregate(aggregationMethod, values):
# Filter out None values
knownValues = list(filter(lambda x: x is not None, values))
if not knownValues:
return None
# Aggregate based on method
if aggregationMethod == 'average':
return float(sum(knownValues)) / float(len(knownValues))
elif aggregationMethod == 'sum':
return float(sum(knownValues))
elif aggregationMethod == 'last':
return knownValues[-1]
elif aggregationMethod == 'max':
return max(knownValues)
elif aggregationMethod == 'min':
return min(knownValues)
else:
raise InvalidAggregationMethod("Unrecognized aggregation method %s" %
aggregationMethod)
def aggregateSeries(method, oldTimeStep, newTimeStep, values):
# Aggregate current values to fit newTimeStep.
# Makes the assumption that the caller has already guaranteed
# that newTimeStep is bigger than oldTimeStep.
factor = int(newTimeStep // oldTimeStep)
newValues = []
subArr = []
for val in values:
subArr.append(val)
if len(subArr) == factor:
newValues.append(aggregate(method, subArr))
subArr = []
if len(subArr):
newValues.append(aggregate(method, subArr))
return newValues
def getTree(path):
while path not in (os.sep, ''):
if isdir(join(path, '.ceres-tree')):
return CeresTree(path)
path = dirname(path)
def setDefaultNodeCachingBehavior(behavior):
global DEFAULT_NODE_CACHING_BEHAVIOR
behavior = behavior.lower()
if behavior not in ('none', 'all'):
raise ValueError("invalid caching behavior '%s'" % behavior)
DEFAULT_NODE_CACHING_BEHAVIOR = behavior
def setDefaultSliceCachingBehavior(behavior):
global DEFAULT_SLICE_CACHING_BEHAVIOR
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
DEFAULT_SLICE_CACHING_BEHAVIOR = behavior
| pass | identifier_body |
ceres.py | # Copyright 2011 Chris Davis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Ceres requires Python 2.7 or newer
import itertools
import os
import struct
import json
import errno
from math import isnan
from os.path import isdir, exists, join, dirname, abspath, getsize, getmtime
from glob import glob
from bisect import bisect_left
izip = getattr(itertools, 'izip', zip)
try:
import fcntl
CAN_LOCK = True
except ImportError:
CAN_LOCK = False
LOCK_WRITES = False
TIMESTAMP_FORMAT = "!L"
TIMESTAMP_SIZE = struct.calcsize(TIMESTAMP_FORMAT)
DATAPOINT_FORMAT = "!d"
DATAPOINT_SIZE = struct.calcsize(DATAPOINT_FORMAT)
NAN = float('nan')
PACKED_NAN = struct.pack(DATAPOINT_FORMAT, NAN)
MAX_SLICE_GAP = 80
DEFAULT_TIMESTEP = 60
DEFAULT_NODE_CACHING_BEHAVIOR = 'all'
DEFAULT_SLICE_CACHING_BEHAVIOR = 'none'
SLICE_AGGREGATION_METHODS = ['average', 'sum', 'last', 'max', 'min']
SLICE_PERMS = 0o644
DIR_PERMS = 0o755
class CeresTree(object):
"""Represents a tree of Ceres metrics contained within a single path on disk
This is the primary Ceres API.
:param root: The directory root of the Ceres tree
.. note:: Use :func:`createTree` to initialize and instantiate a new CeresTree
.. seealso:: :func:`setDefaultNodeCachingBehavior` to adjust caching behavior
"""
def __init__(self, root):
if isdir(root):
self.root = abspath(root)
else:
raise ValueError("Invalid root directory '%s'" % root)
self.nodeCache = {}
self.nodeCachingBehavior = DEFAULT_NODE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresTree[0x%x]: %s>" % (id(self), self.root)
__str__ = __repr__
@classmethod
def createTree(cls, root, **props):
"""Create and returns a new Ceres tree with the given properties
:param root: The root directory of the new Ceres tree
:param \*\*props: Arbitrary key-value properties to store as tree metadata
:returns: :class:`CeresTree`
"""
ceresDir = join(root, '.ceres-tree')
if not isdir(ceresDir):
os.makedirs(ceresDir, DIR_PERMS)
for prop, value in props.items():
propFile = join(ceresDir, prop)
with open(propFile, 'w') as fh:
fh.write(str(value))
return cls(root)
def walk(self, **kwargs):
"""Iterate through the nodes contained in this :class:`CeresTree`
:param \*\*kwargs: Options to pass to :func:`os.walk`
:returns: An iterator yielding :class:`CeresNode` objects
"""
for (fsPath, subdirs, filenames) in os.walk(self.root, **kwargs):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
yield CeresNode(self, nodePath, fsPath)
def getFilesystemPath(self, nodePath):
"""Get the on-disk path of a Ceres node given a metric name
:param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
:returns: The Ceres node path on disk"""
return join(self.root, nodePath.replace('.', os.sep))
def getNodePath(self, fsPath):
"""Get the metric name of a Ceres node given the on-disk path
:param fsPath: The filesystem path of a Ceres node
:returns: A metric name
:raises ValueError: When `fsPath` is not a path within the :class:`CeresTree`
"""
fsPath = abspath(fsPath)
if not fsPath.startswith(self.root):
raise ValueError("path '%s' not beneath tree root '%s'" % (fsPath, self.root))
nodePath = fsPath[len(self.root):].strip(os.sep).replace(os.sep, '.')
return nodePath
def hasNode(self, nodePath):
"""Returns whether the Ceres tree contains the given metric
:param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
:returns: `True` or `False`"""
return isdir(self.getFilesystemPath(nodePath))
def setNodeCachingBehavior(self, behavior):
"""Set node caching behavior.
:param behavior: See :func:`getNode` for valid behavior values
"""
behavior = behavior.lower()
if behavior not in ('none', 'all'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.nodeCachingBehavior = behavior
self.nodeCache = {}
def getNode(self, nodePath):
"""Returns a Ceres node given a metric name. Because nodes are looked up in
every read and write, a caching mechanism is provided. Cache behavior is set
using :func:`setNodeCachingBehavior` and defaults to the value set in
``DEFAULT_NODE_CACHING_BEHAVIOR``
The following behaviors are available:
* `none` - Node is read from the filesystem at every access.
* `all` (default) - All nodes are cached.
:param nodePath: A metric name
:returns: :class:`CeresNode` or `None`
"""
if self.nodeCachingBehavior == 'all':
if nodePath not in self.nodeCache:
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
self.nodeCache[nodePath] = CeresNode(self, nodePath, fsPath)
else:
return None
return self.nodeCache[nodePath]
elif self.nodeCachingBehavior == 'none':
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
return CeresNode(self, nodePath, fsPath)
else:
return None
else:
|
def find(self, nodePattern, fromTime=None, untilTime=None):
"""Find nodes which match a wildcard pattern, optionally filtering on
a time range
:param nodePattern: A glob-style metric wildcard
:param fromTime: Optional interval start time in unix-epoch.
:param untilTime: Optional interval end time in unix-epoch.
:returns: An iterator yielding :class:`CeresNode` objects
"""
for fsPath in glob(self.getFilesystemPath(nodePattern)):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
node = self.getNode(nodePath)
if fromTime is None and untilTime is None:
yield node
elif node.hasDataForInterval(fromTime, untilTime):
yield node
def createNode(self, nodePath, **properties):
"""Creates a new metric given a new metric name and optional per-node metadata
:param nodePath: The new metric name.
:param \*\*properties: Arbitrary key-value properties to store as metric metadata.
:returns: :class:`CeresNode`
"""
return CeresNode.create(self, nodePath, **properties)
def store(self, nodePath, datapoints):
"""Store a list of datapoints associated with a metric
:param nodePath: The metric name to write to e.g. ``carbon.agents.graphite-a.cpuUsage``
:param datapoints: A list of datapoint tuples: ``[(timestamp, value), ...]``
"""
node = self.getNode(nodePath)
if node is None:
raise NodeNotFound("The node '%s' does not exist in this tree" % nodePath)
node.write(datapoints)
def fetch(self, nodePath, fromTime, untilTime):
"""Fetch data within a given interval from the given metric
:param nodePath: The metric name to fetch from
:param fromTime: Requested interval start time in unix-epoch.
:param untilTime: Requested interval end time in unix-epoch.
:returns: :class:`TimeSeriesData`
:raises: :class:`NodeNotFound`, :class:`InvalidRequest`
"""
node = self.getNode(nodePath)
if not node:
raise NodeNotFound("the node '%s' does not exist in this tree" % nodePath)
return node.read(fromTime, untilTime)
class CeresNode(object):
"""A :class:`CeresNode` represents a single time-series metric of a given `timeStep`
(its seconds-per-point resolution) and containing arbitrary key-value metadata.
A :class:`CeresNode` is associated with its most precise `timeStep`. This `timeStep` is the finest
resolution that can be used for writing, though a :class:`CeresNode` can contain and read data with
other, less-precise `timeStep` values in its underlying :class:`CeresSlice` data.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param fsPath: The filesystem path of this metric
.. note:: This class generally should be instantiated through use of :class:`CeresTree`. See
:func:`CeresTree.createNode` and :func:`CeresTree.getNode`
.. seealso:: :func:`setDefaultSliceCachingBehavior` to adjust caching behavior
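Example (illustrative; assumes the metric already exists in ``tree``)::

    node = tree.getNode('example.metric')
    node.write([(1400000000, 1.0), (1400000060, 2.0)])
    series = node.read(1400000000, 1400000120)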
"""
__slots__ = ('tree', 'nodePath', 'fsPath',
'metadataFile', 'timeStep', 'aggregationMethod',
'sliceCache', 'sliceCachingBehavior')
def __init__(self, tree, nodePath, fsPath):
self.tree = tree
self.nodePath = nodePath
self.fsPath = fsPath
self.metadataFile = join(fsPath, '.ceres-node')
self.timeStep = None
self.aggregationMethod = 'average'
self.sliceCache = None
self.sliceCachingBehavior = DEFAULT_SLICE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresNode[0x%x]: %s>" % (id(self), self.nodePath)
__str__ = __repr__
@classmethod
def create(cls, tree, nodePath, **properties):
"""Create a new :class:`CeresNode` on disk with the specified properties.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param \*\*properties: A set of key-value properties to be associated with this node
A :class:`CeresNode` always has the `timeStep` property which is an integer value representing
the precision of the node in seconds-per-datapoint. E.g. a value of ``60`` represents one datapoint
per minute. If no `timeStep` is specified at creation, the value of ``ceres.DEFAULT_TIMESTEP`` is
used
:returns: :class:`CeresNode`
"""
# Create the node directory
fsPath = tree.getFilesystemPath(nodePath)
os.makedirs(fsPath, DIR_PERMS)
properties['timeStep'] = properties.get('timeStep', DEFAULT_TIMESTEP)
# Create the initial metadata
node = cls(tree, nodePath, fsPath)
node.writeMetadata(properties)
# Create the initial data file
# timeStep = properties['timeStep']
# now = int( time.time() )
# baseTime = now - (now % timeStep)
# slice = CeresSlice.create(node, baseTime, timeStep)
return node
@staticmethod
def isNodeDir(path):
"""Tests whether the given path is a :class:`CeresNode`
:param path: Path to test
:returns: `True` or `False`
"""
return isdir(path) and exists(join(path, '.ceres-node'))
@classmethod
def fromFilesystemPath(cls, fsPath):
"""Instantiate a :class:`CeresNode` from the on-disk path of an existing node
:param fsPath: The filesystem path of an existing node
:returns: :class:`CeresNode`
"""
dirPath = dirname(fsPath)
while True:
ceresDir = join(dirPath, '.ceres-tree')
if isdir(ceresDir):
tree = CeresTree(dirPath)
nodePath = tree.getNodePath(fsPath)
return cls(tree, nodePath, fsPath)
dirPath = dirname(dirPath)
if dirPath == '/':
raise ValueError("the path '%s' is not in a ceres tree" % fsPath)
@property
def slice_info(self):
"""A property providing a list of current information about each slice
:returns: ``[(startTime, endTime, timeStep), ...]``
"""
return [(slice.startTime, slice.endTime, slice.timeStep) for slice in self.slices]
def readMetadata(self):
"""Update node metadata from disk
:raises: :class:`CorruptNode`
"""
with open(self.metadataFile, 'r') as fh:
try:
metadata = json.load(fh)
self.timeStep = int(metadata['timeStep'])
if metadata.get('aggregationMethod'):
self.aggregationMethod = metadata['aggregationMethod']
return metadata
except (KeyError, IOError, ValueError) as e:
raise CorruptNode(self, "Unable to parse node metadata: %s" % e.args)
def writeMetadata(self, metadata):
"""Writes new metadata to disk
:param metadata: a JSON-serializable dict of node metadata
"""
self.timeStep = int(metadata['timeStep'])
with open(self.metadataFile, 'w') as fh:
json.dump(metadata, fh)
@property
def slices(self):
"""A property providing access to information about this node's underlying slices. Because this
information is accessed in every read and write, a caching mechanism is provided. Cache behavior is
set using :func:`setSliceCachingBehavior` and defaults to the value set in
``DEFAULT_SLICE_CACHING_BEHAVIOR``
The following behaviors are available:
* `none` (default) - Slice information is read from the filesystem at every access
* `latest` - The latest slice is served from cache, all others from disk. Reads and writes of recent
data are most likely to be in the latest slice
* `all` - All slices are cached. The cache is only refreshed on new slice creation or deletion
:returns: An iterator yielding :class:`CeresSlice` objects
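Example (sketch)::

    node.setSliceCachingBehavior('latest')
    for slice in node.slices:
        print(slice.startTime, slice.timeStep)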
"""
if self.sliceCache:
if self.sliceCachingBehavior == 'all':
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
yield self.sliceCache
infos = self.readSlices()
for info in infos[1:]:
yield CeresSlice(self, *info)
else:
if self.sliceCachingBehavior == 'all':
self.sliceCache = [CeresSlice(self, *info) for info in self.readSlices()]
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
infos = self.readSlices()
if infos:
self.sliceCache = CeresSlice(self, *infos[0])
yield self.sliceCache
for info in infos[1:]:
yield CeresSlice(self, *info)
elif self.sliceCachingBehavior == 'none':
for info in self.readSlices():
yield CeresSlice(self, *info)
else:
raise ValueError("invalid caching behavior configured '%s'" % self.sliceCachingBehavior)
def readSlices(self):
"""Read slice information from disk
:returns: ``[(startTime, timeStep), ...]``
"""
if not exists(self.fsPath):
raise NodeDeleted()
slice_info = []
for filename in os.listdir(self.fsPath):
if filename.endswith('.slice'):
startTime, timeStep = filename[:-6].split('@')
slice_info.append((int(startTime), int(timeStep)))
slice_info.sort(reverse=True)
return slice_info
def setSliceCachingBehavior(self, behavior):
"""Set slice caching behavior.
:param behavior: See :func:`slices` for valid behavior values
"""
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.sliceCachingBehavior = behavior
self.sliceCache = None
def clearSliceCache(self):
"""Clear slice cache, forcing a refresh from disk at the next access"""
self.sliceCache = None
def hasDataForInterval(self, fromTime, untilTime):
"""Test whether this node has any data in the given time interval. All slices are inspected
which will trigger a read of slice information from disk if slice cache behavior is set to `latest`
or `none` (See :func:`slices`)
:param fromTime: Beginning of interval in unix epoch seconds
:param untilTime: End of interval in unix epoch seconds
:returns: `True` or `False`
"""
slices = list(self.slices)
if not slices:
return False
earliestData = slices[-1].startTime
latestData = slices[0].endTime
return ((fromTime is None) or (fromTime < latestData)) and \
((untilTime is None) or (untilTime > earliestData))
def read(self, fromTime, untilTime):
"""Read data from underlying slices and return as a single time-series
:param fromTime: Beginning of interval in unix epoch seconds
:param untilTime: End of interval in unix epoch seconds
:returns: :class:`TimeSeriesData`
"""
if self.timeStep is None:
self.readMetadata()
# Normalize the timestamps to fit proper intervals
fromTime = int(fromTime - (fromTime % self.timeStep))
untilTime = int(untilTime - (untilTime % self.timeStep))
sliceBoundary = None # to know when to split up queries across slices
resultValues = []
earliestData = None
timeStep = self.timeStep
method = self.aggregationMethod
for slice in self.slices:
# If there was a prior slice covering the requested interval, don't ask for that data again
if (sliceBoundary is not None) and untilTime > sliceBoundary:
requestUntilTime = sliceBoundary
else:
requestUntilTime = untilTime
# if the requested interval starts after the start of this slice
if fromTime >= slice.startTime:
try:
series = slice.read(fromTime, requestUntilTime)
except NoData:
break
if series.timeStep != timeStep:
if len(resultValues) == 0:
# First slice holding series data, this becomes the default timeStep.
timeStep = series.timeStep
elif series.timeStep < timeStep:
# Series is at a different precision, aggregate to fit our current set.
series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
else:
# Normalize current set to fit new series data.
resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
timeStep = series.timeStep
earliestData = series.startTime
rightMissing = (requestUntilTime - series.endTime) // timeStep
rightNulls = [None for i in range(rightMissing)]
resultValues = series.values + rightNulls + resultValues
break
# or if slice contains data for part of the requested interval
elif untilTime >= slice.startTime:
try:
series = slice.read(slice.startTime, requestUntilTime)
except NoData:
continue
if series.timeStep != timeStep:
if len(resultValues) == 0:
# First slice holding series data, this becomes the default timeStep.
timeStep = series.timeStep
elif series.timeStep < timeStep:
# Series is at a different precision, aggregate to fit our current set.
series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
else:
# Normalize current set to fit new series data.
resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
timeStep = series.timeStep
earliestData = series.startTime
rightMissing = (requestUntilTime - series.endTime) // timeStep
rightNulls = [None for i in range(rightMissing)]
resultValues = series.values + rightNulls + resultValues
# this is the right-side boundary on the next iteration
sliceBoundary = slice.startTime
# The end of the requested interval predates all slices
if earliestData is None:
missing = int(untilTime - fromTime) // timeStep
resultValues = [None for i in range(missing)]
# Left pad nulls if the start of the requested interval predates all slices
else:
leftMissing = (earliestData - fromTime) // timeStep
leftNulls = [None for i in range(leftMissing)]
resultValues = leftNulls + resultValues
return TimeSeriesData(fromTime, untilTime, timeStep, resultValues)
def write(self, datapoints):
"""Writes datapoints to underlying slices. Datapoints that round to the same timestamp for the
node's `timeStep` will be treated as duplicates and dropped.
:param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
"""
if self.timeStep is None:
self.readMetadata()
if not datapoints:
return
sequences = self.compact(datapoints)
needsEarlierSlice = [] # keep track of sequences that precede all existing slices
while sequences:
sequence = sequences.pop()
timestamps = [t for t, v in sequence]
beginningTime = timestamps[0]
endingTime = timestamps[-1]
sliceBoundary = None # used to prevent writing sequences across slice boundaries
slicesExist = False
for slice in self.slices:
if slice.timeStep != self.timeStep:
continue
slicesExist = True
# truncate sequence so it doesn't cross the slice boundaries
if beginningTime >= slice.startTime:
if sliceBoundary is None:
sequenceWithinSlice = sequence
else:
# index of highest timestamp that doesn't exceed sliceBoundary
boundaryIndex = bisect_left(timestamps, sliceBoundary)
sequenceWithinSlice = sequence[:boundaryIndex]
try:
slice.write(sequenceWithinSlice)
except SliceGapTooLarge:
newSlice = CeresSlice.create(self, beginningTime, slice.timeStep)
newSlice.write(sequenceWithinSlice)
self.sliceCache = None
except SliceDeleted:
self.sliceCache = None
self.write(datapoints) # recurse to retry
return
sequence = []
break
# sequence straddles the current slice, write the right side
# left side will be taken up in the next slice down
elif endingTime >= slice.startTime:
# index of lowest timestamp that doesn't precede slice.startTime
boundaryIndex = bisect_left(timestamps, slice.startTime)
sequenceWithinSlice = sequence[boundaryIndex:]
# write the leftovers on the next earlier slice
sequence = sequence[:boundaryIndex]
slice.write(sequenceWithinSlice)
if not sequence:
break
sliceBoundary = slice.startTime
else: # slice list exhausted with stuff still to write
needsEarlierSlice.append(sequence)
if not slicesExist:
sequences.append(sequence)
needsEarlierSlice = sequences
break
for sequence in needsEarlierSlice:
slice = CeresSlice.create(self, int(sequence[0][0]), self.timeStep)
slice.write(sequence)
self.clearSliceCache()
def compact(self, datapoints):
"""Compacts datapoints into a list of contiguous, sorted lists of points with duplicate
timestamps and null values removed
:param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
:returns: A list of lists of contiguous sorted datapoint tuples
``[[(timestamp, value), ...], ...]``
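Example (with a hypothetical ``timeStep`` of 60)::

    node.compact([(61, 1.0), (120, 2.0), (300, 3.0)])
    # -> [[(60, 1.0), (120, 2.0)], [(300, 3.0)]]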
"""
datapoints = sorted(((int(timestamp), float(value))
for timestamp, value in datapoints if value is not None),
key=lambda datapoint: datapoint[0])
sequences = []
sequence = []
minimumTimestamp = 0 # used to avoid duplicate intervals
for timestamp, value in datapoints:
timestamp -= timestamp % self.timeStep # round it down to a proper interval
if not sequence:
sequence.append((timestamp, value))
else:
if timestamp == minimumTimestamp: # overwrite duplicate intervals with latest value
sequence[-1] = (timestamp, value)
continue
if timestamp == sequence[-1][0] + self.timeStep: # append contiguous datapoints
sequence.append((timestamp, value))
else: # start a new sequence if not contiguous
sequences.append(sequence)
sequence = [(timestamp, value)]
minimumTimestamp = timestamp
if sequence:
sequences.append(sequence)
return sequences
class CeresSlice(object):
__slots__ = ('node', 'startTime', 'timeStep', 'fsPath')
def __init__(self, node, startTime, timeStep):
self.node = node
self.startTime = startTime
self.timeStep = timeStep
self.fsPath = join(node.fsPath, '%d@%d.slice' % (startTime, timeStep))
def __repr__(self):
return "<CeresSlice[0x%x]: %s>" % (id(self), self.fsPath)
__str__ = __repr__
@property
def isEmpty(self):
return getsize(self.fsPath) == 0
@property
def endTime(self):
return self.startTime + ((getsize(self.fsPath) // DATAPOINT_SIZE) * self.timeStep)
@property
def mtime(self):
return getmtime(self.fsPath)
@classmethod
def create(cls, node, startTime, timeStep):
slice = cls(node, startTime, timeStep)
fileHandle = open(slice.fsPath, 'wb')
fileHandle.close()
os.chmod(slice.fsPath, SLICE_PERMS)
return slice
def read(self, fromTime, untilTime):
timeOffset = int(fromTime) - self.startTime
if timeOffset < 0:
raise InvalidRequest("requested time range (%d, %d) precedes this slice: %d" % (
fromTime, untilTime, self.startTime))
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
if byteOffset >= getsize(self.fsPath):
raise NoData()
with open(self.fsPath, 'rb') as fileHandle:
fileHandle.seek(byteOffset)
timeRange = int(untilTime - fromTime)
pointRange = timeRange // self.timeStep
byteRange = pointRange * DATAPOINT_SIZE
packedValues = fileHandle.read(byteRange)
pointsReturned = len(packedValues) // DATAPOINT_SIZE
format = '!' + ('d' * pointsReturned)
values = struct.unpack(format, packedValues)
values = [v if not isnan(v) else None for v in values]
endTime = fromTime + (len(values) * self.timeStep)
# print '[DEBUG slice.read] startTime=%s fromTime=%s untilTime=%s' % (
# self.startTime, fromTime, untilTime)
# print '[DEBUG slice.read] timeInfo = (%s, %s, %s)' % (fromTime, endTime, self.timeStep)
# print '[DEBUG slice.read] values = %s' % str(values)
return TimeSeriesData(fromTime, endTime, self.timeStep, values)
def write(self, sequence):
beginningTime = sequence[0][0]
timeOffset = beginningTime - self.startTime
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
values = [v for t, v in sequence]
format = '!' + ('d' * len(values))
packedValues = struct.pack(format, *values)
try:
filesize = getsize(self.fsPath)
except OSError as e:
if e.errno == errno.ENOENT:
raise SliceDeleted()
else:
raise
byteGap = byteOffset - filesize
if byteGap > 0: # pad the allowable gap with nan's
pointGap = byteGap // DATAPOINT_SIZE
if pointGap > MAX_SLICE_GAP:
raise SliceGapTooLarge()
else:
packedGap = PACKED_NAN * pointGap
packedValues = packedGap + packedValues
byteOffset -= byteGap
with open(self.fsPath, 'r+b') as fileHandle:
if LOCK_WRITES:
fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
try:
fileHandle.seek(byteOffset)
except IOError:
# print " IOError: fsPath=%s byteOffset=%d size=%d sequence=%s" % (
# self.fsPath, byteOffset, filesize, sequence)
raise
fileHandle.write(packedValues)
def deleteBefore(self, t):
if not exists(self.fsPath):
raise SliceDeleted()
if t % self.timeStep != 0:
t = t - (t % self.timeStep) + self.timeStep
timeOffset = t - self.startTime
if timeOffset < 0:
return
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
if not byteOffset:
return
self.node.clearSliceCache()
with open(self.fsPath, 'r+b') as fileHandle:
if LOCK_WRITES:
fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
fileHandle.seek(byteOffset)
fileData = fileHandle.read()
if fileData:
fileHandle.seek(0)
fileHandle.write(fileData)
fileHandle.truncate()
fileHandle.close()
newFsPath = join(dirname(self.fsPath), "%d@%d.slice" % (t, self.timeStep))
os.rename(self.fsPath, newFsPath)
else:
os.unlink(self.fsPath)
raise SliceDeleted()
def __lt__(self, other):
return self.startTime < other.startTime
class TimeSeriesData(object):
__slots__ = ('startTime', 'endTime', 'timeStep', 'values')
def __init__(self, startTime, endTime, timeStep, values):
self.startTime = startTime
self.endTime = endTime
self.timeStep = timeStep
self.values = values
@property
def timestamps(self):
return range(self.startTime, self.endTime, self.timeStep)
def __iter__(self):
return izip(self.timestamps, self.values)
def __len__(self):
return len(self.values)
def merge(self, other):
for timestamp, value in other:
if value is None:
continue
timestamp -= timestamp % self.timeStep
if timestamp < self.startTime:
continue
index = int((timestamp - self.startTime) // self.timeStep)
try:
if self.values[index] is None:
self.values[index] = value
except IndexError:
continue
class CorruptNode(Exception):
def __init__(self, node, problem):
Exception.__init__(self, problem)
self.node = node
self.problem = problem
class NoData(Exception):
pass
class NodeNotFound(Exception):
pass
class NodeDeleted(Exception):
pass
class InvalidRequest(Exception):
pass
class InvalidAggregationMethod(Exception):
pass
class SliceGapTooLarge(Exception):
"For internal use only"
class SliceDeleted(Exception):
pass
def aggregate(aggregationMethod, values):
# Filter out None values
knownValues = list(filter(lambda x: x is not None, values))
if len(knownValues) == 0:
return None
# Aggregate based on method
if aggregationMethod == 'average':
return float(sum(knownValues)) / float(len(knownValues))
elif aggregationMethod == 'sum':
return float(sum(knownValues))
elif aggregationMethod == 'last':
return knownValues[-1]
elif aggregationMethod == 'max':
return max(knownValues)
elif aggregationMethod == 'min':
return min(knownValues)
else:
raise InvalidAggregationMethod("Unrecognized aggregation method %s" %
aggregationMethod)
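# Example (illustrative): aggregate('average', [1.0, None, 3.0]) == 2.0
# None values are dropped before the aggregation method is applied.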
def aggregateSeries(method, oldTimeStep, newTimeStep, values):
# Aggregate current values to fit newTimeStep.
# Makes the assumption that the caller has already guaranteed
# that newTimeStep is bigger than oldTimeStep.
factor = int(newTimeStep // oldTimeStep)
newValues = []
subArr = []
for val in values:
subArr.append(val)
if len(subArr) == factor:
newValues.append(aggregate(method, subArr))
subArr = []
if len(subArr):
newValues.append(aggregate(method, subArr))
return newValues
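# Example (illustrative): aggregateSeries('sum', 60, 180, [1, 2, 3, 4]) == [6.0, 4.0]
# (factor is 3, so values are consumed in chunks of three with a partial tail)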
def getTree(path):
while path not in (os.sep, ''):
if isdir(join(path, '.ceres-tree')):
return CeresTree(path)
path = dirname(path)
def setDefaultNodeCachingBehavior(behavior):
global DEFAULT_NODE_CACHING_BEHAVIOR
behavior = behavior.lower()
if behavior not in ('none', 'all'):
raise ValueError("invalid caching behavior '%s'" % behavior)
DEFAULT_NODE_CACHING_BEHAVIOR = behavior
def setDefaultSliceCachingBehavior(behavior):
global DEFAULT_SLICE_CACHING_BEHAVIOR
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
DEFAULT_SLICE_CACHING_BEHAVIOR = behavior
| raise ValueError("invalid caching behavior configured '%s'" % self.nodeCachingBehavior) | conditional_block |
NotificationDetail.js | import React from "react";
import { Link } from "react-router";
import Loader from "../../core/Loader";
import DataComponent from "../base/DataComponent";
import DataError from "../base/DataError";
import ResourceAction from "./../base/ResourceAction";
import history from "../../history";
/**
* @author Niklas Keller
*/
class NotificationDetail extends DataComponent {
getDataUri() {
return "notifications/" + this.props.params.id;
}
componentWillReceiveProps(next) {
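// Refetch when the route param changes, e.g. (illustrative)
// navigating from /notifications/1 to /notifications/2.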
let oldId = this.props.params.id;
let newId = next.params.id;
if (oldId !== newId) {
this.fetchData();
}
}
render() |
}
export default NotificationDetail; | {
let content = null;
if (this.state.loaded) {
if (this.state.failed) {
content = (
<DataError />
);
} else {
let emails = this.state.data.emails.map((item) => (
<li><code>{item}</code></li>
));
emails = (
<ul>
{emails}
</ul>
);
content = (
<div>
<div className="actions">
<Link to={this.props.location.pathname + "/edit"} className="action">
<i className="fa fa-pencil icon"/>
Edit
</Link>
<ResourceAction icon="trash" method="DELETE" uri={"notifications/" + this.props.params.id}
onClick={() => window.confirm("Do you really want to delete this notification?")}
onSuccess={() => history.replaceState(null, "/notifications")}
onError={(e) => {
let error = typeof e === "object" && "data" in e ? e.data.detail : "Unknown error.";
window.alert("Deletion failed. " + error);
}}
backend={this.props.backend}>
Delete
</ResourceAction>
</div>
<h1>Notification: {this.state.data.name}</h1>
<label className="input-label">Description</label>
<pre>{this.state.data.description}</pre>
<label className="input-label">SQL Query</label>
<pre>{this.state.data.query}</pre>
<label className="input-label">Check Period</label>
<div>Checked every <code>{this.state.data.checkPeriod}</code> seconds.</div>
<label className="input-label">E-Mails</label>
<div>{emails}</div>
<label className="input-label">Send Once</label>
<div>{this.state.data.sendOnce.value ? "Yes, will be sent once and then be deleted." : "No, will be sent on every change."}</div>
</div>
);
}
}
return (
<Loader loaded={this.state.loaded} className="loader">
{content}
</Loader>
);
} | identifier_body |
NotificationDetail.js | import React from "react";
import { Link } from "react-router";
import Loader from "../../core/Loader";
import DataComponent from "../base/DataComponent";
import DataError from "../base/DataError";
import ResourceAction from "./../base/ResourceAction"; | /**
* @author Niklas Keller
*/
class NotificationDetail extends DataComponent {
getDataUri() {
return "notifications/" + this.props.params.id;
}
componentWillReceiveProps(next) {
let oldId = this.props.params.id;
let newId = next.params.id;
if (oldId !== newId) {
this.fetchData();
}
}
render() {
let content = null;
if (this.state.loaded) {
if (this.state.failed) {
content = (
<DataError />
);
} else {
let emails = this.state.data.emails.map((item) => (
<li><code>{item}</code></li>
));
emails = (
<ul>
{emails}
</ul>
);
content = (
<div>
<div className="actions">
<Link to={this.props.location.pathname + "/edit"} className="action">
<i className="fa fa-pencil icon"/>
Edit
</Link>
<ResourceAction icon="trash" method="DELETE" uri={"notifications/" + this.props.params.id}
onClick={() => window.confirm("Do you really want to delete this notification?")}
onSuccess={() => history.replaceState(null, "/notifications")}
onError={(e) => {
let error = typeof e === "object" && "data" in e ? e.data.detail : "Unknown error.";
window.alert("Deletion failed. " + error);
}}
backend={this.props.backend}>
Delete
</ResourceAction>
</div>
<h1>Notification: {this.state.data.name}</h1>
<label className="input-label">Description</label>
<pre>{this.state.data.description}</pre>
<label className="input-label">SQL Query</label>
<pre>{this.state.data.query}</pre>
<label className="input-label">Check Period</label>
<div>Checked every <code>{this.state.data.checkPeriod}</code> seconds.</div>
<label className="input-label">E-Mails</label>
<div>{emails}</div>
<label className="input-label">Send Once</label>
<div>{this.state.data.sendOnce.value ? "Yes, will be sent once and then be deleted." : "No, will be sent on every change."}</div>
</div>
);
}
}
return (
<Loader loaded={this.state.loaded} className="loader">
{content}
</Loader>
);
}
}
export default NotificationDetail; | import history from "../../history";
| random_line_split |
NotificationDetail.js | import React from "react";
import { Link } from "react-router";
import Loader from "../../core/Loader";
import DataComponent from "../base/DataComponent";
import DataError from "../base/DataError";
import ResourceAction from "./../base/ResourceAction";
import history from "../../history";
/**
* @author Niklas Keller
*/
class NotificationDetail extends DataComponent {
getDataUri() {
return "notifications/" + this.props.params.id;
}
componentWillReceiveProps(next) {
let oldId = this.props.params.id;
let newId = next.params.id;
if (oldId !== newId) {
this.fetchData();
}
}
| () {
let content = null;
if (this.state.loaded) {
if (this.state.failed) {
content = (
<DataError />
);
} else {
let emails = this.state.data.emails.map((item) => (
<li><code>{item}</code></li>
));
emails = (
<ul>
{emails}
</ul>
);
content = (
<div>
<div className="actions">
<Link to={this.props.location.pathname + "/edit"} className="action">
<i className="fa fa-pencil icon"/>
Edit
</Link>
<ResourceAction icon="trash" method="DELETE" uri={"notifications/" + this.props.params.id}
onClick={() => window.confirm("Do you really want to delete this notification?")}
onSuccess={() => history.replaceState(null, "/notifications")}
onError={(e) => {
let error = typeof e === "object" && "data" in e ? e.data.detail : "Unknown error.";
window.alert("Deletion failed. " + error);
}}
backend={this.props.backend}>
Delete
</ResourceAction>
</div>
<h1>Notification: {this.state.data.name}</h1>
<label className="input-label">Description</label>
<pre>{this.state.data.description}</pre>
<label className="input-label">SQL Query</label>
<pre>{this.state.data.query}</pre>
<label className="input-label">Check Period</label>
<div>Checked every <code>{this.state.data.checkPeriod}</code> seconds.</div>
<label className="input-label">E-Mails</label>
<div>{emails}</div>
<label className="input-label">Send Once</label>
<div>{this.state.data.sendOnce.value ? "Yes, will be sent once and then be deleted." : "No, will be sent on every change."}</div>
</div>
);
}
}
return (
<Loader loaded={this.state.loaded} className="loader">
{content}
</Loader>
);
}
}
export default NotificationDetail; | render | identifier_name |
NotificationDetail.js | import React from "react";
import { Link } from "react-router";
import Loader from "../../core/Loader";
import DataComponent from "../base/DataComponent";
import DataError from "../base/DataError";
import ResourceAction from "./../base/ResourceAction";
import history from "../../history";
/**
* @author Niklas Keller
*/
class NotificationDetail extends DataComponent {
getDataUri() {
return "notifications/" + this.props.params.id;
}
componentWillReceiveProps(next) {
let oldId = this.props.params.id;
let newId = next.params.id;
if (oldId !== newId) |
}
render() {
let content = null;
if (this.state.loaded) {
if (this.state.failed) {
content = (
<DataError />
);
} else {
let emails = this.state.data.emails.map((item) => (
<li><code>{item}</code></li>
));
emails = (
<ul>
{emails}
</ul>
);
content = (
<div>
<div className="actions">
<Link to={this.props.location.pathname + "/edit"} className="action">
<i className="fa fa-pencil icon"/>
Edit
</Link>
<ResourceAction icon="trash" method="DELETE" uri={"notifications/" + this.props.params.id}
onClick={() => window.confirm("Do you really want to delete this notification?")}
onSuccess={() => history.replaceState(null, "/notifications")}
onError={(e) => {
let error = typeof e === "object" && "data" in e ? e.data.detail : "Unknown error.";
window.alert("Deletion failed. " + error);
}}
backend={this.props.backend}>
Delete
</ResourceAction>
</div>
<h1>Notification: {this.state.data.name}</h1>
<label className="input-label">Description</label>
<pre>{this.state.data.description}</pre>
<label className="input-label">SQL Query</label>
<pre>{this.state.data.query}</pre>
<label className="input-label">Check Period</label>
<div>Checked every <code>{this.state.data.checkPeriod}</code> seconds.</div>
<label className="input-label">E-Mails</label>
<div>{emails}</div>
<label className="input-label">Send Once</label>
<div>{this.state.data.sendOnce.value ? "Yes, will be sent once and then be deleted." : "No, will be sent on every change."}</div>
</div>
);
}
}
return (
<Loader loaded={this.state.loaded} className="loader">
{content}
</Loader>
);
}
}
export default NotificationDetail; | {
this.fetchData();
} | conditional_block |
ProjectModel.ts | const sander = require("sander");
import { basename, dirname, join } from "path";
const glob = require("glob-promise");
import { PageSettings, BitSettings, BlockSettings, ProjectSettings, Material, BitRef, PageTree, PageContext, BlockContext, CacheableMat, SettingsType } from "./interfaces";
import { flatten } from "./utils";
import { ProjectFactory } from "./ProjectFactory";
export class ProjectModel {
materials: Material[];
pages: PageSettings[];
blocks: BlockSettings[];
bits: BitSettings[];
project: ProjectSettings;
constructor(public workingDir: string) {
this.pages = [];
this.materials = [];
this.blocks = [];
this.bits = [];
this.project = null;
};
// Here we are removing a file, need to find it in the array and take it out.
async remove({type, path, factory}): Promise<string[]> {
let collection;
let item;
let affectedPages: string[];
switch (type) {
case "project":
throw new Error("Missing projectSettings file");
case "block":
{
const namespace = await factory.getProjectName();
const name = basename(path).match(ProjectFactory.getNameRegex(SettingsType.Block))[1];
collection = this.blocks;
item = collection.find(x => x.name === name && x.namespace === namespace);
affectedPages = this.pages
.filter(p => p.blocks.indexOf(`${namespace}/${name}`) > -1)
.map(p => p.name);
break;
}
case "bit":
{
const namespace = await factory.getProjectName();
const name = basename(dirname(path));
collection = this.bits;
item = collection.find(x => x.name === name && x.namespace === namespace);
const affectedBlocks = this.blocks
.filter(b => b.bits.map(x => x.bit).indexOf(`${namespace}/${name}`) > -1);
affectedPages = this.pages
.filter(p => {
let isAffected = false;
affectedBlocks.forEach(bl => {
if (p.blocks.indexOf(`${bl.namespace}/${bl.name}`) > -1) isAffected = true;
});
return isAffected;
})
.map(p => p.name);
break;
}
case "page":
{
const namespace = await factory.getProjectName();
const name = basename(path).match(ProjectFactory.getNameRegex(SettingsType.Page))[1];
collection = this.pages;
item = collection.find(x => x.name === name && x.namespace === namespace);
affectedPages = [name];
break;
}
default:
{
const namespace = await factory.getProjectName();
const name = basename(path);
collection = this.materials;
item = collection.find(x => x.name === name && x.namespace === namespace && x.type === type);
affectedPages = this.pages.map(p => p.name);
break;
}
}
// remove item from collection
const idx = collection.indexOf(item);
collection.splice(idx, 1);
return affectedPages;
}
// Here we are getting a new file. Need to instantiate it via ProjectFactory and then add it to the proper array
async add({type, path, factory}): Promise<string[]> {
if (type === "project") {
await Promise.all(this.blocks.map(this.updateAml));
this.project = await factory.instantiate({type, path});
return this.pages.map(p => p.name);
}
let collection;
switch (type) {
case "block":
collection = this.blocks;
break;
case "bit":
collection = this.bits;
const file = await glob("*.bitSettings.js", {cwd: dirname(path)});
path = join(dirname(path), file[0]);
break;
case "page":
collection = this.pages;
break;
default:
collection = this.materials;
break;
}
const item = await factory.instantiate({type, path});
collection.push(item);
if (type === "page") {
return [item.name];
}
return [];
}
async updateAml(block) {
if (block.source) {
block.bits = await fetchGDOC({tries: 0, settings: block});
}
// Implements exponential backoff
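// (illustrative: retry delays grow roughly 2s, 4s, 8s, 16s, 32s before giving up)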
function fetchGDOC({tries, settings}): Promise<BitRef[]> {
return new Promise((resolve, reject) => {
if (tries < 5) {
settings.source.fetch().then(resolve)
.catch(err => {
if (err.code !== 403) { // Code 403: Rate limit exceeded
return reject(err);
} else {
tries += 1;
const timer = Math.pow(2, tries) * 1000 + Math.random() * 100;
console.log(`Hit google rate limit, automatically trying again in ${timer / 1000} seconds`);
setTimeout(() => {
return fetchGDOC({tries, settings}).then(resolve).catch(reject);
}, timer);
}
});
} else {
return reject(new Error("Gdocs rate limit exceeded. Try again in a few minutes."));
}
});
}
}
// Updating a component. First instantiate it with ProjectFactory and then find the old component and replace it.
async refresh({type, path, factory}): Promise<string[]> {
if (type === "project") {
this.project = await factory.instantiate({type, path});
return this.pages.map(p => p.name);
}
const affectedPages = await this.remove({type, path, factory});
await this.add({type, path, factory});
return affectedPages;
}
async getPageTree({name, debug}: {name: string, debug?: boolean}): Promise<PageTree> {
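// Page-level blocks/materials are appended after the project-wide defaults,
// so (illustrative) defaults ["nav"] + page ["story"] -> ["nav", "story"].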
const page = this.pages.find(x => x.name === name);
if (!page) throw new Error(`Page ${name} not found`); // guard before dereferencing page
const blocks = [...this.project.defaults.blocks, ...page.blocks];
const scripts = [...this.project.defaults.scripts, ...page.materials.scripts];
const styles = [...this.project.defaults.styles, ...page.materials.styles];
const assets = [...this.project.defaults.assets, ...page.materials.assets];
const context = await this.buildContext({page, blocks, debug});
const mats = this.buildMats({scripts, styles, assets, blocks});
return {
workingDir: this.workingDir,
context,
styles: mats.styles,
scripts: mats.scripts,
assets: mats.assets
};
};
private assembleGlobalMats(mats, type): Material[] {
return mats.reduce((state: Material[], mat: {id: string, as?: string}) => {
const indexOfPresent = state.map(x => x.overridableName).indexOf(mat.as);
let material;
try {
material = this.retrieveMaterial({type, id: mat.id, overridableName: mat.as || null});
} catch (e) {
throw new Error(`Cannot parse material ${mat.id}`);
}
if (indexOfPresent < 0) {
state.push(material);
} else {
state[indexOfPresent] = material;
}
return state;
}, []);
}
private retrieveMaterial({type, id, overridableName}: {type: string, id: string, overridableName?: string}): Material {
const { name, namespace } = this.parseId(id);
const mat = this.materials.find(m => m.namespace === namespace && m.name === name && m.type === type);
return Object.assign({}, mat, {overridableName: overridableName || basename(mat.path)});
}
private buildMats({scripts, styles, assets, blocks}): {scripts: CacheableMat, styles: CacheableMat, assets: Material[]} {
const globalScripts = this.assembleGlobalMats(scripts, "script");
const globalAssets = this.assembleGlobalMats(assets, "asset");
const globalStyles = this.assembleGlobalMats(styles, "style");
const { bitStyles, bitScripts } = this.assembleBitMats(blocks);
const { styleCache, scriptCache } = this.getMatCache({styles, scripts});
return {
scripts: { globals: globalScripts, bits: bitScripts, cache: scriptCache },
styles: { globals: globalStyles, bits: bitStyles, cache: styleCache },
assets: globalAssets
};
}
private getMatCache({styles, scripts}): { scriptCache: Material[], styleCache: Material[] } {
// Using JSON here to clone by value, not reference
const styleMats = JSON.parse(JSON.stringify(this.materials.filter(x => x.type === "style")));
const scriptMats = JSON.parse(JSON.stringify(this.materials.filter(x => x.type === "script")));
const reduceFn = (collection, replacers) => {
return replacers.reduce((state: Material[], mat) => {
const {name, namespace} = this.parseId(mat.id);
const toUpdate = state.find(x => x.namespace === namespace && x.name === name);
toUpdate.overridableName = mat.as || null;
return state;
}, collection);
};
return {
scriptCache: reduceFn(scriptMats, scripts),
styleCache: reduceFn(styleMats, styles)
};
}
private assembleBitMats(blocks): { bitStyles: string[], bitScripts: string[] } {
const bitsWithDupes = blocks.map(block => {
try {
block = this.parseId(block);
} catch (err) {
throw new Error(`Cannot parse block ${block}`);
}
return this.blocks.find(x => x.namespace === block.namespace && x.name === block.name).bits
.map((bitref: BitRef) => {
const {namespace, name} = this.parseId(bitref.bit);
const bit = this.bits.find(x => x.namespace === namespace && x.name === name);
return { script: bit.script, style: bit.style };
});
});
// Flatten and dedupe
const styles = flatten(bitsWithDupes).map(x => x.style);
const scripts = flatten(bitsWithDupes).map(x => x.script);
return { bitStyles: [... new Set([...styles])], bitScripts: [... new Set([...scripts])] };
}
private async buildContext({page, blocks, debug}): Promise<PageContext> {
const pageCtx = {
$name: page.name,
$meta: [...this.project.defaults.metaTags, ...page.meta],
$resources: {
head: [...this.project.defaults.resources.head, ...page.resources.head],
body: [...this.project.defaults.resources.body, ...page.resources.body]
},
$template: page.template,
$deployPath: page.deployPath,
};
const projCtx = {
$name: this.project.name,
$deployRoot: this.project.deployRoot,
$template: this.project.template,
$debug: debug || false
};
const pageBlox = <BlockContext[]><any>await Promise.all(
blocks.map(b => {
try {
b = this.parseId(b);
} catch (err) {
throw new Error(`Cannot parse block ${b}`);
}
const block = Object.assign({}, this.blocks.find(x => x.namespace === b.namespace && x.name === b.name));
if (!block.name && !block.namespace) throw new Error(`Block ${b.namespace}/${b.name} not found`);
return new Promise((resolve, reject) => {
Promise.all(
block.bits.map(b => {
let parsedB;
try {
parsedB = this.parseId(b.bit);
} catch (e) {
throw new Error(`Cannot parse bit ${b.bit}`);
}
const bit = this.bits.find(x => {
return x.namespace === parsedB.namespace && x.name === parsedB.name;
});
if (!bit) throw new Error(`Bit ${b.bit} not found (from block ${block.namespace}/${block.name})`);
return new Promise((resolve, reject) => {
sander.readFile(bit.html, { encoding: "utf-8" }).then($template => {
resolve(Object.assign({}, bit.context, b.context, {$name: bit.name, $template}));
}).catch(reject);
});
})
).then($BITS => {
resolve(Object.assign({}, block.context, {$name: block.name, $template: block.template, $BITS}));
})
.catch(reject);
});
})
);
return {
$PROJECT: Object.assign({}, this.project.context, projCtx),
$PAGE: Object.assign({}, page.context, pageCtx),
$BLOCKS: pageBlox
};
}
private parseId(id: string): { name: string, namespace: string } {
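// e.g. (illustrative): "myproject/hero" -> { namespace: "myproject", name: "hero" },
// while a bare "hero" falls back to the project name as its namespace.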
const splitId = id.split("/");
let namespace, name;
if (splitId.length === 1) {
namespace = this.project.name;
name = id;
} else if (splitId.length > 2) {
namespace = splitId[0];
name = splitId.slice(1).join("/");
} else |
return { namespace, name };
}
}
| {
namespace = splitId[0];
name = splitId[1];
} | conditional_block |
ProjectModel.ts | const sander = require("sander");
import { basename, dirname, join } from "path";
const glob = require("glob-promise");
import { PageSettings, BitSettings, BlockSettings, ProjectSettings, Material, BitRef, PageTree, PageContext, BlockContext, CacheableMat, SettingsType } from "./interfaces";
import { flatten } from "./utils";
import { ProjectFactory } from "./ProjectFactory";
export class ProjectModel {
materials: Material[];
pages: PageSettings[];
blocks: BlockSettings[];
bits: BitSettings[];
project: ProjectSettings;
constructor(public workingDir: string) {
this.pages = [];
this.materials = [];
this.blocks = [];
this.bits = [];
this.project = null;
};
// Here we are removing a file, need to find it in the array and take it out.
async remove({type, path, factory}): Promise<string[]> {
let collection;
let item;
let affectedPages: string[];
switch (type) {
case "project":
throw new Error("Missing projectSettings file");
case "block":
{
const namespace = await factory.getProjectName();
const name = basename(path).match(ProjectFactory.getNameRegex(SettingsType.Block))[1];
collection = this.blocks;
item = collection.find(x => x.name === name && x.namespace === namespace);
affectedPages = this.pages
.filter(p => p.blocks.indexOf(`${namespace}/${name}`) > -1)
.map(p => p.name);
break;
}
case "bit":
{
const namespace = await factory.getProjectName();
const name = basename(dirname(path));
collection = this.bits;
item = collection.find(x => x.name === name && x.namespace === namespace);
const affectedBlocks = this.blocks
.filter(b => b.bits.map(x => x.bit).indexOf(`${namespace}/${name}`) > -1);
affectedPages = this.pages
.filter(p => {
let isAffected = false;
affectedBlocks.forEach(bl => {
if (p.blocks.indexOf(`${bl.namespace}/${bl.name}`) > -1) isAffected = true;
});
return isAffected;
}) | case "page":
{
const namespace = await factory.getProjectName();
const name = basename(path).match(ProjectFactory.getNameRegex(SettingsType.Page))[1];
collection = this.pages;
item = collection.find(x => x.name === name && x.namespace === namespace);
affectedPages = [name];
break;
}
default:
{
const namespace = await factory.getProjectName();
const name = basename(path);
collection = this.materials;
item = collection.find(x => x.name === name && x.namespace === namespace && x.type === type);
affectedPages = this.pages.map(p => p.name);
break;
}
}
// remove item from collection
const idx = collection.indexOf(item);
collection.splice(idx, 1);
return affectedPages;
}
// Here we are getting a new file. Need to instantiate it via ProjectFactory and then add it to the proper array
async add({type, path, factory}): Promise<string[]> {
if (type === "project") {
await Promise.all(this.blocks.map(this.updateAml));
this.project = await factory.instantiate({type, path});
return this.pages.map(p => p.name);
}
let collection;
switch (type) {
case "block":
collection = this.blocks;
break;
case "bit":
collection = this.bits;
const file = await glob("*.bitSettings.js", {cwd: dirname(path)});
path = join(dirname(path), file[0]);
break;
case "page":
collection = this.pages;
break;
default:
collection = this.materials;
break;
}
const item = await factory.instantiate({type, path});
collection.push(item);
if (type === "page") {
return [item.name];
}
return [];
}
async updateAml(block) {
if (block.source) {
block.bits = await fetchGDOC({tries: 0, settings: block});
}
// Implements exponential backoff
function fetchGDOC({tries, settings}): Promise<BitRef[]> {
return new Promise((resolve, reject) => {
if (tries < 5) {
settings.source.fetch().then(resolve)
.catch(err => {
if (err.code !== 403) { // Code 403: Rate limit exceeded
return reject(err);
} else {
tries += 1;
const timer = Math.pow(2, tries) * 1000 + Math.random() * 100;
console.log(`Hit google rate limit, automatically trying again in ${timer / 1000} seconds`);
setTimeout(() => {
return fetchGDOC({tries, settings}).then(resolve).catch(reject);
}, timer);
}
});
} else {
return reject(new Error("Gdocs rate limit exceeded. Try again in a few minutes."));
}
});
}
}
// Updating a component. First instantiate it with ProjectFactory and then find the old component and replace it.
async refresh({type, path, factory}): Promise<string[]> {
if (type === "project") {
this.project = await factory.instantiate({type, path});
return this.pages.map(p => p.name);
}
const affectedPages = await this.remove({type, path, factory});
await this.add({type, path, factory});
return affectedPages;
}
async getPageTree({name, debug}: {name: string, debug?: boolean}): Promise<PageTree> {
const page = this.pages.find(x => x.name === name);
if (!page) throw new Error(`Page ${name} not found`); // guard before dereferencing page
const blocks = [...this.project.defaults.blocks, ...page.blocks];
const scripts = [...this.project.defaults.scripts, ...page.materials.scripts];
const styles = [...this.project.defaults.styles, ...page.materials.styles];
const assets = [...this.project.defaults.assets, ...page.materials.assets];
const context = await this.buildContext({page, blocks, debug});
const mats = this.buildMats({scripts, styles, assets, blocks});
return {
workingDir: this.workingDir,
context,
styles: mats.styles,
scripts: mats.scripts,
assets: mats.assets
};
};
private assembleGlobalMats(mats, type): Material[] {
return mats.reduce((state: Material[], mat: {id: string, as?: string}) => {
const indexOfPresent = state.map(x => x.overridableName).indexOf(mat.as);
let material;
try {
material = this.retrieveMaterial({type, id: mat.id, overridableName: mat.as || null});
} catch (e) {
throw new Error(`Cannot parse material ${mat.id}`);
}
if (indexOfPresent < 0) {
state.push(material);
} else {
state[indexOfPresent] = material;
}
return state;
}, []);
}
private retrieveMaterial({type, id, overridableName}: {type: string, id: string, overridableName?: string}): Material {
const { name, namespace } = this.parseId(id);
const mat = this.materials.find(m => m.namespace === namespace && m.name === name && m.type === type);
return Object.assign({}, mat, {overridableName: overridableName || basename(mat.path)});
}
private buildMats({scripts, styles, assets, blocks}): {scripts: CacheableMat, styles: CacheableMat, assets: Material[]} {
const globalScripts = this.assembleGlobalMats(scripts, "script");
const globalAssets = this.assembleGlobalMats(assets, "asset");
const globalStyles = this.assembleGlobalMats(styles, "style");
const { bitStyles, bitScripts } = this.assembleBitMats(blocks);
const { styleCache, scriptCache } = this.getMatCache({styles, scripts});
return {
scripts: { globals: globalScripts, bits: bitScripts, cache: scriptCache },
styles: { globals: globalStyles, bits: bitStyles, cache: styleCache },
assets: globalAssets
};
}
private getMatCache({styles, scripts}): { scriptCache: Material[], styleCache: Material[] } {
// Using JSON here to clone by value, not reference
const styleMats = JSON.parse(JSON.stringify(this.materials.filter(x => x.type === "style")));
const scriptMats = JSON.parse(JSON.stringify(this.materials.filter(x => x.type === "script")));
const reduceFn = (collection, replacers) => {
return replacers.reduce((state: Material[], mat) => {
const {name, namespace} = this.parseId(mat.id);
const toUpdate = state.find(x => x.namespace === namespace && x.name === name);
toUpdate.overridableName = mat.as || null;
return state;
}, collection);
};
return {
scriptCache: reduceFn(scriptMats, scripts),
styleCache: reduceFn(styleMats, styles)
};
}
private assembleBitMats(blocks): { bitStyles: string[], bitScripts: string[] } {
const bitsWithDupes = blocks.map(block => {
try {
block = this.parseId(block);
} catch (err) {
throw new Error(`Cannot parse block ${block}`);
}
return this.blocks.find(x => x.namespace === block.namespace && x.name === block.name).bits
.map((bitref: BitRef) => {
const {namespace, name} = this.parseId(bitref.bit);
const bit = this.bits.find(x => x.namespace === namespace && x.name === name);
return { script: bit.script, style: bit.style };
});
});
// Flatten and dedupe
const styles = flatten(bitsWithDupes).map(x => x.style);
const scripts = flatten(bitsWithDupes).map(x => x.script);
return { bitStyles: [... new Set([...styles])], bitScripts: [... new Set([...scripts])] };
}
private async buildContext({page, blocks, debug}): Promise<PageContext> {
const pageCtx = {
$name: page.name,
$meta: [...this.project.defaults.metaTags, ...page.meta],
$resources: {
head: [...this.project.defaults.resources.head, ...page.resources.head],
body: [...this.project.defaults.resources.body, ...page.resources.body]
},
$template: page.template,
$deployPath: page.deployPath,
};
const projCtx = {
$name: this.project.name,
$deployRoot: this.project.deployRoot,
$template: this.project.template,
$debug: debug || false
};
const pageBlox = <BlockContext[]><any>await Promise.all(
blocks.map(b => {
try {
b = this.parseId(b);
} catch (err) {
throw new Error(`Cannot parse block ${b}`);
}
const block = Object.assign({}, this.blocks.find(x => x.namespace === b.namespace && x.name === b.name));
if (!block.name && !block.namespace) throw new Error(`Block ${b.namespace}/${b.name} not found`);
return new Promise((resolve, reject) => {
Promise.all(
block.bits.map(b => {
let parsedB;
try {
parsedB = this.parseId(b.bit);
} catch (e) {
throw new Error(`Cannot parse bit ${b.bit}`);
}
const bit = this.bits.find(x => {
return x.namespace === parsedB.namespace && x.name === parsedB.name;
});
if (!bit) throw new Error(`Bit ${b.bit} not found (from block ${block.namespace}/${block.name})`);
return new Promise((resolve, reject) => {
sander.readFile(bit.html, { encoding: "utf-8" }).then($template => {
resolve(Object.assign({}, bit.context, b.context, {$name: bit.name, $template}));
}).catch(reject);
});
})
).then($BITS => {
resolve(Object.assign({}, block.context, {$name: block.name, $template: block.template, $BITS}));
})
.catch(reject);
});
})
);
return {
$PROJECT: Object.assign({}, this.project.context, projCtx),
$PAGE: Object.assign({}, page.context, pageCtx),
$BLOCKS: pageBlox
};
}
private parseId(id: string): { name: string, namespace: string } {
const splitId = id.split("/");
let namespace, name;
if (splitId.length === 1) {
namespace = this.project.name;
name = id;
} else if (splitId.length > 2) {
namespace = splitId[0];
name = splitId.slice(1).join("/");
} else {
namespace = splitId[0];
name = splitId[1];
}
return { namespace, name };
}
} | .map(p => p.name);
break;
} | random_line_split |
ProjectModel.ts | const sander = require("sander");
import { basename, dirname, join } from "path";
const glob = require("glob-promise");
import { PageSettings, BitSettings, BlockSettings, ProjectSettings, Material, BitRef, PageTree, PageContext, BlockContext, CacheableMat, SettingsType } from "./interfaces";
import { flatten } from "./utils";
import { ProjectFactory } from "./ProjectFactory";
export class ProjectModel {
materials: Material[];
pages: PageSettings[];
blocks: BlockSettings[];
bits: BitSettings[];
project: ProjectSettings;
constructor(public workingDir: string) {
this.pages = [];
this.materials = [];
this.blocks = [];
this.bits = [];
this.project = null;
};
// Here we are removing a file, need to find it in the array and take it out.
async remove({type, path, factory}): Promise<string[]> {
let collection;
let item;
let affectedPages: string[];
switch (type) {
case "project":
throw new Error("Missing projectSettings file");
case "block":
{
const namespace = await factory.getProjectName();
const name = basename(path).match(ProjectFactory.getNameRegex(SettingsType.Block))[1];
collection = this.blocks;
item = collection.find(x => x.name === name && x.namespace === namespace);
affectedPages = this.pages
.filter(p => p.blocks.indexOf(`${namespace}/${name}`) > -1)
.map(p => p.name);
break;
}
case "bit":
{
const namespace = await factory.getProjectName();
const name = basename(dirname(path));
collection = this.bits;
item = collection.find(x => x.name === name && x.namespace === namespace);
const affectedBlocks = this.blocks
.filter(b => b.bits.map(x => x.bit).indexOf(`${namespace}/${name}`) > -1);
affectedPages = this.pages
.filter(p => {
let isAffected = false;
affectedBlocks.forEach(bl => {
if (p.blocks.indexOf(`${bl.namespace}/${bl.name}`) > -1) isAffected = true;
});
return isAffected;
})
.map(p => p.name);
break;
}
case "page":
{
const namespace = await factory.getProjectName();
const name = basename(path).match(ProjectFactory.getNameRegex(SettingsType.Page))[1];
collection = this.pages;
item = collection.find(x => x.name === name && x.namespace === namespace);
affectedPages = [name];
break;
}
default:
{
const namespace = await factory.getProjectName();
const name = basename(path);
collection = this.materials;
item = collection.find(x => x.name === name && x.namespace === namespace && x.type === type);
affectedPages = this.pages.map(p => p.name);
break;
}
}
// remove item from collection
const idx = collection.indexOf(item);
collection.splice(idx, 1);
return affectedPages;
}
// Here we are getting a new file. Need to instantiate it via ProjectFactory and then add it to the proper array
async add({type, path, factory}): Promise<string[]> {
if (type === "project") {
await Promise.all(this.blocks.map(this.updateAml));
this.project = await factory.instantiate({type, path});
return this.pages.map(p => p.name);
}
let collection;
switch (type) {
case "block":
collection = this.blocks;
break;
case "bit":
collection = this.bits;
const file = await glob("*.bitSettings.js", {cwd: dirname(path)});
path = join(dirname(path), file[0]);
break;
case "page":
collection = this.pages;
break;
default:
collection = this.materials;
break;
}
const item = await factory.instantiate({type, path});
collection.push(item);
if (type === "page") {
return [item.name];
}
return [];
}
async updateAml(block) {
if (block.source) {
block.bits = await fetchGDOC({tries: 0, settings: block});
}
// Implements exponential backoff
function | ({tries, settings}): Promise<BitRef[]> {
return new Promise((resolve, reject) => {
if (tries < 5) {
settings.source.fetch().then(resolve)
.catch(err => {
if (err.code !== 403) { // Code 403: Rate limit exceeded
return reject(err);
} else {
tries += 1;
const timer = Math.pow(2, tries) * 1000 + Math.random() * 100;
console.log(`Hit google rate limit, automatically trying again in ${timer / 1000} seconds`);
setTimeout(() => {
return fetchGDOC({tries, settings}).then(resolve).catch(reject);
}, timer);
}
});
} else {
return reject(new Error("Gdocs rate limit exceeded. Try again in a few minutes."));
}
});
}
}
// Updating a component. First instantiate it with ProjectFactory and then find the old component and replace it.
async refresh({type, path, factory}): Promise<string[]> {
if (type === "project") {
this.project = await factory.instantiate({type, path});
return this.pages.map(p => p.name);
}
const affectedPages = await this.remove({type, path, factory});
await this.add({type, path, factory});
return affectedPages;
}
async getPageTree({name, debug}: {name: string, debug?: boolean}): Promise<PageTree> {
const page = this.pages.find(x => x.name === name);
if (!page) throw new Error(`Page ${name} not found`); // guard before dereferencing page below
const blocks = [...this.project.defaults.blocks, ...page.blocks];
const scripts = [...this.project.defaults.scripts, ...page.materials.scripts];
const styles = [...this.project.defaults.styles, ...page.materials.styles];
const assets = [...this.project.defaults.assets, ...page.materials.assets];
const context = await this.buildContext({page, blocks, debug});
const mats = this.buildMats({scripts, styles, assets, blocks});
return {
workingDir: this.workingDir,
context,
styles: mats.styles,
scripts: mats.scripts,
assets: mats.assets
};
};
private assembleGlobalMats(mats, type): Material[] {
return mats.reduce((state: Material[], mat: {id: string, as?: string}) => {
const indexOfPresent = state.map(x => x.overridableName).indexOf(mat.as);
let material;
try {
material = this.retrieveMaterial({type, id: mat.id, overridableName: mat.as || null});
} catch (e) {
throw new Error(`Cannot parse material ${mat.id}`);
}
if (indexOfPresent < 0) {
state.push(material);
} else {
state[indexOfPresent] = material;
}
return state;
}, []);
}
private retrieveMaterial({type, id, overridableName}: {type: string, id: string, overridableName?: string}): Material {
const { name, namespace } = this.parseId(id);
const mat = this.materials.find(m => m.namespace === namespace && m.name === name && m.type === type);
return Object.assign({}, mat, {overridableName: overridableName || basename(mat.path)});
}
private buildMats({scripts, styles, assets, blocks}): {scripts: CacheableMat, styles: CacheableMat, assets: Material[]} {
const globalScripts = this.assembleGlobalMats(scripts, "script");
const globalAssets = this.assembleGlobalMats(assets, "asset");
const globalStyles = this.assembleGlobalMats(styles, "style");
const { bitStyles, bitScripts } = this.assembleBitMats(blocks);
const { styleCache, scriptCache } = this.getMatCache({styles, scripts});
return {
scripts: { globals: globalScripts, bits: bitScripts, cache: scriptCache },
styles: { globals: globalStyles, bits: bitStyles, cache: styleCache },
assets: globalAssets
};
}
private getMatCache({styles, scripts}): { scriptCache: Material[], styleCache: Material[] } {
// Using JSON here to clone by value, not reference
const styleMats = JSON.parse(JSON.stringify(this.materials.filter(x => x.type === "style")));
const scriptMats = JSON.parse(JSON.stringify(this.materials.filter(x => x.type === "script")));
const reduceFn = (collection, replacers) => {
return replacers.reduce((state: Material[], mat) => {
const {name, namespace} = this.parseId(mat.id);
const toUpdate = state.find(x => x.namespace === namespace && x.name === name);
toUpdate.overridableName = mat.as || null;
return state;
}, collection);
};
return {
scriptCache: reduceFn(scriptMats, scripts),
styleCache: reduceFn(styleMats, styles)
};
}
private assembleBitMats(blocks): { bitStyles: string[], bitScripts: string[] } {
const bitsWithDupes = blocks.map(block => {
try {
block = this.parseId(block);
} catch (err) {
throw new Error(`Cannot parse block ${block}`);
}
return this.blocks.find(x => x.namespace === block.namespace && x.name === block.name).bits
.map((bitref: BitRef) => {
const {namespace, name} = this.parseId(bitref.bit);
const bit = this.bits.find(x => x.namespace === namespace && x.name === name);
return { script: bit.script, style: bit.style };
});
});
// Flatten and dedupe
const styles = flatten(bitsWithDupes).map(x => x.style);
const scripts = flatten(bitsWithDupes).map(x => x.script);
return { bitStyles: [... new Set([...styles])], bitScripts: [... new Set([...scripts])] };
}
private async buildContext({page, blocks, debug}): Promise<PageContext> {
const pageCtx = {
$name: page.name,
$meta: [...this.project.defaults.metaTags, ...page.meta],
$resources: {
head: [...this.project.defaults.resources.head, ...page.resources.head],
body: [...this.project.defaults.resources.body, ...page.resources.body]
},
$template: page.template,
$deployPath: page.deployPath,
};
const projCtx = {
$name: this.project.name,
$deployRoot: this.project.deployRoot,
$template: this.project.template,
$debug: debug || false
};
const pageBlox = <BlockContext[]><any>await Promise.all(
blocks.map(b => {
try {
b = this.parseId(b);
} catch (err) {
throw new Error(`Cannot parse block ${b}`);
}
const block = Object.assign({}, this.blocks.find(x => x.namespace === b.namespace && x.name === b.name));
if (!block.name && !block.namespace) throw new Error(`Block ${b.namespace}/${b.name} not found`);
return new Promise((resolve, reject) => {
Promise.all(
block.bits.map(b => {
let parsedB;
try {
parsedB = this.parseId(b.bit);
} catch (e) {
throw new Error(`Cannot parse bit ${b.bit}`);
}
const bit = this.bits.find(x => {
return x.namespace === parsedB.namespace && x.name === parsedB.name;
});
if (!bit) throw new Error(`Bit ${b.bit} not found (from block ${block.namespace}/${block.name})`);
return new Promise((resolve, reject) => {
sander.readFile(bit.html, { encoding: "utf-8" }).then($template => {
resolve(Object.assign({}, bit.context, b.context, {$name: bit.name, $template}));
}).catch(reject);
});
})
).then($BITS => {
resolve(Object.assign({}, block.context, {$name: block.name, $template: block.template, $BITS}));
})
.catch(reject);
});
})
);
return {
$PROJECT: Object.assign({}, this.project.context, projCtx),
$PAGE: Object.assign({}, page.context, pageCtx),
$BLOCKS: pageBlox
};
}
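// parseId accepts "name" (namespace defaults to the project name) or
// "namespace/name"; with more than one slash, everything after the first
// segment becomes the name, e.g. "ns/a/b" -> { namespace: "ns", name: "a/b" }.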
private parseId(id: string): { name: string, namespace: string } {
const splitId = id.split("/");
let namespace, name;
if (splitId.length === 1) {
namespace = this.project.name;
name = id;
} else if (splitId.length > 2) {
namespace = splitId[0];
name = splitId.slice(1).join("/");
} else {
namespace = splitId[0];
name = splitId[1];
}
return { namespace, name };
}
}
| fetchGDOC | identifier_name |
ProjectModel.ts | const sander = require("sander");
import { basename, dirname, join } from "path";
const glob = require("glob-promise");
import { PageSettings, BitSettings, BlockSettings, ProjectSettings, Material, BitRef, PageTree, PageContext, BlockContext, CacheableMat, SettingsType } from "./interfaces";
import { flatten } from "./utils";
import { ProjectFactory } from "./ProjectFactory";
export class ProjectModel {
materials: Material[];
pages: PageSettings[];
blocks: BlockSettings[];
bits: BitSettings[];
project: ProjectSettings;
constructor(public workingDir: string) {
this.pages = [];
this.materials = [];
this.blocks = [];
this.bits = [];
this.project = null;
};
// Here we are removing a file, need to find it in the array and take it out.
async remove({type, path, factory}): Promise<string[]> {
let collection;
let item;
let affectedPages: string[];
switch (type) {
case "project":
throw new Error("Missing projectSettings file");
case "block":
{
const namespace = await factory.getProjectName();
const name = basename(path).match(ProjectFactory.getNameRegex(SettingsType.Block))[1];
collection = this.blocks;
item = collection.find(x => x.name === name && x.namespace === namespace);
affectedPages = this.pages
.filter(p => p.blocks.indexOf(`${namespace}/${name}`) > -1)
.map(p => p.name);
break;
}
case "bit":
{
const namespace = await factory.getProjectName();
const name = basename(dirname(path));
collection = this.bits;
item = collection.find(x => x.name === name && x.namespace === namespace);
const affectedBlocks = this.blocks
.filter(b => b.bits.map(x => x.bit).indexOf(`${namespace}/${name}`) > -1);
affectedPages = this.pages
.filter(p => {
let isAffected = false;
affectedBlocks.forEach(bl => {
if (p.blocks.indexOf(`${bl.namespace}/${bl.name}`) > -1) isAffected = true;
});
return isAffected;
})
.map(p => p.name);
break;
}
case "page":
{
const namespace = await factory.getProjectName();
const name = basename(path).match(ProjectFactory.getNameRegex(SettingsType.Page))[1];
collection = this.pages;
item = collection.find(x => x.name === name && x.namespace === namespace);
affectedPages = [name];
break;
}
default:
{
const namespace = await factory.getProjectName();
const name = basename(path);
collection = this.materials;
item = collection.find(x => x.name === name && x.namespace === namespace && x.type === type);
affectedPages = this.pages.map(p => p.name);
break;
}
}
// remove item from collection
const idx = collection.indexOf(item);
collection.splice(idx, 1);
return affectedPages;
}
// Here we are getting a new file. Need to instantiate it via ProjectFactory and then add it to the proper array
async add({type, path, factory}): Promise<string[]> {
if (type === "project") {
await Promise.all(this.blocks.map(this.updateAml));
this.project = await factory.instantiate({type, path});
return this.pages.map(p => p.name);
}
let collection;
switch (type) {
case "block":
collection = this.blocks;
break;
case "bit":
collection = this.bits;
const file = await glob("*.bitSettings.js", {cwd: dirname(path)});
path = join(dirname(path), file[0]);
break;
case "page":
collection = this.pages;
break;
default:
collection = this.materials;
break;
}
const item = await factory.instantiate({type, path});
collection.push(item);
if (type === "page") {
return [item.name];
}
return [];
}
async updateAml(block) {
if (block.source) {
block.bits = await fetchGDOC({tries: 0, settings: block});
}
// Implements exponential backoff
function fetchGDOC({tries, settings}): Promise<BitRef[]> {
return new Promise((resolve, reject) => {
if (tries < 5) {
settings.source.fetch().then(resolve)
.catch(err => {
if (err.code !== 403) { // Code 403: Rate limit exceeded
return reject(err);
} else {
tries += 1;
const timer = Math.pow(2, tries) * 1000 + Math.random() * 100;
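// The delay doubles on each retry (2s, 4s, 8s, 16s) plus up to 100ms of random
// jitter, so concurrent clients don't all retry in lockstep and re-trip the limit.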
console.log(`Hit google rate limit, automatically trying again in ${timer / 1000} seconds`);
setTimeout(() => {
return fetchGDOC({tries, settings}).then(resolve).catch(reject);
}, timer);
}
});
} else {
return reject(new Error("Gdocs rate limit exceeded. Try again in a few minutes."));
}
});
}
}
// Updating a component. First instantiate it with ProjectFactory and then find the old component and replace it.
async refresh({type, path, factory}): Promise<string[]> {
if (type === "project") {
this.project = await factory.instantiate({type, path});
return this.pages.map(p => p.name);
}
const affectedPages = await this.remove({type, path, factory});
await this.add({type, path, factory});
return affectedPages;
}
async getPageTree({name, debug}: {name: string, debug?: boolean}): Promise<PageTree> {
const page = this.pages.find(x => x.name === name);
if (!page) throw new Error(`Page ${name} not found`); // guard before dereferencing page below
const blocks = [...this.project.defaults.blocks, ...page.blocks];
const scripts = [...this.project.defaults.scripts, ...page.materials.scripts];
const styles = [...this.project.defaults.styles, ...page.materials.styles];
const assets = [...this.project.defaults.assets, ...page.materials.assets];
const context = await this.buildContext({page, blocks, debug});
const mats = this.buildMats({scripts, styles, assets, blocks});
return {
workingDir: this.workingDir,
context,
styles: mats.styles,
scripts: mats.scripts,
assets: mats.assets
};
};
private assembleGlobalMats(mats, type): Material[] {
return mats.reduce((state: Material[], mat: {id: string, as?: string}) => {
const indexOfPresent = state.map(x => x.overridableName).indexOf(mat.as);
let material;
try {
material = this.retrieveMaterial({type, id: mat.id, overridableName: mat.as || null});
} catch (e) {
throw new Error(`Cannot parse material ${mat.id}`);
}
if (indexOfPresent < 0) {
state.push(material);
} else {
state[indexOfPresent] = material;
}
return state;
}, []);
}
private retrieveMaterial({type, id, overridableName}: {type: string, id: string, overridableName?: string}): Material {
const { name, namespace } = this.parseId(id);
const mat = this.materials.find(m => m.namespace === namespace && m.name === name && m.type === type);
return Object.assign({}, mat, {overridableName: overridableName || basename(mat.path)});
}
private buildMats({scripts, styles, assets, blocks}): {scripts: CacheableMat, styles: CacheableMat, assets: Material[]} |
private getMatCache({styles, scripts}): { scriptCache: Material[], styleCache: Material[] } {
// Using JSON here to clone by value, not reference
const styleMats = JSON.parse(JSON.stringify(this.materials.filter(x => x.type === "style")));
const scriptMats = JSON.parse(JSON.stringify(this.materials.filter(x => x.type === "script")));
const reduceFn = (collection, replacers) => {
return replacers.reduce((state: Material[], mat) => {
const {name, namespace} = this.parseId(mat.id);
const toUpdate = state.find(x => x.namespace === namespace && x.name === name);
toUpdate.overridableName = mat.as || null;
return state;
}, collection);
};
return {
scriptCache: reduceFn(scriptMats, scripts),
styleCache: reduceFn(styleMats, styles)
};
}
private assembleBitMats(blocks): { bitStyles: string[], bitScripts: string[] } {
const bitsWithDupes = blocks.map(block => {
try {
block = this.parseId(block);
} catch (err) {
throw new Error(`Cannot parse block ${block}`);
}
return this.blocks.find(x => x.namespace === block.namespace && x.name === block.name).bits
.map((bitref: BitRef) => {
const {namespace, name} = this.parseId(bitref.bit);
const bit = this.bits.find(x => x.namespace === namespace && x.name === name);
return { script: bit.script, style: bit.style };
});
});
// Flatten and dedupe
const styles = flatten(bitsWithDupes).map(x => x.style);
const scripts = flatten(bitsWithDupes).map(x => x.script);
return { bitStyles: [... new Set([...styles])], bitScripts: [... new Set([...scripts])] };
}
private async buildContext({page, blocks, debug}): Promise<PageContext> {
const pageCtx = {
$name: page.name,
$meta: [...this.project.defaults.metaTags, ...page.meta],
$resources: {
head: [...this.project.defaults.resources.head, ...page.resources.head],
body: [...this.project.defaults.resources.body, ...page.resources.body]
},
$template: page.template,
$deployPath: page.deployPath,
};
const projCtx = {
$name: this.project.name,
$deployRoot: this.project.deployRoot,
$template: this.project.template,
$debug: debug || false
};
const pageBlox = <BlockContext[]><any>await Promise.all(
blocks.map(b => {
try {
b = this.parseId(b);
} catch (err) {
throw new Error(`Cannot parse block ${b}`);
}
const block = Object.assign({}, this.blocks.find(x => x.namespace === b.namespace && x.name === b.name));
if (!block.name && !block.namespace) throw new Error(`Block ${b.namespace}/${b.name} not found`);
return new Promise((resolve, reject) => {
Promise.all(
block.bits.map(b => {
let parsedB;
try {
parsedB = this.parseId(b.bit);
} catch (e) {
throw new Error(`Cannot parse bit ${b.bit}`);
}
const bit = this.bits.find(x => {
return x.namespace === parsedB.namespace && x.name === parsedB.name;
});
if (!bit) throw new Error(`Bit ${b.bit} not found (from block ${block.namespace}/${block.name})`);
return new Promise((resolve, reject) => {
sander.readFile(bit.html, { encoding: "utf-8" }).then($template => {
resolve(Object.assign({}, bit.context, b.context, {$name: bit.name, $template}));
}).catch(reject);
});
})
).then($BITS => {
resolve(Object.assign({}, block.context, {$name: block.name, $template: block.template, $BITS}));
})
.catch(reject);
});
})
);
return {
$PROJECT: Object.assign({}, this.project.context, projCtx),
$PAGE: Object.assign({}, page.context, pageCtx),
$BLOCKS: pageBlox
};
}
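// parseId accepts "name" (namespace defaults to the project name) or
// "namespace/name"; with more than one slash, everything after the first
// segment becomes the name, e.g. "ns/a/b" -> { namespace: "ns", name: "a/b" }.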
private parseId(id: string): { name: string, namespace: string } {
const splitId = id.split("/");
let namespace, name;
if (splitId.length === 1) {
namespace = this.project.name;
name = id;
} else if (splitId.length > 2) {
namespace = splitId[0];
name = splitId.slice(1).join("/");
} else {
namespace = splitId[0];
name = splitId[1];
}
return { namespace, name };
}
}
| {
const globalScripts = this.assembleGlobalMats(scripts, "script");
const globalAssets = this.assembleGlobalMats(assets, "asset");
const globalStyles = this.assembleGlobalMats(styles, "style");
const { bitStyles, bitScripts } = this.assembleBitMats(blocks);
const { styleCache, scriptCache } = this.getMatCache({styles, scripts});
return {
scripts: { globals: globalScripts, bits: bitScripts, cache: scriptCache },
styles: { globals: globalStyles, bits: bitStyles, cache: styleCache },
assets: globalAssets
};
} | identifier_body |
test_run.py | """
This test will run through benchbuild's execution pipeline.
"""
import os
import unittest
from contextlib import contextmanager
from benchbuild.utils import cmd
def shadow_commands(command):
def shadow_command_fun(func):
def shadow_command_wrapped_fun(self, *args, **kwargs):
cmd.__override_all__ = command
res = func(self, *args, **kwargs)
cmd.__override_all__ = None
return res
return shadow_command_wrapped_fun
return shadow_command_fun
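# While the wrapped function runs, every command resolved through benchbuild's
# `cmd` proxy is replaced by `command` (here "true"), so the pipeline below
# executes harmless no-ops instead of the real tools.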
class TestShadow(unittest.TestCase):
def test_shadow(self):
|
class TestRun(unittest.TestCase):
@shadow_commands("true")
def test_run(self):
from benchbuild import experiment
from benchbuild.utils.actions import Experiment
class MockExp(experiment.Experiment):
NAME = "mock-exp"
def actions_for_project(self, project):
from benchbuild.utils.actions import (
Prepare, Download, Configure, Build, Run, Clean)
inside = None
actns = []
project.builddir = "/tmp/throwaway"
actns = [Prepare(project),
Download(project),
Configure(project),
Build(project),
Run(project),
Clean(project)]
return actns
exp = MockExp(group="polybench")
eactn = Experiment(exp, exp.actions())
old_exists = os.path.exists
os.path.exists = lambda p: True
print(eactn)
eactn()
os.path.exists = old_exists
if __name__ == "__main__":
from benchbuild.utils import log
log.configure()
TestRun().test_run()
| inside = None
true = cmd.true
mkdir = cmd.mkdir
class test_class(object):
@shadow_commands("true")
def shadow_hook(self):
return cmd.mkdir
outside = cmd.mkdir
inside = test_class().shadow_hook()
self.assertEqual(inside.formulate(), true.formulate(),
msg="true (before) is not the same as true (inside)")
self.assertNotEqual(mkdir.formulate(), inside.formulate(),
msg="mkdir (before) is not the same as mkdir (inside)")
self.assertNotEqual(inside.formulate(), outside.formulate(),
msg="true (before) is not the same as true (after)")
self.assertEqual(mkdir.formulate(), outside.formulate(),
msg="mkdir (before) is not the same as mkdir (after)") | identifier_body |
test_run.py | """
This test will run through benchbuild's execution pipeline.
"""
import os
import unittest
from contextlib import contextmanager
from benchbuild.utils import cmd
def shadow_commands(command):
def shadow_command_fun(func):
def shadow_command_wrapped_fun(self, *args, **kwargs):
cmd.__override_all__ = command
res = func(self, *args, **kwargs)
cmd.__override_all__ = None
return res
return shadow_command_wrapped_fun
return shadow_command_fun
class TestShadow(unittest.TestCase):
def test_shadow(self):
inside = None
true = cmd.true
mkdir = cmd.mkdir
class test_class(object): | return cmd.mkdir
outside = cmd.mkdir
inside = test_class().shadow_hook()
self.assertEqual(inside.formulate(), true.formulate(),
msg="true (before) is not the same as true (inside)")
self.assertNotEqual(mkdir.formulate(), inside.formulate(),
msg="mkdir (before) is not the same as mkdir (inside)")
self.assertNotEqual(inside.formulate(), outside.formulate(),
msg="true (before) is not the same as true (after)")
self.assertEqual(mkdir.formulate(), outside.formulate(),
msg="mkdir (before) is not the same as mkdir (after)")
class TestRun(unittest.TestCase):
@shadow_commands("true")
def test_run(self):
from benchbuild import experiment
from benchbuild.utils.actions import Experiment
class MockExp(experiment.Experiment):
NAME = "mock-exp"
def actions_for_project(self, project):
from benchbuild.utils.actions import (
Prepare, Download, Configure, Build, Run, Clean)
inside = None
actns = []
project.builddir = "/tmp/throwaway"
actns = [Prepare(project),
Download(project),
Configure(project),
Build(project),
Run(project),
Clean(project)]
return actns
exp = MockExp(group="polybench")
eactn = Experiment(exp, exp.actions())
old_exists = os.path.exists
os.path.exists = lambda p: True
print(eactn)
eactn()
os.path.exists = old_exists
if __name__ == "__main__":
from benchbuild.utils import log
log.configure()
TestRun().test_run() | @shadow_commands("true")
def shadow_hook(self): | random_line_split |
test_run.py | """
This test will run through benchbuild's execution pipeline.
"""
import os
import unittest
from contextlib import contextmanager
from benchbuild.utils import cmd
def shadow_commands(command):
def | (func):
def shadow_command_wrapped_fun(self, *args, **kwargs):
cmd.__override_all__ = command
res = func(self, *args, **kwargs)
cmd.__override_all__ = None
return res
return shadow_command_wrapped_fun
return shadow_command_fun
class TestShadow(unittest.TestCase):
def test_shadow(self):
inside = None
true = cmd.true
mkdir = cmd.mkdir
class test_class(object):
@shadow_commands("true")
def shadow_hook(self):
return cmd.mkdir
outside = cmd.mkdir
inside = test_class().shadow_hook()
self.assertEqual(inside.formulate(), true.formulate(),
msg="true (before) is not the same as true (inside)")
self.assertNotEqual(mkdir.formulate(), inside.formulate(),
msg="mkdir (before) is not the same as mkdir (inside)")
self.assertNotEqual(inside.formulate(), outside.formulate(),
msg="true (before) is not the same as true (after)")
self.assertEqual(mkdir.formulate(), outside.formulate(),
msg="mkdir (before) is not the same as mkdir (after)")
class TestRun(unittest.TestCase):
@shadow_commands("true")
def test_run(self):
from benchbuild import experiment
from benchbuild.utils.actions import Experiment
class MockExp(experiment.Experiment):
NAME = "mock-exp"
def actions_for_project(self, project):
from benchbuild.utils.actions import (
Prepare, Download, Configure, Build, Run, Clean)
inside = None
actns = []
project.builddir = "/tmp/throwaway"
actns = [Prepare(project),
Download(project),
Configure(project),
Build(project),
Run(project),
Clean(project)]
return actns
exp = MockExp(group="polybench")
eactn = Experiment(exp, exp.actions())
old_exists = os.path.exists
os.path.exists = lambda p: True
print(eactn)
eactn()
os.path.exists = old_exists
if __name__ == "__main__":
from benchbuild.utils import log
log.configure()
TestRun().test_run()
| shadow_command_fun | identifier_name |
test_run.py | """
This test will run through benchbuild's execution pipeline.
"""
import os
import unittest
from contextlib import contextmanager
from benchbuild.utils import cmd
def shadow_commands(command):
def shadow_command_fun(func):
def shadow_command_wrapped_fun(self, *args, **kwargs):
cmd.__override_all__ = command
res = func(self, *args, **kwargs)
cmd.__override_all__ = None
return res
return shadow_command_wrapped_fun
return shadow_command_fun
class TestShadow(unittest.TestCase):
def test_shadow(self):
inside = None
true = cmd.true
mkdir = cmd.mkdir
class test_class(object):
@shadow_commands("true")
def shadow_hook(self):
return cmd.mkdir
outside = cmd.mkdir
inside = test_class().shadow_hook()
self.assertEqual(inside.formulate(), true.formulate(),
msg="true (before) is not the same as true (inside)")
self.assertNotEqual(mkdir.formulate(), inside.formulate(),
msg="mkdir (before) is not the same as mkdir (inside)")
self.assertNotEqual(inside.formulate(), outside.formulate(),
msg="true (before) is not the same as true (after)")
self.assertEqual(mkdir.formulate(), outside.formulate(),
msg="mkdir (before) is not the same as mkdir (after)")
class TestRun(unittest.TestCase):
@shadow_commands("true")
def test_run(self):
from benchbuild import experiment
from benchbuild.utils.actions import Experiment
class MockExp(experiment.Experiment):
NAME = "mock-exp"
def actions_for_project(self, project):
from benchbuild.utils.actions import (
Prepare, Download, Configure, Build, Run, Clean)
inside = None
actns = []
project.builddir = "/tmp/throwaway"
actns = [Prepare(project),
Download(project),
Configure(project),
Build(project),
Run(project),
Clean(project)]
return actns
exp = MockExp(group="polybench")
eactn = Experiment(exp, exp.actions())
old_exists = os.path.exists
os.path.exists = lambda p: True
print(eactn)
eactn()
os.path.exists = old_exists
if __name__ == "__main__":
| from benchbuild.utils import log
log.configure()
TestRun().test_run() | conditional_block |
|
workflows-selection-service.ts | import { flatten, isNull } from "underscore";
import { Step, Workflow } from "../models";
import { WorkflowSelectionStore } from "./workflow-selection-store";
export type WfChainWrapper = {
workflow: Workflow;
isBeforeRunWorkflow?: boolean;
isAfterRunWorkflow?: boolean;
selectedWorkflowBeforeRunWorkflowIndex?: number;
selectedWorkflowAfterRunWorkflowIndex?: number;
};
type WorkflowViewModel = {
selectedWorkflow: Workflow;
workflows: Array<Workflow>;
editedWorkflow: Workflow;
editWorkflowAtIndex: (arg0: number | null) => void;
stepSelected: (arg0: Step, wfIndex: number | undefined, scrollToStep: boolean) => void;
selectedWorkflowChain: Array<WfChainWrapper>;
};
type AngularLocationService = {
search: () => { workflow_id: string };
};
const wfChainWrapper = (wrapper: WfChainWrapper): WfChainWrapper =>
Object.assign(
{
workflow: null,
isBeforeRunWorkflow: true,
isAfterRunWorkflow: false,
selectedWorkflowBeforeRunWorkflowIndex: -1,
selectedWorkflowAfterRunWorkflowIndex: -1
},
wrapper
);
export class WorkflowsSelectionService {
private store: WorkflowSelectionStore;
private location: AngularLocationService;
private static primaryWorkflowName = "primary";
constructor(store: WorkflowSelectionStore, locationService: AngularLocationService) {
this.store = store;
this.location = locationService;
}
private verifySelectedIndex = <T>(
potentialIndex: number | null,
list: Array<T> | null,
checker: (arg0: T) => boolean
): boolean => {
if (isNull(potentialIndex)) {
return false;
}
const entity = list && list[potentialIndex!];
return !!entity && checker(entity);
};
findSelectedWorkflow = (viewModel: WorkflowViewModel): Workflow => {
const idsTotry = [
viewModel.selectedWorkflow?.id,
this.store.lastSelectedWorkflowID,
this.location.search().workflow_id,
WorkflowsSelectionService.primaryWorkflowName
];
let selectedWf = null;
let idIndex = 0;
while (!selectedWf && idIndex < idsTotry.length) {
selectedWf = viewModel.workflows.find(item => item.id === idsTotry[idIndex]);
idIndex++;
}
return selectedWf || viewModel.workflows[0];
};
restoreSelection = (viewModel: WorkflowViewModel): void => {
this.rearrangeSelection(
viewModel,
this.findSelectedWorkflow(viewModel),
this.store.lastEditedWorkflowID || undefined
);
if (
this.verifySelectedIndex(
this.store.lastEditedWorkflowIndex,
viewModel.selectedWorkflowChain,
this.store.checkLastSelectedWorkflow
)
) |
const editedWorkflow = viewModel.editedWorkflow;
if (
this.store.lastSelectedStepIndex !== null &&
this.verifySelectedIndex(
this.store.lastSelectedStepIndex,
editedWorkflow?.steps,
this.store.checkLastSelectedstep
)
) {
const step = editedWorkflow?.steps[this.store.lastSelectedStepIndex];
const scrollToStep = !(this.store.lastEditedWorkflowIndex === 0 && this.store.lastSelectedStepIndex === 0);
viewModel.stepSelected(step, undefined, scrollToStep);
}
};
rearrangeSelection = (viewModel: WorkflowViewModel, wf: Workflow, editedId?: string): void => {
viewModel.selectedWorkflow = wf;
// update selection chain
viewModel.selectedWorkflowChain = [];
const constructWorkflowChain = (wfs: Array<Workflow>, before: boolean): Array<WfChainWrapper> =>
flatten(
wfs.map((innerWf: Workflow, index: number) =>
innerWf.workflowChain(viewModel.workflows).map((aWorkflow: Workflow) =>
wfChainWrapper({
workflow: aWorkflow,
isBeforeRunWorkflow: before,
isAfterRunWorkflow: !before,
selectedWorkflowBeforeRunWorkflowIndex: before && aWorkflow == innerWf ? index : -1,
selectedWorkflowAfterRunWorkflowIndex: !before && aWorkflow == innerWf ? index : -1
})
)
)
);
const beforeWfs = constructWorkflowChain(wf.beforeRunWorkflows(viewModel.workflows), true);
const afterWfs = constructWorkflowChain(wf.afterRunWorkflows(viewModel.workflows), false);
viewModel.selectedWorkflowChain.push(
...beforeWfs,
wfChainWrapper({ workflow: viewModel.selectedWorkflow }),
...afterWfs
);
// save it to the store
editedId = editedId || wf.id;
let editedIndex = viewModel.selectedWorkflowChain.findIndex(({ workflow }) => workflow.id === editedId);
if (editedIndex === -1) {
editedIndex = viewModel.selectedWorkflowChain.findIndex(({ workflow }) => workflow.id === wf.id);
}
viewModel.editWorkflowAtIndex(editedIndex);
};
}
export default (store: WorkflowSelectionStore, location: AngularLocationService): WorkflowsSelectionService =>
new WorkflowsSelectionService(store, location);
| {
viewModel.editWorkflowAtIndex(this.store.lastEditedWorkflowIndex);
} | conditional_block |
workflows-selection-service.ts | import { flatten, isNull } from "underscore";
import { Step, Workflow } from "../models";
import { WorkflowSelectionStore } from "./workflow-selection-store";
export type WfChainWrapper = {
workflow: Workflow;
isBeforeRunWorkflow?: boolean;
isAfterRunWorkflow?: boolean;
selectedWorkflowBeforeRunWorkflowIndex?: number;
selectedWorkflowAfterRunWorkflowIndex?: number;
};
type WorkflowViewModel = {
selectedWorkflow: Workflow;
workflows: Array<Workflow>;
editedWorkflow: Workflow;
editWorkflowAtIndex: (arg0: number | null) => void;
stepSelected: (arg0: Step, wfIndex: number | undefined, scrollToStep: boolean) => void;
selectedWorkflowChain: Array<WfChainWrapper>;
};
type AngularLocationService = {
search: () => { workflow_id: string };
};
const wfChainWrapper = (wrapper: WfChainWrapper): WfChainWrapper =>
Object.assign(
{
workflow: null,
isBeforeRunWorkflow: true,
isAfterRunWorkflow: false,
selectedWorkflowBeforeRunWorkflowIndex: -1,
selectedWorkflowAfterRunWorkflowIndex: -1
},
wrapper
);
export class | {
private store: WorkflowSelectionStore;
private location: AngularLocationService;
private static primaryWorkflowName = "primary";
constructor(store: WorkflowSelectionStore, locationService: AngularLocationService) {
this.store = store;
this.location = locationService;
}
private verifySelectedIndex = <T>(
potentialIndex: number | null,
list: Array<T> | null,
checker: (arg0: T) => boolean
): boolean => {
if (isNull(potentialIndex)) {
return false;
}
const entity = list && list[potentialIndex!];
return !!entity && checker(entity);
};
findSelectedWorkflow = (viewModel: WorkflowViewModel): Workflow => {
const idsTotry = [
viewModel.selectedWorkflow?.id,
this.store.lastSelectedWorkflowID,
this.location.search().workflow_id,
WorkflowsSelectionService.primaryWorkflowName
];
let selectedWf = null;
let idIndex = 0;
while (!selectedWf && idIndex < idsTotry.length) {
selectedWf = viewModel.workflows.find(item => item.id === idsTotry[idIndex]);
idIndex++;
}
return selectedWf || viewModel.workflows[0];
};
restoreSelection = (viewModel: WorkflowViewModel): void => {
this.rearrangeSelection(
viewModel,
this.findSelectedWorkflow(viewModel),
this.store.lastEditedWorkflowID || undefined
);
if (
this.verifySelectedIndex(
this.store.lastEditedWorkflowIndex,
viewModel.selectedWorkflowChain,
this.store.checkLastSelectedWorkflow
)
) {
viewModel.editWorkflowAtIndex(this.store.lastEditedWorkflowIndex);
}
const editedWorkflow = viewModel.editedWorkflow;
if (
this.store.lastSelectedStepIndex !== null &&
this.verifySelectedIndex(
this.store.lastSelectedStepIndex,
editedWorkflow?.steps,
this.store.checkLastSelectedstep
)
) {
const step = editedWorkflow?.steps[this.store.lastSelectedStepIndex];
const scrollToStep = !(this.store.lastEditedWorkflowIndex === 0 && this.store.lastSelectedStepIndex === 0);
viewModel.stepSelected(step, undefined, scrollToStep);
}
};
rearrangeSelection = (viewModel: WorkflowViewModel, wf: Workflow, editedId?: string): void => {
viewModel.selectedWorkflow = wf;
// update selection chain
viewModel.selectedWorkflowChain = [];
const constructWorkflowChain = (wfs: Array<Workflow>, before: boolean): Array<WfChainWrapper> =>
flatten(
wfs.map((innerWf: Workflow, index: number) =>
innerWf.workflowChain(viewModel.workflows).map((aWorkflow: Workflow) =>
wfChainWrapper({
workflow: aWorkflow,
isBeforeRunWorkflow: before,
isAfterRunWorkflow: !before,
selectedWorkflowBeforeRunWorkflowIndex: before && aWorkflow == innerWf ? index : -1,
selectedWorkflowAfterRunWorkflowIndex: !before && aWorkflow == innerWf ? index : -1
})
)
)
);
const beforeWfs = constructWorkflowChain(wf.beforeRunWorkflows(viewModel.workflows), true);
const afterWfs = constructWorkflowChain(wf.afterRunWorkflows(viewModel.workflows), false);
viewModel.selectedWorkflowChain.push(
...beforeWfs,
wfChainWrapper({ workflow: viewModel.selectedWorkflow }),
...afterWfs
);
// save it to the store
editedId = editedId || wf.id;
let editedIndex = viewModel.selectedWorkflowChain.findIndex(({ workflow }) => workflow.id === editedId);
if (editedIndex === -1) {
editedIndex = viewModel.selectedWorkflowChain.findIndex(({ workflow }) => workflow.id === wf.id);
}
viewModel.editWorkflowAtIndex(editedIndex);
};
}
export default (store: WorkflowSelectionStore, location: AngularLocationService): WorkflowsSelectionService =>
new WorkflowsSelectionService(store, location);
| WorkflowsSelectionService | identifier_name |
workflows-selection-service.ts | import { flatten, isNull } from "underscore";
import { Step, Workflow } from "../models";
import { WorkflowSelectionStore } from "./workflow-selection-store";
export type WfChainWrapper = {
workflow: Workflow;
isBeforeRunWorkflow?: boolean;
isAfterRunWorkflow?: boolean;
selectedWorkflowBeforeRunWorkflowIndex?: number;
selectedWorkflowAfterRunWorkflowIndex?: number;
};
type WorkflowViewModel = {
selectedWorkflow: Workflow;
workflows: Array<Workflow>;
editedWorkflow: Workflow;
editWorkflowAtIndex: (arg0: number | null) => void;
stepSelected: (arg0: Step, wfIndex: number | undefined, scrollToStep: boolean) => void;
selectedWorkflowChain: Array<WfChainWrapper>;
};
type AngularLocationService = {
search: () => { workflow_id: string };
};
const wfChainWrapper = (wrapper: WfChainWrapper): WfChainWrapper =>
Object.assign(
{ | selectedWorkflowAfterRunWorkflowIndex: -1
},
wrapper
);
export class WorkflowsSelectionService {
private store: WorkflowSelectionStore;
private location: AngularLocationService;
private static primaryWorkflowName = "primary";
constructor(store: WorkflowSelectionStore, locationService: AngularLocationService) {
this.store = store;
this.location = locationService;
}
private verifySelectedIndex = <T>(
potentialIndex: number | null,
list: Array<T> | null,
checker: (arg0: T) => boolean
): boolean => {
if (isNull(potentialIndex)) {
return false;
}
const entity = list && list[potentialIndex!];
return !!entity && checker(entity);
};
findSelectedWorkflow = (viewModel: WorkflowViewModel): Workflow => {
const idsTotry = [
viewModel.selectedWorkflow?.id,
this.store.lastSelectedWorkflowID,
this.location.search().workflow_id,
WorkflowsSelectionService.primaryWorkflowName
];
let selectedWf = null;
let idIndex = 0;
while (!selectedWf && idIndex < idsTotry.length) {
selectedWf = viewModel.workflows.find(item => item.id === idsTotry[idIndex]);
idIndex++;
}
return selectedWf || viewModel.workflows[0];
};
restoreSelection = (viewModel: WorkflowViewModel): void => {
this.rearrangeSelection(
viewModel,
this.findSelectedWorkflow(viewModel),
this.store.lastEditedWorkflowID || undefined
);
if (
this.verifySelectedIndex(
this.store.lastEditedWorkflowIndex,
viewModel.selectedWorkflowChain,
this.store.checkLastSelectedWorkflow
)
) {
viewModel.editWorkflowAtIndex(this.store.lastEditedWorkflowIndex);
}
const editedWorkflow = viewModel.editedWorkflow;
if (
this.store.lastSelectedStepIndex !== null &&
this.verifySelectedIndex(
this.store.lastSelectedStepIndex,
editedWorkflow?.steps,
this.store.checkLastSelectedstep
)
) {
const step = editedWorkflow?.steps[this.store.lastSelectedStepIndex];
const scrollToStep = !(this.store.lastEditedWorkflowIndex === 0 && this.store.lastSelectedStepIndex === 0);
viewModel.stepSelected(step, undefined, scrollToStep);
}
};
rearrangeSelection = (viewModel: WorkflowViewModel, wf: Workflow, editedId?: string): void => {
viewModel.selectedWorkflow = wf;
// update selection chain
viewModel.selectedWorkflowChain = [];
const constructWorkflowChain = (wfs: Array<Workflow>, before: boolean): Array<WfChainWrapper> =>
flatten(
wfs.map((innerWf: Workflow, index: number) =>
innerWf.workflowChain(viewModel.workflows).map((aWorkflow: Workflow) =>
wfChainWrapper({
workflow: aWorkflow,
isBeforeRunWorkflow: before,
isAfterRunWorkflow: !before,
selectedWorkflowBeforeRunWorkflowIndex: before && aWorkflow == innerWf ? index : -1,
selectedWorkflowAfterRunWorkflowIndex: !before && aWorkflow == innerWf ? index : -1
})
)
)
);
const beforeWfs = constructWorkflowChain(wf.beforeRunWorkflows(viewModel.workflows), true);
const afterWfs = constructWorkflowChain(wf.afterRunWorkflows(viewModel.workflows), false);
viewModel.selectedWorkflowChain.push(
...beforeWfs,
wfChainWrapper({ workflow: viewModel.selectedWorkflow }),
...afterWfs
);
// save it to the store
editedId = editedId || wf.id;
let editedIndex = viewModel.selectedWorkflowChain.findIndex(({ workflow }) => workflow.id === editedId);
if (editedIndex === -1) {
editedIndex = viewModel.selectedWorkflowChain.findIndex(({ workflow }) => workflow.id === wf.id);
}
viewModel.editWorkflowAtIndex(editedIndex);
};
}
export default (store: WorkflowSelectionStore, location: AngularLocationService): WorkflowsSelectionService =>
new WorkflowsSelectionService(store, location); | workflow: null,
isBeforeRunWorkflow: true,
isAfterRunWorkflow: false,
selectedWorkflowBeforeRunWorkflowIndex: -1, | random_line_split |
wrap.js | var fs = require('fs');
var dot = require('dot');
var defaults = require('defaults');
var Block = require('glint-block');
var Style = require('glint-plugin-block-style-editable');
var TextBlock = require('glint-block-text');
var MDBlock = require('glint-block-markdown');
var MetaBlock = require('glint-block-meta');
var CKEditorBlock = require('glint-block-ckeditor');
var Adapter = require('glint-adapter');
var PageAdapter = require('page-adapter');
var Container = require('glint-container');
var Wrap = require('glint-wrap');
var Widget = require('glint-widget');
var LayoutWrap = require('wrap-layout');
var template = fs.readFileSync(__dirname + '/index.dot', 'utf-8');
var compiled = dot.template(template);
function text() {
return Block(TextBlock()).use(Style());
}
function markdown() |
function editor() {
return Block(CKEditorBlock()).use(Style());
}
exports = module.exports = function wrap(o) {
o = o || {};
var wrap = Wrap();
var blocks = {
'home-title': text().selector('[data-id=home-title]'),
'home-teaser': editor().selector('[data-id=home-teaser]'),
'home-subtitle': markdown().selector('[data-id=home-subtitle]'),
'home-box-1': markdown().selector('[data-id=home-box-1]'),
'home-box-2': markdown().selector('[data-id=home-box-2]'),
'home-box-3': markdown().selector('[data-id=home-box-3]'),
'home-box-4': markdown().selector('[data-id=home-box-4]'),
'home-box-5': markdown().selector('[data-id=home-box-5]'),
'home-box-6': markdown().selector('[data-id=home-box-6]'),
'www-title': text().selector('[data-id=www-title]'),
'www-content': editor().selector('[data-id=www-content]'),
'bb-title': text().selector('[data-id=bb-title]'),
'bb-content': markdown().selector('[data-id=bb-content]'),
'doc-title': text().selector('[data-id=doc-title]'),
'doc-content': markdown().selector('[data-id=doc-content]'),
'img-title': text().selector('[data-id=img-title]'),
'img-content': editor().selector('[data-id=img-content]'),
'contact-title': text().selector('[data-id=contact-title]'),
'contact-content': markdown().selector('[data-id=contact-content]'),
meta: Block(MetaBlock())
};
var adapter = o.adapter || PageAdapter(o);
var db = o.db || 'glint';
var type = o.type || 'main';
var id = o.id || 'main';
var templateData = o.templateData || '__template__';
var homeAdapter = Adapter(adapter)
.db(db)
.type(type)
var container = Container(blocks, homeAdapter)
.id(id)
.template(templateData);
wrap
.parallel(container)
.series('content', Widget(function(options) {
return compiled(options)
}).place('force:server'))
.series(LayoutWrap(o.layout).place('force:server'))
wrap.routes = adapter.routes;
return wrap;
};
| {
return Block(MDBlock()).use(Style());
} | identifier_body |
wrap.js | var fs = require('fs');
var dot = require('dot');
var defaults = require('defaults');
var Block = require('glint-block');
var Style = require('glint-plugin-block-style-editable');
var TextBlock = require('glint-block-text');
var MDBlock = require('glint-block-markdown');
var MetaBlock = require('glint-block-meta');
var CKEditorBlock = require('glint-block-ckeditor');
var Adapter = require('glint-adapter');
var PageAdapter = require('page-adapter');
var Container = require('glint-container');
var Wrap = require('glint-wrap');
var Widget = require('glint-widget');
var LayoutWrap = require('wrap-layout');
var template = fs.readFileSync(__dirname + '/index.dot', 'utf-8');
var compiled = dot.template(template);
function text() {
return Block(TextBlock()).use(Style());
}
function markdown() {
return Block(MDBlock()).use(Style());
}
function | () {
return Block(CKEditorBlock()).use(Style());
}
exports = module.exports = function wrap(o) {
o = o || {};
var wrap = Wrap();
var blocks = {
'home-title': text().selector('[data-id=home-title]'),
'home-teaser': editor().selector('[data-id=home-teaser]'),
'home-subtitle': markdown().selector('[data-id=home-subtitle]'),
'home-box-1': markdown().selector('[data-id=home-box-1]'),
'home-box-2': markdown().selector('[data-id=home-box-2]'),
'home-box-3': markdown().selector('[data-id=home-box-3]'),
'home-box-4': markdown().selector('[data-id=home-box-4]'),
'home-box-5': markdown().selector('[data-id=home-box-5]'),
'home-box-6': markdown().selector('[data-id=home-box-6]'),
'www-title': text().selector('[data-id=www-title]'),
'www-content': editor().selector('[data-id=www-content]'),
'bb-title': text().selector('[data-id=bb-title]'),
'bb-content': markdown().selector('[data-id=bb-content]'),
'doc-title': text().selector('[data-id=doc-title]'),
'doc-content': markdown().selector('[data-id=doc-content]'),
'img-title': text().selector('[data-id=img-title]'),
'img-content': editor().selector('[data-id=img-content]'),
'contact-title': text().selector('[data-id=contact-title]'),
'contact-content': markdown().selector('[data-id=contact-content]'),
meta: Block(MetaBlock())
};
var adapter = o.adapter || PageAdapter(o);
var db = o.db || 'glint';
var type = o.type || 'main';
var id = o.id || 'main';
var templateData = o.templateData || '__template__';
var homeAdapter = Adapter(adapter)
.db(db)
.type(type)
var container = Container(blocks, homeAdapter)
.id(id)
.template(templateData);
wrap
.parallel(container)
.series('content', Widget(function(options) {
return compiled(options)
}).place('force:server'))
.series(LayoutWrap(o.layout).place('force:server'))
wrap.routes = adapter.routes;
return wrap;
};
| editor | identifier_name |
wrap.js | var fs = require('fs');
var dot = require('dot');
var defaults = require('defaults');
var Block = require('glint-block');
var Style = require('glint-plugin-block-style-editable');
var TextBlock = require('glint-block-text');
var MDBlock = require('glint-block-markdown');
var MetaBlock = require('glint-block-meta');
var CKEditorBlock = require('glint-block-ckeditor');
var Adapter = require('glint-adapter');
var PageAdapter = require('page-adapter');
var Container = require('glint-container');
var Wrap = require('glint-wrap');
var Widget = require('glint-widget');
var LayoutWrap = require('wrap-layout');
var template = fs.readFileSync(__dirname + '/index.dot', 'utf-8');
var compiled = dot.template(template);
function text() {
return Block(TextBlock()).use(Style());
}
function markdown() {
return Block(MDBlock()).use(Style());
}
function editor() {
return Block(CKEditorBlock()).use(Style());
}
exports = module.exports = function wrap(o) {
o = o || {};
var wrap = Wrap();
var blocks = {
'home-title': text().selector('[data-id=home-title]'),
'home-teaser': editor().selector('[data-id=home-teaser]'),
'home-subtitle': markdown().selector('[data-id=home-subtitle]'),
'home-box-1': markdown().selector('[data-id=home-box-1]'),
'home-box-2': markdown().selector('[data-id=home-box-2]'),
'home-box-3': markdown().selector('[data-id=home-box-3]'),
'home-box-4': markdown().selector('[data-id=home-box-4]'), |
'bb-title': text().selector('[data-id=bb-title]'),
'bb-content': markdown().selector('[data-id=bb-content]'),
'doc-title': text().selector('[data-id=doc-title]'),
'doc-content': markdown().selector('[data-id=doc-content]'),
'img-title': text().selector('[data-id=img-title]'),
'img-content': editor().selector('[data-id=img-content]'),
'contact-title': text().selector('[data-id=contact-title]'),
'contact-content': markdown().selector('[data-id=contact-content]'),
meta: Block(MetaBlock())
};
var adapter = o.adapter || PageAdapter(o);
var db = o.db || 'glint';
var type = o.type || 'main';
var id = o.id || 'main';
var templateData = o.templateData || '__template__';
var homeAdapter = Adapter(adapter)
.db(db)
.type(type)
var container = Container(blocks, homeAdapter)
.id(id)
.template(templateData);
wrap
.parallel(container)
.series('content', Widget(function(options) {
return compiled(options)
}).place('force:server'))
.series(LayoutWrap(o.layout).place('force:server'))
wrap.routes = adapter.routes;
return wrap;
}; | 'home-box-5': markdown().selector('[data-id=home-box-5]'),
'home-box-6': markdown().selector('[data-id=home-box-6]'),
'www-title': text().selector('[data-id=www-title]'),
'www-content': editor().selector('[data-id=www-content]'), | random_line_split |
textedit.rs | //! Editing text in this library is handled by either `nk_edit_string` or
//! `nk_edit_buffer`. But like almost everything in this library there are multiple
//! ways of doing it and a balance between control and ease of use with memory
//! as well as functionality controlled by flags.
//!
//! This library generally allows three different levels of memory control:
//! First off is the most basic way of just providing a simple char array with
//! string length. This method is probably the easiest way of handling simple
//! user text input. Main upside is complete control over memory while the biggest
//! downside in comparison with the other two approaches is missing undo/redo.
//!
//! For UIs that require undo/redo the second way was created. It is based on
//! a fixed size nk_text_edit struct, which has an internal undo/redo stack.
//! This is mainly useful if you want something more like a text editor but don't want
//! to have a dynamically growing buffer.
//!
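//! A minimal sketch of this fixed-size approach (struct and function names are
//! assumed from the underlying C API and may differ in these bindings):
//!
//! ```ignore
//! let mut memory = [0u8; 256];
//! let mut state: nk_text_edit = Default::default();
//! // fixed-size editor: undo/redo is available, but the buffer never grows
//! nk_textedit_init_fixed(&mut state, memory.as_mut_ptr() as *mut _, memory.len());
//! ```
//!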
| //! The final way is using a dynamically growing nk_text_edit struct, which
//! has both a default version if you don't care where memory comes from and an
//! allocator version if you do. While the text editor is quite powerful for its
//! complexity I would not recommend editing gigabytes of data with it.
//! It is rather designed for use cases which make sense for a GUI library, not for
//! a full-blown text editor. | random_line_split
|
IconButtonProps.ts | import React from 'react';
import {
OverridableComponent,
OverridableStringUnion,
OverridableTypeMap,
OverrideProps,
} from '@mui/types';
import { SxProps } from '../styles/defaultTheme';
import { ColorPaletteProp, VariantProp } from '../styles/types';
export type IconButtonSlot = 'root';
export interface IconButtonPropsVariantOverrides {}
export interface IconButtonPropsColorOverrides {}
export interface IconButtonPropsSizeOverrides {}
export interface IconButtonTypeMap<P = {}, D extends React.ElementType = 'button'> {
props: P & {
/**
* A ref for imperative actions. It currently only supports `focusVisible()` action.
*/
action?: React.Ref<{
focusVisible(): void;
}>;
/**
* The color of the component. It supports those theme colors that make sense for this component.
* @default 'primary'
*/
color?: OverridableStringUnion<ColorPaletteProp, IconButtonPropsColorOverrides>;
/**
* If `true`, the component is disabled.
* @default false
*/
disabled?: boolean;
/**
* This prop can help identify which element has keyboard focus.
* The class name will be applied when the element gains the focus through keyboard interaction.
* It's a polyfill for the [CSS :focus-visible selector](https://drafts.csswg.org/selectors-4/#the-focus-visible-pseudo).
* The rationale for using this feature [is explained here](https://github.com/WICG/focus-visible/blob/HEAD/explainer.md).
* A [polyfill can be used](https://github.com/WICG/focus-visible) to apply a `focus-visible` class to other components
* if needed.
*/
focusVisibleClassName?: string;
/**
* The size of the component.
*/
size?: OverridableStringUnion<'sm' | 'md' | 'lg', IconButtonPropsSizeOverrides>;
/** | /**
* @default 0
*/
tabIndex?: NonNullable<React.HTMLAttributes<any>['tabIndex']>;
/**
* The variant to use.
* @default 'light'
*/
variant?: OverridableStringUnion<VariantProp, IconButtonPropsVariantOverrides>;
};
defaultComponent: D;
}
export interface ExtendIconButtonTypeMap<M extends OverridableTypeMap> {
props: M['props'] & IconButtonTypeMap['props'];
defaultComponent: M['defaultComponent'];
}
export type IconButtonProps<
D extends React.ElementType = IconButtonTypeMap['defaultComponent'],
P = {
component?: React.ElementType;
},
> = OverrideProps<IconButtonTypeMap<P, D>, D>;
export type ExtendIconButton<M extends OverridableTypeMap> = ((
props: OverrideProps<ExtendIconButtonTypeMap<M>, 'a'>,
) => JSX.Element) &
OverridableComponent<ExtendIconButtonTypeMap<M>>; | * The system prop that allows defining system overrides as well as additional CSS styles.
*/
sx?: SxProps; | random_line_split |
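// Illustrative usage of a component typed with these props (the import path
// below is an assumption, not something this file defines):
//
//   import IconButton from '@mui/joy/IconButton';
//
//   <IconButton color="primary" variant="light" size="md" aria-label="Close" />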
context.rs | //
// SOS: the Stupid Operating System
// by Eliza Weisman ([email protected])
//
// Copyright (c) 2015-2017 Eliza Weisman
// Released under the terms of the MIT license. See `LICENSE` in the root
// directory of this repository for more information.
//
//! `x86_64` execution contexts.
//!
//! This is intended to be general-purpose and composable, so that the same
//! code can be reused for interrupts and for multithreading.
use core::mem;
use core::fmt;
use super::flags::{Flags as RFlags};
use super::segment;
/// Registers pushed to the stack when handling an interrupt or context switch.
#[repr(C, packed)]
#[derive(Copy, Clone)]
pub struct Registers { pub rsi: u64
, pub rdi: u64
, pub r11: u64
, pub r10: u64
, pub r9: u64
, pub r8: u64
, pub rdx: u64
, pub rcx: u64
, pub rax: u64
}
impl Registers {
/// Transform this struct into an array of `u64`s
/// (if you would ever want to do this)
/// TODO: rewrite this to be a `convert::Into` implementation.
// - eliza, 03/09/2017
pub unsafe fn to_array(&self) -> [u64; 9] {
// [ self.rsi, self.rdi, self.r11
// , self.r10, self.r9, self.r8
// , self.rdx, self.rcx, self.rax
// ]
// using transmute is probably faster and we're already unsafe...
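        // Safety: `Registers` is `#[repr(C, packed)]` and holds exactly nine
        // `u64` fields, so its size and layout match `[u64; 9]`.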
mem::transmute(*self)
}
/// Create a new empty set of Registers
pub const fn empty() -> Self {
Registers { rsi: 0, rdi: 0, r11: 0
, r10: 0, r9: 0, r8: 0
, rdx: 0, rcx: 0, rax: 0
}
}
/// Push the caller-saved registers to the stack
/// (such as when handling a context switch or interrupt).
///
/// THIS FUNCTION IS NAKED. DO NOT CALL IT NORMALLY.
#[naked]
#[inline(always)]
pub unsafe fn push() {
asm!( "push rax
push rcx
push rdx
push r8
push r9
push r10
push r11
push rdi
push rsi"
:::: "intel"
, "volatile");
}
    /// Pop the caller-saved registers off the stack
/// (such as when handling a context switch or interrupt).
///
/// THIS FUNCTION IS NAKED. DO NOT CALL IT NORMALLY.
#[naked]
#[inline(always)]
pub unsafe fn pop() {
asm!( "pop rsi
pop rdi
pop r11
pop r10
pop r9
pop r8
pop rdx
pop rcx
pop rax"
:::: "intel"
, "volatile");
}
}
impl fmt::Debug for Registers {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!( f
, " RSI: {:#018x} RDI: {:#018x} R11: {:#018x}\n \
R10: {:#018x} R9: {:#018x} R8: {:#018x}\n \
RDX: {:#018x} RCX: {:#018x} RAX: {:#018x}"
, self.rsi, self.rdi, self.r11
, self.r10, self.r9, self.r8
, self.rdx, self.rcx, self.rax)
}
}
#[repr(C, packed)]
pub struct InterruptFrame {
// this is the actual value of the interrupt stack frame context,
// not the old one (which is wrong). note that the old one seems to cause
// stack misalignment.
// -- eliza, october 4th, 2016
/// Value of the instruction pointer (`$rip`) register
pub rip: *const u8
, /// Value of the code segment (`$cs`) register
pub cs: segment::Selector
, __pad_1: u32
, __pad_2: u16
, /// Value of the CPU flags (`$rflags`) register
pub rflags: RFlags
, /// Value of the stack pointer (`$rsp`) register
// TODO: should this actually be a pointer?
pub rsp: *const u8
, /// Value of the stack segment (`$ss`) register
pub ss: segment::Selector
, __pad_3: u32
, __pad_4: u16
}
| use super::InterruptFrame;
assert_eq!(size_of::<InterruptFrame>(), 32);
}
}
impl fmt::Debug for InterruptFrame {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!( f
, "Interrupt Frame: \
\n instruction pointer: {:p} \
\n code segment: {} \
\n rflags: {:?} \
\n stack pointer: {:p} \
\n stack segment: {}"
, self.rip
// , self.__pad_1, self.__pad_2
, self.cs
, self.rflags
, self.rsp
// , self.__pad_3, self.__pad_4
, self.ss)
}
}
/// Thread execution context
#[repr(C, packed)]
pub struct Context { /// Value of the stack pointer (`rsp`) register
pub rsp: *mut u8
, /// Value of the caller-saved registers
pub registers: Registers
, /// Value of the instruction pointer (`rip`) register
pub rip: *mut u8
//, pub stack: [u8] // TODO: should be box
}
impl Context {
pub fn empty() -> Self {
unsafe {
Context { rsp: mem::transmute(0u64)
, registers: Registers::empty()
, rip: mem::transmute(0u64)
//, stack: [0u8; 8]
}
}
}
} | #[cfg(test)]
mod test {
#[test]
fn test_interrupt_frame_correct_size() {
use core::mem::size_of; | random_line_split |
context.rs | //
// SOS: the Stupid Operating System
// by Eliza Weisman ([email protected])
//
// Copyright (c) 2015-2017 Eliza Weisman
// Released under the terms of the MIT license. See `LICENSE` in the root
// directory of this repository for more information.
//
//! `x86_64` execution contexts.
//!
//! This is intended to be general-purpose and composable, so that the same
//! code can be reused for interrupts and for multithreading.
use core::mem;
use core::fmt;
use super::flags::{Flags as RFlags};
use super::segment;
/// Registers pushed to the stack when handling an interrupt or context switch.
#[repr(C, packed)]
#[derive(Copy, Clone)]
pub struct Registers { pub rsi: u64
, pub rdi: u64
, pub r11: u64
, pub r10: u64
, pub r9: u64
, pub r8: u64
, pub rdx: u64
, pub rcx: u64
, pub rax: u64
}
impl Registers {
/// Transform this struct into an array of `u64`s
/// (if you would ever want to do this)
/// TODO: rewrite this to be a `convert::Into` implementation.
// - eliza, 03/09/2017
pub unsafe fn to_array(&self) -> [u64; 9] {
// [ self.rsi, self.rdi, self.r11
// , self.r10, self.r9, self.r8
// , self.rdx, self.rcx, self.rax
// ]
// using transmute is probably faster and we're already unsafe...
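        // Safety: `Registers` is `#[repr(C, packed)]` and holds exactly nine
        // `u64` fields, so its size and layout match `[u64; 9]`.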
mem::transmute(*self)
}
/// Create a new empty set of Registers
pub const fn empty() -> Self {
Registers { rsi: 0, rdi: 0, r11: 0
, r10: 0, r9: 0, r8: 0
, rdx: 0, rcx: 0, rax: 0
}
}
/// Push the caller-saved registers to the stack
/// (such as when handling a context switch or interrupt).
///
/// THIS FUNCTION IS NAKED. DO NOT CALL IT NORMALLY.
#[naked]
#[inline(always)]
pub unsafe fn push() {
asm!( "push rax
push rcx
push rdx
push r8
push r9
push r10
push r11
push rdi
push rsi"
:::: "intel"
, "volatile");
}
    /// Pop the caller-saved registers off the stack
/// (such as when handling a context switch or interrupt).
///
/// THIS FUNCTION IS NAKED. DO NOT CALL IT NORMALLY.
#[naked]
#[inline(always)]
pub unsafe fn pop() {
asm!( "pop rsi
pop rdi
pop r11
pop r10
pop r9
pop r8
pop rdx
pop rcx
pop rax"
:::: "intel"
, "volatile");
}
}
impl fmt::Debug for Registers {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!( f
, " RSI: {:#018x} RDI: {:#018x} R11: {:#018x}\n \
R10: {:#018x} R9: {:#018x} R8: {:#018x}\n \
RDX: {:#018x} RCX: {:#018x} RAX: {:#018x}"
, self.rsi, self.rdi, self.r11
, self.r10, self.r9, self.r8
, self.rdx, self.rcx, self.rax)
}
}
#[repr(C, packed)]
pub struct InterruptFrame {
// this is the actual value of the interrupt stack frame context,
// not the old one (which is wrong). note that the old one seems to cause
// stack misalignment.
// -- eliza, october 4th, 2016
/// Value of the instruction pointer (`$rip`) register
pub rip: *const u8
, /// Value of the code segment (`$cs`) register
pub cs: segment::Selector
, __pad_1: u32
, __pad_2: u16
, /// Value of the CPU flags (`$rflags`) register
pub rflags: RFlags
, /// Value of the stack pointer (`$rsp`) register
// TODO: should this actually be a pointer?
pub rsp: *const u8
, /// Value of the stack segment (`$ss`) register
pub ss: segment::Selector
, __pad_3: u32
, __pad_4: u16
}
#[cfg(test)]
mod test {
#[test]
fn | () {
use core::mem::size_of;
use super::InterruptFrame;
assert_eq!(size_of::<InterruptFrame>(), 32);
}
}
impl fmt::Debug for InterruptFrame {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!( f
, "Interrupt Frame: \
\n instruction pointer: {:p} \
\n code segment: {} \
\n rflags: {:?} \
\n stack pointer: {:p} \
\n stack segment: {}"
, self.rip
// , self.__pad_1, self.__pad_2
, self.cs
, self.rflags
, self.rsp
// , self.__pad_3, self.__pad_4
, self.ss)
}
}
/// Thread execution context
#[repr(C, packed)]
pub struct Context { /// Value of the stack pointer (`rsp`) register
pub rsp: *mut u8
, /// Value of the caller-saved registers
pub registers: Registers
, /// Value of the instruction pointer (`rip`) register
pub rip: *mut u8
//, pub stack: [u8] // TODO: should be box
}
impl Context {
pub fn empty() -> Self {
unsafe {
Context { rsp: mem::transmute(0u64)
, registers: Registers::empty()
, rip: mem::transmute(0u64)
//, stack: [0u8; 8]
}
}
}
}
| test_interrupt_frame_correct_size | identifier_name |
mod.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! VM Output display utils.
use std::time::Duration;
use bigint::prelude::U256;
pub mod json;
pub mod simple;
/// Formats duration into human readable format.
pub fn | (time: &Duration) -> String {
format!("{}.{:.9}s", time.as_secs(), time.subsec_nanos())
}
/// Formats the time as microseconds.
pub fn as_micros(time: &Duration) -> u64 {
time.as_secs() * 1_000_000 + time.subsec_nanos() as u64 / 1_000
}
/// Converts U256 into string.
/// TODO Overcomes: https://github.com/paritytech/bigint/issues/13
pub fn u256_as_str(v: &U256) -> String {
if v.is_zero() {
"\"0x0\"".into()
} else {
format!("\"{:x}\"", v)
}
}
| format_time | identifier_name |
mod.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! VM Output display utils.
use std::time::Duration;
use bigint::prelude::U256;
pub mod json;
pub mod simple;
/// Formats duration into human readable format.
pub fn format_time(time: &Duration) -> String {
format!("{}.{:.9}s", time.as_secs(), time.subsec_nanos())
}
/// Formats the time as microseconds.
pub fn as_micros(time: &Duration) -> u64 {
time.as_secs() * 1_000_000 + time.subsec_nanos() as u64 / 1_000
}
/// Converts U256 into string.
/// TODO Overcomes: https://github.com/paritytech/bigint/issues/13
pub fn u256_as_str(v: &U256) -> String {
if v.is_zero() | else {
format!("\"{:x}\"", v)
}
}
| {
"\"0x0\"".into()
} | conditional_block |
mod.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity. | // (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! VM Output display utils.
use std::time::Duration;
use bigint::prelude::U256;
pub mod json;
pub mod simple;
/// Formats duration into human readable format.
pub fn format_time(time: &Duration) -> String {
format!("{}.{:.9}s", time.as_secs(), time.subsec_nanos())
}
/// Formats the time as microseconds.
pub fn as_micros(time: &Duration) -> u64 {
time.as_secs() * 1_000_000 + time.subsec_nanos() as u64 / 1_000
}
/// Converts U256 into string.
/// TODO Overcomes: https://github.com/paritytech/bigint/issues/13
pub fn u256_as_str(v: &U256) -> String {
if v.is_zero() {
"\"0x0\"".into()
} else {
format!("\"{:x}\"", v)
}
} |
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or | random_line_split |
mod.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! VM Output display utils.
use std::time::Duration;
use bigint::prelude::U256;
pub mod json;
pub mod simple;
/// Formats duration into human readable format.
pub fn format_time(time: &Duration) -> String |
/// Formats the time as microseconds.
pub fn as_micros(time: &Duration) -> u64 {
time.as_secs() * 1_000_000 + time.subsec_nanos() as u64 / 1_000
}
/// Converts U256 into string.
/// TODO Overcomes: https://github.com/paritytech/bigint/issues/13
pub fn u256_as_str(v: &U256) -> String {
if v.is_zero() {
"\"0x0\"".into()
} else {
format!("\"{:x}\"", v)
}
}
| {
format!("{}.{:.9}s", time.as_secs(), time.subsec_nanos())
} | identifier_body |
PersonalityReactions.ts | module TK.SpaceTac {
// Reaction triggered
export type PersonalityReaction = PersonalityReactionConversation
// Condition to check if a reaction may happen, returning involved ships (order is important)
export type ReactionCondition = (player: Player, battle: Battle | null, ship: Ship | null, event: BaseBattleDiff | null) => Ship[]
// Reaction profile, giving a probability for types of personality, and an associated reaction constructor
export type ReactionProfile = [(traits: IPersonalityTraits) => number, (ships: Ship[]) => PersonalityReaction]
// Reaction config (condition, chance, profiles)
export type ReactionConfig = [ReactionCondition, number, ReactionProfile[]]
// Pool of reaction config
export type ReactionPool = { [code: string]: ReactionConfig }
/**
* Reactions to external events according to personalities.
*
* This allows for a more "alive" world, as characters tend to speak to react to events.
*
* This object will store the previous reactions to avoid too much recurrence, and should be global to a whole
* game session.
*/
export class PersonalityReactions {
done: string[] = []
random = RandomGenerator.global
/**
* Check for a reaction.
*
* This will return a reaction to display, and add it to the done list
*/
check(player: Player, battle: Battle | null = null, ship: Ship | null = null, event: BaseBattleDiff | null = null, pool: ReactionPool = BUILTIN_REACTION_POOL): PersonalityReaction | null {
let codes = difference(keys(pool), this.done);
let candidates = nna(codes.map((code: string): [string, Ship[], ReactionProfile[]] | null => {
let [condition, chance, profiles] = pool[code];
if (this.random.random() <= chance) {
let involved = condition(player, battle, ship, event);
if (involved.length > 0) {
return [code, involved, profiles];
} else {
return null;
}
} else {
return null;
}
}));
if (candidates.length > 0) {
let [code, involved, profiles] = this.random.choice(candidates);
let primary = involved[0];
let weights = profiles.map(([evaluator, _]) => evaluator(primary.personality));
let action_number = this.random.weighted(weights);
if (action_number >= 0) {
this.done.push(code);
let reaction_constructor = profiles[action_number][1];
return reaction_constructor(involved);
} else {
return null;
}
} else {
return null;
}
}
}
/**
* One kind of personality reaction: saying something out loud
*/
export class PersonalityReactionConversation {
messages: { interlocutor: Ship, message: string }[]
constructor(messages: { interlocutor: Ship, message: string }[]) {
this.messages = messages;
}
}
/**
* Standard reaction pool
*/
export const BUILTIN_REACTION_POOL: ReactionPool = {
friendly_fire: [cond_friendly_fire, 1, [
[traits => 1, ships => new PersonalityReactionConversation([
{ interlocutor: ships[0], message: "Hey !!! Watch where you're shooting !" },
{ interlocutor: ships[1], message: "Sorry mate..." },
])]
]]
}
/**
* Check for a friendly fire condition (one of player's ships fired on another)
*/
function cond_friendly_fire(player: Player, battle: Battle | null, ship: Ship | null, event: BaseBattleDiff | null): Ship[] |
}
| {
if (battle && ship && event) {
if (event instanceof ShipDamageDiff && player.is(ship.fleet.player) && !ship.is(event.ship_id)) {
let hurt = battle.getShip(event.ship_id);
return (hurt && player.is(hurt.fleet.player)) ? [hurt, ship] : [];
} else {
return [];
}
} else {
return [];
}
} | identifier_body |
PersonalityReactions.ts | module TK.SpaceTac {
// Reaction triggered
export type PersonalityReaction = PersonalityReactionConversation
// Condition to check if a reaction may happen, returning involved ships (order is important)
export type ReactionCondition = (player: Player, battle: Battle | null, ship: Ship | null, event: BaseBattleDiff | null) => Ship[]
// Reaction profile, giving a probability for types of personality, and an associated reaction constructor
export type ReactionProfile = [(traits: IPersonalityTraits) => number, (ships: Ship[]) => PersonalityReaction]
// Reaction config (condition, chance, profiles)
export type ReactionConfig = [ReactionCondition, number, ReactionProfile[]]
// Pool of reaction config
export type ReactionPool = { [code: string]: ReactionConfig }
/**
* Reactions to external events according to personalities.
*
* This allows for a more "alive" world, as characters tend to speak to react to events.
*
* This object will store the previous reactions to avoid too much recurrence, and should be global to a whole
* game session.
*/
export class PersonalityReactions {
done: string[] = []
random = RandomGenerator.global
/**
* Check for a reaction.
*
* This will return a reaction to display, and add it to the done list
*/ | let codes = difference(keys(pool), this.done);
let candidates = nna(codes.map((code: string): [string, Ship[], ReactionProfile[]] | null => {
let [condition, chance, profiles] = pool[code];
if (this.random.random() <= chance) {
let involved = condition(player, battle, ship, event);
if (involved.length > 0) {
return [code, involved, profiles];
} else {
return null;
}
} else {
return null;
}
}));
if (candidates.length > 0) {
let [code, involved, profiles] = this.random.choice(candidates);
let primary = involved[0];
let weights = profiles.map(([evaluator, _]) => evaluator(primary.personality));
let action_number = this.random.weighted(weights);
if (action_number >= 0) {
this.done.push(code);
let reaction_constructor = profiles[action_number][1];
return reaction_constructor(involved);
} else {
return null;
}
} else {
return null;
}
}
}
/**
* One kind of personality reaction: saying something out loud
*/
export class PersonalityReactionConversation {
messages: { interlocutor: Ship, message: string }[]
constructor(messages: { interlocutor: Ship, message: string }[]) {
this.messages = messages;
}
}
/**
* Standard reaction pool
*/
export const BUILTIN_REACTION_POOL: ReactionPool = {
friendly_fire: [cond_friendly_fire, 1, [
[traits => 1, ships => new PersonalityReactionConversation([
{ interlocutor: ships[0], message: "Hey !!! Watch where you're shooting !" },
{ interlocutor: ships[1], message: "Sorry mate..." },
])]
]]
}
/**
* Check for a friendly fire condition (one of player's ships fired on another)
*/
function cond_friendly_fire(player: Player, battle: Battle | null, ship: Ship | null, event: BaseBattleDiff | null): Ship[] {
if (battle && ship && event) {
if (event instanceof ShipDamageDiff && player.is(ship.fleet.player) && !ship.is(event.ship_id)) {
let hurt = battle.getShip(event.ship_id);
return (hurt && player.is(hurt.fleet.player)) ? [hurt, ship] : [];
} else {
return [];
}
} else {
return [];
}
}
} | check(player: Player, battle: Battle | null = null, ship: Ship | null = null, event: BaseBattleDiff | null = null, pool: ReactionPool = BUILTIN_REACTION_POOL): PersonalityReaction | null { | random_line_split |
PersonalityReactions.ts | module TK.SpaceTac {
// Reaction triggered
export type PersonalityReaction = PersonalityReactionConversation
// Condition to check if a reaction may happen, returning involved ships (order is important)
export type ReactionCondition = (player: Player, battle: Battle | null, ship: Ship | null, event: BaseBattleDiff | null) => Ship[]
// Reaction profile, giving a probability for types of personality, and an associated reaction constructor
export type ReactionProfile = [(traits: IPersonalityTraits) => number, (ships: Ship[]) => PersonalityReaction]
// Reaction config (condition, chance, profiles)
export type ReactionConfig = [ReactionCondition, number, ReactionProfile[]]
// Pool of reaction config
export type ReactionPool = { [code: string]: ReactionConfig }
/**
* Reactions to external events according to personalities.
*
* This allows for a more "alive" world, as characters tend to speak to react to events.
*
* This object will store the previous reactions to avoid too much recurrence, and should be global to a whole
* game session.
*/
export class PersonalityReactions {
done: string[] = []
random = RandomGenerator.global
/**
* Check for a reaction.
*
* This will return a reaction to display, and add it to the done list
*/
check(player: Player, battle: Battle | null = null, ship: Ship | null = null, event: BaseBattleDiff | null = null, pool: ReactionPool = BUILTIN_REACTION_POOL): PersonalityReaction | null {
let codes = difference(keys(pool), this.done);
let candidates = nna(codes.map((code: string): [string, Ship[], ReactionProfile[]] | null => {
let [condition, chance, profiles] = pool[code];
if (this.random.random() <= chance) {
let involved = condition(player, battle, ship, event);
if (involved.length > 0) {
return [code, involved, profiles];
} else {
return null;
}
} else {
return null;
}
}));
if (candidates.length > 0) {
let [code, involved, profiles] = this.random.choice(candidates);
let primary = involved[0];
let weights = profiles.map(([evaluator, _]) => evaluator(primary.personality));
let action_number = this.random.weighted(weights);
if (action_number >= 0) {
this.done.push(code);
let reaction_constructor = profiles[action_number][1];
return reaction_constructor(involved);
} else {
return null;
}
} else {
return null;
}
}
}
/**
* One kind of personality reaction: saying something out loud
*/
export class PersonalityReactionConversation {
messages: { interlocutor: Ship, message: string }[]
| (messages: { interlocutor: Ship, message: string }[]) {
this.messages = messages;
}
}
/**
* Standard reaction pool
*/
export const BUILTIN_REACTION_POOL: ReactionPool = {
friendly_fire: [cond_friendly_fire, 1, [
[traits => 1, ships => new PersonalityReactionConversation([
{ interlocutor: ships[0], message: "Hey !!! Watch where you're shooting !" },
{ interlocutor: ships[1], message: "Sorry mate..." },
])]
]]
}
/**
* Check for a friendly fire condition (one of player's ships fired on another)
*/
function cond_friendly_fire(player: Player, battle: Battle | null, ship: Ship | null, event: BaseBattleDiff | null): Ship[] {
if (battle && ship && event) {
if (event instanceof ShipDamageDiff && player.is(ship.fleet.player) && !ship.is(event.ship_id)) {
let hurt = battle.getShip(event.ship_id);
return (hurt && player.is(hurt.fleet.player)) ? [hurt, ship] : [];
} else {
return [];
}
} else {
return [];
}
}
}
| constructor | identifier_name |
PersonalityReactions.ts | module TK.SpaceTac {
// Reaction triggered
export type PersonalityReaction = PersonalityReactionConversation
// Condition to check if a reaction may happen, returning involved ships (order is important)
export type ReactionCondition = (player: Player, battle: Battle | null, ship: Ship | null, event: BaseBattleDiff | null) => Ship[]
// Reaction profile, giving a probability for types of personality, and an associated reaction constructor
export type ReactionProfile = [(traits: IPersonalityTraits) => number, (ships: Ship[]) => PersonalityReaction]
// Reaction config (condition, chance, profiles)
export type ReactionConfig = [ReactionCondition, number, ReactionProfile[]]
// Pool of reaction config
export type ReactionPool = { [code: string]: ReactionConfig }
/**
* Reactions to external events according to personalities.
*
* This allows for a more "alive" world, as characters tend to speak to react to events.
*
* This object will store the previous reactions to avoid too much recurrence, and should be global to a whole
* game session.
*/
export class PersonalityReactions {
done: string[] = []
random = RandomGenerator.global
/**
* Check for a reaction.
*
* This will return a reaction to display, and add it to the done list
*/
check(player: Player, battle: Battle | null = null, ship: Ship | null = null, event: BaseBattleDiff | null = null, pool: ReactionPool = BUILTIN_REACTION_POOL): PersonalityReaction | null {
let codes = difference(keys(pool), this.done);
let candidates = nna(codes.map((code: string): [string, Ship[], ReactionProfile[]] | null => {
let [condition, chance, profiles] = pool[code];
if (this.random.random() <= chance) {
let involved = condition(player, battle, ship, event);
if (involved.length > 0) {
return [code, involved, profiles];
} else {
return null;
}
} else {
return null;
}
}));
if (candidates.length > 0) {
let [code, involved, profiles] = this.random.choice(candidates);
let primary = involved[0];
let weights = profiles.map(([evaluator, _]) => evaluator(primary.personality));
let action_number = this.random.weighted(weights);
if (action_number >= 0) {
this.done.push(code);
let reaction_constructor = profiles[action_number][1];
return reaction_constructor(involved);
} else {
return null;
}
} else {
return null;
}
}
}
/**
* One kind of personality reaction: saying something out loud
*/
export class PersonalityReactionConversation {
messages: { interlocutor: Ship, message: string }[]
constructor(messages: { interlocutor: Ship, message: string }[]) {
this.messages = messages;
}
}
/**
* Standard reaction pool
*/
export const BUILTIN_REACTION_POOL: ReactionPool = {
friendly_fire: [cond_friendly_fire, 1, [
[traits => 1, ships => new PersonalityReactionConversation([
{ interlocutor: ships[0], message: "Hey !!! Watch where you're shooting !" },
{ interlocutor: ships[1], message: "Sorry mate..." },
])]
]]
}
/**
* Check for a friendly fire condition (one of player's ships fired on another)
*/
function cond_friendly_fire(player: Player, battle: Battle | null, ship: Ship | null, event: BaseBattleDiff | null): Ship[] {
if (battle && ship && event) | else {
return [];
}
}
}
| {
if (event instanceof ShipDamageDiff && player.is(ship.fleet.player) && !ship.is(event.ship_id)) {
let hurt = battle.getShip(event.ship_id);
return (hurt && player.is(hurt.fleet.player)) ? [hurt, ship] : [];
} else {
return [];
}
} | conditional_block |
register-device.js | /**
* Copyright 2019, Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const functions = require('firebase-functions');
const { google } = require('googleapis');
const { firestore } = require('../admin');
/**
* Return a Promise to obtain the device from Cloud IoT Core
*/
function getDevice(client, deviceId) {
return new Promise((resolve, reject) => {
const projectId = process.env.GCLOUD_PROJECT;
const parentName = `projects/${projectId}/locations/${functions.config().cloudiot.region}`;
const registryName = `${parentName}/registries/${functions.config().cloudiot.registry}`;
const request = {
name: `${registryName}/devices/${deviceId}`
};
client.projects.locations.registries.devices.get(request, (err, resp) => {
if (err) {
return reject(err);
} else {
resolve(resp.data);
}
});
});
}
/**
* Validate that the public key provided by the pending device matches
* the key currently stored in IoT Core for that device id.
*
* Method throws an error if the keys do not match.
*/
function verifyDeviceKey(pendingDevice, deviceKey) |
/**
* Cloud Function: Verify IoT device and add to user
*/
module.exports = functions.firestore.document('pending/{device}').onWrite(async (change, context) => {
const deviceId = context.params.device;
// Verify this is either a create or update
if (!change.after.exists) {
console.log(`Pending device removed for ${deviceId}`);
return;
}
console.log(`Pending device created for ${deviceId}`);
const pending = change.after.data();
// Create a new Cloud IoT client
const auth = await google.auth.getClient({
scopes: ['https://www.googleapis.com/auth/cloud-platform']
});
const client = google.cloudiot({
version: 'v1',
auth: auth
});
try {
// Verify device does NOT already exist in Firestore
const deviceRef = firestore.doc(`devices/${deviceId}`);
const deviceDoc = await deviceRef.get();
if (deviceDoc.exists) throw new Error(`${deviceId} is already registered to another user`);
// Verify device exists in IoT Core
const result = await getDevice(client, deviceId);
// Verify the device public key
verifyDeviceKey(pending, result.credentials[0].publicKey.key.trim());
// Verify the device type
let configValue = null;
switch (pending.type) {
case 'light':
configValue = require('./default-light.json');
break;
case 'thermostat':
configValue = require('./default-thermostat.json');
break;
default:
throw new Error(`Invalid device type found in ${deviceId}: ${pending.type}`);
}
// Commit the following changes together
const batch = firestore.batch();
// Insert valid device for the requested owner
const device = {
name: pending.serial_number,
owner: pending.owner,
type: pending.type,
online: false
};
batch.set(deviceRef, device);
// Generate a default configuration
const configRef = firestore.doc(`device-configs/${deviceId}`);
const config = {
owner: pending.owner,
value: configValue
};
batch.set(configRef, config);
// Remove the pending device entry
batch.delete(change.after.ref);
await batch.commit();
console.log(`Added device ${deviceId} for user ${pending.owner}`);
} catch (error) {
// Device does not exist in IoT Core or key doesn't match
console.error('Unable to register new device', error);
}
});
| {
// Convert the pending key into PEM format
const chunks = pendingDevice.public_key.match(/(.{1,64})/g);
chunks.unshift('-----BEGIN PUBLIC KEY-----');
chunks.push('-----END PUBLIC KEY-----');
const pendingKey = chunks.join('\n');
if (deviceKey !== pendingKey) throw new Error(`Public Key Mismatch:\nExpected: ${deviceKey}\nReceived: ${pendingKey}`);
} | identifier_body |
register-device.js | /**
* Copyright 2019, Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const functions = require('firebase-functions');
const { google } = require('googleapis');
const { firestore } = require('../admin');
/**
* Return a Promise to obtain the device from Cloud IoT Core | const projectId = process.env.GCLOUD_PROJECT;
const parentName = `projects/${projectId}/locations/${functions.config().cloudiot.region}`;
const registryName = `${parentName}/registries/${functions.config().cloudiot.registry}`;
const request = {
name: `${registryName}/devices/${deviceId}`
};
client.projects.locations.registries.devices.get(request, (err, resp) => {
if (err) {
return reject(err);
} else {
resolve(resp.data);
}
});
});
}
/**
* Validate that the public key provided by the pending device matches
* the key currently stored in IoT Core for that device id.
*
* Method throws an error if the keys do not match.
*/
function verifyDeviceKey(pendingDevice, deviceKey) {
// Convert the pending key into PEM format
const chunks = pendingDevice.public_key.match(/(.{1,64})/g);
chunks.unshift('-----BEGIN PUBLIC KEY-----');
chunks.push('-----END PUBLIC KEY-----');
const pendingKey = chunks.join('\n');
if (deviceKey !== pendingKey) throw new Error(`Public Key Mismatch:\nExpected: ${deviceKey}\nReceived: ${pendingKey}`);
}
/**
* Cloud Function: Verify IoT device and add to user
*/
module.exports = functions.firestore.document('pending/{device}').onWrite(async (change, context) => {
const deviceId = context.params.device;
// Verify this is either a create or update
if (!change.after.exists) {
console.log(`Pending device removed for ${deviceId}`);
return;
}
console.log(`Pending device created for ${deviceId}`);
const pending = change.after.data();
// Create a new Cloud IoT client
const auth = await google.auth.getClient({
scopes: ['https://www.googleapis.com/auth/cloud-platform']
});
const client = google.cloudiot({
version: 'v1',
auth: auth
});
try {
// Verify device does NOT already exist in Firestore
const deviceRef = firestore.doc(`devices/${deviceId}`);
const deviceDoc = await deviceRef.get();
if (deviceDoc.exists) throw new Error(`${deviceId} is already registered to another user`);
// Verify device exists in IoT Core
const result = await getDevice(client, deviceId);
// Verify the device public key
verifyDeviceKey(pending, result.credentials[0].publicKey.key.trim());
// Verify the device type
let configValue = null;
switch (pending.type) {
case 'light':
configValue = require('./default-light.json');
break;
case 'thermostat':
configValue = require('./default-thermostat.json');
break;
default:
throw new Error(`Invalid device type found in ${deviceId}: ${pending.type}`);
}
// Commit the following changes together
const batch = firestore.batch();
// Insert valid device for the requested owner
const device = {
name: pending.serial_number,
owner: pending.owner,
type: pending.type,
online: false
};
batch.set(deviceRef, device);
// Generate a default configuration
const configRef = firestore.doc(`device-configs/${deviceId}`);
const config = {
owner: pending.owner,
value: configValue
};
batch.set(configRef, config);
// Remove the pending device entry
batch.delete(change.after.ref);
await batch.commit();
console.log(`Added device ${deviceId} for user ${pending.owner}`);
} catch (error) {
// Device does not exist in IoT Core or key doesn't match
console.error('Unable to register new device', error);
}
}); | */
function getDevice(client, deviceId) {
return new Promise((resolve, reject) => { | random_line_split |
register-device.js | /**
* Copyright 2019, Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const functions = require('firebase-functions');
const { google } = require('googleapis');
const { firestore } = require('../admin');
/**
* Return a Promise to obtain the device from Cloud IoT Core
*/
function getDevice(client, deviceId) {
return new Promise((resolve, reject) => {
const projectId = process.env.GCLOUD_PROJECT;
const parentName = `projects/${projectId}/locations/${functions.config().cloudiot.region}`;
const registryName = `${parentName}/registries/${functions.config().cloudiot.registry}`;
const request = {
name: `${registryName}/devices/${deviceId}`
};
client.projects.locations.registries.devices.get(request, (err, resp) => {
if (err) {
return reject(err);
} else {
resolve(resp.data);
}
});
});
}
/**
* Validate that the public key provided by the pending device matches
* the key currently stored in IoT Core for that device id.
*
* Method throws an error if the keys do not match.
*/
function | (pendingDevice, deviceKey) {
// Convert the pending key into PEM format
const chunks = pendingDevice.public_key.match(/(.{1,64})/g);
chunks.unshift('-----BEGIN PUBLIC KEY-----');
chunks.push('-----END PUBLIC KEY-----');
const pendingKey = chunks.join('\n');
if (deviceKey !== pendingKey) throw new Error(`Public Key Mismatch:\nExpected: ${deviceKey}\nReceived: ${pendingKey}`);
}
/**
* Cloud Function: Verify IoT device and add to user
*/
module.exports = functions.firestore.document('pending/{device}').onWrite(async (change, context) => {
const deviceId = context.params.device;
// Verify this is either a create or update
if (!change.after.exists) {
console.log(`Pending device removed for ${deviceId}`);
return;
}
console.log(`Pending device created for ${deviceId}`);
const pending = change.after.data();
// Create a new Cloud IoT client
const auth = await google.auth.getClient({
scopes: ['https://www.googleapis.com/auth/cloud-platform']
});
const client = google.cloudiot({
version: 'v1',
auth: auth
});
try {
// Verify device does NOT already exist in Firestore
const deviceRef = firestore.doc(`devices/${deviceId}`);
const deviceDoc = await deviceRef.get();
if (deviceDoc.exists) throw new Error(`${deviceId} is already registered to another user`);
// Verify device exists in IoT Core
const result = await getDevice(client, deviceId);
// Verify the device public key
verifyDeviceKey(pending, result.credentials[0].publicKey.key.trim());
// Verify the device type
let configValue = null;
switch (pending.type) {
case 'light':
configValue = require('./default-light.json');
break;
case 'thermostat':
configValue = require('./default-thermostat.json');
break;
default:
throw new Error(`Invalid device type found in ${deviceId}: ${pending.type}`);
}
// Commit the following changes together
const batch = firestore.batch();
// Insert valid device for the requested owner
const device = {
name: pending.serial_number,
owner: pending.owner,
type: pending.type,
online: false
};
batch.set(deviceRef, device);
// Generate a default configuration
const configRef = firestore.doc(`device-configs/${deviceId}`);
const config = {
owner: pending.owner,
value: configValue
};
batch.set(configRef, config);
// Remove the pending device entry
batch.delete(change.after.ref);
await batch.commit();
console.log(`Added device ${deviceId} for user ${pending.owner}`);
} catch (error) {
// Device does not exist in IoT Core or key doesn't match
console.error('Unable to register new device', error);
}
});
| verifyDeviceKey | identifier_name |
__init__.py | import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template
from flask_login import LoginManager
from flask_restful import Api
from flask_wtf.csrf import CsrfProtect
from itsdangerous import URLSafeTimedSerializer
from sqlalchemy import create_engine
import AppConfig
from RestResources.Resources import PostsList, Posts
from services.Services import UserService
from views import Login, Common, Post, Admin
app = Flask(__name__)
CsrfProtect(app)
login_serializer = URLSafeTimedSerializer(AppConfig.APPSECRETKEY)
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
# set the secret key. keep this really secret:
app.secret_key = AppConfig.APPSECRETKEY
def register_mods():
app.register_blueprint(Common.mod)
app.register_blueprint(Login.mod)
app.register_blueprint(Post.mod)
app.register_blueprint(Admin.mod)
def create_db_engine():
return create_engine(AppConfig.CONNECTIONSTRING, pool_recycle=3600, echo=True)
def | ():
AppConfig.DBENGINE = create_db_engine()
def init_login():
login_manager = LoginManager()
login_manager.init_app(app)
AppConfig.LOGINMANAGER = login_manager
# Create user loader function
@login_manager.user_loader
def load_user(user_id):
return UserService().getAll().filter_by(id=user_id).first()
@login_manager.token_loader
def get_user_token(token):
max_age = app.config["REMEMBER_COOKIE_DURATION"].total_seconds()
#Decrypt the Security Token, data = [username, hashpass]
data = login_serializer.loads(token, max_age=max_age)
userService = UserService()
#Find the User
user = userService.getById(data[0])
#Check Password and return user or None
if user and userService.validate(user.username, user.password):
return user
return None
def init_logger():
handler = RotatingFileHandler('FlaskTest.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
def register_rest_api():
return Api(app)
def register_rest_resources():
api.add_resource(PostsList, '/api/posts')
api.add_resource(Posts, '/api/posts/<string:post_id>')
def set_app_configuration():
app.config['REMEMBER_COOKIE_DURATION'] = AppConfig.REMEMBER_COOKIE_DURATION
register_mods()
api = register_rest_api()
register_rest_resources()
build_db_engine()
init_login()
init_logger()
set_app_configuration()
app.run(AppConfig.APPHOST, AppConfig.APPPORT) | build_db_engine | identifier_name |
__init__.py | import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template
from flask_login import LoginManager
from flask_restful import Api
from flask_wtf.csrf import CsrfProtect
from itsdangerous import URLSafeTimedSerializer
from sqlalchemy import create_engine
import AppConfig
from RestResources.Resources import PostsList, Posts
from services.Services import UserService
from views import Login, Common, Post, Admin
app = Flask(__name__)
CsrfProtect(app)
login_serializer = URLSafeTimedSerializer(AppConfig.APPSECRETKEY)
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
# set the secret key. keep this really secret:
app.secret_key = AppConfig.APPSECRETKEY
def register_mods():
app.register_blueprint(Common.mod)
app.register_blueprint(Login.mod)
app.register_blueprint(Post.mod)
app.register_blueprint(Admin.mod)
def create_db_engine():
return create_engine(AppConfig.CONNECTIONSTRING, pool_recycle=3600, echo=True)
def build_db_engine():
AppConfig.DBENGINE = create_db_engine()
def init_login():
login_manager = LoginManager()
login_manager.init_app(app)
AppConfig.LOGINMANAGER = login_manager
# Create user loader function
@login_manager.user_loader
def load_user(user_id):
return UserService().getAll().filter_by(id=user_id).first()
@login_manager.token_loader
def get_user_token(token):
max_age = app.config["REMEMBER_COOKIE_DURATION"].total_seconds()
#Decrypt the Security Token, data = [username, hashpass]
data = login_serializer.loads(token, max_age=max_age)
userService = UserService()
#Find the User
user = userService.getById(data[0])
#Check Password and return user or None
if user and userService.validate(user.username, user.password):
return user
return None
def init_logger():
handler = RotatingFileHandler('FlaskTest.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
def register_rest_api():
return Api(app)
def register_rest_resources():
api.add_resource(PostsList, '/api/posts')
api.add_resource(Posts, '/api/posts/<string:post_id>')
def set_app_configuration():
|
register_mods()
api = register_rest_api()
register_rest_resources()
build_db_engine()
init_login()
init_logger()
set_app_configuration()
app.run(AppConfig.APPHOST, AppConfig.APPPORT) | app.config['REMEMBER_COOKIE_DURATION'] = AppConfig.REMEMBER_COOKIE_DURATION | identifier_body |
__init__.py | import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template
from flask_login import LoginManager
from flask_restful import Api
from flask_wtf.csrf import CsrfProtect
from itsdangerous import URLSafeTimedSerializer
from sqlalchemy import create_engine
import AppConfig
from RestResources.Resources import PostsList, Posts
from services.Services import UserService
from views import Login, Common, Post, Admin
app = Flask(__name__)
CsrfProtect(app) |
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
# set the secret key. keep this really secret:
app.secret_key = AppConfig.APPSECRETKEY
def register_mods():
app.register_blueprint(Common.mod)
app.register_blueprint(Login.mod)
app.register_blueprint(Post.mod)
app.register_blueprint(Admin.mod)
def create_db_engine():
return create_engine(AppConfig.CONNECTIONSTRING, pool_recycle=3600, echo=True)
def build_db_engine():
AppConfig.DBENGINE = create_db_engine()
def init_login():
login_manager = LoginManager()
login_manager.init_app(app)
AppConfig.LOGINMANAGER = login_manager
# Create user loader function
@login_manager.user_loader
def load_user(user_id):
return UserService().getAll().filter_by(id=user_id).first()
@login_manager.token_loader
def get_user_token(token):
max_age = app.config["REMEMBER_COOKIE_DURATION"].total_seconds()
#Decrypt the Security Token, data = [username, hashpass]
data = login_serializer.loads(token, max_age=max_age)
userService = UserService()
#Find the User
user = userService.getById(data[0])
#Check Password and return user or None
if user and userService.validate(user.username, user.password):
return user
return None
def init_logger():
handler = RotatingFileHandler('FlaskTest.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
def register_rest_api():
return Api(app)
def register_rest_resources():
api.add_resource(PostsList, '/api/posts')
api.add_resource(Posts, '/api/posts/<string:post_id>')
def set_app_configuration():
app.config['REMEMBER_COOKIE_DURATION'] = AppConfig.REMEMBER_COOKIE_DURATION
register_mods()
api = register_rest_api()
register_rest_resources()
build_db_engine()
init_login()
init_logger()
set_app_configuration()
app.run(AppConfig.APPHOST, AppConfig.APPPORT) |
login_serializer = URLSafeTimedSerializer(AppConfig.APPSECRETKEY)
| random_line_split |
__init__.py | import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template
from flask_login import LoginManager
from flask_restful import Api
from flask_wtf.csrf import CsrfProtect
from itsdangerous import URLSafeTimedSerializer
from sqlalchemy import create_engine
import AppConfig
from RestResources.Resources import PostsList, Posts
from services.Services import UserService
from views import Login, Common, Post, Admin
app = Flask(__name__)
CsrfProtect(app)
login_serializer = URLSafeTimedSerializer(AppConfig.APPSECRETKEY)
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
# set the secret key. keep this really secret:
app.secret_key = AppConfig.APPSECRETKEY
def register_mods():
app.register_blueprint(Common.mod)
app.register_blueprint(Login.mod)
app.register_blueprint(Post.mod)
app.register_blueprint(Admin.mod)
def create_db_engine():
return create_engine(AppConfig.CONNECTIONSTRING, pool_recycle=3600, echo=True)
def build_db_engine():
AppConfig.DBENGINE = create_db_engine()
def init_login():
login_manager = LoginManager()
login_manager.init_app(app)
AppConfig.LOGINMANAGER = login_manager
# Create user loader function
@login_manager.user_loader
def load_user(user_id):
return UserService().getAll().filter_by(id=user_id).first()
@login_manager.token_loader
def get_user_token(token):
max_age = app.config["REMEMBER_COOKIE_DURATION"].total_seconds()
#Decrypt the Security Token, data = [username, hashpass]
data = login_serializer.loads(token, max_age=max_age)
userService = UserService()
#Find the User
user = userService.getById(data[0])
#Check Password and return user or None
if user and userService.validate(user.username, user.password):
|
return None
def init_logger():
handler = RotatingFileHandler('FlaskTest.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
def register_rest_api():
return Api(app)
def register_rest_resources():
api.add_resource(PostsList, '/api/posts')
api.add_resource(Posts, '/api/posts/<string:post_id>')
def set_app_configuration():
app.config['REMEMBER_COOKIE_DURATION'] = AppConfig.REMEMBER_COOKIE_DURATION
register_mods()
api = register_rest_api()
register_rest_resources()
build_db_engine()
init_login()
init_logger()
set_app_configuration()
app.run(AppConfig.APPHOST, AppConfig.APPPORT) | return user | conditional_block |
lc003-longest-substring-without-repeating-characters.py | # coding=utf-8
import unittest
"""3. Longest Substring Without Repeating Characters
https://leetcode.com/problems/longest-substring-without-repeating-characters/description/
Given a string, find the length of the **longest substring** without repeating
characters.
**Examples:**
Given `"abcabcbb"`, the answer is `"abc"`, which the length is 3.
Given `"bbbbb"`, the answer is `"b"`, with the length of 1.
Given `"pwwkew"`, the answer is `"wke"`, with the length of 3. Note that the
answer must be a **substring** , `"pwke"` is a _subsequence_ and not a
substring.
Similar Questions:
Longest Substring with At Most Two Distinct Characters (longest-substring-with-at-most-two-distinct-characters)
"""
class Solution(unittest.TestCase):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
cache = {}
val, pos = 0, 0
while pos < len(s):
if s[pos] in cache:
pos = cache[s[pos]] + 1
val = max(val, len(cache))
cache.clear()
else:
cache[s[pos]] = pos
pos += 1
val = max(val, len(cache))
return val
def | (self):
self.assertEqual(self.lengthOfLongestSubstring("abcabcbb"), 3)
self.assertEqual(self.lengthOfLongestSubstring("bbbbb"), 1)
self.assertEqual(self.lengthOfLongestSubstring("pwwkew"), 3)
self.assertEqual(self.lengthOfLongestSubstring("c"), 1)
if __name__ == "__main__":
unittest.main()
| test | identifier_name |
lc003-longest-substring-without-repeating-characters.py | # coding=utf-8
import unittest
"""3. Longest Substring Without Repeating Characters
https://leetcode.com/problems/longest-substring-without-repeating-characters/description/
Given a string, find the length of the **longest substring** without repeating
characters.
**Examples:**
Given `"abcabcbb"`, the answer is `"abc"`, which the length is 3.
Given `"bbbbb"`, the answer is `"b"`, with the length of 1.
Given `"pwwkew"`, the answer is `"wke"`, with the length of 3. Note that the
answer must be a **substring** , `"pwke"` is a _subsequence_ and not a
substring.
Similar Questions:
Longest Substring with At Most Two Distinct Characters (longest-substring-with-at-most-two-distinct-characters)
"""
class Solution(unittest.TestCase):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
cache = {}
val, pos = 0, 0
while pos < len(s):
if s[pos] in cache:
pos = cache[s[pos]] + 1
val = max(val, len(cache))
cache.clear()
else:
cache[s[pos]] = pos | return val
def test(self):
self.assertEqual(self.lengthOfLongestSubstring("abcabcbb"), 3)
self.assertEqual(self.lengthOfLongestSubstring("bbbbb"), 1)
self.assertEqual(self.lengthOfLongestSubstring("pwwkew"), 3)
self.assertEqual(self.lengthOfLongestSubstring("c"), 1)
if __name__ == "__main__":
unittest.main() | pos += 1
val = max(val, len(cache)) | random_line_split |
lc003-longest-substring-without-repeating-characters.py | # coding=utf-8
import unittest
"""3. Longest Substring Without Repeating Characters
https://leetcode.com/problems/longest-substring-without-repeating-characters/description/
Given a string, find the length of the **longest substring** without repeating
characters.
**Examples:**
Given `"abcabcbb"`, the answer is `"abc"`, which the length is 3.
Given `"bbbbb"`, the answer is `"b"`, with the length of 1.
Given `"pwwkew"`, the answer is `"wke"`, with the length of 3. Note that the
answer must be a **substring** , `"pwke"` is a _subsequence_ and not a
substring.
Similar Questions:
Longest Substring with At Most Two Distinct Characters (longest-substring-with-at-most-two-distinct-characters)
"""
class Solution(unittest.TestCase):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
cache = {}
val, pos = 0, 0
while pos < len(s):
if s[pos] in cache:
|
else:
cache[s[pos]] = pos
pos += 1
val = max(val, len(cache))
return val
def test(self):
self.assertEqual(self.lengthOfLongestSubstring("abcabcbb"), 3)
self.assertEqual(self.lengthOfLongestSubstring("bbbbb"), 1)
self.assertEqual(self.lengthOfLongestSubstring("pwwkew"), 3)
self.assertEqual(self.lengthOfLongestSubstring("c"), 1)
if __name__ == "__main__":
unittest.main()
| pos = cache[s[pos]] + 1
val = max(val, len(cache))
cache.clear() | conditional_block |
lc003-longest-substring-without-repeating-characters.py | # coding=utf-8
import unittest
"""3. Longest Substring Without Repeating Characters
https://leetcode.com/problems/longest-substring-without-repeating-characters/description/
Given a string, find the length of the **longest substring** without repeating
characters.
**Examples:**
Given `"abcabcbb"`, the answer is `"abc"`, which the length is 3.
Given `"bbbbb"`, the answer is `"b"`, with the length of 1.
Given `"pwwkew"`, the answer is `"wke"`, with the length of 3. Note that the
answer must be a **substring** , `"pwke"` is a _subsequence_ and not a
substring.
Similar Questions:
Longest Substring with At Most Two Distinct Characters (longest-substring-with-at-most-two-distinct-characters)
"""
class Solution(unittest.TestCase):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
cache = {}
val, pos = 0, 0
while pos < len(s):
if s[pos] in cache:
pos = cache[s[pos]] + 1
val = max(val, len(cache))
cache.clear()
else:
cache[s[pos]] = pos
pos += 1
val = max(val, len(cache))
return val
def test(self):
|
if __name__ == "__main__":
unittest.main()
| self.assertEqual(self.lengthOfLongestSubstring("abcabcbb"), 3)
self.assertEqual(self.lengthOfLongestSubstring("bbbbb"), 1)
self.assertEqual(self.lengthOfLongestSubstring("pwwkew"), 3)
self.assertEqual(self.lengthOfLongestSubstring("c"), 1) | identifier_body |
mail.py | """
KaraCos - web platform engine - http://karacos.org/
Copyright (C) 2009-2010 Nicolas Karageuzian - Cyril Gratecis
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__license__ = 'AGPL'
import smtplib
import karacos
import random
import string
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def valid_email(email):
import re
reg = re.compile("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$")
return reg.match(email)
def send_mail(destmail, msg):
"""
"""
try:
server = smtplib.SMTP(karacos.config.get('mail','smtp_server'),
karacos.config.get('mail','smtp_server_port'))
server.ehlo()
if karacos.config.has_option('mail', 'smtp_ssl'):
if karacos.config.get('mail', 'smtp_ssl') == "True" or karacos.config.get('mail', 'smtp_ssl'):
server.starttls()
server.ehlo()
if karacos.config.has_option('mail', 'smtp_password'):
src = karacos.config.get('mail','from_addr')
password = karacos.config.get('mail','smtp_password')
server.login(src, password)
server.sendmail(karacos.config.get('mail','from_addr'), destmail, msg)
print "mail sent"
server.close()
except Exception,e:
import sys
print sys.exc_info()
raise e
def send_domain_mail(domain, destmail, msg):
server = smtplib.SMTP(domain['site_email_service_host'], | server.ehlo()
if 'site_email_service_secure' in domain:
if domain['site_email_service_secure'] or domain['site_email_service_secure'] == True:
server.starttls()
server.ehlo()
if 'site_email_service_password' in domain:
server.login(domain['site_email_service_username'], domain['site_email_service_password'])
server.sendmail(domain['site_email_from'], destmail, msg)
server.close() | domain['site_email_service_port']) | random_line_split |
mail.py | """
KaraCos - web platform engine - http://karacos.org/
Copyright (C) 2009-2010 Nicolas Karageuzian - Cyril Gratecis
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__license__ = 'AGPL'
import smtplib
import karacos
import random
import string
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def valid_email(email):
|
def send_mail(destmail, msg):
"""
"""
try:
server = smtplib.SMTP(karacos.config.get('mail','smtp_server'),
karacos.config.get('mail','smtp_server_port'))
server.ehlo()
if karacos.config.has_option('mail', 'smtp_ssl'):
if karacos.config.get('mail', 'smtp_ssl') == "True" or karacos.config.get('mail', 'smtp_ssl'):
server.starttls()
server.ehlo()
if karacos.config.has_option('mail', 'smtp_password'):
src = karacos.config.get('mail','from_addr')
password = karacos.config.get('mail','smtp_password')
server.login(src, password)
server.sendmail(karacos.config.get('mail','from_addr'), destmail, msg)
print "mail sent"
server.close()
except Exception,e:
import sys
print sys.exc_info()
raise e
def send_domain_mail(domain, destmail, msg):
server = smtplib.SMTP(domain['site_email_service_host'],
domain['site_email_service_port'])
server.ehlo()
if 'site_email_service_secure' in domain:
if domain['site_email_service_secure'] or domain['site_email_service_secure'] == True:
server.starttls()
server.ehlo()
if 'site_email_service_password' in domain:
server.login(domain['site_email_service_username'], domain['site_email_service_password'])
server.sendmail(domain['site_email_from'], destmail, msg)
server.close() | import re
reg = re.compile("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$")
return reg.match(email) | identifier_body |
mail.py | """
KaraCos - web platform engine - http://karacos.org/
Copyright (C) 2009-2010 Nicolas Karageuzian - Cyril Gratecis
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__license__ = 'AGPL'
import smtplib
import karacos
import random
import string
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def valid_email(email):
import re
reg = re.compile("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$")
return reg.match(email)
def send_mail(destmail, msg):
"""
"""
try:
server = smtplib.SMTP(karacos.config.get('mail','smtp_server'),
karacos.config.get('mail','smtp_server_port'))
server.ehlo()
if karacos.config.has_option('mail', 'smtp_ssl'):
if karacos.config.get('mail', 'smtp_ssl') == "True" or karacos.config.get('mail', 'smtp_ssl'):
server.starttls()
server.ehlo()
if karacos.config.has_option('mail', 'smtp_password'):
src = karacos.config.get('mail','from_addr')
password = karacos.config.get('mail','smtp_password')
server.login(src, password)
server.sendmail(karacos.config.get('mail','from_addr'), destmail, msg)
print "mail sent"
server.close()
except Exception,e:
import sys
print sys.exc_info()
raise e
def send_domain_mail(domain, destmail, msg):
server = smtplib.SMTP(domain['site_email_service_host'],
domain['site_email_service_port'])
server.ehlo()
if 'site_email_service_secure' in domain:
if domain['site_email_service_secure'] or domain['site_email_service_secure'] == True:
server.starttls()
server.ehlo()
if 'site_email_service_password' in domain:
|
server.sendmail(domain['site_email_from'], destmail, msg)
server.close() | server.login(domain['site_email_service_username'], domain['site_email_service_password']) | conditional_block |
mail.py | """
KaraCos - web platform engine - http://karacos.org/
Copyright (C) 2009-2010 Nicolas Karageuzian - Cyril Gratecis
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__license__ = 'AGPL'
import smtplib
import karacos
import random
import string
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def | (email):
import re
reg = re.compile("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$")
return reg.match(email)
def send_mail(destmail, msg):
"""
"""
try:
server = smtplib.SMTP(karacos.config.get('mail','smtp_server'),
karacos.config.get('mail','smtp_server_port'))
server.ehlo()
if karacos.config.has_option('mail', 'smtp_ssl'):
if karacos.config.get('mail', 'smtp_ssl') == "True" or karacos.config.get('mail', 'smtp_ssl'):
server.starttls()
server.ehlo()
if karacos.config.has_option('mail', 'smtp_password'):
src = karacos.config.get('mail','from_addr')
password = karacos.config.get('mail','smtp_password')
server.login(src, password)
server.sendmail(karacos.config.get('mail','from_addr'), destmail, msg)
print "mail sent"
server.close()
except Exception,e:
import sys
print sys.exc_info()
raise e
def send_domain_mail(domain, destmail, msg):
server = smtplib.SMTP(domain['site_email_service_host'],
domain['site_email_service_port'])
server.ehlo()
if 'site_email_service_secure' in domain:
if domain['site_email_service_secure'] or domain['site_email_service_secure'] == True:
server.starttls()
server.ehlo()
if 'site_email_service_password' in domain:
server.login(domain['site_email_service_username'], domain['site_email_service_password'])
server.sendmail(domain['site_email_from'], destmail, msg)
server.close() | valid_email | identifier_name |
api.ts | import passport from 'passport';
import app from 'server/server';
import { getPermissions } from './permissions';
import { createUser, updateUser } from './queries';
const getRequestIds = (req) => {
const user = req.user || {};
return {
userId: user.id,
submittedUserId: req.body.userId,
email: req.body.email ? req.body.email.toLowerCase().trim() : null,
hash: req.body.hash || null,
};
};
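// POST /api/users: create the account, then immediately authenticate the new user with the local passport strategy.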
app.post('/api/users', (req, res) => {
const requestIds = getRequestIds(req);
getPermissions(requestIds)
.then((permissions) => {
if (!permissions.create) {
throw new Error('Not Authorized');
}
return createUser(req.body);
})
.then((newUser) => {
passport.authenticate('local')(req, res, () => {
return res.status(201).json(newUser);
});
})
.catch((err) => {
console.error('Error in postUser: ', err);
return res.status(500).json(err.message);
});
});
app.put('/api/users', (req, res) => {
getPermissions(getRequestIds(req))
.then((permissions) => {
if (!permissions.update) {
throw new Error('Not Authorized'); | return updateUser(req.body, permissions.update, req);
})
.then((updatedValues) => {
return res.status(201).json(updatedValues);
})
.catch((err) => {
console.error('Error in putUser: ', err);
return res.status(500).json(err.message);
});
}); | } | random_line_split |
api.ts | import passport from 'passport';
import app from 'server/server';
import { getPermissions } from './permissions';
import { createUser, updateUser } from './queries';
const getRequestIds = (req) => {
const user = req.user || {};
return {
userId: user.id,
submittedUserId: req.body.userId,
email: req.body.email ? req.body.email.toLowerCase().trim() : null,
hash: req.body.hash || null,
};
};
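// POST /api/users: create the account, then immediately authenticate the new user with the local passport strategy.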
app.post('/api/users', (req, res) => {
const requestIds = getRequestIds(req);
getPermissions(requestIds)
.then((permissions) => {
if (!permissions.create) {
throw new Error('Not Authorized');
}
return createUser(req.body);
})
.then((newUser) => {
passport.authenticate('local')(req, res, () => {
return res.status(201).json(newUser);
});
})
.catch((err) => {
console.error('Error in postUser: ', err);
return res.status(500).json(err.message);
});
});
app.put('/api/users', (req, res) => {
getPermissions(getRequestIds(req))
.then((permissions) => {
if (!permissions.update) |
return updateUser(req.body, permissions.update, req);
})
.then((updatedValues) => {
return res.status(201).json(updatedValues);
})
.catch((err) => {
console.error('Error in putUser: ', err);
return res.status(500).json(err.message);
});
});
| {
throw new Error('Not Authorized');
} | conditional_block |
app_e2e-spec.ts | import {browser, by, element, ExpectedConditions} from 'protractor';
// This test uses Protractor without Angular, so disable Angular features
browser.waitForAngularEnabled(false);
// Since we don't have a protractor bazel rule yet, the test is brought up in
// parallel with building the service under test. So the timeout must include
// compiling the application as well as starting the server.
const timeoutMs = 90 * 1000; | browser.get('');
// Don't run any specs until we see a <div> on the page.
browser.wait(ExpectedConditions.presenceOf(element(by.css('div.ts1'))), timeoutMs);
browser.wait(ExpectedConditions.presenceOf(element(by.css('div.ts2'))), timeoutMs);
browser.wait(ExpectedConditions.presenceOf(element(by.css('div.ts3'))), timeoutMs);
}, timeoutMs);
it('should display: Hello, TypeScript', async (done) => {
const text = await element(by.css('div.ts1')).getText();
expect(text).toEqual(`Hello, TypeScript`);
done();
});
it('should display: Hello, genrule', async (done) => {
const text = await element(by.css('div.ts2')).getText();
expect(text).toEqual(`Hello, genrule`);
done();
});
it('should display: location.host', async (done) => {
const currentUrl = await browser.getCurrentUrl();
const text = await element(by.css('div.ts3')).getText();
expect(`http://${text}/`).toEqual(currentUrl);
done();
});
}); |
describe('app', () => {
beforeAll(() => { | random_line_split |
thickbox.js | /*
* Thickbox 3.1 - One Box To Rule Them All.
* By Cody Lindley (http://www.codylindley.com)
* Copyright (c) 2007 cody lindley
* Licensed under the MIT License: http://www.opensource.org/licenses/mit-license.php
*/
$.noConflict();
// imagepath = images/thickbox/loadingAnimation.gif;
var tb_pathToImage = " ";
/*!!!!!!!!!!!!!!!!! edit below this line at your own risk !!!!!!!!!!!!!!!!!!!!!!!*/
//on page load call tb_init
jQuery(document).ready(function($){
tb_init('a.thickbox, area.thickbox, input.thickbox');//pass where to apply thickbox
imgLoader = new Image();// preload image
imgLoader.src = tb_pathToImage;
});
//add thickbox to href & area elements that have a class of .thickbox
function tb_init(domChunk){
jQuery(domChunk).click(function($){
var t = this.title || this.name || null;
var a = this.href || this.alt;
var g = this.rel || false;
tb_show(t,a,g);
this.blur();
return false;
});
}
function tb_show(caption, url, imageGroup) {//function called when the user clicks on a thickbox link
try {
if (typeof document.body.style.maxHeight === "undefined") {//if IE 6
$("body","html").css({height: "100%", width: "100%"});
$("html").css("overflow","hidden");
if (document.getElementById("TB_HideSelect") === null) {//iframe to hide select elements in ie6
$("body").append("<iframe id='TB_HideSelect'></iframe><div id='TB_overlay'></div><div id='TB_window'></div>");
$("#TB_overlay").click(tb_remove);
}
}else{//all others
if(document.getElementById("TB_overlay") === null){
$("body").append("<div id='TB_overlay'></div><div id='TB_window'></div>");
$("#TB_overlay").click(tb_remove);
}
}
if(tb_detectMacXFF()){
$("#TB_overlay").addClass("TB_overlayMacFFBGHack");//use png overlay so hide flash
}else{
$("#TB_overlay").addClass("TB_overlayBG");//use background and opacity
}
if(caption===null){caption="";}
$("body").append("<div id='TB_load'><img src='"+imgLoader.src+"' /></div>");//add loader to the page
$('#TB_load').show();//show loader
var baseURL;
if(url.indexOf("?")!==-1){ //ff there is a query string involved
baseURL = url.substr(0, url.indexOf("?"));
}else{
baseURL = url;
}
var urlString = /\.jpg$|\.jpeg$|\.png$|\.gif$|\.bmp$/;
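// Crude type sniffing: URLs ending in an image extension are shown inline as images; everything else is treated as HTML (inline, iframe or ajax content).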
var urlType = baseURL.toLowerCase().match(urlString);
if(urlType == '.jpg' || urlType == '.jpeg' || urlType == '.png' || urlType == '.gif' || urlType == '.bmp'){//code to show images
TB_PrevCaption = "";
TB_PrevURL = "";
TB_PrevHTML = "";
TB_NextCaption = "";
TB_NextURL = "";
TB_NextHTML = "";
TB_imageCount = "";
TB_FoundURL = false;
if(imageGroup){
TB_TempArray = $("a[@rel="+imageGroup+"]").get();
for (TB_Counter = 0; ((TB_Counter < TB_TempArray.length) && (TB_NextHTML === "")); TB_Counter++) {
var urlTypeTemp = TB_TempArray[TB_Counter].href.toLowerCase().match(urlString);
if (!(TB_TempArray[TB_Counter].href == url)) {
if (TB_FoundURL) {
TB_NextCaption = TB_TempArray[TB_Counter].title;
TB_NextURL = TB_TempArray[TB_Counter].href;
TB_NextHTML = "<span id='TB_next'> <a href='#'>Next ></a></span>";
} else {
TB_PrevCaption = TB_TempArray[TB_Counter].title;
TB_PrevURL = TB_TempArray[TB_Counter].href;
TB_PrevHTML = "<span id='TB_prev'> <a href='#'>< Prev</a></span>";
}
} else {
TB_FoundURL = true;
TB_imageCount = "Image " + (TB_Counter + 1) +" of "+ (TB_TempArray.length);
}
}
}
imgPreloader = new Image();
imgPreloader.onload = function(){
imgPreloader.onload = null;
// Resizing large images - original by Christian Montoya, edited by me.
var pagesize = tb_getPageSize();
var x = pagesize[0] - 150;
var y = pagesize[1] - 150;
var imageWidth = imgPreloader.width;
var imageHeight = imgPreloader.height;
if (imageWidth > x) {
imageHeight = imageHeight * (x / imageWidth);
imageWidth = x;
if (imageHeight > y) {
imageWidth = imageWidth * (y / imageHeight);
imageHeight = y;
}
} else if (imageHeight > y) {
imageWidth = imageWidth * (y / imageHeight);
imageHeight = y;
if (imageWidth > x) {
imageHeight = imageHeight * (x / imageWidth);
imageWidth = x;
}
}
// End Resizing
TB_WIDTH = imageWidth + 30;
TB_HEIGHT = imageHeight + 60;
$("#TB_window").append("<a href='' id='TB_ImageOff' title='Close'><img id='TB_Image' src='"+url+"' width='"+imageWidth+"' height='"+imageHeight+"' alt='"+caption+"'/></a>" + "<div id='TB_caption'>"+caption+"<div id='TB_secondLine'>" + TB_imageCount + TB_PrevHTML + TB_NextHTML + "</div></div><div id='TB_closeWindow'><a href='#' id='TB_closeWindowButton' title='Close'>close</a> or Esc Key</div>");
$("#TB_closeWindowButton").click(tb_remove);
if (!(TB_PrevHTML === "")) {
function goPrev(){
if($(document).unbind("click",goPrev)){$(document).unbind("click",goPrev);}
$("#TB_window").remove();
$("body").append("<div id='TB_window'></div>");
tb_show(TB_PrevCaption, TB_PrevURL, imageGroup);
return false;
}
$("#TB_prev").click(goPrev);
}
if (!(TB_NextHTML === "")) {
function goNext(){
$("#TB_window").remove();
$("body").append("<div id='TB_window'></div>");
tb_show(TB_NextCaption, TB_NextURL, imageGroup);
return false;
}
$("#TB_next").click(goNext);
}
document.onkeydown = function(e){
if (e == null) { // ie
keycode = event.keyCode;
} else { // mozilla
keycode = e.which;
}
if(keycode == 27){ // close
tb_remove();
} else if(keycode == 190){ // '.' ('>') key: display next image
if(!(TB_NextHTML == "")){
document.onkeydown = "";
goNext();
}
} else if(keycode == 188){ // ',' ('<') key: display previous image
if(!(TB_PrevHTML == "")){
document.onkeydown = "";
goPrev();
}
}
};
tb_position();
$("#TB_load").remove();
$("#TB_ImageOff").click(tb_remove);
$("#TB_window").css({display:"block"}); //for safari using css instead of show
};
imgPreloader.src = url;
}else{//code to show html
var queryString = url.replace(/^[^\?]+\??/,'');
var params = tb_parseQuery( queryString );
TB_WIDTH = (params['width']*1) + 30 || 630; //defaults to 630 if no parameters were added to URL
TB_HEIGHT = (params['height']*1) + 40 || 440; //defaults to 440 if no parameters were added to URL
ajaxContentW = TB_WIDTH - 30;
ajaxContentH = TB_HEIGHT - 45;
if(url.indexOf('TB_iframe') != -1){// either iframe or ajax window
urlNoQuery = url.split('TB_');
$("#TB_iframeContent").remove();
if(params['modal'] != "true"){//iframe no modal
$("#TB_window").append("<div id='TB_title'><div id='TB_ajaxWindowTitle'>"+caption+"</div><div id='TB_closeAjaxWindow'><a href='#' id='TB_closeWindowButton' title='Close'>close</a> or Esc Key</div></div><iframe frameborder='0' hspace='0' src='"+urlNoQuery[0]+"' id='TB_iframeContent' name='TB_iframeContent"+Math.round(Math.random()*1000)+"' onload='tb_showIframe()' style='width:"+(ajaxContentW + 29)+"px;height:"+(ajaxContentH + 17)+"px;' > </iframe>");
}else{//iframe modal
$("#TB_overlay").unbind();
$("#TB_window").append("<iframe frameborder='0' hspace='0' src='"+urlNoQuery[0]+"' id='TB_iframeContent' name='TB_iframeContent"+Math.round(Math.random()*1000)+"' onload='tb_showIframe()' style='width:"+(ajaxContentW + 29)+"px;height:"+(ajaxContentH + 17)+"px;'> </iframe>");
}
}else{// not an iframe, ajax
if($("#TB_window").css("display") != "block"){
if(params['modal'] != "true"){//ajax no modal
$("#TB_window").append("<div id='TB_title'><div id='TB_ajaxWindowTitle'>"+caption+"</div><div id='TB_closeAjaxWindow'><a href='#' id='TB_closeWindowButton'>close</a> or Esc Key</div></div><div id='TB_ajaxContent' style='width:"+ajaxContentW+"px;height:"+ajaxContentH+"px'></div>");
}else{//ajax modal
$("#TB_overlay").unbind();
$("#TB_window").append("<div id='TB_ajaxContent' class='TB_modal' style='width:"+ajaxContentW+"px;height:"+ajaxContentH+"px;'></div>");
}
}else{//this means the window is already up, we are just loading new content via ajax
$("#TB_ajaxContent")[0].style.width = ajaxContentW +"px";
$("#TB_ajaxContent")[0].style.height = ajaxContentH +"px";
$("#TB_ajaxContent")[0].scrollTop = 0;
$("#TB_ajaxWindowTitle").html(caption);
}
}
$("#TB_closeWindowButton").click(tb_remove);
if(url.indexOf('TB_inline') != -1){
$("#TB_ajaxContent").append($('#' + params['inlineId']).children());
$("#TB_window").unload(function () {
$('#' + params['inlineId']).append( $("#TB_ajaxContent").children() ); // move elements back when you're finished
});
tb_position();
$("#TB_load").remove();
$("#TB_window").css({display:"block"});
}else if(url.indexOf('TB_iframe') != -1){
tb_position();
if($.browser.safari){//safari needs help because it will not fire iframe onload
$("#TB_load").remove();
$("#TB_window").css({display:"block"});
}
}else{
$("#TB_ajaxContent").load(url += "&random=" + (new Date().getTime()),function(){//to do a post change this load method
tb_position();
$("#TB_load").remove();
tb_init("#TB_ajaxContent a.thickbox");
$("#TB_window").css({display:"block"});
});
}
}
if(!params['modal']){
document.onkeyup = function(e){
if (e == null) { // ie
keycode = event.keyCode;
} else { // mozilla
keycode = e.which;
}
if(keycode == 27){ // close
tb_remove();
}
};
}
} catch(e) {
//nothing here
}
}
//helper functions below
function tb_showIframe(){
$("#TB_load").remove();
$("#TB_window").css({display:"block"});
}
function | () {
$("#TB_imageOff").unbind("click");
$("#TB_closeWindowButton").unbind("click");
$("#TB_window").fadeOut("fast",function(){$('#TB_window,#TB_overlay,#TB_HideSelect').trigger("unload").unbind().remove();});
$("#TB_load").remove();
if (typeof document.body.style.maxHeight == "undefined") {//if IE 6
$("body","html").css({height: "auto", width: "auto"});
$("html").css("overflow","");
}
document.onkeydown = "";
document.onkeyup = "";
return false;
}
function tb_position() {
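// Center the box: pull it left by half its width and (except on IE6) up by half its height.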
$("#TB_window").css({marginLeft: '-' + parseInt((TB_WIDTH / 2),10) + 'px', width: TB_WIDTH + 'px'});
if ( !(jQuery.browser.msie && jQuery.browser.version < 7)) { // take away IE6
$("#TB_window").css({marginTop: '-' + parseInt((TB_HEIGHT / 2),10) + 'px'});
}
}
function tb_parseQuery ( query ) {
var Params = {};
if ( ! query ) {return Params;}// return empty object
var Pairs = query.split(/[;&]/);
for ( var i = 0; i < Pairs.length; i++ ) {
var KeyVal = Pairs[i].split('=');
if ( ! KeyVal || KeyVal.length != 2 ) {continue;}
var key = unescape( KeyVal[0] );
var val = unescape( KeyVal[1] );
val = val.replace(/\+/g, ' ');
Params[key] = val;
}
return Params;
}
function tb_getPageSize(){
var de = document.documentElement;
var w = window.innerWidth || self.innerWidth || (de&&de.clientWidth) || document.body.clientWidth;
var h = window.innerHeight || self.innerHeight || (de&&de.clientHeight) || document.body.clientHeight;
arrayPageSize = [w,h];
return arrayPageSize;
}
function tb_detectMacXFF() {
var userAgent = navigator.userAgent.toLowerCase();
if (userAgent.indexOf('mac') != -1 && userAgent.indexOf('firefox')!=-1) {
return true;
}
}
| tb_remove | identifier_name |
thickbox.js | /*
* Thickbox 3.1 - One Box To Rule Them All.
* By Cody Lindley (http://www.codylindley.com)
* Copyright (c) 2007 cody lindley
* Licensed under the MIT License: http://www.opensource.org/licenses/mit-license.php
*/
$.noConflict();
// imagepath = images/thickbox/loadingAnimation.gif;
var tb_pathToImage = " ";
/*!!!!!!!!!!!!!!!!! edit below this line at your own risk !!!!!!!!!!!!!!!!!!!!!!!*/
//on page load call tb_init
jQuery(document).ready(function($){
tb_init('a.thickbox, area.thickbox, input.thickbox');//pass where to apply thickbox
imgLoader = new Image();// preload image
imgLoader.src = tb_pathToImage;
});
//add thickbox to href & area elements that have a class of .thickbox
function tb_init(domChunk){
jQuery(domChunk).click(function($){
var t = this.title || this.name || null;
var a = this.href || this.alt;
var g = this.rel || false;
tb_show(t,a,g);
this.blur();
return false;
});
}
function tb_show(caption, url, imageGroup) {//function called when the user clicks on a thickbox link
try {
if (typeof document.body.style.maxHeight === "undefined") {//if IE 6
$("body","html").css({height: "100%", width: "100%"});
$("html").css("overflow","hidden");
if (document.getElementById("TB_HideSelect") === null) {//iframe to hide select elements in ie6
$("body").append("<iframe id='TB_HideSelect'></iframe><div id='TB_overlay'></div><div id='TB_window'></div>");
$("#TB_overlay").click(tb_remove);
}
}else{//all others
if(document.getElementById("TB_overlay") === null){
$("body").append("<div id='TB_overlay'></div><div id='TB_window'></div>");
$("#TB_overlay").click(tb_remove);
}
}
if(tb_detectMacXFF()){
$("#TB_overlay").addClass("TB_overlayMacFFBGHack");//use png overlay so hide flash
}else{
$("#TB_overlay").addClass("TB_overlayBG");//use background and opacity
}
if(caption===null){caption="";}
$("body").append("<div id='TB_load'><img src='"+imgLoader.src+"' /></div>");//add loader to the page
$('#TB_load').show();//show loader
var baseURL;
if(url.indexOf("?")!==-1){ //ff there is a query string involved
baseURL = url.substr(0, url.indexOf("?"));
}else{
baseURL = url;
}
var urlString = /\.jpg$|\.jpeg$|\.png$|\.gif$|\.bmp$/;
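// Crude type sniffing: URLs ending in an image extension are shown inline as images; everything else is treated as HTML (inline, iframe or ajax content).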
var urlType = baseURL.toLowerCase().match(urlString);
if(urlType == '.jpg' || urlType == '.jpeg' || urlType == '.png' || urlType == '.gif' || urlType == '.bmp'){//code to show images
TB_PrevCaption = "";
TB_PrevURL = "";
TB_PrevHTML = "";
TB_NextCaption = "";
TB_NextURL = "";
TB_NextHTML = "";
TB_imageCount = "";
TB_FoundURL = false;
if(imageGroup){
TB_TempArray = $("a[@rel="+imageGroup+"]").get();
for (TB_Counter = 0; ((TB_Counter < TB_TempArray.length) && (TB_NextHTML === "")); TB_Counter++) {
var urlTypeTemp = TB_TempArray[TB_Counter].href.toLowerCase().match(urlString);
if (!(TB_TempArray[TB_Counter].href == url)) {
if (TB_FoundURL) {
TB_NextCaption = TB_TempArray[TB_Counter].title;
TB_NextURL = TB_TempArray[TB_Counter].href;
TB_NextHTML = "<span id='TB_next'> <a href='#'>Next ></a></span>";
} else {
TB_PrevCaption = TB_TempArray[TB_Counter].title;
TB_PrevURL = TB_TempArray[TB_Counter].href;
TB_PrevHTML = "<span id='TB_prev'> <a href='#'>< Prev</a></span>";
}
} else {
TB_FoundURL = true;
TB_imageCount = "Image " + (TB_Counter + 1) +" of "+ (TB_TempArray.length);
}
}
}
imgPreloader = new Image();
imgPreloader.onload = function(){
imgPreloader.onload = null;
// Resizing large images - original by Christian Montoya, edited by me.
var pagesize = tb_getPageSize();
var x = pagesize[0] - 150;
var y = pagesize[1] - 150;
var imageWidth = imgPreloader.width;
var imageHeight = imgPreloader.height;
if (imageWidth > x) {
imageHeight = imageHeight * (x / imageWidth);
imageWidth = x;
if (imageHeight > y) {
imageWidth = imageWidth * (y / imageHeight);
imageHeight = y;
}
} else if (imageHeight > y) {
imageWidth = imageWidth * (y / imageHeight);
imageHeight = y;
if (imageWidth > x) {
imageHeight = imageHeight * (x / imageWidth);
imageWidth = x;
}
}
// End Resizing
TB_WIDTH = imageWidth + 30;
TB_HEIGHT = imageHeight + 60;
$("#TB_window").append("<a href='' id='TB_ImageOff' title='Close'><img id='TB_Image' src='"+url+"' width='"+imageWidth+"' height='"+imageHeight+"' alt='"+caption+"'/></a>" + "<div id='TB_caption'>"+caption+"<div id='TB_secondLine'>" + TB_imageCount + TB_PrevHTML + TB_NextHTML + "</div></div><div id='TB_closeWindow'><a href='#' id='TB_closeWindowButton' title='Close'>close</a> or Esc Key</div>");
$("#TB_closeWindowButton").click(tb_remove);
if (!(TB_PrevHTML === "")) {
function goPrev(){
if($(document).unbind("click",goPrev)){$(document).unbind("click",goPrev);}
$("#TB_window").remove();
$("body").append("<div id='TB_window'></div>");
tb_show(TB_PrevCaption, TB_PrevURL, imageGroup);
return false;
}
$("#TB_prev").click(goPrev);
}
if (!(TB_NextHTML === "")) {
function goNext() |
$("#TB_next").click(goNext);
}
document.onkeydown = function(e){
if (e == null) { // ie
keycode = event.keyCode;
} else { // mozilla
keycode = e.which;
}
if(keycode == 27){ // close
tb_remove();
} else if(keycode == 190){ // '.' ('>') key: display next image
if(!(TB_NextHTML == "")){
document.onkeydown = "";
goNext();
}
} else if(keycode == 188){ // ',' ('<') key: display previous image
if(!(TB_PrevHTML == "")){
document.onkeydown = "";
goPrev();
}
}
};
tb_position();
$("#TB_load").remove();
$("#TB_ImageOff").click(tb_remove);
$("#TB_window").css({display:"block"}); //for safari using css instead of show
};
imgPreloader.src = url;
}else{//code to show html
var queryString = url.replace(/^[^\?]+\??/,'');
var params = tb_parseQuery( queryString );
TB_WIDTH = (params['width']*1) + 30 || 630; //defaults to 630 if no parameters were added to URL
TB_HEIGHT = (params['height']*1) + 40 || 440; //defaults to 440 if no parameters were added to URL
ajaxContentW = TB_WIDTH - 30;
ajaxContentH = TB_HEIGHT - 45;
if(url.indexOf('TB_iframe') != -1){// either iframe or ajax window
urlNoQuery = url.split('TB_');
$("#TB_iframeContent").remove();
if(params['modal'] != "true"){//iframe no modal
$("#TB_window").append("<div id='TB_title'><div id='TB_ajaxWindowTitle'>"+caption+"</div><div id='TB_closeAjaxWindow'><a href='#' id='TB_closeWindowButton' title='Close'>close</a> or Esc Key</div></div><iframe frameborder='0' hspace='0' src='"+urlNoQuery[0]+"' id='TB_iframeContent' name='TB_iframeContent"+Math.round(Math.random()*1000)+"' onload='tb_showIframe()' style='width:"+(ajaxContentW + 29)+"px;height:"+(ajaxContentH + 17)+"px;' > </iframe>");
}else{//iframe modal
$("#TB_overlay").unbind();
$("#TB_window").append("<iframe frameborder='0' hspace='0' src='"+urlNoQuery[0]+"' id='TB_iframeContent' name='TB_iframeContent"+Math.round(Math.random()*1000)+"' onload='tb_showIframe()' style='width:"+(ajaxContentW + 29)+"px;height:"+(ajaxContentH + 17)+"px;'> </iframe>");
}
}else{// not an iframe, ajax
if($("#TB_window").css("display") != "block"){
if(params['modal'] != "true"){//ajax no modal
$("#TB_window").append("<div id='TB_title'><div id='TB_ajaxWindowTitle'>"+caption+"</div><div id='TB_closeAjaxWindow'><a href='#' id='TB_closeWindowButton'>close</a> or Esc Key</div></div><div id='TB_ajaxContent' style='width:"+ajaxContentW+"px;height:"+ajaxContentH+"px'></div>");
}else{//ajax modal
$("#TB_overlay").unbind();
$("#TB_window").append("<div id='TB_ajaxContent' class='TB_modal' style='width:"+ajaxContentW+"px;height:"+ajaxContentH+"px;'></div>");
}
}else{//this means the window is already up, we are just loading new content via ajax
$("#TB_ajaxContent")[0].style.width = ajaxContentW +"px";
$("#TB_ajaxContent")[0].style.height = ajaxContentH +"px";
$("#TB_ajaxContent")[0].scrollTop = 0;
$("#TB_ajaxWindowTitle").html(caption);
}
}
$("#TB_closeWindowButton").click(tb_remove);
if(url.indexOf('TB_inline') != -1){
$("#TB_ajaxContent").append($('#' + params['inlineId']).children());
$("#TB_window").unload(function () {
$('#' + params['inlineId']).append( $("#TB_ajaxContent").children() ); // move elements back when you're finished
});
tb_position();
$("#TB_load").remove();
$("#TB_window").css({display:"block"});
}else if(url.indexOf('TB_iframe') != -1){
tb_position();
if($.browser.safari){//safari needs help because it will not fire iframe onload
$("#TB_load").remove();
$("#TB_window").css({display:"block"});
}
}else{
$("#TB_ajaxContent").load(url += "&random=" + (new Date().getTime()),function(){//to do a post change this load method
tb_position();
$("#TB_load").remove();
tb_init("#TB_ajaxContent a.thickbox");
$("#TB_window").css({display:"block"});
});
}
}
if(!params['modal']){
document.onkeyup = function(e){
if (e == null) { // ie
keycode = event.keyCode;
} else { // mozilla
keycode = e.which;
}
if(keycode == 27){ // close
tb_remove();
}
};
}
} catch(e) {
//nothing here
}
}
//helper functions below
function tb_showIframe(){
$("#TB_load").remove();
$("#TB_window").css({display:"block"});
}
function tb_remove() {
$("#TB_imageOff").unbind("click");
$("#TB_closeWindowButton").unbind("click");
$("#TB_window").fadeOut("fast",function(){$('#TB_window,#TB_overlay,#TB_HideSelect').trigger("unload").unbind().remove();});
$("#TB_load").remove();
if (typeof document.body.style.maxHeight == "undefined") {//if IE 6
$("body","html").css({height: "auto", width: "auto"});
$("html").css("overflow","");
}
document.onkeydown = "";
document.onkeyup = "";
return false;
}
function tb_position() {
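// Center the box: pull it left by half its width and (except on IE6) up by half its height.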
$("#TB_window").css({marginLeft: '-' + parseInt((TB_WIDTH / 2),10) + 'px', width: TB_WIDTH + 'px'});
if ( !(jQuery.browser.msie && jQuery.browser.version < 7)) { // take away IE6
$("#TB_window").css({marginTop: '-' + parseInt((TB_HEIGHT / 2),10) + 'px'});
}
}
function tb_parseQuery ( query ) {
var Params = {};
if ( ! query ) {return Params;}// return empty object
var Pairs = query.split(/[;&]/);
for ( var i = 0; i < Pairs.length; i++ ) {
var KeyVal = Pairs[i].split('=');
if ( ! KeyVal || KeyVal.length != 2 ) {continue;}
var key = unescape( KeyVal[0] );
var val = unescape( KeyVal[1] );
val = val.replace(/\+/g, ' ');
Params[key] = val;
}
return Params;
}
function tb_getPageSize(){
var de = document.documentElement;
var w = window.innerWidth || self.innerWidth || (de&&de.clientWidth) || document.body.clientWidth;
var h = window.innerHeight || self.innerHeight || (de&&de.clientHeight) || document.body.clientHeight;
arrayPageSize = [w,h];
return arrayPageSize;
}
function tb_detectMacXFF() {
var userAgent = navigator.userAgent.toLowerCase();
if (userAgent.indexOf('mac') != -1 && userAgent.indexOf('firefox')!=-1) {
return true;
}
}
| {
$("#TB_window").remove();
$("body").append("<div id='TB_window'></div>");
tb_show(TB_NextCaption, TB_NextURL, imageGroup);
return false;
} | identifier_body |
thickbox.js | /*
* Thickbox 3.1 - One Box To Rule Them All.
* By Cody Lindley (http://www.codylindley.com)
* Copyright (c) 2007 cody lindley
* Licensed under the MIT License: http://www.opensource.org/licenses/mit-license.php
*/
$.noConflict();
// imagepath = images/thickbox/loadingAnimation.gif;
var tb_pathToImage = " ";
/*!!!!!!!!!!!!!!!!! edit below this line at your own risk !!!!!!!!!!!!!!!!!!!!!!!*/
//on page load call tb_init
jQuery(document).ready(function($){
tb_init('a.thickbox, area.thickbox, input.thickbox');//pass where to apply thickbox
imgLoader = new Image();// preload image
imgLoader.src = tb_pathToImage;
});
//add thickbox to href & area elements that have a class of .thickbox
function tb_init(domChunk){
jQuery(domChunk).click(function($){
var t = this.title || this.name || null;
var a = this.href || this.alt;
var g = this.rel || false;
tb_show(t,a,g);
this.blur();
return false;
});
}
function tb_show(caption, url, imageGroup) {//function called when the user clicks on a thickbox link
try {
if (typeof document.body.style.maxHeight === "undefined") {//if IE 6
$("body","html").css({height: "100%", width: "100%"});
$("html").css("overflow","hidden");
if (document.getElementById("TB_HideSelect") === null) {//iframe to hide select elements in ie6
$("body").append("<iframe id='TB_HideSelect'></iframe><div id='TB_overlay'></div><div id='TB_window'></div>");
$("#TB_overlay").click(tb_remove);
}
}else{//all others
if(document.getElementById("TB_overlay") === null){
$("body").append("<div id='TB_overlay'></div><div id='TB_window'></div>");
$("#TB_overlay").click(tb_remove);
}
}
if(tb_detectMacXFF()){
$("#TB_overlay").addClass("TB_overlayMacFFBGHack");//use png overlay so hide flash
}else{
$("#TB_overlay").addClass("TB_overlayBG");//use background and opacity
}
if(caption===null){caption="";}
$("body").append("<div id='TB_load'><img src='"+imgLoader.src+"' /></div>");//add loader to the page
$('#TB_load').show();//show loader
var baseURL;
if(url.indexOf("?")!==-1){ //ff there is a query string involved
baseURL = url.substr(0, url.indexOf("?"));
}else{
baseURL = url;
}
var urlString = /\.jpg$|\.jpeg$|\.png$|\.gif$|\.bmp$/;
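// Crude type sniffing: URLs ending in an image extension are shown inline as images; everything else is treated as HTML (inline, iframe or ajax content).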
var urlType = baseURL.toLowerCase().match(urlString);
if(urlType == '.jpg' || urlType == '.jpeg' || urlType == '.png' || urlType == '.gif' || urlType == '.bmp'){//code to show images
TB_PrevCaption = "";
TB_PrevURL = "";
TB_PrevHTML = "";
TB_NextCaption = "";
TB_NextURL = "";
TB_NextHTML = "";
TB_imageCount = "";
TB_FoundURL = false;
if(imageGroup){
TB_TempArray = $("a[@rel="+imageGroup+"]").get();
for (TB_Counter = 0; ((TB_Counter < TB_TempArray.length) && (TB_NextHTML === "")); TB_Counter++) {
var urlTypeTemp = TB_TempArray[TB_Counter].href.toLowerCase().match(urlString);
if (!(TB_TempArray[TB_Counter].href == url)) {
if (TB_FoundURL) {
TB_NextCaption = TB_TempArray[TB_Counter].title;
TB_NextURL = TB_TempArray[TB_Counter].href;
TB_NextHTML = "<span id='TB_next'> <a href='#'>Next ></a></span>";
} else {
TB_PrevCaption = TB_TempArray[TB_Counter].title;
TB_PrevURL = TB_TempArray[TB_Counter].href;
TB_PrevHTML = "<span id='TB_prev'> <a href='#'>< Prev</a></span>";
}
} else {
TB_FoundURL = true;
TB_imageCount = "Image " + (TB_Counter + 1) +" of "+ (TB_TempArray.length);
}
}
}
imgPreloader = new Image();
imgPreloader.onload = function(){
imgPreloader.onload = null;
// Resizing large images - original by Christian Montoya, edited by me.
var pagesize = tb_getPageSize();
var x = pagesize[0] - 150;
var y = pagesize[1] - 150;
var imageWidth = imgPreloader.width;
var imageHeight = imgPreloader.height;
if (imageWidth > x) {
imageHeight = imageHeight * (x / imageWidth);
imageWidth = x;
if (imageHeight > y) {
imageWidth = imageWidth * (y / imageHeight);
imageHeight = y;
}
} else if (imageHeight > y) {
imageWidth = imageWidth * (y / imageHeight);
imageHeight = y;
if (imageWidth > x) {
imageHeight = imageHeight * (x / imageWidth);
imageWidth = x;
}
}
// End Resizing
TB_WIDTH = imageWidth + 30;
TB_HEIGHT = imageHeight + 60;
$("#TB_window").append("<a href='' id='TB_ImageOff' title='Close'><img id='TB_Image' src='"+url+"' width='"+imageWidth+"' height='"+imageHeight+"' alt='"+caption+"'/></a>" + "<div id='TB_caption'>"+caption+"<div id='TB_secondLine'>" + TB_imageCount + TB_PrevHTML + TB_NextHTML + "</div></div><div id='TB_closeWindow'><a href='#' id='TB_closeWindowButton' title='Close'>close</a> or Esc Key</div>");
$("#TB_closeWindowButton").click(tb_remove);
if (!(TB_PrevHTML === "")) {
function goPrev(){
if($(document).unbind("click",goPrev)){$(document).unbind("click",goPrev);}
$("#TB_window").remove();
$("body").append("<div id='TB_window'></div>");
tb_show(TB_PrevCaption, TB_PrevURL, imageGroup);
return false;
}
$("#TB_prev").click(goPrev);
}
if (!(TB_NextHTML === "")) {
function goNext(){
$("#TB_window").remove();
$("body").append("<div id='TB_window'></div>");
tb_show(TB_NextCaption, TB_NextURL, imageGroup);
return false;
}
$("#TB_next").click(goNext);
}
document.onkeydown = function(e){
if (e == null) { // ie
keycode = event.keyCode;
} else { // mozilla
keycode = e.which;
}
if(keycode == 27){ // close
tb_remove();
} else if(keycode == 190){ // '.' ('>') key: display next image
if(!(TB_NextHTML == "")){
document.onkeydown = "";
goNext();
}
} else if(keycode == 188){ // ',' ('<') key: display previous image
if(!(TB_PrevHTML == "")){
document.onkeydown = "";
goPrev();
}
}
};
tb_position();
$("#TB_load").remove();
$("#TB_ImageOff").click(tb_remove);
$("#TB_window").css({display:"block"}); //for safari using css instead of show
};
imgPreloader.src = url;
}else{//code to show html
var queryString = url.replace(/^[^\?]+\??/,'');
var params = tb_parseQuery( queryString );
TB_WIDTH = (params['width']*1) + 30 || 630; //defaults to 630 if no parameters were added to URL
TB_HEIGHT = (params['height']*1) + 40 || 440; //defaults to 440 if no parameters were added to URL
ajaxContentW = TB_WIDTH - 30;
ajaxContentH = TB_HEIGHT - 45;
if(url.indexOf('TB_iframe') != -1){// either iframe or ajax window
urlNoQuery = url.split('TB_');
$("#TB_iframeContent").remove();
if(params['modal'] != "true"){//iframe no modal
$("#TB_window").append("<div id='TB_title'><div id='TB_ajaxWindowTitle'>"+caption+"</div><div id='TB_closeAjaxWindow'><a href='#' id='TB_closeWindowButton' title='Close'>close</a> or Esc Key</div></div><iframe frameborder='0' hspace='0' src='"+urlNoQuery[0]+"' id='TB_iframeContent' name='TB_iframeContent"+Math.round(Math.random()*1000)+"' onload='tb_showIframe()' style='width:"+(ajaxContentW + 29)+"px;height:"+(ajaxContentH + 17)+"px;' > </iframe>");
}else{//iframe modal
$("#TB_overlay").unbind();
$("#TB_window").append("<iframe frameborder='0' hspace='0' src='"+urlNoQuery[0]+"' id='TB_iframeContent' name='TB_iframeContent"+Math.round(Math.random()*1000)+"' onload='tb_showIframe()' style='width:"+(ajaxContentW + 29)+"px;height:"+(ajaxContentH + 17)+"px;'> </iframe>");
}
}else{// not an iframe, ajax
if($("#TB_window").css("display") != "block"){
if(params['modal'] != "true"){//ajax no modal
$("#TB_window").append("<div id='TB_title'><div id='TB_ajaxWindowTitle'>"+caption+"</div><div id='TB_closeAjaxWindow'><a href='#' id='TB_closeWindowButton'>close</a> or Esc Key</div></div><div id='TB_ajaxContent' style='width:"+ajaxContentW+"px;height:"+ajaxContentH+"px'></div>");
}else{//ajax modal
$("#TB_overlay").unbind();
$("#TB_window").append("<div id='TB_ajaxContent' class='TB_modal' style='width:"+ajaxContentW+"px;height:"+ajaxContentH+"px;'></div>");
}
}else{//this means the window is already up, we are just loading new content via ajax
$("#TB_ajaxContent")[0].style.width = ajaxContentW +"px";
$("#TB_ajaxContent")[0].style.height = ajaxContentH +"px";
$("#TB_ajaxContent")[0].scrollTop = 0;
$("#TB_ajaxWindowTitle").html(caption);
}
}
$("#TB_closeWindowButton").click(tb_remove);
if(url.indexOf('TB_inline') != -1){
$("#TB_ajaxContent").append($('#' + params['inlineId']).children());
$("#TB_window").unload(function () {
$('#' + params['inlineId']).append( $("#TB_ajaxContent").children() ); // move elements back when you're finished
});
tb_position();
$("#TB_load").remove();
$("#TB_window").css({display:"block"});
}else if(url.indexOf('TB_iframe') != -1){
tb_position();
if($.browser.safari){//safari needs help because it will not fire iframe onload
$("#TB_load").remove();
$("#TB_window").css({display:"block"});
}
}else{
$("#TB_ajaxContent").load(url += "&random=" + (new Date().getTime()),function(){//to do a post change this load method
tb_position();
$("#TB_load").remove();
tb_init("#TB_ajaxContent a.thickbox");
$("#TB_window").css({display:"block"});
});
}
}
if(!params['modal']){
document.onkeyup = function(e){
if (e == null) { // ie
keycode = event.keyCode;
} else |
if(keycode == 27){ // close
tb_remove();
}
};
}
} catch(e) {
//nothing here
}
}
//helper functions below
function tb_showIframe(){
$("#TB_load").remove();
$("#TB_window").css({display:"block"});
}
function tb_remove() {
$("#TB_imageOff").unbind("click");
$("#TB_closeWindowButton").unbind("click");
$("#TB_window").fadeOut("fast",function(){$('#TB_window,#TB_overlay,#TB_HideSelect').trigger("unload").unbind().remove();});
$("#TB_load").remove();
if (typeof document.body.style.maxHeight == "undefined") {//if IE 6
$("body","html").css({height: "auto", width: "auto"});
$("html").css("overflow","");
}
document.onkeydown = "";
document.onkeyup = "";
return false;
}
function tb_position() {
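// Center the box: pull it left by half its width and (except on IE6) up by half its height.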
$("#TB_window").css({marginLeft: '-' + parseInt((TB_WIDTH / 2),10) + 'px', width: TB_WIDTH + 'px'});
if ( !(jQuery.browser.msie && jQuery.browser.version < 7)) { // take away IE6
$("#TB_window").css({marginTop: '-' + parseInt((TB_HEIGHT / 2),10) + 'px'});
}
}
function tb_parseQuery ( query ) {
var Params = {};
if ( ! query ) {return Params;}// return empty object
var Pairs = query.split(/[;&]/);
for ( var i = 0; i < Pairs.length; i++ ) {
var KeyVal = Pairs[i].split('=');
if ( ! KeyVal || KeyVal.length != 2 ) {continue;}
var key = unescape( KeyVal[0] );
var val = unescape( KeyVal[1] );
val = val.replace(/\+/g, ' ');
Params[key] = val;
}
return Params;
}
function tb_getPageSize(){
var de = document.documentElement;
var w = window.innerWidth || self.innerWidth || (de&&de.clientWidth) || document.body.clientWidth;
var h = window.innerHeight || self.innerHeight || (de&&de.clientHeight) || document.body.clientHeight;
arrayPageSize = [w,h];
return arrayPageSize;
}
function tb_detectMacXFF() {
var userAgent = navigator.userAgent.toLowerCase();
if (userAgent.indexOf('mac') != -1 && userAgent.indexOf('firefox')!=-1) {
return true;
}
}
| { // mozilla
keycode = e.which;
} | conditional_block |
thickbox.js | /*
* Thickbox 3.1 - One Box To Rule Them All.
* By Cody Lindley (http://www.codylindley.com)
* Copyright (c) 2007 cody lindley
* Licensed under the MIT License: http://www.opensource.org/licenses/mit-license.php
*/
$.noConflict();
// imagepath = images/thickbox/loadingAnimation.gif;
var tb_pathToImage = " ";
/*!!!!!!!!!!!!!!!!! edit below this line at your own risk !!!!!!!!!!!!!!!!!!!!!!!*/
//on page load call tb_init
jQuery(document).ready(function($){
tb_init('a.thickbox, area.thickbox, input.thickbox');//pass where to apply thickbox
imgLoader = new Image();// preload image
imgLoader.src = tb_pathToImage;
});
//add thickbox to href & area elements that have a class of .thickbox
function tb_init(domChunk){
jQuery(domChunk).click(function($){
var t = this.title || this.name || null;
var a = this.href || this.alt;
var g = this.rel || false;
tb_show(t,a,g);
this.blur();
return false;
});
}
function tb_show(caption, url, imageGroup) {//function called when the user clicks on a thickbox link
try {
if (typeof document.body.style.maxHeight === "undefined") {//if IE 6
$("body","html").css({height: "100%", width: "100%"});
$("html").css("overflow","hidden");
if (document.getElementById("TB_HideSelect") === null) {//iframe to hide select elements in ie6
$("body").append("<iframe id='TB_HideSelect'></iframe><div id='TB_overlay'></div><div id='TB_window'></div>");
$("#TB_overlay").click(tb_remove);
}
}else{//all others
if(document.getElementById("TB_overlay") === null){
$("body").append("<div id='TB_overlay'></div><div id='TB_window'></div>");
$("#TB_overlay").click(tb_remove);
}
}
if(tb_detectMacXFF()){
$("#TB_overlay").addClass("TB_overlayMacFFBGHack");//use png overlay so hide flash
}else{
$("#TB_overlay").addClass("TB_overlayBG");//use background and opacity
}
if(caption===null){caption="";}
$("body").append("<div id='TB_load'><img src='"+imgLoader.src+"' /></div>");//add loader to the page
$('#TB_load').show();//show loader
var baseURL;
if(url.indexOf("?")!==-1){ //ff there is a query string involved
baseURL = url.substr(0, url.indexOf("?"));
}else{
baseURL = url;
}
var urlString = /\.jpg$|\.jpeg$|\.png$|\.gif$|\.bmp$/;
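// Crude type sniffing: URLs ending in an image extension are shown inline as images; everything else is treated as HTML (inline, iframe or ajax content).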
var urlType = baseURL.toLowerCase().match(urlString);
if(urlType == '.jpg' || urlType == '.jpeg' || urlType == '.png' || urlType == '.gif' || urlType == '.bmp'){//code to show images
TB_PrevCaption = "";
TB_PrevURL = "";
TB_PrevHTML = "";
TB_NextCaption = "";
TB_NextURL = "";
TB_NextHTML = "";
TB_imageCount = "";
TB_FoundURL = false;
if(imageGroup){
TB_TempArray = $("a[@rel="+imageGroup+"]").get();
for (TB_Counter = 0; ((TB_Counter < TB_TempArray.length) && (TB_NextHTML === "")); TB_Counter++) {
var urlTypeTemp = TB_TempArray[TB_Counter].href.toLowerCase().match(urlString);
if (!(TB_TempArray[TB_Counter].href == url)) {
if (TB_FoundURL) {
TB_NextCaption = TB_TempArray[TB_Counter].title;
TB_NextURL = TB_TempArray[TB_Counter].href;
TB_NextHTML = "<span id='TB_next'> <a href='#'>Next ></a></span>";
} else {
TB_PrevCaption = TB_TempArray[TB_Counter].title;
TB_PrevURL = TB_TempArray[TB_Counter].href;
TB_PrevHTML = "<span id='TB_prev'> <a href='#'>< Prev</a></span>";
}
} else {
TB_FoundURL = true;
TB_imageCount = "Image " + (TB_Counter + 1) +" of "+ (TB_TempArray.length);
}
}
}
imgPreloader = new Image();
imgPreloader.onload = function(){
imgPreloader.onload = null;
// Resizing large images - original by Christian Montoya, edited by me.
var pagesize = tb_getPageSize();
var x = pagesize[0] - 150;
var y = pagesize[1] - 150;
var imageWidth = imgPreloader.width;
var imageHeight = imgPreloader.height;
if (imageWidth > x) {
imageHeight = imageHeight * (x / imageWidth);
imageWidth = x;
if (imageHeight > y) {
imageWidth = imageWidth * (y / imageHeight);
imageHeight = y;
} | if (imageWidth > x) {
imageHeight = imageHeight * (x / imageWidth);
imageWidth = x;
}
}
// End Resizing
TB_WIDTH = imageWidth + 30;
TB_HEIGHT = imageHeight + 60;
$("#TB_window").append("<a href='' id='TB_ImageOff' title='Close'><img id='TB_Image' src='"+url+"' width='"+imageWidth+"' height='"+imageHeight+"' alt='"+caption+"'/></a>" + "<div id='TB_caption'>"+caption+"<div id='TB_secondLine'>" + TB_imageCount + TB_PrevHTML + TB_NextHTML + "</div></div><div id='TB_closeWindow'><a href='#' id='TB_closeWindowButton' title='Close'>close</a> or Esc Key</div>");
$("#TB_closeWindowButton").click(tb_remove);
if (!(TB_PrevHTML === "")) {
function goPrev(){
if($(document).unbind("click",goPrev)){$(document).unbind("click",goPrev);}
$("#TB_window").remove();
$("body").append("<div id='TB_window'></div>");
tb_show(TB_PrevCaption, TB_PrevURL, imageGroup);
return false;
}
$("#TB_prev").click(goPrev);
}
if (!(TB_NextHTML === "")) {
function goNext(){
$("#TB_window").remove();
$("body").append("<div id='TB_window'></div>");
tb_show(TB_NextCaption, TB_NextURL, imageGroup);
return false;
}
$("#TB_next").click(goNext);
}
document.onkeydown = function(e){
if (e == null) { // ie
keycode = event.keyCode;
} else { // mozilla
keycode = e.which;
}
if(keycode == 27){ // close
tb_remove();
} else if(keycode == 190){ // '.' ('>') key: display next image
if(!(TB_NextHTML == "")){
document.onkeydown = "";
goNext();
}
} else if(keycode == 188){ // ',' ('<') key: display previous image
if(!(TB_PrevHTML == "")){
document.onkeydown = "";
goPrev();
}
}
};
tb_position();
$("#TB_load").remove();
$("#TB_ImageOff").click(tb_remove);
$("#TB_window").css({display:"block"}); //for safari using css instead of show
};
imgPreloader.src = url;
}else{//code to show html
var queryString = url.replace(/^[^\?]+\??/,'');
var params = tb_parseQuery( queryString );
TB_WIDTH = (params['width']*1) + 30 || 630; //defaults to 630 if no parameters were added to URL
TB_HEIGHT = (params['height']*1) + 40 || 440; //defaults to 440 if no parameters were added to URL
ajaxContentW = TB_WIDTH - 30;
ajaxContentH = TB_HEIGHT - 45;
if(url.indexOf('TB_iframe') != -1){// either iframe or ajax window
urlNoQuery = url.split('TB_');
$("#TB_iframeContent").remove();
if(params['modal'] != "true"){//iframe no modal
$("#TB_window").append("<div id='TB_title'><div id='TB_ajaxWindowTitle'>"+caption+"</div><div id='TB_closeAjaxWindow'><a href='#' id='TB_closeWindowButton' title='Close'>close</a> or Esc Key</div></div><iframe frameborder='0' hspace='0' src='"+urlNoQuery[0]+"' id='TB_iframeContent' name='TB_iframeContent"+Math.round(Math.random()*1000)+"' onload='tb_showIframe()' style='width:"+(ajaxContentW + 29)+"px;height:"+(ajaxContentH + 17)+"px;' > </iframe>");
}else{//iframe modal
$("#TB_overlay").unbind();
$("#TB_window").append("<iframe frameborder='0' hspace='0' src='"+urlNoQuery[0]+"' id='TB_iframeContent' name='TB_iframeContent"+Math.round(Math.random()*1000)+"' onload='tb_showIframe()' style='width:"+(ajaxContentW + 29)+"px;height:"+(ajaxContentH + 17)+"px;'> </iframe>");
}
}else{// not an iframe, ajax
if($("#TB_window").css("display") != "block"){
if(params['modal'] != "true"){//ajax no modal
$("#TB_window").append("<div id='TB_title'><div id='TB_ajaxWindowTitle'>"+caption+"</div><div id='TB_closeAjaxWindow'><a href='#' id='TB_closeWindowButton'>close</a> or Esc Key</div></div><div id='TB_ajaxContent' style='width:"+ajaxContentW+"px;height:"+ajaxContentH+"px'></div>");
}else{//ajax modal
$("#TB_overlay").unbind();
$("#TB_window").append("<div id='TB_ajaxContent' class='TB_modal' style='width:"+ajaxContentW+"px;height:"+ajaxContentH+"px;'></div>");
}
}else{//this means the window is already up, we are just loading new content via ajax
$("#TB_ajaxContent")[0].style.width = ajaxContentW +"px";
$("#TB_ajaxContent")[0].style.height = ajaxContentH +"px";
$("#TB_ajaxContent")[0].scrollTop = 0;
$("#TB_ajaxWindowTitle").html(caption);
}
}
$("#TB_closeWindowButton").click(tb_remove);
if(url.indexOf('TB_inline') != -1){
$("#TB_ajaxContent").append($('#' + params['inlineId']).children());
$("#TB_window").unload(function () {
$('#' + params['inlineId']).append( $("#TB_ajaxContent").children() ); // move elements back when you're finished
});
tb_position();
$("#TB_load").remove();
$("#TB_window").css({display:"block"});
}else if(url.indexOf('TB_iframe') != -1){
tb_position();
if($.browser.safari){//safari needs help because it will not fire iframe onload
$("#TB_load").remove();
$("#TB_window").css({display:"block"});
}
}else{
$("#TB_ajaxContent").load(url += "&random=" + (new Date().getTime()),function(){//to do a post change this load method
tb_position();
$("#TB_load").remove();
tb_init("#TB_ajaxContent a.thickbox");
$("#TB_window").css({display:"block"});
});
}
}
if(!params['modal']){
document.onkeyup = function(e){
if (e == null) { // ie
keycode = event.keyCode;
} else { // mozilla
keycode = e.which;
}
if(keycode == 27){ // close
tb_remove();
}
};
}
} catch(e) {
//nothing here
}
}
//helper functions below
function tb_showIframe(){
$("#TB_load").remove();
$("#TB_window").css({display:"block"});
}
function tb_remove() {
$("#TB_imageOff").unbind("click");
$("#TB_closeWindowButton").unbind("click");
$("#TB_window").fadeOut("fast",function(){$('#TB_window,#TB_overlay,#TB_HideSelect').trigger("unload").unbind().remove();});
$("#TB_load").remove();
if (typeof document.body.style.maxHeight == "undefined") {//if IE 6
$("body","html").css({height: "auto", width: "auto"});
$("html").css("overflow","");
}
document.onkeydown = "";
document.onkeyup = "";
return false;
}
function tb_position() {
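// Center the box: pull it left by half its width and (except on IE6) up by half its height.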
$("#TB_window").css({marginLeft: '-' + parseInt((TB_WIDTH / 2),10) + 'px', width: TB_WIDTH + 'px'});
if ( !(jQuery.browser.msie && jQuery.browser.version < 7)) { // take away IE6
$("#TB_window").css({marginTop: '-' + parseInt((TB_HEIGHT / 2),10) + 'px'});
}
}
function tb_parseQuery ( query ) {
var Params = {};
if ( ! query ) {return Params;}// return empty object
var Pairs = query.split(/[;&]/);
for ( var i = 0; i < Pairs.length; i++ ) {
var KeyVal = Pairs[i].split('=');
if ( ! KeyVal || KeyVal.length != 2 ) {continue;}
var key = unescape( KeyVal[0] );
var val = unescape( KeyVal[1] );
val = val.replace(/\+/g, ' ');
Params[key] = val;
}
return Params;
}
function tb_getPageSize(){
var de = document.documentElement;
var w = window.innerWidth || self.innerWidth || (de&&de.clientWidth) || document.body.clientWidth;
var h = window.innerHeight || self.innerHeight || (de&&de.clientHeight) || document.body.clientHeight;
arrayPageSize = [w,h];
return arrayPageSize;
}
function tb_detectMacXFF() {
var userAgent = navigator.userAgent.toLowerCase();
if (userAgent.indexOf('mac') != -1 && userAgent.indexOf('firefox')!=-1) {
return true;
}
} | } else if (imageHeight > y) {
imageWidth = imageWidth * (y / imageHeight);
imageHeight = y; | random_line_split |
reset.js | 'use strict';
const CONSTANTS = require('./../constants');
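// Strips every non-protected role from the invoking member and deletes their per-channel permission overwrites, then reports what changed.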
const resetRoles = (data, message) => {
let reply = 'You have no roles to reset ' + message.author.toString();
if (message.member.roles.cache) {
var found = false;
//reset user roles
message.member.roles.cache.forEach( (role) => {
if (CONSTANTS.PROTECTED_ROLES.indexOf(role.name) == -1) {
if(!found) reply = message.author.toString() + ', I am removing the following roles:';
found = true;
reply += ' ' + role.name;
message.member.roles.remove(role).catch(console.error);
}
});
found = false;
// reset channel overwritten permissions
Object.keys(data.channelsByName).forEach((channelName) => {
let channel = data.channelsByName[channelName];
if(channel.name.indexOf('-') > -1) |
});
}
message.channel.send(reply);
return reply;
};
module.exports = (data) => ( (message) => {
return resetRoles(data, message);
});
| {
let foundOverwrite = channel.permissionOverwrites.get(message.author.id);
if(foundOverwrite) {
if(!found) reply += '\n' + message.author.toString() + ', I am unhiding these neighborhood channels: ';
found = true; // do not repeat found message
reply += `#${channel.name}`;
foundOverwrite.delete();
}
} | conditional_block |
reset.js | 'use strict';
const CONSTANTS = require('./../constants');
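// Strips every non-protected role from the invoking member and deletes their per-channel permission overwrites, then reports what changed.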
const resetRoles = (data, message) => {
let reply = 'You have no roles to reset ' + message.author.toString();
if (message.member.roles.cache) {
var found = false;
//reset user roles
message.member.roles.cache.forEach( (role) => {
if (CONSTANTS.PROTECTED_ROLES.indexOf(role.name) == -1) {
if(!found) reply = message.author.toString() + ', I am removing the following roles:';
found = true;
reply += ' ' + role.name;
message.member.roles.remove(role).catch(console.error);
}
});
found = false;
// reset channel overwritten permissions
Object.keys(data.channelsByName).forEach((channelName) => {
let channel = data.channelsByName[channelName];
if(channel.name.indexOf('-') > -1) {
let foundOverwrite = channel.permissionOverwrites.get(message.author.id);
if(foundOverwrite) {
if(!found) reply += '\n' + message.author.toString() + ', I am unhiding these neighborhood channels: '; | }
}
});
}
message.channel.send(reply);
return reply;
};
module.exports = (data) => ( (message) => {
return resetRoles(data, message);
}); | found = true; // do not repeat found message
reply += `#${channel.name}`;
foundOverwrite.delete(); | random_line_split |
sensorsregistrar.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
from st2common import log as logging
from st2common.constants.meta import ALLOWED_EXTS
from st2common.bootstrap.base import ResourceRegistrar
import st2common.content.utils as content_utils
from st2common.models.api.sensor import SensorTypeAPI
from st2common.persistence.sensor import SensorType
__all__ = [
'SensorsRegistrar',
'register_sensors'
]
LOG = logging.getLogger(__name__)
PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)))
class | (ResourceRegistrar):
ALLOWED_EXTENSIONS = ALLOWED_EXTS
def register_sensors_from_packs(self, base_dirs):
"""
Discover all the packs in the provided directory and register sensors from all of the
discovered packs.
:return: Number of sensors registered.
:rtype: ``int``
"""
# Register packs first
self.register_packs(base_dirs=base_dirs)
registered_count = 0
content = self._pack_loader.get_content(base_dirs=base_dirs,
content_type='sensors')
for pack, sensors_dir in six.iteritems(content):
if not sensors_dir:
LOG.debug('Pack %s does not contain sensors.', pack)
continue
try:
LOG.debug('Registering sensors from pack %s:, dir: %s', pack, sensors_dir)
sensors = self._get_sensors_from_pack(sensors_dir)
count = self._register_sensors_from_pack(pack=pack, sensors=sensors)
registered_count += count
except Exception as e:
if self._fail_on_failure:
raise e
LOG.exception('Failed registering all sensors from pack "%s": %s', sensors_dir,
str(e))
return registered_count
def register_sensors_from_pack(self, pack_dir):
"""
Register all the sensors from the provided pack.
:return: Number of sensors registered.
:rtype: ``int``
"""
pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir
_, pack = os.path.split(pack_dir)
sensors_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,
content_type='sensors')
# Register pack first
self.register_pack(pack_name=pack, pack_dir=pack_dir)
registered_count = 0
if not sensors_dir:
return registered_count
LOG.debug('Registering sensors from pack %s:, dir: %s', pack, sensors_dir)
try:
sensors = self._get_sensors_from_pack(sensors_dir=sensors_dir)
registered_count = self._register_sensors_from_pack(pack=pack, sensors=sensors)
except Exception as e:
if self._fail_on_failure:
raise e
LOG.exception('Failed registering all sensors from pack "%s": %s', sensors_dir, str(e))
return registered_count
def _get_sensors_from_pack(self, sensors_dir):
return self.get_resources_from_pack(resources_dir=sensors_dir)
def _register_sensors_from_pack(self, pack, sensors):
registered_count = 0
for sensor in sensors:
try:
self._register_sensor_from_pack(pack=pack, sensor=sensor)
except Exception as e:
if self._fail_on_failure:
raise e
LOG.debug('Failed to register sensor "%s": %s', sensor, str(e))
else:
LOG.debug('Sensor "%s" successfully registered', sensor)
registered_count += 1
return registered_count
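# Load one sensor's metadata file, normalize its pack/entry_point fields, and create or update the matching SensorType model.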
def _register_sensor_from_pack(self, pack, sensor):
sensor_metadata_file_path = sensor
LOG.debug('Loading sensor from %s.', sensor_metadata_file_path)
content = self._meta_loader.load(file_path=sensor_metadata_file_path)
pack_field = content.get('pack', None)
if not pack_field:
content['pack'] = pack
pack_field = pack
if pack_field != pack:
raise Exception('Model is in pack "%s" but field "pack" is different: %s' %
(pack, pack_field))
entry_point = content.get('entry_point', None)
if not entry_point:
raise ValueError('Sensor definition missing entry_point')
sensors_dir = os.path.dirname(sensor_metadata_file_path)
sensor_file_path = os.path.join(sensors_dir, entry_point)
artifact_uri = 'file://%s' % (sensor_file_path)
content['artifact_uri'] = artifact_uri
content['entry_point'] = entry_point
sensor_api = SensorTypeAPI(**content)
sensor_model = SensorTypeAPI.to_model(sensor_api)
sensor_types = SensorType.query(pack=sensor_model.pack, name=sensor_model.name)
if len(sensor_types) >= 1:
sensor_type = sensor_types[0]
LOG.debug('Found existing sensor id:%s with name:%s. Will update it.',
sensor_type.id, sensor_type.name)
sensor_model.id = sensor_type.id
try:
sensor_model = SensorType.add_or_update(sensor_model)
except:
LOG.exception('Failed creating sensor model for %s', sensor)
return sensor_model
def register_sensors(packs_base_paths=None, pack_dir=None, use_pack_cache=True,
fail_on_failure=False):
if packs_base_paths:
assert isinstance(packs_base_paths, list)
if not packs_base_paths:
packs_base_paths = content_utils.get_packs_base_paths()
registrar = SensorsRegistrar(use_pack_cache=use_pack_cache,
fail_on_failure=fail_on_failure)
if pack_dir:
result = registrar.register_sensors_from_pack(pack_dir=pack_dir)
else:
result = registrar.register_sensors_from_packs(base_dirs=packs_base_paths)
return result
| SensorsRegistrar | identifier_name |
sensorsregistrar.py

# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import six

from st2common import log as logging
from st2common.constants.meta import ALLOWED_EXTS
from st2common.bootstrap.base import ResourceRegistrar
import st2common.content.utils as content_utils
from st2common.models.api.sensor import SensorTypeAPI
from st2common.persistence.sensor import SensorType

__all__ = [
    'SensorsRegistrar',
    'register_sensors'
]

LOG = logging.getLogger(__name__)

PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)))


class SensorsRegistrar(ResourceRegistrar):
    ALLOWED_EXTENSIONS = ALLOWED_EXTS

    def register_sensors_from_packs(self, base_dirs):
        """
        Discover all the packs in the provided directory and register sensors from all of the
        discovered packs.

        :return: Number of sensors registered.
        :rtype: ``int``
        """
        # Register packs first
        self.register_packs(base_dirs=base_dirs)

        registered_count = 0
        content = self._pack_loader.get_content(base_dirs=base_dirs,
                                                content_type='sensors')

        for pack, sensors_dir in six.iteritems(content):
            if not sensors_dir:
                LOG.debug('Pack %s does not contain sensors.', pack)
                continue
            try:
                LOG.debug('Registering sensors from pack %s, dir: %s', pack, sensors_dir)
                sensors = self._get_sensors_from_pack(sensors_dir)
                count = self._register_sensors_from_pack(pack=pack, sensors=sensors)
                registered_count += count
            except Exception as e:
                if self._fail_on_failure:
                    raise e

                LOG.exception('Failed registering all sensors from pack "%s": %s',
                              sensors_dir, str(e))

        return registered_count

    def register_sensors_from_pack(self, pack_dir):
        """
        Register all the sensors from the provided pack.

        :return: Number of sensors registered.
        :rtype: ``int``
        """
        pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir
        _, pack = os.path.split(pack_dir)
        sensors_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,
                                                              content_type='sensors')

        # Register pack first
        self.register_pack(pack_name=pack, pack_dir=pack_dir)

        registered_count = 0
        if not sensors_dir:
            return registered_count

        LOG.debug('Registering sensors from pack %s, dir: %s', pack, sensors_dir)

        try:
            sensors = self._get_sensors_from_pack(sensors_dir=sensors_dir)
            registered_count = self._register_sensors_from_pack(pack=pack, sensors=sensors)
        except Exception as e:
            if self._fail_on_failure:
                raise e

            LOG.exception('Failed registering all sensors from pack "%s": %s',
                          sensors_dir, str(e))

        return registered_count

    def _get_sensors_from_pack(self, sensors_dir):
        return self.get_resources_from_pack(resources_dir=sensors_dir)

    def _register_sensors_from_pack(self, pack, sensors):
        registered_count = 0
        for sensor in sensors:
            try:
                self._register_sensor_from_pack(pack=pack, sensor=sensor)
            except Exception as e:
                if self._fail_on_failure:
                    raise e

                LOG.debug('Failed to register sensor "%s": %s', sensor, str(e))
            else:
                LOG.debug('Sensor "%s" successfully registered', sensor)
                registered_count += 1

        return registered_count

    def _register_sensor_from_pack(self, pack, sensor):
        sensor_metadata_file_path = sensor

        LOG.debug('Loading sensor from %s.', sensor_metadata_file_path)
        content = self._meta_loader.load(file_path=sensor_metadata_file_path)

        pack_field = content.get('pack', None)
        if not pack_field:
            content['pack'] = pack
            pack_field = pack
        if pack_field != pack:
            raise Exception('Model is in pack "%s" but field "pack" is different: %s' %
                            (pack, pack_field))

        entry_point = content.get('entry_point', None)
        if not entry_point:
            raise ValueError('Sensor definition missing entry_point')

        sensors_dir = os.path.dirname(sensor_metadata_file_path)
        sensor_file_path = os.path.join(sensors_dir, entry_point)
        artifact_uri = 'file://%s' % (sensor_file_path)
        content['artifact_uri'] = artifact_uri
        content['entry_point'] = entry_point

        sensor_api = SensorTypeAPI(**content)
        sensor_model = SensorTypeAPI.to_model(sensor_api)

        sensor_types = SensorType.query(pack=sensor_model.pack, name=sensor_model.name)
        if len(sensor_types) >= 1:
            sensor_type = sensor_types[0]
            LOG.debug('Found existing sensor id:%s with name:%s. Will update it.',
                      sensor_type.id, sensor_type.name)
            sensor_model.id = sensor_type.id

        try:
            sensor_model = SensorType.add_or_update(sensor_model)
        except Exception:
            LOG.exception('Failed creating sensor model for %s', sensor)

        return sensor_model


def register_sensors(packs_base_paths=None, pack_dir=None, use_pack_cache=True,
                     fail_on_failure=False):
    if packs_base_paths:
        assert isinstance(packs_base_paths, list)
    if not packs_base_paths:
        packs_base_paths = content_utils.get_packs_base_paths()

    registrar = SensorsRegistrar(use_pack_cache=use_pack_cache,
                                 fail_on_failure=fail_on_failure)

    if pack_dir:
        result = registrar.register_sensors_from_pack(pack_dir=pack_dir)
    else:
        result = registrar.register_sensors_from_packs(base_dirs=packs_base_paths)

    return result
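For context, the module-level register_sensors() function above is the standard entry point into this registrar. Below is a minimal usage sketch, not part of the module itself; it assumes an already-configured st2 environment (database connection, parsed config) and that the module lives at st2common.bootstrap.sensorsregistrar — the pack path shown is only an example:

# Hypothetical bootstrap snippet (assumptions noted above).
from st2common.bootstrap.sensorsregistrar import register_sensors

# Registers packs first, then every sensor found under each pack's sensors/ dir,
# and returns the number of sensors registered.
count = register_sensors(packs_base_paths=['/opt/stackstorm/packs'],
                         fail_on_failure=True)
print('Registered %d sensor(s)' % count)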
urls.py

from django.conf.urls import url

from rpi.beehive.views import AddBeehiveView, delete_readering_view, \
    ChartReaderingView, export_view, ListReaderingView, DeleteBeehiveView, \
    ModifyBeehiveView, summary_view

urlpatterns = [
    url(r'^ajouter$', AddBeehiveView.as_view(), name='add-beehive'),
    url(r'^(?P<pk>\d+)/$', summary_view, name='summary'),
    url(r'^(?P<pk>\d+)/voir/tableau/$', ListReaderingView.as_view(),
        name='table'),
    url(r'^(?P<pk>\d+)/voir/graphiques/$', ChartReaderingView.as_view(),
        name='charts'),
    url(r'^(?P<pk>\d+)/exporter/$', export_view, name='export'),
    url(r'^(?P<pk>\d+)/modifier/$', ModifyBeehiveView.as_view(),
        name='modify-beehive'),
    url(r'^(?P<pk>\d+)/supprimer/$', DeleteBeehiveView.as_view(),
        name='delete-beehive'),
    url(r'^supprimer-releve/(?P<pk>\d+)/$', delete_readering_view,
        name='delete-readering'),
]
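Since every pattern above is named, application code would normally resolve these routes with reverse() rather than hard-coding paths. A small illustrative sketch — it assumes this URLconf is included at the site root without a namespace, and pk=1 is only an example value:

# Hypothetical usage from a view or test, on the Django versions that
# still ship django.conf.urls.url (reverse lives in core.urlresolvers there).
from django.core.urlresolvers import reverse

summary_url = reverse('summary', kwargs={'pk': 1})  # e.g. '/1/'
charts_url = reverse('charts', kwargs={'pk': 1})    # e.g. '/1/voir/graphiques/'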
mock-heroes.ts

import { Hero } from './hero';

export var HEROES: Hero[] = [
  { id: 40, isSecret: false, name: 'Mr. Nice' },
  { id: 41, isSecret: false, name: 'Narco' },
  { id: 42, isSecret: false, name: 'Bombasto' },
  { id: 43, isSecret: false, name: 'Celeritas' },
  { id: 44, isSecret: false, name: 'Magneta' },
  { id: 45, isSecret: false, name: 'RubberMan' },
  { id: 46, isSecret: false, name: 'Dynama' },
  { id: 47, isSecret: true, name: 'Dr IQ' },
  { id: 48, isSecret: true, name: 'Magma' },
  { id: 49, isSecret: true, name: 'Tornado' }
];