file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
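Each row below is one fill-in-the-middle (FIM) record: a source file is cut into a prefix, a removed middle span, and the remaining suffix, and fim_type names which of the four split strategies produced the cut (random_line_split, conditional_block, identifier_name, identifier_body). As a minimal sketch, assuming each record is a plain dict with exactly the five columns listed in the header (the <fim_*> sentinel tokens below are illustrative placeholders, not defined by this dataset), a record can be reassembled and turned into a FIM-style training string like this:

```python
# Minimal sketch: reassemble a FIM record and build a prompt-style training string.
# Assumption: each record is a dict with the five columns listed in the header above.
# The <fim_*> sentinel tokens are illustrative placeholders, not part of this dataset.

def reassemble(record: dict) -> str:
    """prefix + middle + suffix should give back the original file contents."""
    return record["prefix"] + record["middle"] + record["suffix"]

def to_fim_prompt(record: dict) -> str:
    """Prefix and suffix form the visible context; the model learns to emit the middle."""
    return (
        "<fim_prefix>" + record["prefix"]
        + "<fim_suffix>" + record["suffix"]
        + "<fim_middle>" + record["middle"]
    )

# Tiny hypothetical record in the shape of the rows below.
example = {
    "file_name": "draft.py",
    "prefix": "def as_draft(location):\n    ",
    "middle": "return Location(location).replace(revision=DRAFT)",
    "suffix": "\n",
    "fim_type": "random_line_split",
}

print(reassemble(example))
print(to_fim_prompt(example))
```

The prefix-then-suffix-then-middle ordering in the sketch mirrors common FIM prompt formatting; the table columns themselves appear in the order file_name, prefix, suffix, middle, fim_type.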
draft.py
|
from datetime import datetime
from xmodule.modulestore import Location, namedtuple_to_son
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.inheritance import own_metadata
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore.mongo.base import MongoModuleStore
from pytz import UTC
DRAFT = 'draft'
# Things w/ these categories should never be marked as version='draft'
DIRECT_ONLY_CATEGORIES = ['course', 'chapter', 'sequential', 'about', 'static_tab', 'course_info']
def as_draft(location):
"""
Returns the Location that is the draft for `location`
"""
return Location(location).replace(revision=DRAFT)
def as_published(location):
"""
Returns the Location that is the published version for `location`
"""
return Location(location).replace(revision=None)
def wrap_draft(item):
"""
Sets `item.is_draft` to `True` if the item is a
draft, and `False` otherwise. Sets the item's location to the
non-draft location in either case
"""
setattr(item, 'is_draft', item.location.revision == DRAFT)
item.location = item.location.replace(revision=None)
return item
class DraftModuleStore(MongoModuleStore):
"""
This mixin modifies a modulestore to give it draft semantics.
That is, edits made to units are stored to locations that have the revision DRAFT,
and when reads are made, they first read with revision DRAFT, and then fall back
to the baseline revision only if DRAFT doesn't exist.
This module also includes functionality to promote DRAFT modules (and optionally
their children) to published modules.
"""
def get_item(self, location, depth=0):
"""
Returns an XModuleDescriptor instance for the item at location.
If location.revision is None, returns the item with the most
recent revision
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
location: Something that can be passed to Location
depth (int): An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
get_children() to cache. None indicates to cache all descendents
"""
try:
return wrap_draft(super(DraftModuleStore, self).get_item(as_draft(location), depth=depth))
except ItemNotFoundError:
return wrap_draft(super(DraftModuleStore, self).get_item(location, depth=depth))
def get_instance(self, course_id, location, depth=0):
"""
Get an instance of this location, with policy for course_id applied.
TODO (vshnayder): this may want to live outside the modulestore eventually
"""
try:
return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, as_draft(location), depth=depth))
except ItemNotFoundError:
return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, location, depth=depth))
def get_items(self, location, course_id=None, depth=0):
"""
Returns a list of XModuleDescriptor instances for the items
that match location. Any element of location that is None is treated
as a wildcard that matches any value
location: Something that can be passed to Location
depth: An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
get_children() to cache. None indicates to cache all descendents
"""
draft_loc = as_draft(location)
draft_items = super(DraftModuleStore, self).get_items(draft_loc, course_id=course_id, depth=depth)
items = super(DraftModuleStore, self).get_items(location, course_id=course_id, depth=depth)
draft_locs_found = set(item.location.replace(revision=None) for item in draft_items)
non_draft_items = [
item
for item in items
if (item.location.revision != DRAFT
and item.location.replace(revision=None) not in draft_locs_found)
]
return [wrap_draft(item) for item in draft_items + non_draft_items]
def clone_item(self, source, location):
"""
Clone a new item that is a copy of the item at the location `source`
and writes it to `location`
"""
if Location(location).category in DIRECT_ONLY_CATEGORIES:
raise InvalidVersionError(location)
return wrap_draft(super(DraftModuleStore, self).clone_item(source, as_draft(location)))
def update_item(self, location, data, allow_not_found=False):
"""
Set the data in the item specified by the location to
data
location: Something that can be passed to Location
data: A nested dictionary of problem data
"""
draft_loc = as_draft(location)
try:
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
except ItemNotFoundError, e:
if not allow_not_found:
raise e
return super(DraftModuleStore, self).update_item(draft_loc, data)
def update_children(self, location, children):
"""
Set the children for the item specified by the location to
children
location: Something that can be passed to Location
children: A list of child item identifiers
"""
draft_loc = as_draft(location)
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
return super(DraftModuleStore, self).update_children(draft_loc, children)
def update_metadata(self, location, metadata):
"""
Set the metadata for the item specified by the location to
metadata
location: Something that can be passed to Location
|
metadata: A nested dictionary of module metadata
"""
draft_loc = as_draft(location)
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
if 'is_draft' in metadata:
del metadata['is_draft']
return super(DraftModuleStore, self).update_metadata(draft_loc, metadata)
def delete_item(self, location, delete_all_versions=False):
"""
Delete an item from this modulestore
location: Something that can be passed to Location
"""
super(DraftModuleStore, self).delete_item(as_draft(location))
if delete_all_versions:
super(DraftModuleStore, self).delete_item(as_published(location))
return
def get_parent_locations(self, location, course_id):
'''Find all locations that are the parents of this location. Needed
for path_to_location().
returns an iterable of things that can be passed to Location.
'''
return super(DraftModuleStore, self).get_parent_locations(location, course_id)
def publish(self, location, published_by_id):
"""
Save a current draft to the underlying modulestore
"""
draft = self.get_item(location)
draft.cms.published_date = datetime.now(UTC)
draft.cms.published_by = published_by_id
super(DraftModuleStore, self).update_item(location, draft._model_data._kvs._data)
super(DraftModuleStore, self).update_children(location, draft._model_data._kvs._children)
super(DraftModuleStore, self).update_metadata(location, own_metadata(draft))
self.delete_item(location)
def unpublish(self, location):
"""
Turn the published version into a draft, removing the published version
"""
if Location(location).category in DIRECT_ONLY_CATEGORIES:
raise InvalidVersionError(location)
super(DraftModuleStore, self).clone_item(location, as_draft(location))
super(DraftModuleStore, self).delete_item(location)
def _query_children_for_cache_children(self, items):
# first get non-draft in a round-trip
queried_children = []
to_process_non_drafts = super(DraftModuleStore, self)._query_children_for_cache_children(items)
to_process_dict = {}
for non_draft in to_process_non_drafts:
to_process_dict[Location(non_draft["_id"])] = non_draft
# now query all draft content in another round-trip
query = {
'_id': {'$in': [namedtuple_to_son(as_draft(Location(item))) for item in items]}
}
to_process_drafts = list(self.collection.find(query))
# now we have to go through all drafts and replace the non-draft
# with the draft. This is because the semantics of the DraftStore is to
# always return the draft - if available
for draft in to_process_drafts:
draft_loc = Location(draft["_id"])
draft_as_non_draft_loc = draft_loc.replace(revision=None)
# does non-draft exist in the collection
# if so, replace it
if draft_as_non_draft_loc in to_process_dict:
to_process_dict[draft_as_non_draft_loc] = draft
# convert the dict - which is used for look ups - back into a list
for key, value in to_process_dict.iteritems():
queried_children.append(value)
return queried_children
|
random_line_split
|
|
inventoryState.ts
|
import { ActionChartItem, ActionChart, Item } from "..";
import { mechanicsEngine } from "../controller/mechanics/mechanicsEngine";
/**
* Inventory state at one point
*/
export class InventoryState {
public weapons: ActionChartItem[] = [];
public hasBackpack: boolean = false;
public backpackItems: ActionChartItem[] = [];
public specialItems: ActionChartItem[] = [];
public beltPouch: number = 0;
public arrows: number = 0;
public meals: number = 0;
/**
* Create an inventory state from the current action chart
* @param objectTypes Kind of objects to get: 'all' = all, 'weaponlike' = weapons and weapon Special Objects,
* 'allobjects' = weapons, special items and backpack items
*/
public static fromActionChart(objectTypes: string, actionChart: ActionChart): InventoryState {
const objects = new InventoryState();
if (objectTypes === "all" || objectTypes === "allobjects") {
objects.weapons = actionChart.weapons.deepClone();
objects.backpackItems = actionChart.backpackItems.deepClone();
objects.specialItems = actionChart.specialItems.deepClone();
objects.arrows = actionChart.arrows;
objects.meals = actionChart.meals;
if (objectTypes === "all") {
objects.hasBackpack = actionChart.hasBackpack;
objects.beltPouch = actionChart.beltPouch;
}
} else if (objectTypes === "weaponlike") {
for (const w of actionChart.getWeaponAChartItems(false)) {
objects.addItem(w.clone());
}
} else
|
return objects;
}
private addItem(aChartItem: ActionChartItem) {
const item = aChartItem.getItem();
if (!item) {
return;
}
if (item.type === Item.WEAPON) {
this.weapons.push(aChartItem);
} else if (item.type === Item.SPECIAL) {
this.specialItems.push(aChartItem);
} else if (item.type === Item.OBJECT) {
this.backpackItems.push(aChartItem);
}
}
public addItemsArray(items: ActionChartItem[]) {
for (const item of items) {
this.addItem(item.clone());
}
}
/**
* Append another inventory state to this one
* @param s2 The state to append to this one
*/
public addInventoryToThis(s2: InventoryState) {
this.weapons = this.weapons.concat(s2.weapons);
this.hasBackpack = this.hasBackpack || s2.hasBackpack;
this.backpackItems = this.backpackItems.concat(s2.backpackItems);
this.specialItems = this.specialItems.concat(s2.specialItems);
this.beltPouch = this.beltPouch + s2.beltPouch;
this.arrows = this.arrows + s2.arrows;
this.meals = this.meals + s2.meals;
}
/**
* Get the special items in this state that are not weapons, remove them from the state, and return them
* @returns The special items in this state that were not weapons
*/
public getAndRemoveSpecialItemsNonWeapon(): ActionChartItem[] {
// Recover only non-weapon special items
const toRecover: ActionChartItem[] = [];
for (const aChartItem of this.specialItems) {
const i = aChartItem.getItem();
if (i && !i.isWeapon()) {
toRecover.push(aChartItem);
}
}
// Remove recovered items
for (const aChartItem of toRecover) {
this.specialItems.removeValue(aChartItem);
}
return toRecover;
}
/**
* Create a inventory state from an object
* @param object The inventory state object. It must have the same properties as InventoryState
*/
public static fromObject(object: any): InventoryState {
if (!object) {
return new InventoryState();
}
const inventoryState: InventoryState = $.extend(new InventoryState(), object);
// Convert objects to ActionChartItem:
inventoryState.weapons = ActionChartItem.fromObjectsArray(inventoryState.weapons);
inventoryState.backpackItems = ActionChartItem.fromObjectsArray(inventoryState.backpackItems);
inventoryState.specialItems = ActionChartItem.fromObjectsArray(inventoryState.specialItems);
return inventoryState;
}
/** Return a plain object with this instance info. */
public toObject(): any {
return {
weapons: this.weapons,
hasBackpack: this.hasBackpack,
backpackItems: this.backpackItems,
specialItems: this.specialItems,
beltPouch: this.beltPouch,
arrows: this.arrows,
meals: this.meals
};
}
}
|
{
const msg = "Wrong objectTypes: " + objectTypes;
mechanicsEngine.debugWarning(msg);
throw msg;
}
|
conditional_block
|
inventoryState.ts
|
import { ActionChartItem, ActionChart, Item } from "..";
import { mechanicsEngine } from "../controller/mechanics/mechanicsEngine";
/**
* Inventory state at one point
*/
export class InventoryState {
public weapons: ActionChartItem[] = [];
public hasBackpack: boolean = false;
public backpackItems: ActionChartItem[] = [];
public specialItems: ActionChartItem[] = [];
public beltPouch: number = 0;
public arrows: number = 0;
public meals: number = 0;
/**
* Create an inventory state from the current action chart
* @param objectTypes Kind of objects to get: 'all' = all, 'weaponlike' = weapons and weapon Special Objects,
* 'allobjects' = weapons, special items and backpack items
*/
public static fromActionChart(objectTypes: string, actionChart: ActionChart): InventoryState {
const objects = new InventoryState();
if (objectTypes === "all" || objectTypes === "allobjects") {
objects.weapons = actionChart.weapons.deepClone();
objects.backpackItems = actionChart.backpackItems.deepClone();
objects.specialItems = actionChart.specialItems.deepClone();
objects.arrows = actionChart.arrows;
objects.meals = actionChart.meals;
if (objectTypes === "all") {
objects.hasBackpack = actionChart.hasBackpack;
objects.beltPouch = actionChart.beltPouch;
}
} else if (objectTypes === "weaponlike") {
for (const w of actionChart.getWeaponAChartItems(false)) {
objects.addItem(w.clone());
}
} else {
const msg = "Wrong objectTypes: " + objectTypes;
mechanicsEngine.debugWarning(msg);
throw msg;
}
return objects;
}
private addItem(aChartItem: ActionChartItem) {
const item = aChartItem.getItem();
if (!item) {
return;
}
if (item.type === Item.WEAPON) {
this.weapons.push(aChartItem);
} else if (item.type === Item.SPECIAL) {
this.specialItems.push(aChartItem);
} else if (item.type === Item.OBJECT) {
this.backpackItems.push(aChartItem);
}
}
public
|
(items: ActionChartItem[]) {
for (const item of items) {
this.addItem(item.clone());
}
}
/**
* Append another inventory state to this one
* @param s2 The state to append to this one
*/
public addInventoryToThis(s2: InventoryState) {
this.weapons = this.weapons.concat(s2.weapons);
this.hasBackpack = this.hasBackpack || s2.hasBackpack;
this.backpackItems = this.backpackItems.concat(s2.backpackItems);
this.specialItems = this.specialItems.concat(s2.specialItems);
this.beltPouch = this.beltPouch + s2.beltPouch;
this.arrows = this.arrows + s2.arrows;
this.meals = this.meals + s2.meals;
}
/**
* Get the special items in this state that are not weapons, remove them from the state, and return them
* @returns The special items in this state that were not weapons
*/
public getAndRemoveSpecialItemsNonWeapon(): ActionChartItem[] {
// Recover only non-weapon special items
const toRecover: ActionChartItem[] = [];
for (const aChartItem of this.specialItems) {
const i = aChartItem.getItem();
if (i && !i.isWeapon()) {
toRecover.push(aChartItem);
}
}
// Remove recovered items
for (const aChartItem of toRecover) {
this.specialItems.removeValue(aChartItem);
}
return toRecover;
}
/**
* Create a inventory state from an object
* @param object The inventory state object. It must have the same properties as InventoryState
*/
public static fromObject(object: any): InventoryState {
if (!object) {
return new InventoryState();
}
const inventoryState: InventoryState = $.extend(new InventoryState(), object);
// Convert objects to ActionChartItem:
inventoryState.weapons = ActionChartItem.fromObjectsArray(inventoryState.weapons);
inventoryState.backpackItems = ActionChartItem.fromObjectsArray(inventoryState.backpackItems);
inventoryState.specialItems = ActionChartItem.fromObjectsArray(inventoryState.specialItems);
return inventoryState;
}
/** Return a plain object with this instance info. */
public toObject(): any {
return {
weapons: this.weapons,
hasBackpack: this.hasBackpack,
backpackItems: this.backpackItems,
specialItems: this.specialItems,
beltPouch: this.beltPouch,
arrows: this.arrows,
meals: this.meals
};
}
}
|
addItemsArray
|
identifier_name
|
inventoryState.ts
|
import { ActionChartItem, ActionChart, Item } from "..";
import { mechanicsEngine } from "../controller/mechanics/mechanicsEngine";
/**
* Inventory state at one point
*/
export class InventoryState {
public weapons: ActionChartItem[] = [];
public hasBackpack: boolean = false;
public backpackItems: ActionChartItem[] = [];
public specialItems: ActionChartItem[] = [];
public beltPouch: number = 0;
public arrows: number = 0;
public meals: number = 0;
/**
* Create an inventory state from the current action chart
* @param objectTypes Kind of objects to get: 'all' = all, 'weaponlike' = weapons and weapon Special Objects,
* 'allobjects' = weapons, special items and backpack items
*/
public static fromActionChart(objectTypes: string, actionChart: ActionChart): InventoryState {
const objects = new InventoryState();
if (objectTypes === "all" || objectTypes === "allobjects") {
objects.weapons = actionChart.weapons.deepClone();
objects.backpackItems = actionChart.backpackItems.deepClone();
objects.specialItems = actionChart.specialItems.deepClone();
objects.arrows = actionChart.arrows;
objects.meals = actionChart.meals;
if (objectTypes === "all") {
objects.hasBackpack = actionChart.hasBackpack;
objects.beltPouch = actionChart.beltPouch;
}
} else if (objectTypes === "weaponlike") {
for (const w of actionChart.getWeaponAChartItems(false)) {
objects.addItem(w.clone());
}
} else {
const msg = "Wrong objectTypes: " + objectTypes;
mechanicsEngine.debugWarning(msg);
throw msg;
}
return objects;
}
private addItem(aChartItem: ActionChartItem) {
const item = aChartItem.getItem();
if (!item) {
return;
}
if (item.type === Item.WEAPON) {
this.weapons.push(aChartItem);
} else if (item.type === Item.SPECIAL) {
this.specialItems.push(aChartItem);
} else if (item.type === Item.OBJECT) {
this.backpackItems.push(aChartItem);
}
}
public addItemsArray(items: ActionChartItem[]) {
for (const item of items) {
this.addItem(item.clone());
}
}
/**
* Append another inventory state to this one
* @param s2 The state to append to this one
*/
public addInventoryToThis(s2: InventoryState) {
this.weapons = this.weapons.concat(s2.weapons);
this.hasBackpack = this.hasBackpack || s2.hasBackpack;
this.backpackItems = this.backpackItems.concat(s2.backpackItems);
this.specialItems = this.specialItems.concat(s2.specialItems);
this.beltPouch = this.beltPouch + s2.beltPouch;
this.arrows = this.arrows + s2.arrows;
this.meals = this.meals + s2.meals;
}
/**
* Get the special items in this state that are not weapons, remove them from the state, and return them
* @returns The special items in this state that were not weapons
*/
public getAndRemoveSpecialItemsNonWeapon(): ActionChartItem[] {
// Recover only non-weapon special items
const toRecover: ActionChartItem[] = [];
for (const aChartItem of this.specialItems) {
const i = aChartItem.getItem();
if (i && !i.isWeapon()) {
toRecover.push(aChartItem);
}
}
// Remove recovered items
for (const aChartItem of toRecover) {
this.specialItems.removeValue(aChartItem);
}
return toRecover;
}
/**
* Create a inventory state from an object
* @param object The inventory state object. It must have the same properties as InventoryState
*/
public static fromObject(object: any): InventoryState {
if (!object) {
return new InventoryState();
}
const inventoryState: InventoryState = $.extend(new InventoryState(), object);
// Convert objects to ActionChartItem:
inventoryState.weapons = ActionChartItem.fromObjectsArray(inventoryState.weapons);
inventoryState.backpackItems = ActionChartItem.fromObjectsArray(inventoryState.backpackItems);
inventoryState.specialItems = ActionChartItem.fromObjectsArray(inventoryState.specialItems);
return inventoryState;
}
/** Return a plain object with this instance info. */
public toObject(): any
|
}
|
{
return {
weapons: this.weapons,
hasBackpack: this.hasBackpack,
backpackItems: this.backpackItems,
specialItems: this.specialItems,
beltPouch: this.beltPouch,
arrows: this.arrows,
meals: this.meals
};
}
|
identifier_body
|
inventoryState.ts
|
import { ActionChartItem, ActionChart, Item } from "..";
import { mechanicsEngine } from "../controller/mechanics/mechanicsEngine";
/**
* Inventory state at one point
*/
export class InventoryState {
public weapons: ActionChartItem[] = [];
public hasBackpack: boolean = false;
public backpackItems: ActionChartItem[] = [];
public specialItems: ActionChartItem[] = [];
public beltPouch: number = 0;
public arrows: number = 0;
public meals: number = 0;
/**
* Create an inventory state from the current action chart
* @param objectTypes Kind of objects to get: 'all' = all, 'weaponlike' = weapons and weapon Special Objects,
* 'allobjects' = weapons, special items and backpack items
*/
public static fromActionChart(objectTypes: string, actionChart: ActionChart): InventoryState {
const objects = new InventoryState();
if (objectTypes === "all" || objectTypes === "allobjects") {
objects.weapons = actionChart.weapons.deepClone();
objects.backpackItems = actionChart.backpackItems.deepClone();
objects.specialItems = actionChart.specialItems.deepClone();
objects.arrows = actionChart.arrows;
objects.meals = actionChart.meals;
if (objectTypes === "all") {
objects.hasBackpack = actionChart.hasBackpack;
objects.beltPouch = actionChart.beltPouch;
}
} else if (objectTypes === "weaponlike") {
for (const w of actionChart.getWeaponAChartItems(false)) {
objects.addItem(w.clone());
}
} else {
const msg = "Wrong objectTypes: " + objectTypes;
mechanicsEngine.debugWarning(msg);
throw msg;
}
return objects;
}
private addItem(aChartItem: ActionChartItem) {
const item = aChartItem.getItem();
if (!item) {
return;
}
if (item.type === Item.WEAPON) {
this.weapons.push(aChartItem);
} else if (item.type === Item.SPECIAL) {
this.specialItems.push(aChartItem);
} else if (item.type === Item.OBJECT) {
this.backpackItems.push(aChartItem);
}
}
public addItemsArray(items: ActionChartItem[]) {
for (const item of items) {
this.addItem(item.clone());
}
}
/**
* Append another inventory state to this one
* @param s2 The state to append to this one
*/
public addInventoryToThis(s2: InventoryState) {
this.weapons = this.weapons.concat(s2.weapons);
this.hasBackpack = this.hasBackpack || s2.hasBackpack;
this.backpackItems = this.backpackItems.concat(s2.backpackItems);
this.specialItems = this.specialItems.concat(s2.specialItems);
this.beltPouch = this.beltPouch + s2.beltPouch;
this.arrows = this.arrows + s2.arrows;
this.meals = this.meals + s2.meals;
}
/**
* Get the special items in this state that are not weapons, remove them from the state, and return them
* @returns The special items in this state that were not weapons
*/
public getAndRemoveSpecialItemsNonWeapon(): ActionChartItem[] {
// Recover only non-weapon special items
const toRecover: ActionChartItem[] = [];
for (const aChartItem of this.specialItems) {
const i = aChartItem.getItem();
if (i && !i.isWeapon()) {
toRecover.push(aChartItem);
}
}
// Remove recovered items
for (const aChartItem of toRecover) {
this.specialItems.removeValue(aChartItem);
}
|
return toRecover;
}
/**
* Create a inventory state from an object
* @param object The inventory state object. It must have the same properties as InventoryState
*/
public static fromObject(object: any): InventoryState {
if (!object) {
return new InventoryState();
}
const inventoryState: InventoryState = $.extend(new InventoryState(), object);
// Convert objects to ActionChartItem:
inventoryState.weapons = ActionChartItem.fromObjectsArray(inventoryState.weapons);
inventoryState.backpackItems = ActionChartItem.fromObjectsArray(inventoryState.backpackItems);
inventoryState.specialItems = ActionChartItem.fromObjectsArray(inventoryState.specialItems);
return inventoryState;
}
/** Return a plain object with this instance info. */
public toObject(): any {
return {
weapons: this.weapons,
hasBackpack: this.hasBackpack,
backpackItems: this.backpackItems,
specialItems: this.specialItems,
beltPouch: this.beltPouch,
arrows: this.arrows,
meals: this.meals
};
}
}
|
random_line_split
|
|
users.rs
|
#![crate_name = "users"]
/*
* This file is part of the uutils coreutils package.
*
* (c) KokaKiwi <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.22 */
// Allow dead code here in order to keep all fields, constants here, for consistency.
#![allow(dead_code)]
extern crate getopts;
extern crate libc;
use getopts::Options;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use utmpx::*;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
#[path = "../common/utmpx.rs"]
mod utmpx;
extern {
fn getutxent() -> *const c_utmp;
fn getutxid(ut: *const c_utmp) -> *const c_utmp;
fn getutxline(ut: *const c_utmp) -> *const c_utmp;
fn pututxline(ut: *const c_utmp) -> *const c_utmp;
fn setutxent();
fn endutxent();
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn utmpxname(file: *const libc::c_char) -> libc::c_int;
}
#[cfg(target_os = "freebsd")]
unsafe extern fn utmpxname(_file: *const libc::c_char) -> libc::c_int {
0
}
static NAME: &'static str = "users";
static VERSION: &'static str = "1.0.0";
pub fn uumain(args: Vec<String>) -> i32
|
fn exec(filename: &str) {
unsafe {
utmpxname(CString::new(filename).unwrap().as_ptr());
}
let mut users = vec!();
unsafe {
setutxent();
loop {
let line = getutxent();
if line == ptr::null() {
break;
}
if (*line).ut_type == USER_PROCESS {
let user = String::from_utf8_lossy(CStr::from_ptr(mem::transmute(&(*line).ut_user)).to_bytes()).to_string();
users.push(user);
}
}
endutxent();
}
if users.len() > 0 {
users.sort();
println!("{}", users.connect(" "));
}
}
|
{
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("help") {
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage:");
println!(" {} [OPTION]... [FILE]", NAME);
println!("");
println!("{}", opts.usage("Output who is currently logged in according to FILE."));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let filename = if matches.free.len() > 0 {
matches.free[0].as_ref()
} else {
DEFAULT_FILE
};
exec(filename);
0
}
|
identifier_body
|
users.rs
|
#![crate_name = "users"]
/*
* This file is part of the uutils coreutils package.
*
* (c) KokaKiwi <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.22 */
// Allow dead code here in order to keep all fields, constants here, for consistency.
#![allow(dead_code)]
extern crate getopts;
extern crate libc;
use getopts::Options;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use utmpx::*;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
#[path = "../common/utmpx.rs"]
mod utmpx;
extern {
fn getutxent() -> *const c_utmp;
fn getutxid(ut: *const c_utmp) -> *const c_utmp;
fn getutxline(ut: *const c_utmp) -> *const c_utmp;
fn pututxline(ut: *const c_utmp) -> *const c_utmp;
fn setutxent();
fn endutxent();
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn utmpxname(file: *const libc::c_char) -> libc::c_int;
}
#[cfg(target_os = "freebsd")]
unsafe extern fn utmpxname(_file: *const libc::c_char) -> libc::c_int {
0
}
static NAME: &'static str = "users";
static VERSION: &'static str = "1.0.0";
pub fn
|
(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("help") {
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage:");
println!(" {} [OPTION]... [FILE]", NAME);
println!("");
println!("{}", opts.usage("Output who is currently logged in according to FILE."));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let filename = if matches.free.len() > 0 {
matches.free[0].as_ref()
} else {
DEFAULT_FILE
};
exec(filename);
0
}
fn exec(filename: &str) {
unsafe {
utmpxname(CString::new(filename).unwrap().as_ptr());
}
let mut users = vec!();
unsafe {
setutxent();
loop {
let line = getutxent();
if line == ptr::null() {
break;
}
if (*line).ut_type == USER_PROCESS {
let user = String::from_utf8_lossy(CStr::from_ptr(mem::transmute(&(*line).ut_user)).to_bytes()).to_string();
users.push(user);
}
}
endutxent();
}
if users.len() > 0 {
users.sort();
println!("{}", users.connect(" "));
}
}
|
uumain
|
identifier_name
|
users.rs
|
#![crate_name = "users"]
/*
* This file is part of the uutils coreutils package.
*
* (c) KokaKiwi <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.22 */
// Allow dead code here in order to keep all fields, constants here, for consistency.
#![allow(dead_code)]
extern crate getopts;
extern crate libc;
use getopts::Options;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use utmpx::*;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
#[path = "../common/utmpx.rs"]
mod utmpx;
extern {
fn getutxent() -> *const c_utmp;
fn getutxid(ut: *const c_utmp) -> *const c_utmp;
fn getutxline(ut: *const c_utmp) -> *const c_utmp;
fn pututxline(ut: *const c_utmp) -> *const c_utmp;
fn setutxent();
fn endutxent();
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn utmpxname(file: *const libc::c_char) -> libc::c_int;
}
#[cfg(target_os = "freebsd")]
unsafe extern fn utmpxname(_file: *const libc::c_char) -> libc::c_int {
0
}
static NAME: &'static str = "users";
static VERSION: &'static str = "1.0.0";
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("help") {
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage:");
println!(" {} [OPTION]... [FILE]", NAME);
println!("");
println!("{}", opts.usage("Output who is currently logged in according to FILE."));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let filename = if matches.free.len() > 0 {
matches.free[0].as_ref()
} else {
DEFAULT_FILE
};
exec(filename);
0
}
fn exec(filename: &str) {
unsafe {
utmpxname(CString::new(filename).unwrap().as_ptr());
}
let mut users = vec!();
unsafe {
setutxent();
loop {
let line = getutxent();
if line == ptr::null()
|
if (*line).ut_type == USER_PROCESS {
let user = String::from_utf8_lossy(CStr::from_ptr(mem::transmute(&(*line).ut_user)).to_bytes()).to_string();
users.push(user);
}
}
endutxent();
}
if users.len() > 0 {
users.sort();
println!("{}", users.connect(" "));
}
}
|
{
break;
}
|
conditional_block
|
users.rs
|
#![crate_name = "users"]
/*
* This file is part of the uutils coreutils package.
*
* (c) KokaKiwi <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.22 */
// Allow dead code here in order to keep all fields, constants here, for consistency.
#![allow(dead_code)]
extern crate getopts;
extern crate libc;
use getopts::Options;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use utmpx::*;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
#[path = "../common/utmpx.rs"]
mod utmpx;
extern {
fn getutxent() -> *const c_utmp;
fn getutxid(ut: *const c_utmp) -> *const c_utmp;
fn getutxline(ut: *const c_utmp) -> *const c_utmp;
fn pututxline(ut: *const c_utmp) -> *const c_utmp;
fn setutxent();
fn endutxent();
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn utmpxname(file: *const libc::c_char) -> libc::c_int;
}
#[cfg(target_os = "freebsd")]
unsafe extern fn utmpxname(_file: *const libc::c_char) -> libc::c_int {
0
}
static NAME: &'static str = "users";
static VERSION: &'static str = "1.0.0";
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("help") {
|
println!("");
println!("Usage:");
println!(" {} [OPTION]... [FILE]", NAME);
println!("");
println!("{}", opts.usage("Output who is currently logged in according to FILE."));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let filename = if matches.free.len() > 0 {
matches.free[0].as_ref()
} else {
DEFAULT_FILE
};
exec(filename);
0
}
fn exec(filename: &str) {
unsafe {
utmpxname(CString::new(filename).unwrap().as_ptr());
}
let mut users = vec!();
unsafe {
setutxent();
loop {
let line = getutxent();
if line == ptr::null() {
break;
}
if (*line).ut_type == USER_PROCESS {
let user = String::from_utf8_lossy(CStr::from_ptr(mem::transmute(&(*line).ut_user)).to_bytes()).to_string();
users.push(user);
}
}
endutxent();
}
if users.len() > 0 {
users.sort();
println!("{}", users.connect(" "));
}
}
|
println!("{} {}", NAME, VERSION);
|
random_line_split
|
hello_triangle.rs
|
extern crate bootstrap_rs as bootstrap;
extern crate polygon;
use bootstrap::window::*;
use polygon::*;
use polygon::anchor::*;
use polygon::camera::*;
use polygon::math::*;
use polygon::mesh_instance::*;
use polygon::geometry::mesh::*;
static VERTEX_POSITIONS: [f32; 12] = [
-1.0, -1.0, 0.0, 1.0,
1.0, -1.0, 0.0, 1.0,
0.0, 1.0, 0.0, 1.0,
];
static INDICES: [u32; 3] = [0, 1, 2];
fn
|
() {
// Open a window and create the renderer instance.
let mut window = Window::new("Hello, Triangle!").unwrap();
let mut renderer = RendererBuilder::new(&window).build();
// Build a triangle mesh.
let mesh = MeshBuilder::new()
.set_position_data(Point::slice_from_f32_slice(&VERTEX_POSITIONS))
.set_indices(&INDICES)
.build()
.unwrap();
// Send the mesh to the GPU.
let gpu_mesh = renderer.register_mesh(&mesh);
// Create an anchor and register it with the renderer.
let anchor = Anchor::new();
let anchor_id = renderer.register_anchor(anchor);
// Setup the material for the mesh.
let mut material = renderer.default_material();
material.set_color("surface_color", Color::rgb(1.0, 0.0, 0.0));
// Create a mesh instance, attach it to the anchor, and register it.
let mut mesh_instance = MeshInstance::with_owned_material(gpu_mesh, material);
mesh_instance.set_anchor(anchor_id);
renderer.register_mesh_instance(mesh_instance);
// Create a camera and an anchor for it.
let mut camera_anchor = Anchor::new();
camera_anchor.set_position(Point::new(0.0, 0.0, 10.0));
let camera_anchor_id = renderer.register_anchor(camera_anchor);
let mut camera = Camera::default();
camera.set_anchor(camera_anchor_id);
renderer.register_camera(camera);
// Set ambient color to pure white so we don't need to worry about lighting.
renderer.set_ambient_light(Color::rgb(1.0, 1.0, 1.0));
'outer: loop {
while let Some(message) = window.next_message() {
match message {
Message::Close => break 'outer,
_ => {},
}
}
// Rotate the triangle slightly.
{
let anchor = renderer.get_anchor_mut(anchor_id).unwrap();
let orientation = anchor.orientation();
anchor.set_orientation(orientation + Orientation::from_eulers(0.0, 0.0, 0.0005));
}
// Render the mesh.
renderer.draw();
}
}
|
main
|
identifier_name
|
hello_triangle.rs
|
extern crate bootstrap_rs as bootstrap;
extern crate polygon;
use bootstrap::window::*;
use polygon::*;
use polygon::anchor::*;
use polygon::camera::*;
use polygon::math::*;
use polygon::mesh_instance::*;
use polygon::geometry::mesh::*;
static VERTEX_POSITIONS: [f32; 12] = [
-1.0, -1.0, 0.0, 1.0,
1.0, -1.0, 0.0, 1.0,
0.0, 1.0, 0.0, 1.0,
];
static INDICES: [u32; 3] = [0, 1, 2];
fn main() {
// Open a window and create the renderer instance.
let mut window = Window::new("Hello, Triangle!").unwrap();
let mut renderer = RendererBuilder::new(&window).build();
// Build a triangle mesh.
let mesh = MeshBuilder::new()
.set_position_data(Point::slice_from_f32_slice(&VERTEX_POSITIONS))
|
// Send the mesh to the GPU.
let gpu_mesh = renderer.register_mesh(&mesh);
// Create an anchor and register it with the renderer.
let anchor = Anchor::new();
let anchor_id = renderer.register_anchor(anchor);
// Setup the material for the mesh.
let mut material = renderer.default_material();
material.set_color("surface_color", Color::rgb(1.0, 0.0, 0.0));
// Create a mesh instance, attach it to the anchor, and register it.
let mut mesh_instance = MeshInstance::with_owned_material(gpu_mesh, material);
mesh_instance.set_anchor(anchor_id);
renderer.register_mesh_instance(mesh_instance);
// Create a camera and an anchor for it.
let mut camera_anchor = Anchor::new();
camera_anchor.set_position(Point::new(0.0, 0.0, 10.0));
let camera_anchor_id = renderer.register_anchor(camera_anchor);
let mut camera = Camera::default();
camera.set_anchor(camera_anchor_id);
renderer.register_camera(camera);
// Set ambient color to pure white so we don't need to worry about lighting.
renderer.set_ambient_light(Color::rgb(1.0, 1.0, 1.0));
'outer: loop {
while let Some(message) = window.next_message() {
match message {
Message::Close => break 'outer,
_ => {},
}
}
// Rotate the triangle slightly.
{
let anchor = renderer.get_anchor_mut(anchor_id).unwrap();
let orientation = anchor.orientation();
anchor.set_orientation(orientation + Orientation::from_eulers(0.0, 0.0, 0.0005));
}
// Render the mesh.
renderer.draw();
}
}
|
.set_indices(&INDICES)
.build()
.unwrap();
|
random_line_split
|
hello_triangle.rs
|
extern crate bootstrap_rs as bootstrap;
extern crate polygon;
use bootstrap::window::*;
use polygon::*;
use polygon::anchor::*;
use polygon::camera::*;
use polygon::math::*;
use polygon::mesh_instance::*;
use polygon::geometry::mesh::*;
static VERTEX_POSITIONS: [f32; 12] = [
-1.0, -1.0, 0.0, 1.0,
1.0, -1.0, 0.0, 1.0,
0.0, 1.0, 0.0, 1.0,
];
static INDICES: [u32; 3] = [0, 1, 2];
fn main()
|
{
// Open a window and create the renderer instance.
let mut window = Window::new("Hello, Triangle!").unwrap();
let mut renderer = RendererBuilder::new(&window).build();
// Build a triangle mesh.
let mesh = MeshBuilder::new()
.set_position_data(Point::slice_from_f32_slice(&VERTEX_POSITIONS))
.set_indices(&INDICES)
.build()
.unwrap();
// Send the mesh to the GPU.
let gpu_mesh = renderer.register_mesh(&mesh);
// Create an anchor and register it with the renderer.
let anchor = Anchor::new();
let anchor_id = renderer.register_anchor(anchor);
// Setup the material for the mesh.
let mut material = renderer.default_material();
material.set_color("surface_color", Color::rgb(1.0, 0.0, 0.0));
// Create a mesh instance, attach it to the anchor, and register it.
let mut mesh_instance = MeshInstance::with_owned_material(gpu_mesh, material);
mesh_instance.set_anchor(anchor_id);
renderer.register_mesh_instance(mesh_instance);
// Create a camera and an anchor for it.
let mut camera_anchor = Anchor::new();
camera_anchor.set_position(Point::new(0.0, 0.0, 10.0));
let camera_anchor_id = renderer.register_anchor(camera_anchor);
let mut camera = Camera::default();
camera.set_anchor(camera_anchor_id);
renderer.register_camera(camera);
// Set ambient color to pure white so we don't need to worry about lighting.
renderer.set_ambient_light(Color::rgb(1.0, 1.0, 1.0));
'outer: loop {
while let Some(message) = window.next_message() {
match message {
Message::Close => break 'outer,
_ => {},
}
}
// Rotate the triangle slightly.
{
let anchor = renderer.get_anchor_mut(anchor_id).unwrap();
let orientation = anchor.orientation();
anchor.set_orientation(orientation + Orientation::from_eulers(0.0, 0.0, 0.0005));
}
// Render the mesh.
renderer.draw();
}
}
|
identifier_body
|
|
TestRe.py
|
# -*- coding: utf-8 -*-
import re
test = '用户输入的字符串'
if re.match(r'用户', test):
print('ok')
else:
|
iled')
print('a b c'.split(' '))
print(re.split(r'\s*', 'a b c'))
print(re.split(r'[\s\,\;]+', 'a,b;; c d'))
m = re.match(r'^(\d{3})-(\d{3,8})$', '010-12345')
print(m.group(1))
m = re.match(r'^(\S+)@(\S+.com)$', '[email protected]')
print(m.group(2))
print(m.groups())
# <Tom Paris> tom@voyager .org
re_mail = re.compile(r'<(\S+)\s+(\S+)>\s+(\S+)@(\S+.org)')
print(re_mail.match('<Tom Paris> [email protected]').groups())
str = 'abcbacba'
# non-greed match
re = re.compile(r'a.*?a', re.S)
print(re.match(str).group())
|
print('fa
|
conditional_block
|
TestRe.py
|
# -*- coding: utf-8 -*-
import re
test = '用户输入的字符串'
if re.match(r'用户', test):
print('ok')
else:
print('failed')
print('a b c'.split(' '))
print(re.split(r'\s*', 'a b c'))
print(re.split(r'[\s\,\;]+', 'a,b;; c d'))
m = re.match(r'^(\d{3})-(\d{3,8})$', '010-12345')
print(m.group(1))
m = re.match(r'^(\S+)@(\S+.com)$', '[email protected]')
print(m.group(2))
print(m.groups())
# <Tom Paris> tom@voyager .org
re_mail = re.compile(r'<(\S+)\s+(\S+)>\s+(\S+)@(\S+.org)')
|
str = 'abcbacba'
# non-greed match
re = re.compile(r'a.*?a', re.S)
print(re.match(str).group())
|
print(re_mail.match('<Tom Paris> [email protected]').groups())
|
random_line_split
|
xml.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(unrooted_must_root)]
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::trace::JSTraceable;
use crate::dom::document::Document;
use crate::dom::htmlscriptelement::HTMLScriptElement;
use crate::dom::node::Node;
use crate::dom::servoparser::{ParsingAlgorithm, Sink};
use js::jsapi::JSTracer;
use servo_url::ServoUrl;
use xml5ever::buffer_queue::BufferQueue;
use xml5ever::tokenizer::XmlTokenizer;
use xml5ever::tree_builder::{Tracer as XmlTracer, XmlTreeBuilder};
#[derive(JSTraceable, MallocSizeOf)]
#[unrooted_must_root_lint::must_root]
pub struct Tokenizer {
#[ignore_malloc_size_of = "Defined in xml5ever"]
inner: XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>>,
}
impl Tokenizer {
pub fn new(document: &Document, url: ServoUrl) -> Self {
let sink = Sink {
base_url: url,
document: Dom::from_ref(document),
current_line: 1,
script: Default::default(),
parsing_algorithm: ParsingAlgorithm::Normal,
};
let tb = XmlTreeBuilder::new(sink, Default::default());
let tok = XmlTokenizer::new(tb, Default::default());
Tokenizer { inner: tok }
}
pub fn feed(&mut self, input: &mut BufferQueue) -> Result<(), DomRoot<HTMLScriptElement>> {
self.inner.run(input);
if let Some(script) = self.inner.sink.sink.script.take() {
return Err(script);
}
Ok(())
}
pub fn end(&mut self) {
self.inner.end()
}
pub fn url(&self) -> &ServoUrl
|
}
#[allow(unsafe_code)]
unsafe impl JSTraceable for XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>> {
unsafe fn trace(&self, trc: *mut JSTracer) {
struct Tracer(*mut JSTracer);
let tracer = Tracer(trc);
impl XmlTracer for Tracer {
type Handle = Dom<Node>;
#[allow(unrooted_must_root)]
fn trace_handle(&self, node: &Dom<Node>) {
unsafe {
node.trace(self.0);
}
}
}
let tree_builder = &self.sink;
tree_builder.trace_handles(&tracer);
tree_builder.sink.trace(trc);
}
}
|
{
&self.inner.sink.sink.base_url
}
|
identifier_body
|
xml.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(unrooted_must_root)]
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::trace::JSTraceable;
use crate::dom::document::Document;
use crate::dom::htmlscriptelement::HTMLScriptElement;
use crate::dom::node::Node;
use crate::dom::servoparser::{ParsingAlgorithm, Sink};
use js::jsapi::JSTracer;
use servo_url::ServoUrl;
use xml5ever::buffer_queue::BufferQueue;
use xml5ever::tokenizer::XmlTokenizer;
use xml5ever::tree_builder::{Tracer as XmlTracer, XmlTreeBuilder};
#[derive(JSTraceable, MallocSizeOf)]
#[unrooted_must_root_lint::must_root]
pub struct Tokenizer {
#[ignore_malloc_size_of = "Defined in xml5ever"]
inner: XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>>,
}
impl Tokenizer {
pub fn new(document: &Document, url: ServoUrl) -> Self {
let sink = Sink {
base_url: url,
document: Dom::from_ref(document),
current_line: 1,
script: Default::default(),
parsing_algorithm: ParsingAlgorithm::Normal,
};
let tb = XmlTreeBuilder::new(sink, Default::default());
let tok = XmlTokenizer::new(tb, Default::default());
Tokenizer { inner: tok }
}
pub fn feed(&mut self, input: &mut BufferQueue) -> Result<(), DomRoot<HTMLScriptElement>> {
self.inner.run(input);
if let Some(script) = self.inner.sink.sink.script.take()
|
Ok(())
}
pub fn end(&mut self) {
self.inner.end()
}
pub fn url(&self) -> &ServoUrl {
&self.inner.sink.sink.base_url
}
}
#[allow(unsafe_code)]
unsafe impl JSTraceable for XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>> {
unsafe fn trace(&self, trc: *mut JSTracer) {
struct Tracer(*mut JSTracer);
let tracer = Tracer(trc);
impl XmlTracer for Tracer {
type Handle = Dom<Node>;
#[allow(unrooted_must_root)]
fn trace_handle(&self, node: &Dom<Node>) {
unsafe {
node.trace(self.0);
}
}
}
let tree_builder = &self.sink;
tree_builder.trace_handles(&tracer);
tree_builder.sink.trace(trc);
}
}
|
{
return Err(script);
}
|
conditional_block
|
xml.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(unrooted_must_root)]
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::trace::JSTraceable;
use crate::dom::document::Document;
use crate::dom::htmlscriptelement::HTMLScriptElement;
use crate::dom::node::Node;
use crate::dom::servoparser::{ParsingAlgorithm, Sink};
use js::jsapi::JSTracer;
use servo_url::ServoUrl;
use xml5ever::buffer_queue::BufferQueue;
use xml5ever::tokenizer::XmlTokenizer;
use xml5ever::tree_builder::{Tracer as XmlTracer, XmlTreeBuilder};
#[derive(JSTraceable, MallocSizeOf)]
#[unrooted_must_root_lint::must_root]
pub struct Tokenizer {
#[ignore_malloc_size_of = "Defined in xml5ever"]
inner: XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>>,
}
impl Tokenizer {
pub fn new(document: &Document, url: ServoUrl) -> Self {
let sink = Sink {
base_url: url,
document: Dom::from_ref(document),
current_line: 1,
script: Default::default(),
parsing_algorithm: ParsingAlgorithm::Normal,
};
let tb = XmlTreeBuilder::new(sink, Default::default());
let tok = XmlTokenizer::new(tb, Default::default());
Tokenizer { inner: tok }
}
pub fn feed(&mut self, input: &mut BufferQueue) -> Result<(), DomRoot<HTMLScriptElement>> {
self.inner.run(input);
if let Some(script) = self.inner.sink.sink.script.take() {
return Err(script);
}
Ok(())
}
pub fn end(&mut self) {
self.inner.end()
}
pub fn url(&self) -> &ServoUrl {
&self.inner.sink.sink.base_url
}
}
#[allow(unsafe_code)]
unsafe impl JSTraceable for XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>> {
unsafe fn trace(&self, trc: *mut JSTracer) {
struct Tracer(*mut JSTracer);
let tracer = Tracer(trc);
impl XmlTracer for Tracer {
|
node.trace(self.0);
}
}
}
let tree_builder = &self.sink;
tree_builder.trace_handles(&tracer);
tree_builder.sink.trace(trc);
}
}
|
type Handle = Dom<Node>;
#[allow(unrooted_must_root)]
fn trace_handle(&self, node: &Dom<Node>) {
unsafe {
|
random_line_split
|
xml.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(unrooted_must_root)]
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::trace::JSTraceable;
use crate::dom::document::Document;
use crate::dom::htmlscriptelement::HTMLScriptElement;
use crate::dom::node::Node;
use crate::dom::servoparser::{ParsingAlgorithm, Sink};
use js::jsapi::JSTracer;
use servo_url::ServoUrl;
use xml5ever::buffer_queue::BufferQueue;
use xml5ever::tokenizer::XmlTokenizer;
use xml5ever::tree_builder::{Tracer as XmlTracer, XmlTreeBuilder};
#[derive(JSTraceable, MallocSizeOf)]
#[unrooted_must_root_lint::must_root]
pub struct Tokenizer {
#[ignore_malloc_size_of = "Defined in xml5ever"]
inner: XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>>,
}
impl Tokenizer {
pub fn new(document: &Document, url: ServoUrl) -> Self {
let sink = Sink {
base_url: url,
document: Dom::from_ref(document),
current_line: 1,
script: Default::default(),
parsing_algorithm: ParsingAlgorithm::Normal,
};
let tb = XmlTreeBuilder::new(sink, Default::default());
let tok = XmlTokenizer::new(tb, Default::default());
Tokenizer { inner: tok }
}
pub fn feed(&mut self, input: &mut BufferQueue) -> Result<(), DomRoot<HTMLScriptElement>> {
self.inner.run(input);
if let Some(script) = self.inner.sink.sink.script.take() {
return Err(script);
}
Ok(())
}
pub fn end(&mut self) {
self.inner.end()
}
pub fn url(&self) -> &ServoUrl {
&self.inner.sink.sink.base_url
}
}
#[allow(unsafe_code)]
unsafe impl JSTraceable for XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>> {
unsafe fn
|
(&self, trc: *mut JSTracer) {
struct Tracer(*mut JSTracer);
let tracer = Tracer(trc);
impl XmlTracer for Tracer {
type Handle = Dom<Node>;
#[allow(unrooted_must_root)]
fn trace_handle(&self, node: &Dom<Node>) {
unsafe {
node.trace(self.0);
}
}
}
let tree_builder = &self.sink;
tree_builder.trace_handles(&tracer);
tree_builder.sink.trace(trc);
}
}
|
trace
|
identifier_name
|
SqliteCustomFunctions.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
if __name__ == '__main__':
sys.path.append('../../')
import json
import logging
import sqlite3
from gchelpers.ip.GeoDbManager import GeoDbManager
from gchelpers.dt import DateTimeHandler
GEO_MANAGER = GeoDbManager()
def splitpath(path, n):
path_array = re.split('[\\\/]',path)
start_index = -(n+1)
# Check that path has enough elements
if abs(start_index) > len(path_array):
new_path = os.path.join(path_array[0],*path_array[1:])
else:
new_path = os.path.join(path_array[start_index],*path_array[start_index+1:])
return new_path
def RegisterSQLiteFunctions(dbh):
sqlite3.enable_callback_tracebacks(True)
dbh.create_function("REGEXP", 2, Regexp)
dbh.create_function('Basename',1,Basename)
dbh.create_function('BasenameN',2,BasenameN)
dbh.create_function("GetRegMatch", 3, GetRegMatch)
dbh.create_function("GetRegMatchArray", 3, GetRegMatchArray)
dbh.create_function("RemoveNewLines", 1, RemoveNewLines)
dbh.create_function("DtFormat", 2, DtFormat)
dbh.create_function("DtFormatTz", 4, DtFormatTz)
if GEO_MANAGER.DB_ATTACHED:
dbh.create_function("GetIpInfo", 1, GetIpInfo)
def DtFormatTz(dtstringin,newformat,current_tz_str,new_tz_str):
if dtstringin:
string_out = None
# Get object from in string
datetime_obj = DateTimeHandler.DatetimeFromString(
dtstringin
)
# Timezone Conversion
new_datetime_obj = DateTimeHandler.ConvertDatetimeTz(
datetime_obj,
current_tz_str,
new_tz_str
)
# Format object
string_out = DateTimeHandler.StringFromDatetime(
new_datetime_obj,
newformat
)
return string_out
return None
def DtFormat(dtstringin,newformat):
if dtstringin:
string_out = None
# Get object from in string
datetime_obj = DateTimeHandler.DatetimeFromString(
dtstringin
)
# Format object
string_out = DateTimeHandler.StringFromDatetime(
datetime_obj,
newformat
)
return string_out
return None
def Regexp(pattern,input):
if input is None:
return False
try:
if re.search(pattern, input):
return True
else:
return False
except Exception as error:
print(u'ERROR: {}'.format(str(error)))
return False
def Basename(fullname):
'''Get the base name of a fullname string'''
value = ''
if fullname:
try:
value = os.path.basename(fullname)
except:
value = filename
return value
def BasenameN(fullname,n):
'''Get the base name of a fullname string'''
value = ''
if fullname is None:
return None
value = splitpath(fullname,n)
return value
def GetIpInfo(ip_address):
if ip_address is None:
return None
geo = GEO_MANAGER
info = geo.GetIpInfo(ip_address)
return json.dumps(info)
def RemoveNewLines(input):
|
def GetRegMatch(input,group,pattern):
if input is None:
return None
match = re.search(pattern, input)
result = None
if match:
result = match.group(group)
return result
def GetRegMatchArray(input,group,pattern):
hits = []
if input is None:
return json.dumps(hits)
for result in re.finditer(pattern, input):
hits.append(result.group(group))
if len(hits) > 0:
return json.dumps(hits)
return json.dumps(hits)
def test1():
n = 2
fullname = "Partition 1\\TEST_P1 [NTFS]\\[root]\\testfolder002\\testfolder001\\testfile088.png"
splitname = splitpath(fullname,n)
print splitname
def test2():
n = 2
fullname = "testfolder001\\testfile088.png"
splitname = splitpath(fullname,n)
print splitname
if __name__ == '__main__':
test1()
test2()
|
if input is None:
return None
input = input.replace("\n", "")
input = input.replace("\r", "")
return input
|
identifier_body
|
SqliteCustomFunctions.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
if __name__ == '__main__':
sys.path.append('../../')
import json
import logging
import sqlite3
from gchelpers.ip.GeoDbManager import GeoDbManager
from gchelpers.dt import DateTimeHandler
GEO_MANAGER = GeoDbManager()
def splitpath(path, n):
path_array = re.split('[\\\/]',path)
start_index = -(n+1)
# Check that path has enough elements
if abs(start_index) > len(path_array):
new_path = os.path.join(path_array[0],*path_array[1:])
else:
new_path = os.path.join(path_array[start_index],*path_array[start_index+1:])
return new_path
def RegisterSQLiteFunctions(dbh):
sqlite3.enable_callback_tracebacks(True)
dbh.create_function("REGEXP", 2, Regexp)
dbh.create_function('Basename',1,Basename)
dbh.create_function('BasenameN',2,BasenameN)
dbh.create_function("GetRegMatch", 3, GetRegMatch)
dbh.create_function("GetRegMatchArray", 3, GetRegMatchArray)
dbh.create_function("RemoveNewLines", 1, RemoveNewLines)
dbh.create_function("DtFormat", 2, DtFormat)
dbh.create_function("DtFormatTz", 4, DtFormatTz)
if GEO_MANAGER.DB_ATTACHED:
dbh.create_function("GetIpInfo", 1, GetIpInfo)
def DtFormatTz(dtstringin,newformat,current_tz_str,new_tz_str):
if dtstringin:
string_out = None
# Get object from in string
datetime_obj = DateTimeHandler.DatetimeFromString(
dtstringin
)
# Timezone Conversion
new_datetime_obj = DateTimeHandler.ConvertDatetimeTz(
datetime_obj,
current_tz_str,
new_tz_str
)
# Format object
string_out = DateTimeHandler.StringFromDatetime(
new_datetime_obj,
newformat
)
return string_out
return None
def DtFormat(dtstringin,newformat):
if dtstringin:
string_out = None
# Get object from in string
datetime_obj = DateTimeHandler.DatetimeFromString(
dtstringin
)
# Format object
string_out = DateTimeHandler.StringFromDatetime(
datetime_obj,
newformat
)
return string_out
return None
def Regexp(pattern,input):
if input is None:
return False
try:
if re.search(pattern, input):
return True
else:
return False
except Exception as error:
print(u'ERROR: {}'.format(str(error)))
return False
def Basename(fullname):
'''Get the base name of a fullname string'''
value = ''
if fullname:
try:
value = os.path.basename(fullname)
except:
value = filename
return value
def BasenameN(fullname,n):
'''Get the base name of a fullname string'''
value = ''
if fullname is None:
return None
value = splitpath(fullname,n)
return value
def GetIpInfo(ip_address):
if ip_address is None:
return None
geo = GEO_MANAGER
info = geo.GetIpInfo(ip_address)
return json.dumps(info)
def RemoveNewLines(input):
if input is None:
return None
input = input.replace("\n", "")
input = input.replace("\r", "")
return input
def GetRegMatch(input,group,pattern):
if input is None:
return None
match = re.search(pattern, input)
result = None
if match:
result = match.group(group)
return result
def
|
(input,group,pattern):
hits = []
if input is None:
return json.dumps(hits)
for result in re.finditer(pattern, input):
hits.append(result.group(group))
if len(hits) > 0:
return json.dumps(hits)
return json.dumps(hits)
def test1():
n = 2
fullname = "Partition 1\\TEST_P1 [NTFS]\\[root]\\testfolder002\\testfolder001\\testfile088.png"
splitname = splitpath(fullname,n)
print splitname
def test2():
n = 2
fullname = "testfolder001\\testfile088.png"
splitname = splitpath(fullname,n)
print splitname
if __name__ == '__main__':
test1()
test2()
|
GetRegMatchArray
|
identifier_name
|
SqliteCustomFunctions.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
if __name__ == '__main__':
sys.path.append('../../')
import json
import logging
import sqlite3
from gchelpers.ip.GeoDbManager import GeoDbManager
from gchelpers.dt import DateTimeHandler
GEO_MANAGER = GeoDbManager()
def splitpath(path, n):
path_array = re.split('[\\\/]',path)
start_index = -(n+1)
# Check that path has enough elements
if abs(start_index) > len(path_array):
new_path = os.path.join(path_array[0],*path_array[1:])
else:
new_path = os.path.join(path_array[start_index],*path_array[start_index+1:])
return new_path
def RegisterSQLiteFunctions(dbh):
sqlite3.enable_callback_tracebacks(True)
dbh.create_function("REGEXP", 2, Regexp)
dbh.create_function('Basename',1,Basename)
dbh.create_function('BasenameN',2,BasenameN)
dbh.create_function("GetRegMatch", 3, GetRegMatch)
dbh.create_function("GetRegMatchArray", 3, GetRegMatchArray)
dbh.create_function("RemoveNewLines", 1, RemoveNewLines)
dbh.create_function("DtFormat", 2, DtFormat)
dbh.create_function("DtFormatTz", 4, DtFormatTz)
if GEO_MANAGER.DB_ATTACHED:
dbh.create_function("GetIpInfo", 1, GetIpInfo)
def DtFormatTz(dtstringin,newformat,current_tz_str,new_tz_str):
if dtstringin:
|
return None
def DtFormat(dtstringin,newformat):
if dtstringin:
string_out = None
# Get object from in string
datetime_obj = DateTimeHandler.DatetimeFromString(
dtstringin
)
# Format object
string_out = DateTimeHandler.StringFromDatetime(
datetime_obj,
newformat
)
return string_out
return None
def Regexp(pattern,input):
if input is None:
return False
try:
if re.search(pattern, input):
return True
else:
return False
except Exception as error:
print(u'ERROR: {}'.format(str(error)))
return False
def Basename(fullname):
'''Get the base name of a fullname string'''
value = ''
if fullname:
try:
value = os.path.basename(fullname)
except:
value = fullname
return value
def BasenameN(fullname,n):
'''Get the base name of a fullname string'''
value = ''
if fullname is None:
return None
value = splitpath(fullname,n)
return value
def GetIpInfo(ip_address):
if ip_address is None:
return None
geo = GEO_MANAGER
info = geo.GetIpInfo(ip_address)
return json.dumps(info)
def RemoveNewLines(input):
if input is None:
return None
input = input.replace("\n", "")
input = input.replace("\r", "")
return input
def GetRegMatch(input,group,pattern):
if input is None:
return None
match = re.search(pattern, input)
result = None
if match:
result = match.group(group)
return result
def GetRegMatchArray(input,group,pattern):
hits = []
if input is None:
return json.dumps(hits)
for result in re.finditer(pattern, input):
hits.append(result.group(group))
return json.dumps(hits)
def test1():
n = 2
fullname = "Partition 1\\TEST_P1 [NTFS]\\[root]\\testfolder002\\testfolder001\\testfile088.png"
splitname = splitpath(fullname,n)
print splitname
def test2():
n = 2
fullname = "testfolder001\\testfile088.png"
splitname = splitpath(fullname,n)
print splitname
if __name__ == '__main__':
test1()
test2()
|
string_out = None
# Get object from in string
datetime_obj = DateTimeHandler.DatetimeFromString(
dtstringin
)
# Timezone Conversion
new_datetime_obj = DateTimeHandler.ConvertDatetimeTz(
datetime_obj,
current_tz_str,
new_tz_str
)
# Format object
string_out = DateTimeHandler.StringFromDatetime(
new_datetime_obj,
newformat
)
return string_out
|
conditional_block
|
SqliteCustomFunctions.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
if __name__ == '__main__':
sys.path.append('../../')
import json
import logging
import sqlite3
from gchelpers.ip.GeoDbManager import GeoDbManager
from gchelpers.dt import DateTimeHandler
GEO_MANAGER = GeoDbManager()
def splitpath(path, n):
path_array = re.split('[\\\/]',path)
start_index = -(n+1)
# Check that path has enough elements
if abs(start_index) > len(path_array):
new_path = os.path.join(path_array[0],*path_array[1:])
else:
new_path = os.path.join(path_array[start_index],*path_array[start_index+1:])
return new_path
def RegisterSQLiteFunctions(dbh):
sqlite3.enable_callback_tracebacks(True)
dbh.create_function("REGEXP", 2, Regexp)
dbh.create_function('Basename',1,Basename)
dbh.create_function('BasenameN',2,BasenameN)
dbh.create_function("GetRegMatch", 3, GetRegMatch)
dbh.create_function("GetRegMatchArray", 3, GetRegMatchArray)
dbh.create_function("RemoveNewLines", 1, RemoveNewLines)
dbh.create_function("DtFormat", 2, DtFormat)
dbh.create_function("DtFormatTz", 4, DtFormatTz)
if GEO_MANAGER.DB_ATTACHED:
dbh.create_function("GetIpInfo", 1, GetIpInfo)
def DtFormatTz(dtstringin,newformat,current_tz_str,new_tz_str):
if dtstringin:
string_out = None
# Get object from in string
datetime_obj = DateTimeHandler.DatetimeFromString(
dtstringin
)
# Timezone Conversion
new_datetime_obj = DateTimeHandler.ConvertDatetimeTz(
datetime_obj,
current_tz_str,
new_tz_str
)
# Format object
string_out = DateTimeHandler.StringFromDatetime(
new_datetime_obj,
newformat
)
return string_out
return None
def DtFormat(dtstringin,newformat):
if dtstringin:
string_out = None
# Get object from in string
datetime_obj = DateTimeHandler.DatetimeFromString(
dtstringin
)
# Format object
string_out = DateTimeHandler.StringFromDatetime(
datetime_obj,
newformat
)
return string_out
return None
def Regexp(pattern,input):
if input is None:
return False
try:
if re.search(pattern, input):
return True
else:
return False
except Exception as error:
print(u'ERROR: {}'.format(str(error)))
return False
def Basename(fullname):
'''Get the base name of a fullname string'''
value = ''
if fullname:
try:
value = os.path.basename(fullname)
except:
value = fullname
return value
def BasenameN(fullname,n):
'''Get the base name of a fullname string'''
value = ''
if fullname is None:
return None
value = splitpath(fullname,n)
return value
def GetIpInfo(ip_address):
if ip_address is None:
return None
geo = GEO_MANAGER
info = geo.GetIpInfo(ip_address)
return json.dumps(info)
def RemoveNewLines(input):
if input is None:
return None
input = input.replace("\n", "")
input = input.replace("\r", "")
return input
def GetRegMatch(input,group,pattern):
if input is None:
return None
match = re.search(pattern, input)
result = None
if match:
result = match.group(group)
return result
def GetRegMatchArray(input,group,pattern):
hits = []
if input is None:
return json.dumps(hits)
for result in re.finditer(pattern, input):
hits.append(result.group(group))
return json.dumps(hits)
def test1():
n = 2
fullname = "Partition 1\\TEST_P1 [NTFS]\\[root]\\testfolder002\\testfolder001\\testfile088.png"
splitname = splitpath(fullname,n)
|
def test2():
n = 2
fullname = "testfolder001\\testfile088.png"
splitname = splitpath(fullname,n)
print splitname
if __name__ == '__main__':
test1()
test2()
|
print splitname
|
random_line_split
|
kana.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from jcconv import kata2hira, hira2kata
from itertools import chain
from printable import PrintableDict, PrintableList
__by_vowels = PrintableDict(**{
u'ア': u'ワラヤャマハナタサカアァ',
u'イ': u'リミヒニチシキイィ',
u'ウ': u'ルユュムフヌツスクウゥ',
u'エ': u'レメヘネテセケエェ',
u'オ': u'ヲロヨョモホノトソコオォ',
})
__to_dakuten = PrintableDict(**{
u'か': u'が',
u'き': u'ぎ',
u'く': u'ぐ',
u'け': u'げ',
u'こ': u'ご',
u'さ': u'ざ',
u'し': u'じ',
u'す': u'ず',
u'せ': u'ぜ',
u'そ': u'ぞ',
u'た': u'だ',
u'ち': u'ぢ',
u'つ': u'づ',
u'て': u'で',
u'と': u'ど',
u'は': u'ばぱ',
u'ひ': u'びぴ',
u'ふ': u'ぶぷ',
u'へ': u'べぺ',
u'ほ': u'ぼぽ',
})
__to_mini = PrintableDict(**{
u'く': u'っ',
u'つ': u'っ',
u'や': u'ゃ',
u'よ': u'ょ',
u'ゆ': u'ゅ',
u'わ': u'ゎ',
u'か': u'ゕ',
u'け': u'ゖ',
u'あ': u'ぁ',
u'い': u'ぃ',
u'う': u'ぅ',
u'え': u'ぇ',
u'お': u'ぉ',
})
EXTENDABLE_MINIS = (
u'つ',
u'く',
)
__by_dakuten = PrintableDict()
for vowel, letters in __to_dakuten.iteritems():
for letter in letters:
__by_dakuten[letter] = vowel
__to_vowels = PrintableDict()
for vowel, letters in __by_vowels.iteritems():
for letter in letters:
__to_vowels[letter] = vowel
def codepoint_range(start, end):
for val in range(start, end):
try:
yield unichr(val)
except ValueError:
# Sometimes certain codepoints can't be used on a machine
pass
def char_set(value):
if isinstance(value, list) or isinstance(value, tuple):
return codepoint_range(*value)
else:
return [value]
def unipairs(lst):
return PrintableList(reduce(lambda a, b: chain(a, b), map(char_set, lst)))
__KATAKANA = (
# Katakana: http://en.wikipedia.org/wiki/Katakana
(0x30A0, 0x30FF + 1),
(0x31F0, 0x31FF + 1),
(0x3200, 0x32FF + 1),
(0xFF00, 0xFFEF + 1),
)
__HIRAGANA = (
# Hiragana: http://en.wikipedia.org/wiki/Hiragana
(0x3040, 0x309F + 1),
(0x1B000, 0x1B0FF + 1),
)
__KANJI = (
(0x4e00, 0x9faf + 1),
)
__BONUS_KANA = (
u'〜',
)
KATAKANA = unipairs(__KATAKANA)
HIRAGANA = unipairs(__HIRAGANA)
KANA = PrintableList(KATAKANA + HIRAGANA + unipairs(__BONUS_KANA))
KANJI = unipairs(__KANJI)
def __is_katakana(char):
return char in KATAKANA
def is_katakana(string):
for char in string:
if not __is_katakana(char):
return False
return True
def __is_hiragana(char):
return char in HIRAGANA
def is_hiragana(string):
for char in string:
if not __is_hiragana(char):
return False
return True
def __is_kana(char):
return char in KANA
def is_kana(string):
for char in string:
if not __is_kana(char):
return False
return True
def __is_kanji(char):
return char in KANJI
def is_kanji(string):
for char in string:
if not __is_kanji(char):
return False
return True
def kana_minus_dakuten(char):
if is_katakana(char):
hira = kata2hira(char)
hira = __by_dakuten.get(hira, hira)
return hira2kata(hira)
else:
return __by_dakuten.get(char, char)
def kana_plus_dakuten(char):
yield char
is_kata = is_katakana(char)
if is_kata:
char = kata2hira(char)
for char in __to_dakuten.get(char, ''):
yield hira2kata(char) if is_kata else char
def kana_plus_mini(char):
yield char
i
|
ana(char)
if is_kata:
char = kata2hira(char)
for char in __to_mini.get(char, ''):
yield hira2kata(char) if is_kata else char
def extend_dakuten_reading(string):
if len(string) == 0:
yield ''
return
char = string[0]
for mult in kana_plus_dakuten(char):
yield mult + string[1:]
def extend_mini_reading(string):
if len(string) == 0:
yield ''
return
char = string[-1]
if char not in EXTENDABLE_MINIS:
yield string
return
for substr in kana_plus_mini(char):
yield string[:-1] + substr
def char_to_base_vowel(char):
char = kana_minus_dakuten(char)
translated = __to_vowels.get(char, False) or __to_vowels.get(hira2kata(char), False)
if translated is False:
raise Exception(u"Can't convert")
return translated
def all_to_hiragana(string):
out = u''
for index, char in enumerate(string):
if char == u'ー' or char == u'|':
char = char_to_base_vowel(out[-1])
char = kata2hira(char)
out += char
return out
if __name__ == u'__main__':
from tester import *
test_equal(kana_minus_dakuten(u'は'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ば'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ぱ'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ジ'), u'シ', u"Katakana failure")
test_equal(kana_minus_dakuten(u'本'), u'本', u"Kanji changed")
test_true(is_katakana(u'ハ'), u"Katakana check wrong")
test_true(is_katakana(u'ー'), u"Katakana check wrong")
test_true(is_katakana(u'ジ'), u"Katakana check wrong")
test_true(is_katakana(u'ッ'), u"Katakana check wrong")
test_true(not is_katakana(u'本'), u"Katakana Kanji check wrong")
test_true(not is_katakana(u'っ'), u"Katakana small hiragana check wrong")
test_true(not is_katakana(u'は'), u"Katakana hiragana wrong")
test_true(is_hiragana(u'っ'), u"Hiragana check wrong")
test_true(is_hiragana(u'つ'), u"Hiragana check wrong")
test_true(is_hiragana(u'を'), u"Hiragana check wrong")
test_true(not is_hiragana(u'本'), u"Hiragana Kanji check wrong")
test_true(not is_hiragana(u'ッ'), u"Hiragana small katakana check wrong")
test_true(not is_hiragana(u'ハ'), u"Hiragana katakana check wrong")
test_true(is_kana(u'っ'), u"Kana check wrong")
test_true(is_kana(u'つ'), u"Kana check wrong")
test_true(is_kana(u'を'), u"Kana check wrong")
test_true(is_kana(u'ッ'), u"Kana check wrong")
test_true(is_kana(u'ハ'), u"Kana check wrong")
test_true(is_kana(u'〜・'), u"Kana special check wrong")
test_true(not is_kana(u'本'), u"Kana check wrong")
test_equal(kana_minus_dakuten(u'は'), u'は')
test_equal(kana_minus_dakuten(u'ば'), u'は')
test_equal(kana_minus_dakuten(u'バ'), u'ハ')
test_equal(kana_minus_dakuten(u'本'), u'本')
test_equal(''.join(kana_plus_dakuten(u'は')), u'はばぱ')
test_equal(''.join(kana_plus_dakuten(u'本')), u'本')
test_equal(''.join(kana_plus_dakuten(u'シ')), u'シジ')
test_list_equal(extend_dakuten_reading(u'しゃし'), [u'しゃし', u'じゃし'])
test_list_equal(extend_mini_reading(u'し'), [u'し'])
test_list_equal(extend_mini_reading(u'いつ'), [u'いつ', u'いっ'])
test_equal(all_to_hiragana(u'ジータ'), u'じいた')
|
s_kata = is_katak
|
identifier_name
|
kana.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from jcconv import kata2hira, hira2kata
from itertools import chain
from printable import PrintableDict, PrintableList
__by_vowels = PrintableDict(**{
u'ア': u'ワラヤャマハナタサカアァ',
u'イ': u'リミヒニチシキイィ',
u'ウ': u'ルユュムフヌツスクウゥ',
u'エ': u'レメヘネテセケエェ',
u'オ': u'ヲロヨョモホノトソコオォ',
})
__to_dakuten = PrintableDict(**{
u'か': u'が',
u'き': u'ぎ',
u'く': u'ぐ',
u'け': u'げ',
u'こ': u'ご',
u'さ': u'ざ',
u'し': u'じ',
u'す': u'ず',
u'せ': u'ぜ',
u'そ': u'ぞ',
u'た': u'だ',
u'ち': u'ぢ',
u'つ': u'づ',
u'て': u'で',
u'と': u'ど',
u'は': u'ばぱ',
u'ひ': u'びぴ',
u'ふ': u'ぶぷ',
u'へ': u'べぺ',
u'ほ': u'ぼぽ',
})
__to_mini = PrintableDict(**{
u'く': u'っ',
u'つ': u'っ',
u'や': u'ゃ',
u'よ': u'ょ',
u'ゆ': u'ゅ',
u'わ': u'ゎ',
u'か': u'ゕ',
u'け': u'ゖ',
u'あ': u'ぁ',
u'い': u'ぃ',
u'う': u'ぅ',
u'え': u'ぇ',
u'お': u'ぉ',
})
EXTENDABLE_MINIS = (
u'つ',
u'く',
)
__by_dakuten = PrintableDict()
for vowel, letters in __to_dakuten.iteritems():
for letter in letters:
__by_dakuten[letter] = vowel
__to_vowels = PrintableDict()
for vowel, letters in __by_vowels.iteritems():
for letter in letters:
__to_vowels[letter] = vowel
def codepoint_range(start, end):
for val in range(star
|
except ValueError:
# Sometimes certain codepoints can't be used on a machine
pass
def char_set(value):
if isinstance(value, list) or isinstance(value, tuple):
return codepoint_range(*value)
else:
return [value]
def unipairs(lst):
return PrintableList(reduce(lambda a, b: chain(a, b), map(char_set, lst)))
__KATAKANA = (
# Katakana: http://en.wikipedia.org/wiki/Katakana
(0x30A0, 0x30FF + 1),
(0x31F0, 0x31FF + 1),
(0x3200, 0x32FF + 1),
(0xFF00, 0xFFEF + 1),
)
__HIRAGANA = (
# Hiragana: http://en.wikipedia.org/wiki/Hiragana
(0x3040, 0x309F + 1),
(0x1B000, 0x1B0FF + 1),
)
__KANJI = (
(0x4e00, 0x9faf + 1),
)
__BONUS_KANA = (
u'〜',
)
KATAKANA = unipairs(__KATAKANA)
HIRAGANA = unipairs(__HIRAGANA)
KANA = PrintableList(KATAKANA + HIRAGANA + unipairs(__BONUS_KANA))
KANJI = unipairs(__KANJI)
def __is_katakana(char):
return char in KATAKANA
def is_katakana(string):
for char in string:
if not __is_katakana(char):
return False
return True
def __is_hiragana(char):
return char in HIRAGANA
def is_hiragana(string):
for char in string:
if not __is_hiragana(char):
return False
return True
def __is_kana(char):
return char in KANA
def is_kana(string):
for char in string:
if not __is_kana(char):
return False
return True
def __is_kanji(char):
return char in KANJI
def is_kanji(string):
for char in string:
if not __is_kanji(char):
return False
return True
def kana_minus_dakuten(char):
if is_katakana(char):
hira = kata2hira(char)
hira = __by_dakuten.get(hira, hira)
return hira2kata(hira)
else:
return __by_dakuten.get(char, char)
def kana_plus_dakuten(char):
yield char
is_kata = is_katakana(char)
if is_kata:
char = kata2hira(char)
for char in __to_dakuten.get(char, ''):
yield hira2kata(char) if is_kata else char
def kana_plus_mini(char):
yield char
is_kata = is_katakana(char)
if is_kata:
char = kata2hira(char)
for char in __to_mini.get(char, ''):
yield hira2kata(char) if is_kata else char
def extend_dakuten_reading(string):
if len(string) == 0:
yield ''
return
char = string[0]
for mult in kana_plus_dakuten(char):
yield mult + string[1:]
def extend_mini_reading(string):
if len(string) == 0:
yield ''
return
char = string[-1]
if char not in EXTENDABLE_MINIS:
yield string
return
for substr in kana_plus_mini(char):
yield string[:-1] + substr
def char_to_base_vowel(char):
char = kana_minus_dakuten(char)
translated = __to_vowels.get(char, False) or __to_vowels.get(hira2kata(char), False)
if translated is False:
raise Exception(u"Can't convert")
return translated
def all_to_hiragana(string):
out = u''
for index, char in enumerate(string):
if char == u'ー' or char == u'|':
char = char_to_base_vowel(out[-1])
char = kata2hira(char)
out += char
return out
if __name__ == u'__main__':
from tester import *
test_equal(kana_minus_dakuten(u'は'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ば'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ぱ'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ジ'), u'シ', u"Katakana failure")
test_equal(kana_minus_dakuten(u'本'), u'本', u"Kanji changed")
test_true(is_katakana(u'ハ'), u"Katakana check wrong")
test_true(is_katakana(u'ー'), u"Katakana check wrong")
test_true(is_katakana(u'ジ'), u"Katakana check wrong")
test_true(is_katakana(u'ッ'), u"Katakana check wrong")
test_true(not is_katakana(u'本'), u"Katakana Kanji check wrong")
test_true(not is_katakana(u'っ'), u"Katakana small hiragana check wrong")
test_true(not is_katakana(u'は'), u"Katakana hiragana wrong")
test_true(is_hiragana(u'っ'), u"Hiragana check wrong")
test_true(is_hiragana(u'つ'), u"Hiragana check wrong")
test_true(is_hiragana(u'を'), u"Hiragana check wrong")
test_true(not is_hiragana(u'本'), u"Hiragana Kanji check wrong")
test_true(not is_hiragana(u'ッ'), u"Hiragana small katakana check wrong")
test_true(not is_hiragana(u'ハ'), u"Hiragana katakana check wrong")
test_true(is_kana(u'っ'), u"Kana check wrong")
test_true(is_kana(u'つ'), u"Kana check wrong")
test_true(is_kana(u'を'), u"Kana check wrong")
test_true(is_kana(u'ッ'), u"Kana check wrong")
test_true(is_kana(u'ハ'), u"Kana check wrong")
test_true(is_kana(u'〜・'), u"Kana special check wrong")
test_true(not is_kana(u'本'), u"Kana check wrong")
test_equal(kana_minus_dakuten(u'は'), u'は')
test_equal(kana_minus_dakuten(u'ば'), u'は')
test_equal(kana_minus_dakuten(u'バ'), u'ハ')
test_equal(kana_minus_dakuten(u'本'), u'本')
test_equal(''.join(kana_plus_dakuten(u'は')), u'はばぱ')
test_equal(''.join(kana_plus_dakuten(u'本')), u'本')
test_equal(''.join(kana_plus_dakuten(u'シ')), u'シジ')
test_list_equal(extend_dakuten_reading(u'しゃし'), [u'しゃし', u'じゃし'])
test_list_equal(extend_mini_reading(u'し'), [u'し'])
test_list_equal(extend_mini_reading(u'いつ'), [u'いつ', u'いっ'])
test_equal(all_to_hiragana(u'ジータ'), u'じいた')
|
t, end):
try:
yield unichr(val)
|
conditional_block
|
kana.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from jcconv import kata2hira, hira2kata
from itertools import chain
from printable import PrintableDict, PrintableList
__by_vowels = PrintableDict(**{
u'ア': u'ワラヤャマハナタサカアァ',
u'イ': u'リミヒニチシキイィ',
u'ウ': u'ルユュムフヌツスクウゥ',
u'エ': u'レメヘネテセケエェ',
u'オ': u'ヲロヨョモホノトソコオォ',
})
__to_dakuten = PrintableDict(**{
u'か': u'が',
u'き': u'ぎ',
u'く': u'ぐ',
u'け': u'げ',
u'こ': u'ご',
u'さ': u'ざ',
u'し': u'じ',
u'す': u'ず',
u'せ': u'ぜ',
u'そ': u'ぞ',
u'た': u'だ',
u'ち': u'ぢ',
u'つ': u'づ',
u'て': u'で',
u'と': u'ど',
u'は': u'ばぱ',
u'ひ': u'びぴ',
u'ふ': u'ぶぷ',
u'へ': u'べぺ',
u'ほ': u'ぼぽ',
})
__to_mini = PrintableDict(**{
u'く': u'っ',
u'つ': u'っ',
u'や': u'ゃ',
u'よ': u'ょ',
u'ゆ': u'ゅ',
u'わ': u'ゎ',
u'か': u'ゕ',
u'け': u'ゖ',
u'あ': u'ぁ',
u'い': u'ぃ',
u'う': u'ぅ',
u'え': u'ぇ',
u'お': u'ぉ',
})
EXTENDABLE_MINIS = (
u'つ',
u'く',
)
__by_dakuten = PrintableDict()
for vowel, letters in __to_dakuten.iteritems():
for letter in letters:
__by_dakuten[letter] = vowel
__to_vowels = PrintableDict()
for vowel, letters in __by_vowels.iteritems():
for letter in letters:
__to_vowels[letter] = vowel
def codepoint_range(start, end):
for val in range(start, end):
try:
yield unichr(val)
except ValueError:
# Sometimes certain codepoints can't be used on a machine
pass
def char_set(value):
if isinstance(value, list) or isinstance(value, tuple):
|
return [value]
def unipairs(lst):
return PrintableList(reduce(lambda a, b: chain(a, b), map(char_set, lst)))
__KATAKANA = (
# Katakana: http://en.wikipedia.org/wiki/Katakana
(0x30A0, 0x30FF + 1),
(0x31F0, 0x31FF + 1),
(0x3200, 0x32FF + 1),
(0xFF00, 0xFFEF + 1),
)
__HIRAGANA = (
# Hiragana: http://en.wikipedia.org/wiki/Hiragana
(0x3040, 0x309F + 1),
(0x1B000, 0x1B0FF + 1),
)
__KANJI = (
(0x4e00, 0x9faf + 1),
)
__BONUS_KANA = (
u'〜',
)
KATAKANA = unipairs(__KATAKANA)
HIRAGANA = unipairs(__HIRAGANA)
KANA = PrintableList(KATAKANA + HIRAGANA + unipairs(__BONUS_KANA))
KANJI = unipairs(__KANJI)
def __is_katakana(char):
return char in KATAKANA
def is_katakana(string):
for char in string:
if not __is_katakana(char):
return False
return True
def __is_hiragana(char):
return char in HIRAGANA
def is_hiragana(string):
for char in string:
if not __is_hiragana(char):
return False
return True
def __is_kana(char):
return char in KANA
def is_kana(string):
for char in string:
if not __is_kana(char):
return False
return True
def __is_kanji(char):
return char in KANJI
def is_kanji(string):
for char in string:
if not __is_kanji(char):
return False
return True
def kana_minus_dakuten(char):
if is_katakana(char):
hira = kata2hira(char)
hira = __by_dakuten.get(hira, hira)
return hira2kata(hira)
else:
return __by_dakuten.get(char, char)
def kana_plus_dakuten(char):
yield char
is_kata = is_katakana(char)
if is_kata:
char = kata2hira(char)
for char in __to_dakuten.get(char, ''):
yield hira2kata(char) if is_kata else char
def kana_plus_mini(char):
yield char
is_kata = is_katakana(char)
if is_kata:
char = kata2hira(char)
for char in __to_mini.get(char, ''):
yield hira2kata(char) if is_kata else char
def extend_dakuten_reading(string):
if len(string) == 0:
yield ''
return
char = string[0]
for mult in kana_plus_dakuten(char):
yield mult + string[1:]
def extend_mini_reading(string):
if len(string) == 0:
yield ''
return
char = string[-1]
if char not in EXTENDABLE_MINIS:
yield string
return
for substr in kana_plus_mini(char):
yield string[:-1] + substr
def char_to_base_vowel(char):
char = kana_minus_dakuten(char)
translated = __to_vowels.get(char, False) or __to_vowels.get(hira2kata(char), False)
if translated is False:
raise Exception(u"Can't convert")
return translated
def all_to_hiragana(string):
out = u''
for index, char in enumerate(string):
if char == u'ー' or char == u'|':
char = char_to_base_vowel(out[-1])
char = kata2hira(char)
out += char
return out
if __name__ == u'__main__':
from tester import *
test_equal(kana_minus_dakuten(u'は'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ば'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ぱ'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ジ'), u'シ', u"Katakana failure")
test_equal(kana_minus_dakuten(u'本'), u'本', u"Kanji changed")
test_true(is_katakana(u'ハ'), u"Katakana check wrong")
test_true(is_katakana(u'ー'), u"Katakana check wrong")
test_true(is_katakana(u'ジ'), u"Katakana check wrong")
test_true(is_katakana(u'ッ'), u"Katakana check wrong")
test_true(not is_katakana(u'本'), u"Katakana Kanji check wrong")
test_true(not is_katakana(u'っ'), u"Katakana small hiragana check wrong")
test_true(not is_katakana(u'は'), u"Katakana hiragana wrong")
test_true(is_hiragana(u'っ'), u"Hiragana check wrong")
test_true(is_hiragana(u'つ'), u"Hiragana check wrong")
test_true(is_hiragana(u'を'), u"Hiragana check wrong")
test_true(not is_hiragana(u'本'), u"Hiragana Kanji check wrong")
test_true(not is_hiragana(u'ッ'), u"Hiragana small katakana check wrong")
test_true(not is_hiragana(u'ハ'), u"Hiragana katakana check wrong")
test_true(is_kana(u'っ'), u"Kana check wrong")
test_true(is_kana(u'つ'), u"Kana check wrong")
test_true(is_kana(u'を'), u"Kana check wrong")
test_true(is_kana(u'ッ'), u"Kana check wrong")
test_true(is_kana(u'ハ'), u"Kana check wrong")
test_true(is_kana(u'〜・'), u"Kana special check wrong")
test_true(not is_kana(u'本'), u"Kana check wrong")
test_equal(kana_minus_dakuten(u'は'), u'は')
test_equal(kana_minus_dakuten(u'ば'), u'は')
test_equal(kana_minus_dakuten(u'バ'), u'ハ')
test_equal(kana_minus_dakuten(u'本'), u'本')
test_equal(''.join(kana_plus_dakuten(u'は')), u'はばぱ')
test_equal(''.join(kana_plus_dakuten(u'本')), u'本')
test_equal(''.join(kana_plus_dakuten(u'シ')), u'シジ')
test_list_equal(extend_dakuten_reading(u'しゃし'), [u'しゃし', u'じゃし'])
test_list_equal(extend_mini_reading(u'し'), [u'し'])
test_list_equal(extend_mini_reading(u'いつ'), [u'いつ', u'いっ'])
test_equal(all_to_hiragana(u'ジータ'), u'じいた')
|
return codepoint_range(*value)
else:
|
random_line_split
|
kana.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from jcconv import kata2hira, hira2kata
from itertools import chain
from printable import PrintableDict, PrintableList
__by_vowels = PrintableDict(**{
u'ア': u'ワラヤャマハナタサカアァ',
u'イ': u'リミヒニチシキイィ',
u'ウ': u'ルユュムフヌツスクウゥ',
u'エ': u'レメヘネテセケエェ',
u'オ': u'ヲロヨョモホノトソコオォ',
})
__to_dakuten = PrintableDict(**{
u'か': u'が',
u'き': u'ぎ',
u'く': u'ぐ',
u'け': u'げ',
u'こ': u'ご',
u'さ': u'ざ',
u'し': u'じ',
u'す': u'ず',
u'せ': u'ぜ',
u'そ': u'ぞ',
u'た': u'だ',
u'ち': u'ぢ',
u'つ': u'づ',
u'て': u'で',
u'と': u'ど',
u'は': u'ばぱ',
u'ひ': u'びぴ',
u'ふ': u'ぶぷ',
u'へ': u'べぺ',
u'ほ': u'ぼぽ',
})
__to_mini = PrintableDict(**{
u'く': u'っ',
u'つ': u'っ',
u'や': u'ゃ',
u'よ': u'ょ',
u'ゆ': u'ゅ',
u'わ': u'ゎ',
u'か': u'ゕ',
u'け': u'ゖ',
u'あ': u'ぁ',
u'い': u'ぃ',
u'う': u'ぅ',
u'え': u'ぇ',
u'お': u'ぉ',
})
EXTENDABLE_MINIS = (
u'つ',
u'く',
)
__by_dakuten = PrintableDict()
for vowel, letters in __to_dakuten.iteritems():
for letter in letters:
__by_dakuten[letter] = vowel
__to_vowels = PrintableDict()
for vowel, letters in __by_vowels.iteritems():
for letter in letters:
__to_vowels[letter] = vowel
def codepoint_range(start, end):
for val in range(start, end):
try:
yield unichr(val)
except ValueError:
# Sometimes certain codepoints can't be used on a machine
pass
def char_set(value):
if isinstance(value, list) or isinstance(value, tuple):
return codepoint_range(*value)
else:
return [value]
def unipairs(lst):
return PrintableList(reduce(lambda a, b: chain(a, b), map(char_set, lst)))
__KATAKANA = (
# Katakana: http://en.wikipedia.org/wiki/Katakana
(0x30A0, 0x30FF + 1),
(0x31F0, 0x31FF + 1),
(0x3200, 0x32FF + 1),
(0xFF00, 0xFFEF + 1),
)
__HIRAGANA = (
# Hiragana: http://en.wikipedia.org/wiki/Hiragana
(0x3040, 0x309F + 1),
(0x1B000, 0x1B0FF + 1),
)
__KANJI = (
(0x4e00, 0x9faf + 1),
)
__BONUS_KANA = (
u'〜',
)
KATAKANA = unipairs(__KATAKANA)
HIRAGANA = unipairs(__HIRAGANA)
KANA = PrintableList(KATAKANA + HIRAGANA + unipairs(__BONUS_KANA))
KANJI = unipairs(__KANJI)
def __is_katakana(char):
return char in KATAKANA
def is_katakana(string):
for char in string:
if not __is_katakana(char):
return False
return True
def __is_hiragana(char):
return char in HIRAGANA
def is_hiragana(string):
for char in string:
if not __is_hiragana(char):
return False
return True
def __is_kana(char):
return char in KANA
def is_kana(string):
for char in string:
if not __is_kana(char):
return False
return True
def __is_kanji(char):
return char in KANJI
def is_kanji(string):
for char in string:
if not __is_kanji(char):
return False
return True
def kana_minus_dakuten(char):
if is_katakana(char):
hira = kata2hira(char)
hira = __by_dakuten.get(hi
|
turn hira2kata(hira)
else:
return __by_dakuten.get(char, char)
def kana_plus_dakuten(char):
yield char
is_kata = is_katakana(char)
if is_kata:
char = kata2hira(char)
for char in __to_dakuten.get(char, ''):
yield hira2kata(char) if is_kata else char
def kana_plus_mini(char):
yield char
is_kata = is_katakana(char)
if is_kata:
char = kata2hira(char)
for char in __to_mini.get(char, ''):
yield hira2kata(char) if is_kata else char
def extend_dakuten_reading(string):
if len(string) == 0:
yield ''
return
char = string[0]
for mult in kana_plus_dakuten(char):
yield mult + string[1:]
def extend_mini_reading(string):
if len(string) == 0:
yield ''
return
char = string[-1]
if char not in EXTENDABLE_MINIS:
yield string
return
for substr in kana_plus_mini(char):
yield string[:-1] + substr
def char_to_base_vowel(char):
char = kana_minus_dakuten(char)
translated = __to_vowels.get(char, False) or __to_vowels.get(hira2kata(char), False)
if translated is False:
raise Exception(u"Can't convert")
return translated
def all_to_hiragana(string):
out = u''
for index, char in enumerate(string):
if char == u'ー' or char == u'|':
char = char_to_base_vowel(out[-1])
char = kata2hira(char)
out += char
return out
if __name__ == u'__main__':
from tester import *
test_equal(kana_minus_dakuten(u'は'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ば'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ぱ'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ジ'), u'シ', u"Katakana failure")
test_equal(kana_minus_dakuten(u'本'), u'本', u"Kanji changed")
test_true(is_katakana(u'ハ'), u"Katakana check wrong")
test_true(is_katakana(u'ー'), u"Katakana check wrong")
test_true(is_katakana(u'ジ'), u"Katakana check wrong")
test_true(is_katakana(u'ッ'), u"Katakana check wrong")
test_true(not is_katakana(u'本'), u"Katakana Kanji check wrong")
test_true(not is_katakana(u'っ'), u"Katakana small hiragana check wrong")
test_true(not is_katakana(u'は'), u"Katakana hiragana wrong")
test_true(is_hiragana(u'っ'), u"Hiragana check wrong")
test_true(is_hiragana(u'つ'), u"Hiragana check wrong")
test_true(is_hiragana(u'を'), u"Hiragana check wrong")
test_true(not is_hiragana(u'本'), u"Hiragana Kanji check wrong")
test_true(not is_hiragana(u'ッ'), u"Hiragana small katakana check wrong")
test_true(not is_hiragana(u'ハ'), u"Hiragana katakana check wrong")
test_true(is_kana(u'っ'), u"Kana check wrong")
test_true(is_kana(u'つ'), u"Kana check wrong")
test_true(is_kana(u'を'), u"Kana check wrong")
test_true(is_kana(u'ッ'), u"Kana check wrong")
test_true(is_kana(u'ハ'), u"Kana check wrong")
test_true(is_kana(u'〜・'), u"Kana special check wrong")
test_true(not is_kana(u'本'), u"Kana check wrong")
test_equal(kana_minus_dakuten(u'は'), u'は')
test_equal(kana_minus_dakuten(u'ば'), u'は')
test_equal(kana_minus_dakuten(u'バ'), u'ハ')
test_equal(kana_minus_dakuten(u'本'), u'本')
test_equal(''.join(kana_plus_dakuten(u'は')), u'はばぱ')
test_equal(''.join(kana_plus_dakuten(u'本')), u'本')
test_equal(''.join(kana_plus_dakuten(u'シ')), u'シジ')
test_list_equal(extend_dakuten_reading(u'しゃし'), [u'しゃし', u'じゃし'])
test_list_equal(extend_mini_reading(u'し'), [u'し'])
test_list_equal(extend_mini_reading(u'いつ'), [u'いつ', u'いっ'])
test_equal(all_to_hiragana(u'ジータ'), u'じいた')
|
ra, hira)
re
|
identifier_body
|
PlaceholderImage.js
|
import cx from 'clsx'
import PropTypes from 'prop-types'
import React from 'react'
import { customPropTypes, getElementType, getUnhandledProps, useKeyOnly } from '../../lib'
/**
* A placeholder can contain an image.
*/
function PlaceholderImage(props)
|
PlaceholderImage.propTypes = {
/** An element type to render as (string or function). */
as: PropTypes.elementType,
/** Additional classes. */
className: PropTypes.string,
/** An image can modify size correctly with responsive styles. */
square: customPropTypes.every([customPropTypes.disallow(['rectangular']), PropTypes.bool]),
/** An image can modify size correctly with responsive styles. */
rectangular: customPropTypes.every([customPropTypes.disallow(['square']), PropTypes.bool]),
}
export default PlaceholderImage
|
{
const { className, square, rectangular } = props
const classes = cx(
useKeyOnly(square, 'square'),
useKeyOnly(rectangular, 'rectangular'),
'image',
className,
)
const rest = getUnhandledProps(PlaceholderImage, props)
const ElementType = getElementType(PlaceholderImage, props)
return <ElementType {...rest} className={classes} />
}
|
identifier_body
|
PlaceholderImage.js
|
import cx from 'clsx'
import PropTypes from 'prop-types'
import React from 'react'
import { customPropTypes, getElementType, getUnhandledProps, useKeyOnly } from '../../lib'
/**
* A placeholder can contain an image.
*/
function
|
(props) {
const { className, square, rectangular } = props
const classes = cx(
useKeyOnly(square, 'square'),
useKeyOnly(rectangular, 'rectangular'),
'image',
className,
)
const rest = getUnhandledProps(PlaceholderImage, props)
const ElementType = getElementType(PlaceholderImage, props)
return <ElementType {...rest} className={classes} />
}
PlaceholderImage.propTypes = {
/** An element type to render as (string or function). */
as: PropTypes.elementType,
/** Additional classes. */
className: PropTypes.string,
/** An image can modify size correctly with responsive styles. */
square: customPropTypes.every([customPropTypes.disallow(['rectangular']), PropTypes.bool]),
/** An image can modify size correctly with responsive styles. */
rectangular: customPropTypes.every([customPropTypes.disallow(['square']), PropTypes.bool]),
}
export default PlaceholderImage
|
PlaceholderImage
|
identifier_name
|
PlaceholderImage.js
|
import cx from 'clsx'
import PropTypes from 'prop-types'
import React from 'react'
import { customPropTypes, getElementType, getUnhandledProps, useKeyOnly } from '../../lib'
/**
* A placeholder can contain an image.
*/
function PlaceholderImage(props) {
|
useKeyOnly(square, 'square'),
useKeyOnly(rectangular, 'rectangular'),
'image',
className,
)
const rest = getUnhandledProps(PlaceholderImage, props)
const ElementType = getElementType(PlaceholderImage, props)
return <ElementType {...rest} className={classes} />
}
PlaceholderImage.propTypes = {
/** An element type to render as (string or function). */
as: PropTypes.elementType,
/** Additional classes. */
className: PropTypes.string,
/** An image can modify size correctly with responsive styles. */
square: customPropTypes.every([customPropTypes.disallow(['rectangular']), PropTypes.bool]),
/** An image can modify size correctly with responsive styles. */
rectangular: customPropTypes.every([customPropTypes.disallow(['square']), PropTypes.bool]),
}
export default PlaceholderImage
|
const { className, square, rectangular } = props
const classes = cx(
|
random_line_split
|
subprocess_py27.py
|
# This file is part of the Python 2.7 module subprocess.py, included here
# for compatibility with Python 2.6.
#
# It is still under the original, very open PSF license, see the original
# copyright message included below.
#
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <[email protected]>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
from subprocess import Popen, CalledProcessError, PIPE
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
|
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return output
|
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
|
random_line_split
|
subprocess_py27.py
|
# This file is part of the Python 2.7 module subprocess.py, included here
# for compatibility with Python 2.6.
#
# It is still under the original, very open PSF license, see the original
# copyright message included below.
#
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <[email protected]>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
from subprocess import Popen, CalledProcessError, PIPE
def
|
(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return output
|
check_output
|
identifier_name
|
subprocess_py27.py
|
# This file is part of the Python 2.7 module subprocess.py, included here
# for compatibility with Python 2.6.
#
# It is still under the original, very open PSF license, see the original
# copyright message included below.
#
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <[email protected]>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
from subprocess import Popen, CalledProcessError, PIPE
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
|
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return output
|
raise ValueError('stdout argument not allowed, it will be overridden.')
|
conditional_block
|
subprocess_py27.py
|
# This file is part of the Python 2.7 module subprocess.py, included here
# for compatibility with Python 2.6.
#
# It is still under the original, very open PSF license, see the original
# copyright message included below.
#
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <[email protected]>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
from subprocess import Popen, CalledProcessError, PIPE
def check_output(*popenargs, **kwargs):
|
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return output
|
identifier_body
|
|
texts.js
|
CS.Controllers.Texts = [
{
type: "workbook-area-description",
workbookAreaClassName: "Strengths",
htmlText: "<p>Vad utmärker dig som person? Vilka styrkor använder du för att få jobbet gjort? Från vilket perspektiv ser du på tillvaron? </p><p>I det här avsnittet av Tracks kommer du att besvara frågor om vad som är utmärkande för hur du gör ditt jobb. </p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Drivers",
htmlText: "<p>När du presenterar dig är det bra att berätta något om vad som driver dig att göra det du gör och vilka dina motivationskrafter är. Det gör bilden av dig mer levande och det blir lättare att lägga dig på minnet. Dessutom visar det att du har självinsikt och vet vad du söker och vad du vill ha.</p><p>I det här avsnittet av Tracks kommer du att besvara frågor om dina motivationskrafter.</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Contexts",
htmlText: "<p>TODO</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Workplace",
htmlText: "<p>TODO</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Achievements",
htmlText: "<p>Först lär man sig gå och sen lär man sig springa. Vad du har åstadkommit är en plattform för vad du kan åstadkomma i framtiden.</p><p>I det här avsnittet fokuserar vi på vad du åstadkommit, vilken skillnad du gjort och vad som blivit bättre medan du haft ditt senaste jobb.</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Coworkers",
htmlText: "<p>TODO</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Culture",
htmlText: "<p>TODO</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Expertise",
htmlText: "<p>TODO</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Leadership",
htmlText: "<p>TODO</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Lesses",
htmlText: "<p>TODO</p>"
|
type: "workbook-area-description",
workbookAreaClassName: "ManagementStyle",
htmlText: "<p>TODO</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Mores",
htmlText: "<p>Ett av de enklaste och mest effektiva sätten att utvecklas mot ett mer meningsfullt yrkesliv är att aktivt göra mer av det man gillar och mindre av det man inte gillar. Ett första steg i den riktningen är att identifiera vad det är man gärna vill göra mer av.</p><p>I det här avsnittet av Tracks kommer du att identifiera vad du vill göra mer av i ditt nuvarande jobb och när du tar dig an nästa utmaning.</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Employers",
htmlText: "<p>TODO</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "PhaseAndSize",
htmlText: "<p>TODO</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Projects",
htmlText: "<p>TODO</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Roles",
htmlText: "<p>TODO</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "ToolsAndMethods",
htmlText: "<p>De verktyg och metoder vi använder i jobbet blir ofta en del av vår yrkesidentitet. Man kan ha starka åsikter om vilka verktyg som fungerar bäst och vilka som fungerar mindre bra. Eller om vilka metoder man helst använder och vilka man försöker undvika.</p><p>I det här avsnittet av Tracks kommer du att lista verktyg och metoder som är viktiga i ditt jobb.</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Tracks",
htmlText: "<p>Det här området av Tracks handlar om vilka olika vägar du skulle kunna ta; vilka spår du skulle kunna följa.</p><p>Fundera på olika spår du skulle kunna tänka dig att prova någon gång i livet. Ett spår att följa kan vara alltifrån en roll du gärna skulle vilja ha, till en bransch du tycker verkar spännande eller en hobby du vill plocka upp och göra något mer av.</p>"
},
{
type: "workbook-area-description",
workbookAreaClassName: "Values",
htmlText: "<p>TODO</p>"
}
];
|
},
{
|
random_line_split
|
method_info.rs
|
use super::Attributes;
#[derive(Debug)]
pub struct
|
{
pub access_flags: MethodAccessFlags,
pub name_index: u16,
pub descriptor_index: u16,
pub attrs: Attributes,
}
bitflags! {
pub flags MethodAccessFlags: u16 {
const METHOD_ACC_PUBLIC = 0x0001,
const METHOD_ACC_PRIVATE = 0x0002,
const METHOD_ACC_PROTECTED = 0x0004,
const METHOD_ACC_STATIC = 0x0008,
const METHOD_ACC_FINAL = 0x0010,
const METHOD_ACC_SYNCHRONIZED = 0x0020,
const METHOD_ACC_BRIDGE = 0x0040,
const METHOD_ACC_VARARGS = 0x0080,
const METHOD_ACC_NATIVE = 0x0100,
const METHOD_ACC_ABSTRACT = 0x0400,
const METHOD_ACC_STRICT = 0x0800,
const METHOD_ACC_SYNTHETIC = 0x1000
}
}
impl MethodAccessFlags {
pub fn is_public(&self) -> bool {
self.contains(METHOD_ACC_PUBLIC)
}
pub fn is_private(&self) -> bool {
self.contains(METHOD_ACC_PRIVATE)
}
pub fn is_protected(&self) -> bool {
self.contains(METHOD_ACC_PROTECTED)
}
pub fn is_static(&self) -> bool {
self.contains(METHOD_ACC_STATIC)
}
pub fn is_final(&self) -> bool {
self.contains(METHOD_ACC_FINAL)
}
pub fn is_synchronized(&self) -> bool {
self.contains(METHOD_ACC_SYNCHRONIZED)
}
pub fn is_bridge(&self) -> bool {
self.contains(METHOD_ACC_BRIDGE)
}
pub fn is_varargs(&self) -> bool {
self.contains(METHOD_ACC_VARARGS)
}
pub fn is_native(&self) -> bool {
self.contains(METHOD_ACC_NATIVE)
}
pub fn is_abstract(&self) -> bool {
self.contains(METHOD_ACC_ABSTRACT)
}
pub fn is_strict(&self) -> bool {
self.contains(METHOD_ACC_STRICT)
}
pub fn is_synthetic(&self) -> bool {
self.contains(METHOD_ACC_SYNTHETIC)
}
}
|
MethodInfo
|
identifier_name
|
method_info.rs
|
use super::Attributes;
#[derive(Debug)]
pub struct MethodInfo {
pub access_flags: MethodAccessFlags,
pub name_index: u16,
pub descriptor_index: u16,
pub attrs: Attributes,
}
bitflags! {
pub flags MethodAccessFlags: u16 {
const METHOD_ACC_PUBLIC = 0x0001,
const METHOD_ACC_PRIVATE = 0x0002,
const METHOD_ACC_PROTECTED = 0x0004,
const METHOD_ACC_STATIC = 0x0008,
const METHOD_ACC_FINAL = 0x0010,
const METHOD_ACC_SYNCHRONIZED = 0x0020,
const METHOD_ACC_BRIDGE = 0x0040,
const METHOD_ACC_VARARGS = 0x0080,
const METHOD_ACC_NATIVE = 0x0100,
const METHOD_ACC_ABSTRACT = 0x0400,
const METHOD_ACC_STRICT = 0x0800,
const METHOD_ACC_SYNTHETIC = 0x1000
}
}
impl MethodAccessFlags {
pub fn is_public(&self) -> bool {
self.contains(METHOD_ACC_PUBLIC)
}
pub fn is_private(&self) -> bool {
self.contains(METHOD_ACC_PRIVATE)
}
pub fn is_protected(&self) -> bool {
self.contains(METHOD_ACC_PROTECTED)
}
pub fn is_static(&self) -> bool {
self.contains(METHOD_ACC_STATIC)
}
pub fn is_final(&self) -> bool {
self.contains(METHOD_ACC_FINAL)
}
pub fn is_synchronized(&self) -> bool {
self.contains(METHOD_ACC_SYNCHRONIZED)
|
}
pub fn is_bridge(&self) -> bool {
self.contains(METHOD_ACC_BRIDGE)
}
pub fn is_varargs(&self) -> bool {
self.contains(METHOD_ACC_VARARGS)
}
pub fn is_native(&self) -> bool {
self.contains(METHOD_ACC_NATIVE)
}
pub fn is_abstract(&self) -> bool {
self.contains(METHOD_ACC_ABSTRACT)
}
pub fn is_strict(&self) -> bool {
self.contains(METHOD_ACC_STRICT)
}
pub fn is_synthetic(&self) -> bool {
self.contains(METHOD_ACC_SYNTHETIC)
}
}
|
random_line_split
|
|
test_level_types.py
|
from django.urls import reverse
from course_discovery.apps.api.v1.tests.test_views.mixins import APITestCase, SerializationMixin
from course_discovery.apps.core.tests.factories import USER_PASSWORD, UserFactory
from course_discovery.apps.course_metadata.models import LevelType
from course_discovery.apps.course_metadata.tests.factories import LevelTypeFactory
class LevelTypeViewSetTests(SerializationMixin, APITestCase):
list_path = reverse('api:v1:level_type-list')
|
def test_authentication(self):
""" Verify the endpoint requires the user to be authenticated. """
response = self.client.get(self.list_path)
assert response.status_code == 200
self.client.logout()
response = self.client.get(self.list_path)
assert response.status_code == 401
def test_list(self):
""" Verify the endpoint returns a list of all program types. """
LevelTypeFactory.create_batch(4)
expected = LevelType.objects.all()
with self.assertNumQueries(6):
response = self.client.get(self.list_path)
assert response.status_code == 200
assert response.data['results'] == self.serialize_level_type(expected, many=True)
def test_retrieve(self):
""" The request should return details for a single level type. """
level_type = LevelTypeFactory()
level_type.set_current_language('en')
level_type.name_t = level_type.name
level_type.save()
url = reverse('api:v1:level_type-detail', kwargs={'name': level_type.name})
print(level_type.__dict__)
with self.assertNumQueries(5):
response = self.client.get(url)
assert response.status_code == 200
assert response.data == self.serialize_level_type(level_type)
|
def setUp(self):
super().setUp()
self.user = UserFactory(is_staff=True, is_superuser=True)
self.client.login(username=self.user.username, password=USER_PASSWORD)
|
random_line_split
|
test_level_types.py
|
from django.urls import reverse
from course_discovery.apps.api.v1.tests.test_views.mixins import APITestCase, SerializationMixin
from course_discovery.apps.core.tests.factories import USER_PASSWORD, UserFactory
from course_discovery.apps.course_metadata.models import LevelType
from course_discovery.apps.course_metadata.tests.factories import LevelTypeFactory
class LevelTypeViewSetTests(SerializationMixin, APITestCase):
list_path = reverse('api:v1:level_type-list')
def setUp(self):
super().setUp()
self.user = UserFactory(is_staff=True, is_superuser=True)
self.client.login(username=self.user.username, password=USER_PASSWORD)
def test_authentication(self):
""" Verify the endpoint requires the user to be authenticated. """
response = self.client.get(self.list_path)
assert response.status_code == 200
self.client.logout()
response = self.client.get(self.list_path)
assert response.status_code == 401
def test_list(self):
""" Verify the endpoint returns a list of all program types. """
LevelTypeFactory.create_batch(4)
expected = LevelType.objects.all()
with self.assertNumQueries(6):
response = self.client.get(self.list_path)
assert response.status_code == 200
assert response.data['results'] == self.serialize_level_type(expected, many=True)
def
|
(self):
""" The request should return details for a single level type. """
level_type = LevelTypeFactory()
level_type.set_current_language('en')
level_type.name_t = level_type.name
level_type.save()
url = reverse('api:v1:level_type-detail', kwargs={'name': level_type.name})
print(level_type.__dict__)
with self.assertNumQueries(5):
response = self.client.get(url)
assert response.status_code == 200
assert response.data == self.serialize_level_type(level_type)
|
test_retrieve
|
identifier_name
|
test_level_types.py
|
from django.urls import reverse
from course_discovery.apps.api.v1.tests.test_views.mixins import APITestCase, SerializationMixin
from course_discovery.apps.core.tests.factories import USER_PASSWORD, UserFactory
from course_discovery.apps.course_metadata.models import LevelType
from course_discovery.apps.course_metadata.tests.factories import LevelTypeFactory
class LevelTypeViewSetTests(SerializationMixin, APITestCase):
list_path = reverse('api:v1:level_type-list')
def setUp(self):
|
def test_authentication(self):
""" Verify the endpoint requires the user to be authenticated. """
response = self.client.get(self.list_path)
assert response.status_code == 200
self.client.logout()
response = self.client.get(self.list_path)
assert response.status_code == 401
def test_list(self):
""" Verify the endpoint returns a list of all program types. """
LevelTypeFactory.create_batch(4)
expected = LevelType.objects.all()
with self.assertNumQueries(6):
response = self.client.get(self.list_path)
assert response.status_code == 200
assert response.data['results'] == self.serialize_level_type(expected, many=True)
def test_retrieve(self):
""" The request should return details for a single level type. """
level_type = LevelTypeFactory()
level_type.set_current_language('en')
level_type.name_t = level_type.name
level_type.save()
url = reverse('api:v1:level_type-detail', kwargs={'name': level_type.name})
print(level_type.__dict__)
with self.assertNumQueries(5):
response = self.client.get(url)
assert response.status_code == 200
assert response.data == self.serialize_level_type(level_type)
|
super().setUp()
self.user = UserFactory(is_staff=True, is_superuser=True)
self.client.login(username=self.user.username, password=USER_PASSWORD)
|
identifier_body
|
kontrtube.py
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
)
class KontrTubeIE(InfoExtractor):
|
IE_NAME = 'kontrtube'
IE_DESC = 'KontrTube.ru - Труба зовёт'
_VALID_URL = r'http://(?:www\.)?kontrtube\.ru/videos/(?P<id>\d+)/(?P<display_id>[^/]+)/'
_TEST = {
'url': 'http://www.kontrtube.ru/videos/2678/nad-olimpiyskoy-derevney-v-sochi-podnyat-rossiyskiy-flag/',
'md5': '975a991a4926c9a85f383a736a2e6b80',
'info_dict': {
'id': '2678',
'display_id': 'nad-olimpiyskoy-derevney-v-sochi-podnyat-rossiyskiy-flag',
'ext': 'mp4',
'title': 'Над олимпийской деревней в Сочи поднят российский флаг',
'description': 'md5:80edc4c613d5887ae8ccf1d59432be41',
'thumbnail': 'http://www.kontrtube.ru/contents/videos_screenshots/2000/2678/preview.mp4.jpg',
'duration': 270,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(
url, display_id, 'Downloading page')
video_url = self._search_regex(
r"video_url\s*:\s*'(.+?)/?',", webpage, 'video URL')
thumbnail = self._search_regex(
r"preview_url\s*:\s*'(.+?)/?',", webpage, 'thumbnail', fatal=False)
title = self._html_search_regex(
r'(?s)<h2>(.+?)</h2>', webpage, 'title')
description = self._html_search_meta(
'description', webpage, 'description')
duration = self._search_regex(
r'Длительность: <em>([^<]+)</em>', webpage, 'duration', fatal=False)
if duration:
duration = parse_duration(duration.replace('мин', 'min').replace('сек', 'sec'))
view_count = self._search_regex(
r'Просмотров: <em>([^<]+)</em>',
webpage, 'view count', fatal=False)
if view_count:
view_count = int_or_none(view_count.replace(' ', ''))
comment_count = int_or_none(self._search_regex(
r'Комментарии \((\d+)\)<', webpage, ' comment count', fatal=False))
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'thumbnail': thumbnail,
'title': title,
'description': description,
'duration': duration,
'view_count': int_or_none(view_count),
'comment_count': int_or_none(comment_count),
}
|
identifier_body
|
|
text.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Text layout.
#![deny(unsafe_code)]
use app_units::Au;
use fragment::{Fragment, ScannedTextFragmentInfo, SpecificFragmentInfo, UnscannedTextFragmentInfo};
use gfx::font::{DISABLE_KERNING_SHAPING_FLAG, FontMetrics, IGNORE_LIGATURES_SHAPING_FLAG};
use gfx::font::{RTL_FLAG, RunMetrics, ShapingFlags, ShapingOptions};
use gfx::font_context::FontContext;
use gfx::text::glyph::CharIndex;
use gfx::text::text_run::TextRun;
use gfx::text::util::{self, CompressionMode};
use inline::InlineFragments;
use std::borrow::ToOwned;
use std::collections::LinkedList;
use std::mem;
use std::sync::Arc;
use style::computed_values::{line_height, text_orientation, text_rendering, text_transform};
use style::computed_values::{white_space};
use style::properties::ComputedValues;
use style::properties::style_structs::Font as FontStyle;
use unicode_bidi::{is_rtl, process_text};
use unicode_script::{get_script, Script};
use util::linked_list::split_off_head;
use util::logical_geometry::{LogicalSize, WritingMode};
use util::range::{Range, RangeIndex};
/// Returns the concatenated text of a list of unscanned text fragments.
fn text(fragments: &LinkedList<Fragment>) -> String {
// FIXME: Some of this work is later duplicated in split_first_fragment_at_newline_if_necessary
    // and transform_text. This code should be refactored so that all the scanning for
// newlines is done in a single pass.
let mut text = String::new();
for fragment in fragments {
if let SpecificFragmentInfo::UnscannedText(ref info) = fragment.specific {
if fragment.white_space().preserve_newlines() {
text.push_str(&info.text);
} else {
text.push_str(&info.text.replace("\n", " "));
}
}
}
text
}
/// A stack-allocated object for scanning an inline flow into `TextRun`-containing `TextFragment`s.
pub struct TextRunScanner {
pub clump: LinkedList<Fragment>,
}
impl TextRunScanner {
pub fn new() -> TextRunScanner {
TextRunScanner {
clump: LinkedList::new(),
}
}
pub fn scan_for_runs(&mut self,
font_context: &mut FontContext,
mut fragments: LinkedList<Fragment>)
-> InlineFragments {
debug!("TextRunScanner: scanning {} fragments for text runs...", fragments.len());
debug_assert!(!fragments.is_empty());
// Calculate bidi embedding levels, so we can split bidirectional fragments for reordering.
let text = text(&fragments);
let para_level = fragments.front().unwrap().style.writing_mode.to_bidi_level();
let bidi_info = process_text(&text, Some(para_level));
// Optimization: If all the text is LTR, don't bother splitting on bidi levels.
let bidi_levels = if bidi_info.levels.iter().cloned().any(is_rtl) {
Some(&bidi_info.levels[..])
} else {
None
};
// FIXME(pcwalton): We want to be sure not to allocate multiple times, since this is a
// performance-critical spot, but this may overestimate and allocate too much memory.
let mut new_fragments = Vec::with_capacity(fragments.len());
let mut last_whitespace = false;
let mut paragraph_bytes_processed = 0;
while !fragments.is_empty() {
// Create a clump.
split_first_fragment_at_newline_if_necessary(&mut fragments);
self.clump.append(&mut split_off_head(&mut fragments));
while !fragments.is_empty() && self.clump
.back()
.unwrap()
.can_merge_with_fragment(fragments.front()
.unwrap()) {
split_first_fragment_at_newline_if_necessary(&mut fragments);
self.clump.append(&mut split_off_head(&mut fragments));
}
// Flush that clump to the list of fragments we're building up.
last_whitespace = self.flush_clump_to_list(font_context,
&mut new_fragments,
&mut paragraph_bytes_processed,
bidi_levels,
last_whitespace);
}
debug!("TextRunScanner: complete.");
InlineFragments {
fragments: new_fragments,
}
}
/// A "clump" is a range of inline flow leaves that can be merged together into a single
/// fragment. Adjacent text with the same style can be merged, and nothing else can.
///
/// The flow keeps track of the fragments contained by all non-leaf DOM nodes. This is necessary
/// for correct painting order. Since we compress several leaf fragments here, the mapping must
/// be adjusted.
fn flush_clump_to_list(&mut self,
font_context: &mut FontContext,
out_fragments: &mut Vec<Fragment>,
paragraph_bytes_processed: &mut usize,
bidi_levels: Option<&[u8]>,
mut last_whitespace: bool)
-> bool {
debug!("TextRunScanner: flushing {} fragments in range", self.clump.len());
debug_assert!(!self.clump.is_empty());
match self.clump.front().unwrap().specific {
SpecificFragmentInfo::UnscannedText(_) => {}
_ => {
debug_assert!(self.clump.len() == 1,
"WAT: can't coalesce non-text nodes in flush_clump_to_list()!");
out_fragments.push(self.clump.pop_front().unwrap());
return false
}
}
// Concatenate all of the transformed strings together, saving the new character indices.
let mut mappings: Vec<RunMapping> = Vec::new();
let runs = {
let fontgroup;
let compression;
let text_transform;
let letter_spacing;
let word_spacing;
let text_rendering;
{
let in_fragment = self.clump.front().unwrap();
let font_style = in_fragment.style().get_font_arc();
let inherited_text_style = in_fragment.style().get_inheritedtext();
fontgroup = font_context.layout_font_group_for_style(font_style);
compression = match in_fragment.white_space() {
white_space::T::normal |
white_space::T::nowrap => CompressionMode::CompressWhitespaceNewline,
white_space::T::pre |
white_space::T::pre_wrap => CompressionMode::CompressNone,
white_space::T::pre_line => CompressionMode::CompressWhitespace,
};
text_transform = inherited_text_style.text_transform;
letter_spacing = inherited_text_style.letter_spacing.0;
word_spacing = inherited_text_style.word_spacing.0.unwrap_or(Au(0));
text_rendering = inherited_text_style.text_rendering;
}
// First, transform/compress text of all the nodes.
let (mut run_info_list, mut run_info) = (Vec::new(), RunInfo::new());
for (fragment_index, in_fragment) in self.clump.iter().enumerate() {
let mut mapping = RunMapping::new(&run_info_list[..], &run_info, fragment_index);
let text;
let insertion_point;
match in_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref text_fragment_info) => {
text = &text_fragment_info.text;
insertion_point = text_fragment_info.insertion_point;
}
_ => panic!("Expected an unscanned text fragment!"),
};
let (mut start_position, mut end_position) = (0, 0);
for character in text.chars() {
// Search for the first font in this font group that contains a glyph for this
// character.
let mut font_index = 0;
// The following code panics one way or another if this condition isn't met.
assert!(fontgroup.fonts.len() > 0);
while font_index < fontgroup.fonts.len() - 1 {
if fontgroup.fonts.get(font_index).unwrap().borrow()
.glyph_index(character)
.is_some() {
break
}
font_index += 1;
}
let bidi_level = match bidi_levels {
Some(levels) => levels[*paragraph_bytes_processed],
None => 0
};
// Break the run if the new character has a different explicit script than the
// previous characters.
//
// TODO: Special handling of paired punctuation characters.
// http://www.unicode.org/reports/tr24/#Common
let script = get_script(character);
let compatible_script = is_compatible(script, run_info.script);
if compatible_script && !is_specific(run_info.script) && is_specific(script) {
run_info.script = script;
}
// Now, if necessary, flush the mapping we were building up.
if run_info.font_index != font_index ||
run_info.bidi_level != bidi_level ||
!compatible_script
{
if end_position > start_position {
mapping.flush(&mut mappings,
&mut run_info,
&**text,
insertion_point,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
if run_info.text.len() > 0 {
run_info_list.push(run_info);
run_info = RunInfo::new();
mapping = RunMapping::new(&run_info_list[..],
&run_info,
fragment_index);
}
run_info.font_index = font_index;
run_info.bidi_level = bidi_level;
run_info.script = script;
}
// Consume this character.
end_position += character.len_utf8();
*paragraph_bytes_processed += character.len_utf8();
}
// If the mapping is zero-length, don't flush it.
if start_position == end_position {
continue
}
// Flush the last mapping we created for this fragment to the list.
mapping.flush(&mut mappings,
&mut run_info,
&**text,
insertion_point,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
// Push the final run info.
run_info_list.push(run_info);
// Per CSS 2.1 § 16.4, "when the resultant space between two characters is not the same
// as the default space, user agents should not use ligatures." This ensures that, for
// example, `finally` with a wide `letter-spacing` renders as `f i n a l l y` and not
// `fi n a l l y`.
let mut flags = ShapingFlags::empty();
match letter_spacing {
Some(Au(0)) | None => {}
Some(_) => flags.insert(IGNORE_LIGATURES_SHAPING_FLAG),
}
if text_rendering == text_rendering::T::optimizespeed {
flags.insert(IGNORE_LIGATURES_SHAPING_FLAG);
flags.insert(DISABLE_KERNING_SHAPING_FLAG)
}
let options = ShapingOptions {
letter_spacing: letter_spacing,
word_spacing: word_spacing,
script: Script::Common,
flags: flags,
};
// FIXME(https://github.com/rust-lang/rust/issues/23338)
run_info_list.into_iter().map(|run_info| {
let mut options = options;
options.script = run_info.script;
if is_rtl(run_info.bidi_level) {
options.flags.insert(RTL_FLAG);
}
let mut font = fontgroup.fonts.get(run_info.font_index).unwrap().borrow_mut();
ScannedTextRun {
run: Arc::new(TextRun::new(&mut *font,
run_info.text,
&options,
run_info.bidi_level)),
insertion_point: run_info.insertion_point,
}
}).collect::<Vec<_>>()
};
// Make new fragments with the runs and adjusted text indices.
debug!("TextRunScanner: pushing {} fragment(s)", self.clump.len());
let mut mappings = mappings.into_iter().peekable();
for (logical_offset, old_fragment) in
mem::replace(&mut self.clump, LinkedList::new()).into_iter().enumerate() {
loop {
match mappings.peek() {
Some(mapping) if mapping.old_fragment_index == logical_offset => {}
Some(_) | None => {
if let Some(ref mut last_fragment) = out_fragments.last_mut() {
last_fragment.meld_with_next_inline_fragment(&old_fragment);
}
break;
}
};
let mut mapping = mappings.next().unwrap();
let scanned_run = runs[mapping.text_run_index].clone();
let requires_line_break_afterward_if_wrapping_on_newlines =
scanned_run.run.text.char_at_reverse(mapping.byte_range.end()) == '\n';
if requires_line_break_afterward_if_wrapping_on_newlines {
mapping.char_range.extend_by(CharIndex(-1));
}
let text_size = old_fragment.border_box.size;
let mut new_text_fragment_info = box ScannedTextFragmentInfo::new(
scanned_run.run,
mapping.char_range,
text_size,
&scanned_run.insertion_point,
requires_line_break_afterward_if_wrapping_on_newlines);
let new_metrics = new_text_fragment_info.run.metrics_for_range(&mapping.char_range);
let writing_mode = old_fragment.style.writing_mode;
let bounding_box_size = bounding_box_for_run_metrics(&new_metrics, writing_mode);
new_text_fragment_info.content_size = bounding_box_size;
let new_fragment = old_fragment.transform(
bounding_box_size,
SpecificFragmentInfo::ScannedText(new_text_fragment_info));
out_fragments.push(new_fragment)
}
}
last_whitespace
}
}
#[inline]
fn bounding_box_for_run_metrics(metrics: &RunMetrics, writing_mode: WritingMode)
-> LogicalSize<Au> {
// This does nothing, but it will fail to build
// when more values are added to the `text-orientation` CSS property.
// This will be a reminder to update the code below.
let dummy: Option<text_orientation::T> = None;
match dummy {
Some(text_orientation::T::sideways_right) |
Some(text_orientation::T::sideways_left) |
Some(text_orientation::T::sideways) |
None => {}
}
// In vertical sideways or horizontal upright text,
// the "width" of text metrics is always inline
// This will need to be updated when other text orientations are supported.
LogicalSize::new(
writing_mode,
metrics.bounding_box.size.width,
metrics.bounding_box.size.height)
}
/// Returns the metrics of the font represented by the given `FontStyle`.
///
/// `#[inline]` because often the caller only needs a few fields from the font metrics.
#[inline]
pub fn font_metrics_for_style(font_context: &mut FontContext, font_style: Arc<FontStyle>)
-> FontMetrics {
let fontgroup = font_context.layout_font_group_for_style(font_style);
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let font = fontgroup.fonts[0].borrow();
font.metrics.clone()
}
/// Returns the line block-size needed by the given computed style and font size.
pub fn line_height_from_style(style: &ComputedValues, metrics: &FontMetrics) -> Au {
let font_size = style.get_font().font_size;
match style.get_inheritedbox().line_height {
line_height::T::Normal => metrics.line_gap,
line_height::T::Number(l) => font_size.scale_by(l),
line_height::T::Length(l) => l
}
}
fn split_first_fragment_at_newline_if_necessary(fragments: &mut LinkedList<Fragment>) {
if fragments.is_empty() {
return
}
let new_fragment = {
let mut first_fragment = fragments.front_mut().unwrap();
let string_before;
let insertion_point_before;
{
if !first_fragment.white_space().preserve_newlines() {
return;
}
let unscanned_text_fragment_info = match first_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref mut unscanned_text_fragment_info) => {
unscanned_text_fragment_info
}
_ => return,
};
let position = match unscanned_text_fragment_info.text.find('\n') {
Some(position) if position < unscanned_text_fragment_info.text.len() - 1 => {
position
}
Some(_) | None => return,
};
string_before =
unscanned_text_fragment_info.text[..(position + 1)].to_owned();
unscanned_text_fragment_info.text =
unscanned_text_fragment_info.text[(position + 1)..].to_owned().into_boxed_str();
let offset = CharIndex(string_before.char_indices().count() as isize);
match unscanned_text_fragment_info.insertion_point {
Some(insertion_point) if insertion_point >= offset => {
insertion_point_before = None;
unscanned_text_fragment_info.insertion_point = Some(insertion_point - offset);
}
Some(_) | None => {
insertion_point_before = unscanned_text_fragment_info.insertion_point;
unscanned_text_fragment_info.insertion_point = None;
}
};
}
first_fragment.transform(first_fragment.border_box.size,
                                 SpecificFragmentInfo::UnscannedText(
                                     UnscannedTextFragmentInfo::new(string_before,
                                                                    insertion_point_before)))
    };
fragments.push_front(new_fragment);
}
/// Information about a text run that we're about to create. This is used in `scan_for_runs`.
struct RunInfo {
/// The text that will go in this text run.
text: String,
/// The insertion point in this text run, if applicable.
insertion_point: Option<CharIndex>,
/// The index of the applicable font in the font group.
font_index: usize,
/// A cached copy of the number of Unicode characters in the text run.
character_length: usize,
    /// The bidirectional (bidi) embedding level of this text run.
bidi_level: u8,
/// The Unicode script property of this text run.
script: Script,
}
impl RunInfo {
fn new() -> RunInfo {
RunInfo {
text: String::new(),
insertion_point: None,
font_index: 0,
character_length: 0,
bidi_level: 0,
script: Script::Common,
}
}
}
/// A mapping from a portion of an unscanned text fragment to the text run we're going to create
/// for it.
#[derive(Copy, Clone, Debug)]
struct RunMapping {
/// The range of characters within the text fragment.
char_range: Range<CharIndex>,
/// The range of byte indices within the text fragment.
byte_range: Range<usize>,
/// The index of the unscanned text fragment that this mapping corresponds to.
old_fragment_index: usize,
/// The index of the text run we're going to create.
text_run_index: usize,
}
impl RunMapping {
/// Given the current set of text runs, creates a run mapping for the next fragment.
/// `run_info_list` describes the set of runs we've seen already, and `current_run_info`
/// describes the run we just finished processing.
fn new(run_info_list: &[RunInfo], current_run_info: &RunInfo, fragment_index: usize)
-> RunMapping {
RunMapping {
char_range: Range::new(CharIndex(current_run_info.character_length as isize),
CharIndex(0)),
byte_range: Range::new(0, 0),
old_fragment_index: fragment_index,
text_run_index: run_info_list.len(),
}
}
/// Flushes this run mapping to the list. `run_info` describes the text run that we're
/// currently working on. `text` refers to the text of this fragment.
fn flush(mut self,
mappings: &mut Vec<RunMapping>,
run_info: &mut RunInfo,
text: &str,
insertion_point: Option<CharIndex>,
compression: CompressionMode,
text_transform: text_transform::T,
last_whitespace: &mut bool,
start_position: &mut usize,
end_position: usize) {
let old_byte_length = run_info.text.len();
*last_whitespace = util::transform_text(&text[(*start_position)..end_position],
compression,
*last_whitespace,
&mut run_info.text);
// Account for `text-transform`. (Confusingly, this is not handled in "text
// transformation" above, but we follow Gecko in the naming.)
let is_first_run = *start_position == 0;
let character_count = apply_style_transform_if_necessary(&mut run_info.text,
old_byte_length,
text_transform,
*last_whitespace,
is_first_run);
// Record the position of the insertion point if necessary.
if let Some(insertion_point) = insertion_point {
run_info.insertion_point =
Some(CharIndex(run_info.character_length as isize + insertion_point.0))
}
run_info.character_length = run_info.character_length + character_count;
*start_position = end_position;
// Don't flush empty mappings.
if character_count == 0 {
return
}
let new_byte_length = run_info.text.len();
self.byte_range = Range::new(old_byte_length, new_byte_length - old_byte_length);
self.char_range.extend_by(CharIndex(character_count as isize));
mappings.push(self)
}
}
/// Accounts for `text-transform`.
///
/// FIXME(#4311, pcwalton): Title-case mapping can change length of the string;
/// case mapping should be language-specific; `full-width`;
/// use graphemes instead of characters.
fn apply_style_transform_if_necessary(string: &mut String,
first_character_position: usize,
text_transform: text_transform::T,
last_whitespace: bool,
is_first_run: bool)
-> usize {
match text_transform {
text_transform::T::none => string[first_character_position..].chars().count(),
text_transform::T::uppercase => {
let original = string[first_character_position..].to_owned();
string.truncate(first_character_position);
let mut count = 0;
for ch in original.chars().flat_map(|ch| ch.to_uppercase()) {
string.push(ch);
count += 1;
}
count
}
text_transform::T::lowercase => {
let original = string[first_character_position..].to_owned();
string.truncate(first_character_position);
let mut count = 0;
for ch in original.chars().flat_map(|ch| ch.to_lowercase()) {
string.push(ch);
count += 1;
}
count
}
text_transform::T::capitalize => {
let original = string[first_character_position..].to_owned();
string.truncate(first_character_position);
let mut capitalize_next_letter = is_first_run || last_whitespace;
let mut count = 0;
for character in original.chars() {
count += 1;
// FIXME(#4311, pcwalton): Should be the CSS/Unicode notion of a *typographic
// letter unit*, not an *alphabetic* character:
//
// http://dev.w3.org/csswg/css-text/#typographic-letter-unit
if capitalize_next_letter && character.is_alphabetic() {
string.push(character.to_uppercase().next().unwrap());
capitalize_next_letter = false;
continue
}
string.push(character);
// FIXME(#4311, pcwalton): Try UAX29 instead of just whitespace.
if character.is_whitespace() {
capitalize_next_letter = true
}
}
count
}
}
}
#[derive(Clone)]
struct ScannedTextRun {
run: Arc<TextRun>,
insertion_point: Option<CharIndex>,
}
/// Can a character with script `b` continue a text run with script `a`?
fn is_compatible(a: Script, b: Script) -> bool {
a == b || !is_specific(a) || !is_specific(b)
}
/// Returns true if the script is specific, i.e. neither `Common` nor `Inherited`.
fn is_specific(script: Script) -> bool {
script != Script::Common && script != Script::Inherited
}
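
As a standalone illustration of the run-breaking rule encoded by is_compatible and is_specific above (a sketch, not part of text.rs): a run whose script is still Common or Inherited can absorb a character with a specific script, while two different specific scripts force a new text run. A local Script enum stands in for unicode_script::Script so the snippet compiles on its own.

// sketch.rs -- hypothetical, mirrors the helpers above with a local Script enum.
#[derive(Clone, Copy, PartialEq)]
enum Script { Common, Inherited, Latin, Cyrillic }

fn is_specific(script: Script) -> bool {
    script != Script::Common && script != Script::Inherited
}

fn is_compatible(a: Script, b: Script) -> bool {
    a == b || !is_specific(a) || !is_specific(b)
}

fn main() {
    assert!(is_compatible(Script::Common, Script::Latin));     // a Common run absorbs Latin
    assert!(is_compatible(Script::Latin, Script::Inherited));  // combining marks inherit the run's script
    assert!(!is_compatible(Script::Latin, Script::Cyrillic));  // different specific scripts break the run
}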
UnscannedTextFragmentInfo::new(string_before,
insertion_point_before)))
|
random_line_split
|
text.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Text layout.
#![deny(unsafe_code)]
use app_units::Au;
use fragment::{Fragment, ScannedTextFragmentInfo, SpecificFragmentInfo, UnscannedTextFragmentInfo};
use gfx::font::{DISABLE_KERNING_SHAPING_FLAG, FontMetrics, IGNORE_LIGATURES_SHAPING_FLAG};
use gfx::font::{RTL_FLAG, RunMetrics, ShapingFlags, ShapingOptions};
use gfx::font_context::FontContext;
use gfx::text::glyph::CharIndex;
use gfx::text::text_run::TextRun;
use gfx::text::util::{self, CompressionMode};
use inline::InlineFragments;
use std::borrow::ToOwned;
use std::collections::LinkedList;
use std::mem;
use std::sync::Arc;
use style::computed_values::{line_height, text_orientation, text_rendering, text_transform};
use style::computed_values::{white_space};
use style::properties::ComputedValues;
use style::properties::style_structs::Font as FontStyle;
use unicode_bidi::{is_rtl, process_text};
use unicode_script::{get_script, Script};
use util::linked_list::split_off_head;
use util::logical_geometry::{LogicalSize, WritingMode};
use util::range::{Range, RangeIndex};
/// Returns the concatenated text of a list of unscanned text fragments.
fn text(fragments: &LinkedList<Fragment>) -> String {
// FIXME: Some of this work is later duplicated in split_first_fragment_at_newline_if_necessary
// and transform_text. This code should be refactored so that the all the scanning for
// newlines is done in a single pass.
let mut text = String::new();
for fragment in fragments {
if let SpecificFragmentInfo::UnscannedText(ref info) = fragment.specific {
if fragment.white_space().preserve_newlines() {
text.push_str(&info.text);
} else {
text.push_str(&info.text.replace("\n", " "));
}
}
}
text
}
/// A stack-allocated object for scanning an inline flow into `TextRun`-containing `TextFragment`s.
pub struct TextRunScanner {
pub clump: LinkedList<Fragment>,
}
impl TextRunScanner {
pub fn new() -> TextRunScanner {
TextRunScanner {
clump: LinkedList::new(),
}
}
pub fn scan_for_runs(&mut self,
font_context: &mut FontContext,
mut fragments: LinkedList<Fragment>)
-> InlineFragments {
debug!("TextRunScanner: scanning {} fragments for text runs...", fragments.len());
debug_assert!(!fragments.is_empty());
// Calculate bidi embedding levels, so we can split bidirectional fragments for reordering.
let text = text(&fragments);
let para_level = fragments.front().unwrap().style.writing_mode.to_bidi_level();
let bidi_info = process_text(&text, Some(para_level));
// Optimization: If all the text is LTR, don't bother splitting on bidi levels.
let bidi_levels = if bidi_info.levels.iter().cloned().any(is_rtl) {
Some(&bidi_info.levels[..])
} else {
None
};
// FIXME(pcwalton): We want to be sure not to allocate multiple times, since this is a
// performance-critical spot, but this may overestimate and allocate too much memory.
let mut new_fragments = Vec::with_capacity(fragments.len());
let mut last_whitespace = false;
let mut paragraph_bytes_processed = 0;
while !fragments.is_empty() {
// Create a clump.
split_first_fragment_at_newline_if_necessary(&mut fragments);
self.clump.append(&mut split_off_head(&mut fragments));
while !fragments.is_empty() && self.clump
.back()
.unwrap()
.can_merge_with_fragment(fragments.front()
.unwrap()) {
split_first_fragment_at_newline_if_necessary(&mut fragments);
self.clump.append(&mut split_off_head(&mut fragments));
}
// Flush that clump to the list of fragments we're building up.
last_whitespace = self.flush_clump_to_list(font_context,
&mut new_fragments,
&mut paragraph_bytes_processed,
bidi_levels,
last_whitespace);
}
debug!("TextRunScanner: complete.");
InlineFragments {
fragments: new_fragments,
}
}
/// A "clump" is a range of inline flow leaves that can be merged together into a single
/// fragment. Adjacent text with the same style can be merged, and nothing else can.
///
/// The flow keeps track of the fragments contained by all non-leaf DOM nodes. This is necessary
/// for correct painting order. Since we compress several leaf fragments here, the mapping must
/// be adjusted.
fn flush_clump_to_list(&mut self,
font_context: &mut FontContext,
out_fragments: &mut Vec<Fragment>,
paragraph_bytes_processed: &mut usize,
bidi_levels: Option<&[u8]>,
mut last_whitespace: bool)
-> bool {
debug!("TextRunScanner: flushing {} fragments in range", self.clump.len());
debug_assert!(!self.clump.is_empty());
match self.clump.front().unwrap().specific {
SpecificFragmentInfo::UnscannedText(_) => {}
_ => {
debug_assert!(self.clump.len() == 1,
"WAT: can't coalesce non-text nodes in flush_clump_to_list()!");
out_fragments.push(self.clump.pop_front().unwrap());
return false
}
}
// Concatenate all of the transformed strings together, saving the new character indices.
let mut mappings: Vec<RunMapping> = Vec::new();
let runs = {
let fontgroup;
let compression;
let text_transform;
let letter_spacing;
let word_spacing;
let text_rendering;
{
let in_fragment = self.clump.front().unwrap();
let font_style = in_fragment.style().get_font_arc();
let inherited_text_style = in_fragment.style().get_inheritedtext();
fontgroup = font_context.layout_font_group_for_style(font_style);
compression = match in_fragment.white_space() {
white_space::T::normal |
white_space::T::nowrap => CompressionMode::CompressWhitespaceNewline,
white_space::T::pre |
white_space::T::pre_wrap => CompressionMode::CompressNone,
white_space::T::pre_line => CompressionMode::CompressWhitespace,
};
text_transform = inherited_text_style.text_transform;
letter_spacing = inherited_text_style.letter_spacing.0;
word_spacing = inherited_text_style.word_spacing.0.unwrap_or(Au(0));
text_rendering = inherited_text_style.text_rendering;
}
// First, transform/compress text of all the nodes.
let (mut run_info_list, mut run_info) = (Vec::new(), RunInfo::new());
for (fragment_index, in_fragment) in self.clump.iter().enumerate() {
let mut mapping = RunMapping::new(&run_info_list[..], &run_info, fragment_index);
let text;
let insertion_point;
match in_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref text_fragment_info) => {
text = &text_fragment_info.text;
insertion_point = text_fragment_info.insertion_point;
}
_ => panic!("Expected an unscanned text fragment!"),
};
let (mut start_position, mut end_position) = (0, 0);
for character in text.chars() {
// Search for the first font in this font group that contains a glyph for this
// character.
let mut font_index = 0;
// The following code panics one way or another if this condition isn't met.
assert!(fontgroup.fonts.len() > 0);
while font_index < fontgroup.fonts.len() - 1 {
if fontgroup.fonts.get(font_index).unwrap().borrow()
.glyph_index(character)
.is_some() {
break
}
font_index += 1;
}
let bidi_level = match bidi_levels {
Some(levels) => levels[*paragraph_bytes_processed],
None => 0
};
// Break the run if the new character has a different explicit script than the
// previous characters.
//
// TODO: Special handling of paired punctuation characters.
// http://www.unicode.org/reports/tr24/#Common
let script = get_script(character);
let compatible_script = is_compatible(script, run_info.script);
if compatible_script && !is_specific(run_info.script) && is_specific(script) {
run_info.script = script;
}
// Now, if necessary, flush the mapping we were building up.
if run_info.font_index != font_index ||
run_info.bidi_level != bidi_level ||
!compatible_script
{
if end_position > start_position {
mapping.flush(&mut mappings,
&mut run_info,
&**text,
insertion_point,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
if run_info.text.len() > 0 {
run_info_list.push(run_info);
run_info = RunInfo::new();
mapping = RunMapping::new(&run_info_list[..],
&run_info,
fragment_index);
}
run_info.font_index = font_index;
run_info.bidi_level = bidi_level;
run_info.script = script;
}
// Consume this character.
end_position += character.len_utf8();
*paragraph_bytes_processed += character.len_utf8();
}
// If the mapping is zero-length, don't flush it.
if start_position == end_position {
continue
}
// Flush the last mapping we created for this fragment to the list.
mapping.flush(&mut mappings,
&mut run_info,
&**text,
insertion_point,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
// Push the final run info.
run_info_list.push(run_info);
// Per CSS 2.1 § 16.4, "when the resultant space between two characters is not the same
// as the default space, user agents should not use ligatures." This ensures that, for
// example, `finally` with a wide `letter-spacing` renders as `f i n a l l y` and not
// `fi n a l l y`.
let mut flags = ShapingFlags::empty();
match letter_spacing {
Some(Au(0)) | None => {}
Some(_) => flags.insert(IGNORE_LIGATURES_SHAPING_FLAG),
}
if text_rendering == text_rendering::T::optimizespeed {
flags.insert(IGNORE_LIGATURES_SHAPING_FLAG);
flags.insert(DISABLE_KERNING_SHAPING_FLAG)
}
let options = ShapingOptions {
letter_spacing: letter_spacing,
word_spacing: word_spacing,
script: Script::Common,
flags: flags,
};
// FIXME(https://github.com/rust-lang/rust/issues/23338)
run_info_list.into_iter().map(|run_info| {
let mut options = options;
options.script = run_info.script;
if is_rtl(run_info.bidi_level) {
options.flags.insert(RTL_FLAG);
}
let mut font = fontgroup.fonts.get(run_info.font_index).unwrap().borrow_mut();
ScannedTextRun {
run: Arc::new(TextRun::new(&mut *font,
run_info.text,
&options,
run_info.bidi_level)),
insertion_point: run_info.insertion_point,
}
}).collect::<Vec<_>>()
};
// Make new fragments with the runs and adjusted text indices.
debug!("TextRunScanner: pushing {} fragment(s)", self.clump.len());
let mut mappings = mappings.into_iter().peekable();
for (logical_offset, old_fragment) in
mem::replace(&mut self.clump, LinkedList::new()).into_iter().enumerate() {
loop {
match mappings.peek() {
Some(mapping) if mapping.old_fragment_index == logical_offset => {}
Some(_) | None => {
if let Some(ref mut last_fragment) = out_fragments.last_mut() {
last_fragment.meld_with_next_inline_fragment(&old_fragment);
}
break;
}
};
let mut mapping = mappings.next().unwrap();
let scanned_run = runs[mapping.text_run_index].clone();
let requires_line_break_afterward_if_wrapping_on_newlines =
scanned_run.run.text.char_at_reverse(mapping.byte_range.end()) == '\n';
if requires_line_break_afterward_if_wrapping_on_newlines {
mapping.char_range.extend_by(CharIndex(-1));
}
let text_size = old_fragment.border_box.size;
let mut new_text_fragment_info = box ScannedTextFragmentInfo::new(
scanned_run.run,
mapping.char_range,
text_size,
&scanned_run.insertion_point,
requires_line_break_afterward_if_wrapping_on_newlines);
let new_metrics = new_text_fragment_info.run.metrics_for_range(&mapping.char_range);
let writing_mode = old_fragment.style.writing_mode;
let bounding_box_size = bounding_box_for_run_metrics(&new_metrics, writing_mode);
new_text_fragment_info.content_size = bounding_box_size;
let new_fragment = old_fragment.transform(
bounding_box_size,
SpecificFragmentInfo::ScannedText(new_text_fragment_info));
out_fragments.push(new_fragment)
}
}
last_whitespace
}
}
#[inline]
fn bounding_box_for_run_metrics(metrics: &RunMetrics, writing_mode: WritingMode)
-> LogicalSize<Au> {
// This does nothing, but it will fail to build
// when more values are added to the `text-orientation` CSS property.
// This will be a reminder to update the code below.
let dummy: Option<text_orientation::T> = None;
match dummy {
Some(text_orientation::T::sideways_right) |
Some(text_orientation::T::sideways_left) |
Some(text_orientation::T::sideways) |
None => {}
}
// In vertical sideways or horizontal upright text,
// the "width" of text metrics is always inline
// This will need to be updated when other text orientations are supported.
LogicalSize::new(
writing_mode,
metrics.bounding_box.size.width,
metrics.bounding_box.size.height)
}
/// Returns the metrics of the font represented by the given `FontStyle`, respectively.
///
/// `#[inline]` because often the caller only needs a few fields from the font metrics.
#[inline]
pub fn font_metrics_for_style(font_context: &mut FontContext, font_style: Arc<FontStyle>)
-> FontMetrics {
let fontgroup = font_context.layout_font_group_for_style(font_style);
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let font = fontgroup.fonts[0].borrow();
font.metrics.clone()
}
/// Returns the line block-size needed by the given computed style and font size.
pub fn lin
|
yle: &ComputedValues, metrics: &FontMetrics) -> Au {
let font_size = style.get_font().font_size;
match style.get_inheritedbox().line_height {
line_height::T::Normal => metrics.line_gap,
line_height::T::Number(l) => font_size.scale_by(l),
line_height::T::Length(l) => l
}
}
fn split_first_fragment_at_newline_if_necessary(fragments: &mut LinkedList<Fragment>) {
if fragments.is_empty() {
return
}
let new_fragment = {
let mut first_fragment = fragments.front_mut().unwrap();
let string_before;
let insertion_point_before;
{
if !first_fragment.white_space().preserve_newlines() {
return;
}
let unscanned_text_fragment_info = match first_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref mut unscanned_text_fragment_info) => {
unscanned_text_fragment_info
}
_ => return,
};
let position = match unscanned_text_fragment_info.text.find('\n') {
Some(position) if position < unscanned_text_fragment_info.text.len() - 1 => {
position
}
Some(_) | None => return,
};
string_before =
unscanned_text_fragment_info.text[..(position + 1)].to_owned();
unscanned_text_fragment_info.text =
unscanned_text_fragment_info.text[(position + 1)..].to_owned().into_boxed_str();
let offset = CharIndex(string_before.char_indices().count() as isize);
match unscanned_text_fragment_info.insertion_point {
Some(insertion_point) if insertion_point >= offset => {
insertion_point_before = None;
unscanned_text_fragment_info.insertion_point = Some(insertion_point - offset);
}
Some(_) | None => {
insertion_point_before = unscanned_text_fragment_info.insertion_point;
unscanned_text_fragment_info.insertion_point = None;
}
};
}
first_fragment.transform(first_fragment.border_box.size,
SpecificFragmentInfo::UnscannedText(
UnscannedTextFragmentInfo::new(string_before,
insertion_point_before)))
};
fragments.push_front(new_fragment);
}
/// Information about a text run that we're about to create. This is used in `scan_for_runs`.
struct RunInfo {
/// The text that will go in this text run.
text: String,
/// The insertion point in this text run, if applicable.
insertion_point: Option<CharIndex>,
/// The index of the applicable font in the font group.
font_index: usize,
/// A cached copy of the number of Unicode characters in the text run.
character_length: usize,
/// The bidirection embedding level of this text run.
bidi_level: u8,
/// The Unicode script property of this text run.
script: Script,
}
impl RunInfo {
fn new() -> RunInfo {
RunInfo {
text: String::new(),
insertion_point: None,
font_index: 0,
character_length: 0,
bidi_level: 0,
script: Script::Common,
}
}
}
/// A mapping from a portion of an unscanned text fragment to the text run we're going to create
/// for it.
#[derive(Copy, Clone, Debug)]
struct RunMapping {
/// The range of characters within the text fragment.
char_range: Range<CharIndex>,
/// The range of byte indices within the text fragment.
byte_range: Range<usize>,
/// The index of the unscanned text fragment that this mapping corresponds to.
old_fragment_index: usize,
/// The index of the text run we're going to create.
text_run_index: usize,
}
impl RunMapping {
/// Given the current set of text runs, creates a run mapping for the next fragment.
/// `run_info_list` describes the set of runs we've seen already, and `current_run_info`
/// describes the run we just finished processing.
fn new(run_info_list: &[RunInfo], current_run_info: &RunInfo, fragment_index: usize)
-> RunMapping {
RunMapping {
char_range: Range::new(CharIndex(current_run_info.character_length as isize),
CharIndex(0)),
byte_range: Range::new(0, 0),
old_fragment_index: fragment_index,
text_run_index: run_info_list.len(),
}
}
/// Flushes this run mapping to the list. `run_info` describes the text run that we're
/// currently working on. `text` refers to the text of this fragment.
fn flush(mut self,
mappings: &mut Vec<RunMapping>,
run_info: &mut RunInfo,
text: &str,
insertion_point: Option<CharIndex>,
compression: CompressionMode,
text_transform: text_transform::T,
last_whitespace: &mut bool,
start_position: &mut usize,
end_position: usize) {
let old_byte_length = run_info.text.len();
*last_whitespace = util::transform_text(&text[(*start_position)..end_position],
compression,
*last_whitespace,
&mut run_info.text);
// Account for `text-transform`. (Confusingly, this is not handled in "text
// transformation" above, but we follow Gecko in the naming.)
let is_first_run = *start_position == 0;
let character_count = apply_style_transform_if_necessary(&mut run_info.text,
old_byte_length,
text_transform,
*last_whitespace,
is_first_run);
// Record the position of the insertion point if necessary.
if let Some(insertion_point) = insertion_point {
run_info.insertion_point =
Some(CharIndex(run_info.character_length as isize + insertion_point.0))
}
run_info.character_length = run_info.character_length + character_count;
*start_position = end_position;
// Don't flush empty mappings.
if character_count == 0 {
return
}
let new_byte_length = run_info.text.len();
self.byte_range = Range::new(old_byte_length, new_byte_length - old_byte_length);
self.char_range.extend_by(CharIndex(character_count as isize));
mappings.push(self)
}
}
/// Accounts for `text-transform`.
///
/// FIXME(#4311, pcwalton): Title-case mapping can change length of the string;
/// case mapping should be language-specific; `full-width`;
/// use graphemes instead of characters.
fn apply_style_transform_if_necessary(string: &mut String,
first_character_position: usize,
text_transform: text_transform::T,
last_whitespace: bool,
is_first_run: bool)
-> usize {
match text_transform {
text_transform::T::none => string[first_character_position..].chars().count(),
text_transform::T::uppercase => {
let original = string[first_character_position..].to_owned();
string.truncate(first_character_position);
let mut count = 0;
for ch in original.chars().flat_map(|ch| ch.to_uppercase()) {
string.push(ch);
count += 1;
}
count
}
text_transform::T::lowercase => {
let original = string[first_character_position..].to_owned();
string.truncate(first_character_position);
let mut count = 0;
for ch in original.chars().flat_map(|ch| ch.to_lowercase()) {
string.push(ch);
count += 1;
}
count
}
text_transform::T::capitalize => {
let original = string[first_character_position..].to_owned();
string.truncate(first_character_position);
let mut capitalize_next_letter = is_first_run || last_whitespace;
let mut count = 0;
for character in original.chars() {
count += 1;
// FIXME(#4311, pcwalton): Should be the CSS/Unicode notion of a *typographic
// letter unit*, not an *alphabetic* character:
//
// http://dev.w3.org/csswg/css-text/#typographic-letter-unit
if capitalize_next_letter && character.is_alphabetic() {
string.push(character.to_uppercase().next().unwrap());
capitalize_next_letter = false;
continue
}
string.push(character);
// FIXME(#4311, pcwalton): Try UAX29 instead of just whitespace.
if character.is_whitespace() {
capitalize_next_letter = true
}
}
count
}
}
}
#[derive(Clone)]
struct ScannedTextRun {
run: Arc<TextRun>,
insertion_point: Option<CharIndex>,
}
/// Can a character with script `b` continue a text run with script `a`?
fn is_compatible(a: Script, b: Script) -> bool {
a == b || !is_specific(a) || !is_specific(b)
}
/// Returns true if the script is not invalid or inherited.
fn is_specific(script: Script) -> bool {
script != Script::Common && script != Script::Inherited
}
|
e_height_from_style(st
|
identifier_name
|
text.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Text layout.
#![deny(unsafe_code)]
use app_units::Au;
use fragment::{Fragment, ScannedTextFragmentInfo, SpecificFragmentInfo, UnscannedTextFragmentInfo};
use gfx::font::{DISABLE_KERNING_SHAPING_FLAG, FontMetrics, IGNORE_LIGATURES_SHAPING_FLAG};
use gfx::font::{RTL_FLAG, RunMetrics, ShapingFlags, ShapingOptions};
use gfx::font_context::FontContext;
use gfx::text::glyph::CharIndex;
use gfx::text::text_run::TextRun;
use gfx::text::util::{self, CompressionMode};
use inline::InlineFragments;
use std::borrow::ToOwned;
use std::collections::LinkedList;
use std::mem;
use std::sync::Arc;
use style::computed_values::{line_height, text_orientation, text_rendering, text_transform};
use style::computed_values::{white_space};
use style::properties::ComputedValues;
use style::properties::style_structs::Font as FontStyle;
use unicode_bidi::{is_rtl, process_text};
use unicode_script::{get_script, Script};
use util::linked_list::split_off_head;
use util::logical_geometry::{LogicalSize, WritingMode};
use util::range::{Range, RangeIndex};
/// Returns the concatenated text of a list of unscanned text fragments.
fn text(fragments: &LinkedList<Fragment>) -> String {
// FIXME: Some of this work is later duplicated in split_first_fragment_at_newline_if_necessary
// and transform_text. This code should be refactored so that the all the scanning for
// newlines is done in a single pass.
let mut text = String::new();
for fragment in fragments {
if let SpecificFragmentInfo::UnscannedText(ref info) = fragment.specific {
if fragment.white_space().preserve_newlines() {
text.push_str(&info.text);
} else {
text.push_str(&info.text.replace("\n", " "));
}
}
}
text
}
/// A stack-allocated object for scanning an inline flow into `TextRun`-containing `TextFragment`s.
pub struct TextRunScanner {
pub clump: LinkedList<Fragment>,
}
impl TextRunScanner {
pub fn new() -> TextRunScanner {
TextRunScanner {
clump: LinkedList::new(),
}
}
pub fn scan_for_runs(&mut self,
font_context: &mut FontContext,
mut fragments: LinkedList<Fragment>)
-> InlineFragments {
debug!("TextRunScanner: scanning {} fragments for text runs...", fragments.len());
debug_assert!(!fragments.is_empty());
// Calculate bidi embedding levels, so we can split bidirectional fragments for reordering.
let text = text(&fragments);
let para_level = fragments.front().unwrap().style.writing_mode.to_bidi_level();
let bidi_info = process_text(&text, Some(para_level));
// Optimization: If all the text is LTR, don't bother splitting on bidi levels.
let bidi_levels = if bidi_info.levels.iter().cloned().any(is_rtl) {
Some(&bidi_info.levels[..])
} else {
None
};
// FIXME(pcwalton): We want to be sure not to allocate multiple times, since this is a
// performance-critical spot, but this may overestimate and allocate too much memory.
let mut new_fragments = Vec::with_capacity(fragments.len());
let mut last_whitespace = false;
let mut paragraph_bytes_processed = 0;
while !fragments.is_empty() {
// Create a clump.
split_first_fragment_at_newline_if_necessary(&mut fragments);
self.clump.append(&mut split_off_head(&mut fragments));
while !fragments.is_empty() && self.clump
.back()
.unwrap()
.can_merge_with_fragment(fragments.front()
.unwrap()) {
split_first_fragment_at_newline_if_necessary(&mut fragments);
self.clump.append(&mut split_off_head(&mut fragments));
}
// Flush that clump to the list of fragments we're building up.
last_whitespace = self.flush_clump_to_list(font_context,
&mut new_fragments,
&mut paragraph_bytes_processed,
bidi_levels,
last_whitespace);
}
debug!("TextRunScanner: complete.");
InlineFragments {
fragments: new_fragments,
}
}
/// A "clump" is a range of inline flow leaves that can be merged together into a single
/// fragment. Adjacent text with the same style can be merged, and nothing else can.
///
/// The flow keeps track of the fragments contained by all non-leaf DOM nodes. This is necessary
/// for correct painting order. Since we compress several leaf fragments here, the mapping must
/// be adjusted.
fn flush_clump_to_list(&mut self,
font_context: &mut FontContext,
out_fragments: &mut Vec<Fragment>,
paragraph_bytes_processed: &mut usize,
bidi_levels: Option<&[u8]>,
mut last_whitespace: bool)
-> bool {
debug!("TextRunScanner: flushing {} fragments in range", self.clump.len());
debug_assert!(!self.clump.is_empty());
match self.clump.front().unwrap().specific {
SpecificFragmentInfo::UnscannedText(_) => {}
_ => {
debug_assert!(self.clump.len() == 1,
"WAT: can't coalesce non-text nodes in flush_clump_to_list()!");
out_fragments.push(self.clump.pop_front().unwrap());
return false
}
}
// Concatenate all of the transformed strings together, saving the new character indices.
let mut mappings: Vec<RunMapping> = Vec::new();
let runs = {
let fontgroup;
let compression;
let text_transform;
let letter_spacing;
let word_spacing;
let text_rendering;
{
let in_fragment = self.clump.front().unwrap();
let font_style = in_fragment.style().get_font_arc();
let inherited_text_style = in_fragment.style().get_inheritedtext();
fontgroup = font_context.layout_font_group_for_style(font_style);
compression = match in_fragment.white_space() {
white_space::T::normal |
white_space::T::nowrap => CompressionMode::CompressWhitespaceNewline,
white_space::T::pre |
white_space::T::pre_wrap => CompressionMode::CompressNone,
white_space::T::pre_line => CompressionMode::CompressWhitespace,
};
text_transform = inherited_text_style.text_transform;
letter_spacing = inherited_text_style.letter_spacing.0;
word_spacing = inherited_text_style.word_spacing.0.unwrap_or(Au(0));
text_rendering = inherited_text_style.text_rendering;
}
// First, transform/compress text of all the nodes.
let (mut run_info_list, mut run_info) = (Vec::new(), RunInfo::new());
for (fragment_index, in_fragment) in self.clump.iter().enumerate() {
let mut mapping = RunMapping::new(&run_info_list[..], &run_info, fragment_index);
let text;
let insertion_point;
match in_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref text_fragment_info) =>
|
_ => panic!("Expected an unscanned text fragment!"),
};
let (mut start_position, mut end_position) = (0, 0);
for character in text.chars() {
// Search for the first font in this font group that contains a glyph for this
// character.
let mut font_index = 0;
// The following code panics one way or another if this condition isn't met.
assert!(fontgroup.fonts.len() > 0);
while font_index < fontgroup.fonts.len() - 1 {
if fontgroup.fonts.get(font_index).unwrap().borrow()
.glyph_index(character)
.is_some() {
break
}
font_index += 1;
}
let bidi_level = match bidi_levels {
Some(levels) => levels[*paragraph_bytes_processed],
None => 0
};
// Break the run if the new character has a different explicit script than the
// previous characters.
//
// TODO: Special handling of paired punctuation characters.
// http://www.unicode.org/reports/tr24/#Common
let script = get_script(character);
let compatible_script = is_compatible(script, run_info.script);
if compatible_script && !is_specific(run_info.script) && is_specific(script) {
run_info.script = script;
}
// Now, if necessary, flush the mapping we were building up.
if run_info.font_index != font_index ||
run_info.bidi_level != bidi_level ||
!compatible_script
{
if end_position > start_position {
mapping.flush(&mut mappings,
&mut run_info,
&**text,
insertion_point,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
if run_info.text.len() > 0 {
run_info_list.push(run_info);
run_info = RunInfo::new();
mapping = RunMapping::new(&run_info_list[..],
&run_info,
fragment_index);
}
run_info.font_index = font_index;
run_info.bidi_level = bidi_level;
run_info.script = script;
}
// Consume this character.
end_position += character.len_utf8();
*paragraph_bytes_processed += character.len_utf8();
}
// If the mapping is zero-length, don't flush it.
if start_position == end_position {
continue
}
// Flush the last mapping we created for this fragment to the list.
mapping.flush(&mut mappings,
&mut run_info,
&**text,
insertion_point,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
// Push the final run info.
run_info_list.push(run_info);
// Per CSS 2.1 § 16.4, "when the resultant space between two characters is not the same
// as the default space, user agents should not use ligatures." This ensures that, for
// example, `finally` with a wide `letter-spacing` renders as `f i n a l l y` and not
// `fi n a l l y`.
let mut flags = ShapingFlags::empty();
match letter_spacing {
Some(Au(0)) | None => {}
Some(_) => flags.insert(IGNORE_LIGATURES_SHAPING_FLAG),
}
if text_rendering == text_rendering::T::optimizespeed {
flags.insert(IGNORE_LIGATURES_SHAPING_FLAG);
flags.insert(DISABLE_KERNING_SHAPING_FLAG)
}
let options = ShapingOptions {
letter_spacing: letter_spacing,
word_spacing: word_spacing,
script: Script::Common,
flags: flags,
};
// FIXME(https://github.com/rust-lang/rust/issues/23338)
run_info_list.into_iter().map(|run_info| {
let mut options = options;
options.script = run_info.script;
if is_rtl(run_info.bidi_level) {
options.flags.insert(RTL_FLAG);
}
let mut font = fontgroup.fonts.get(run_info.font_index).unwrap().borrow_mut();
ScannedTextRun {
run: Arc::new(TextRun::new(&mut *font,
run_info.text,
&options,
run_info.bidi_level)),
insertion_point: run_info.insertion_point,
}
}).collect::<Vec<_>>()
};
// Make new fragments with the runs and adjusted text indices.
debug!("TextRunScanner: pushing {} fragment(s)", self.clump.len());
let mut mappings = mappings.into_iter().peekable();
for (logical_offset, old_fragment) in
mem::replace(&mut self.clump, LinkedList::new()).into_iter().enumerate() {
loop {
match mappings.peek() {
Some(mapping) if mapping.old_fragment_index == logical_offset => {}
Some(_) | None => {
if let Some(ref mut last_fragment) = out_fragments.last_mut() {
last_fragment.meld_with_next_inline_fragment(&old_fragment);
}
break;
}
};
let mut mapping = mappings.next().unwrap();
let scanned_run = runs[mapping.text_run_index].clone();
let requires_line_break_afterward_if_wrapping_on_newlines =
scanned_run.run.text.char_at_reverse(mapping.byte_range.end()) == '\n';
if requires_line_break_afterward_if_wrapping_on_newlines {
mapping.char_range.extend_by(CharIndex(-1));
}
let text_size = old_fragment.border_box.size;
let mut new_text_fragment_info = box ScannedTextFragmentInfo::new(
scanned_run.run,
mapping.char_range,
text_size,
&scanned_run.insertion_point,
requires_line_break_afterward_if_wrapping_on_newlines);
let new_metrics = new_text_fragment_info.run.metrics_for_range(&mapping.char_range);
let writing_mode = old_fragment.style.writing_mode;
let bounding_box_size = bounding_box_for_run_metrics(&new_metrics, writing_mode);
new_text_fragment_info.content_size = bounding_box_size;
let new_fragment = old_fragment.transform(
bounding_box_size,
SpecificFragmentInfo::ScannedText(new_text_fragment_info));
out_fragments.push(new_fragment)
}
}
last_whitespace
}
}
#[inline]
fn bounding_box_for_run_metrics(metrics: &RunMetrics, writing_mode: WritingMode)
-> LogicalSize<Au> {
// This does nothing, but it will fail to build
// when more values are added to the `text-orientation` CSS property.
// This will be a reminder to update the code below.
let dummy: Option<text_orientation::T> = None;
match dummy {
Some(text_orientation::T::sideways_right) |
Some(text_orientation::T::sideways_left) |
Some(text_orientation::T::sideways) |
None => {}
}
// In vertical sideways or horizontal upright text,
// the "width" of the text metrics is always the inline size.
// This will need to be updated when other text orientations are supported.
LogicalSize::new(
writing_mode,
metrics.bounding_box.size.width,
metrics.bounding_box.size.height)
}
/// Returns the metrics of the font represented by the given `FontStyle`.
///
/// `#[inline]` because often the caller only needs a few fields from the font metrics.
#[inline]
pub fn font_metrics_for_style(font_context: &mut FontContext, font_style: Arc<FontStyle>)
-> FontMetrics {
let fontgroup = font_context.layout_font_group_for_style(font_style);
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let font = fontgroup.fonts[0].borrow();
font.metrics.clone()
}
/// Returns the line block-size needed by the given computed style and font size.
pub fn line_height_from_style(style: &ComputedValues, metrics: &FontMetrics) -> Au {
let font_size = style.get_font().font_size;
match style.get_inheritedbox().line_height {
line_height::T::Normal => metrics.line_gap,
line_height::T::Number(l) => font_size.scale_by(l),
line_height::T::Length(l) => l
}
}
fn split_first_fragment_at_newline_if_necessary(fragments: &mut LinkedList<Fragment>) {
if fragments.is_empty() {
return
}
let new_fragment = {
let mut first_fragment = fragments.front_mut().unwrap();
let string_before;
let insertion_point_before;
{
if !first_fragment.white_space().preserve_newlines() {
return;
}
let unscanned_text_fragment_info = match first_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref mut unscanned_text_fragment_info) => {
unscanned_text_fragment_info
}
_ => return,
};
let position = match unscanned_text_fragment_info.text.find('\n') {
Some(position) if position < unscanned_text_fragment_info.text.len() - 1 => {
position
}
Some(_) | None => return,
};
string_before =
unscanned_text_fragment_info.text[..(position + 1)].to_owned();
unscanned_text_fragment_info.text =
unscanned_text_fragment_info.text[(position + 1)..].to_owned().into_boxed_str();
let offset = CharIndex(string_before.char_indices().count() as isize);
match unscanned_text_fragment_info.insertion_point {
Some(insertion_point) if insertion_point >= offset => {
insertion_point_before = None;
unscanned_text_fragment_info.insertion_point = Some(insertion_point - offset);
}
Some(_) | None => {
insertion_point_before = unscanned_text_fragment_info.insertion_point;
unscanned_text_fragment_info.insertion_point = None;
}
};
}
first_fragment.transform(first_fragment.border_box.size,
SpecificFragmentInfo::UnscannedText(
UnscannedTextFragmentInfo::new(string_before,
insertion_point_before)))
};
fragments.push_front(new_fragment);
}
/// Information about a text run that we're about to create. This is used in `scan_for_runs`.
struct RunInfo {
/// The text that will go in this text run.
text: String,
/// The insertion point in this text run, if applicable.
insertion_point: Option<CharIndex>,
/// The index of the applicable font in the font group.
font_index: usize,
/// A cached copy of the number of Unicode characters in the text run.
character_length: usize,
/// The bidirectional embedding level of this text run.
bidi_level: u8,
/// The Unicode script property of this text run.
script: Script,
}
impl RunInfo {
fn new() -> RunInfo {
RunInfo {
text: String::new(),
insertion_point: None,
font_index: 0,
character_length: 0,
bidi_level: 0,
script: Script::Common,
}
}
}
/// A mapping from a portion of an unscanned text fragment to the text run we're going to create
/// for it.
#[derive(Copy, Clone, Debug)]
struct RunMapping {
/// The range of characters within the text fragment.
char_range: Range<CharIndex>,
/// The range of byte indices within the text fragment.
byte_range: Range<usize>,
/// The index of the unscanned text fragment that this mapping corresponds to.
old_fragment_index: usize,
/// The index of the text run we're going to create.
text_run_index: usize,
}
impl RunMapping {
/// Given the current set of text runs, creates a run mapping for the next fragment.
/// `run_info_list` describes the set of runs we've seen already, and `current_run_info`
/// describes the run we just finished processing.
fn new(run_info_list: &[RunInfo], current_run_info: &RunInfo, fragment_index: usize)
-> RunMapping {
RunMapping {
char_range: Range::new(CharIndex(current_run_info.character_length as isize),
CharIndex(0)),
byte_range: Range::new(0, 0),
old_fragment_index: fragment_index,
text_run_index: run_info_list.len(),
}
}
/// Flushes this run mapping to the list. `run_info` describes the text run that we're
/// currently working on. `text` refers to the text of this fragment.
fn flush(mut self,
mappings: &mut Vec<RunMapping>,
run_info: &mut RunInfo,
text: &str,
insertion_point: Option<CharIndex>,
compression: CompressionMode,
text_transform: text_transform::T,
last_whitespace: &mut bool,
start_position: &mut usize,
end_position: usize) {
let old_byte_length = run_info.text.len();
*last_whitespace = util::transform_text(&text[(*start_position)..end_position],
compression,
*last_whitespace,
&mut run_info.text);
// Account for `text-transform`. (Confusingly, this is not handled in "text
// transformation" above, but we follow Gecko in the naming.)
let is_first_run = *start_position == 0;
let character_count = apply_style_transform_if_necessary(&mut run_info.text,
old_byte_length,
text_transform,
*last_whitespace,
is_first_run);
// Record the position of the insertion point if necessary.
if let Some(insertion_point) = insertion_point {
run_info.insertion_point =
Some(CharIndex(run_info.character_length as isize + insertion_point.0))
}
run_info.character_length = run_info.character_length + character_count;
*start_position = end_position;
// Don't flush empty mappings.
if character_count == 0 {
return
}
let new_byte_length = run_info.text.len();
self.byte_range = Range::new(old_byte_length, new_byte_length - old_byte_length);
self.char_range.extend_by(CharIndex(character_count as isize));
mappings.push(self)
}
}
/// Accounts for `text-transform`.
///
/// FIXME(#4311, pcwalton): Title-case mapping can change length of the string;
/// case mapping should be language-specific; `full-width`;
/// use graphemes instead of characters.
fn apply_style_transform_if_necessary(string: &mut String,
first_character_position: usize,
text_transform: text_transform::T,
last_whitespace: bool,
is_first_run: bool)
-> usize {
match text_transform {
text_transform::T::none => string[first_character_position..].chars().count(),
text_transform::T::uppercase => {
let original = string[first_character_position..].to_owned();
string.truncate(first_character_position);
let mut count = 0;
for ch in original.chars().flat_map(|ch| ch.to_uppercase()) {
string.push(ch);
count += 1;
}
count
}
text_transform::T::lowercase => {
let original = string[first_character_position..].to_owned();
string.truncate(first_character_position);
let mut count = 0;
for ch in original.chars().flat_map(|ch| ch.to_lowercase()) {
string.push(ch);
count += 1;
}
count
}
text_transform::T::capitalize => {
let original = string[first_character_position..].to_owned();
string.truncate(first_character_position);
let mut capitalize_next_letter = is_first_run || last_whitespace;
let mut count = 0;
for character in original.chars() {
count += 1;
// FIXME(#4311, pcwalton): Should be the CSS/Unicode notion of a *typographic
// letter unit*, not an *alphabetic* character:
//
// http://dev.w3.org/csswg/css-text/#typographic-letter-unit
if capitalize_next_letter && character.is_alphabetic() {
string.push(character.to_uppercase().next().unwrap());
capitalize_next_letter = false;
continue
}
string.push(character);
// FIXME(#4311, pcwalton): Try UAX29 instead of just whitespace.
if character.is_whitespace() {
capitalize_next_letter = true
}
}
count
}
}
}
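// A minimal sketch of the `capitalize` branch above (hypothetical input, not from
// the original source): transforming "hello world" with `is_first_run == true`
// yields "Hello World", and the returned count is the number of characters
// pushed (11 here), which the caller uses to grow the run's character length.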
#[derive(Clone)]
struct ScannedTextRun {
run: Arc<TextRun>,
insertion_point: Option<CharIndex>,
}
/// Can a character with script `b` continue a text run with script `a`?
fn is_compatible(a: Script, b: Script) -> bool {
a == b || !is_specific(a) || !is_specific(b)
}
/// Returns true if the script is a specific script, i.e. neither Common nor Inherited.
fn is_specific(script: Script) -> bool {
script != Script::Common && script != Script::Inherited
}
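// A minimal sketch of the compatibility rule above (hypothetical script values,
// not from the original source):
// is_compatible(Script::Latin, Script::Latin)  -> true   (same specific script)
// is_compatible(Script::Latin, Script::Common) -> true   (Common/Inherited never split a run)
// is_compatible(Script::Latin, Script::Arabic) -> false  (two different specific scripts)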
|
{
text = &text_fragment_info.text;
insertion_point = text_fragment_info.insertion_point;
}
|
conditional_block
|
ProximityBridge.d.ts
|
/// <reference path="APIRequest.d.ts" />
/// <reference path="APIResponse.d.ts" />
/// <reference path="BaseSensorBridge.d.ts" />
/// <reference path="CommonUtil.d.ts" />
/// <reference path="IAdaptiveRPGroup.d.ts" />
/// <reference path="IBaseSensor.d.ts" />
/// <reference path="IProximity.d.ts" />
/**
--| ADAPTIVE RUNTIME PLATFORM |----------------------------------------------------------------------------------------
(C) Copyright 2013-2015 Carlos Lozano Diez t/a Adaptive.me <http://adaptive.me>.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 . Unless required by appli-
-cable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing
permissions and limitations under the License.
Original author:
* Carlos Lozano Diez
<http://github.com/carloslozano>
<http://twitter.com/adaptivecoder>
<mailto:[email protected]>
Contributors:
* Ferran Vila Conesa
<http://github.com/fnva>
<http://twitter.com/ferran_vila>
<mailto:[email protected]>
|
* @version v2.2.15
-------------------------------------------| aut inveniam viam aut faciam |--------------------------------------------
*/
declare module Adaptive {
/**
@class Adaptive.ProximityBridge
@extends Adaptive.BaseSensorBridge
Interface for managing the Proximity operations
@author Carlos Lozano Diez
@since v2.0
*/
class ProximityBridge extends BaseSensorBridge implements IProximity {
/**
@method constructor
Default constructor.
*/
constructor();
}
}
|
* See source code files for contributors.
Release:
|
random_line_split
|
ProximityBridge.d.ts
|
/// <reference path="APIRequest.d.ts" />
/// <reference path="APIResponse.d.ts" />
/// <reference path="BaseSensorBridge.d.ts" />
/// <reference path="CommonUtil.d.ts" />
/// <reference path="IAdaptiveRPGroup.d.ts" />
/// <reference path="IBaseSensor.d.ts" />
/// <reference path="IProximity.d.ts" />
/**
--| ADAPTIVE RUNTIME PLATFORM |----------------------------------------------------------------------------------------
(C) Copyright 2013-2015 Carlos Lozano Diez t/a Adaptive.me <http://adaptive.me>.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 . Unless required by appli-
-cable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing
permissions and limitations under the License.
Original author:
* Carlos Lozano Diez
<http://github.com/carloslozano>
<http://twitter.com/adaptivecoder>
<mailto:[email protected]>
Contributors:
* Ferran Vila Conesa
<http://github.com/fnva>
<http://twitter.com/ferran_vila>
<mailto:[email protected]>
* See source code files for contributors.
Release:
* @version v2.2.15
-------------------------------------------| aut inveniam viam aut faciam |--------------------------------------------
*/
declare module Adaptive {
/**
@class Adaptive.ProximityBridge
@extends Adaptive.BaseSensorBridge
Interface for managing the Proximity operations
@author Carlos Lozano Diez
@since v2.0
*/
class
|
extends BaseSensorBridge implements IProximity {
/**
@method constructor
Default constructor.
*/
constructor();
}
}
|
ProximityBridge
|
identifier_name
|
message.js
|
var messaging_enabled = process.env.FH_AMQP_APP_ENABLED;
var messaging_user = process.env.FH_AMQP_USER;
var message_pass = process.env.FH_AMQP_PASS;
var messaging_nodes = process.env.FH_AMQP_NODES;
var messaging_max_cons = process.env.FH_AMQP_CONN_MAX || 10;
var messaging_vhost = process.env.FH_AMQP_VHOST;
var messaging_exchange = "fh-events";
var amqpManager;
var retAmqp;
module.exports = function (){
//first time the module is required, set up the singletons
function connect(cb)
|
;
connect(function (er, conn){
retAmqp = conn;
});
return {
"getAmqp": function (cb){
connect(cb);
},
"getAmqpManager": function (){
return amqpManager;
}
};
};
|
{
if (messaging_enabled && messaging_enabled !== "false" && ! retAmqp) {
var clusterNodes = [];
var nodes = (messaging_nodes && messaging_nodes.split) ? messaging_nodes.split(",") : [];
var vhost = "";
if(messaging_vhost && messaging_vhost.trim() !== "/"){
vhost = messaging_vhost.trim();
if(vhost.indexOf("/") !== 0){
vhost = "/" + vhost;
}
}
for(var i=0; i < nodes.length; i++){
var node = nodes[i].trim();
node = "amqp://"+messaging_user+":"+message_pass+"@"+node+vhost;
clusterNodes.push(node);
}
var conf = {
"enabled": messaging_enabled,
"clusterNodes": clusterNodes,
"maxReconnectAttempts": messaging_max_cons
};
var amqpjs = require('fh-amqp-js');
amqpManager = new amqpjs.AMQPManager(conf);
amqpManager.connectToCluster();
amqpManager.on("error", function(err){
cb(err);
});
//all good set up object
amqpManager.on("connection", function(){
var retObj = {};
retObj["createErrorMessage"] = function (err){
err = err || {};
return {
"uid": process.env.FH_INSTANCE,
"timestamp": new Date().getTime(),
"eventType": "CRASHED",
"eventClass": "APP_STATE",
"eventLevel": "ERROR",
"domain": process.env.FH_DOMAIN,
"appName":process.env.FH_APPNAME,
"env": process.env.FH_ENV || "",
"updatedBy":"System",
"dyno": "",
"details": {"message": "app crashed", "error": err.message, "stackTrace": err.stack}
};
};
retObj["sendErrorMessage"] = function (err , cb){
var message = retObj.createErrorMessage(err);
amqpManager.publishTopic(messaging_exchange, "fh.events.nodeapp.app.crashed", message, function (err) {
//ignore errors
cb();
});
};
cb(undefined,retObj);
});
}else if(messaging_enabled && messaging_enabled !== "false" && retAmqp){
cb(undefined,retAmqp);
}else{
cb({"message":"messaging not enabled","code":503});
}
}
|
identifier_body
|
message.js
|
var messaging_enabled = process.env.FH_AMQP_APP_ENABLED;
var messaging_user = process.env.FH_AMQP_USER;
var message_pass = process.env.FH_AMQP_PASS;
var messaging_nodes = process.env.FH_AMQP_NODES;
var messaging_max_cons = process.env.FH_AMQP_CONN_MAX || 10;
var messaging_vhost = process.env.FH_AMQP_VHOST;
var messaging_exchange = "fh-events";
var amqpManager;
var retAmqp;
module.exports = function (){
//first time the module is required, set up the singletons
function connect(cb){
if (messaging_enabled && messaging_enabled !== "false" && ! retAmqp) {
var clusterNodes = [];
var nodes = (messaging_nodes && messaging_nodes.split) ? messaging_nodes.split(",") : [];
var vhost = "";
if(messaging_vhost && messaging_vhost.trim() !== "/"){
vhost = messaging_vhost.trim();
if(vhost.indexOf("/") !== 0){
vhost = "/" + vhost;
}
}
for(var i=0; i < nodes.length; i++){
var node = nodes[i].trim();
|
var conf = {
"enabled": messaging_enabled,
"clusterNodes": clusterNodes,
"maxReconnectAttempts": messaging_max_cons
};
var amqpjs = require('fh-amqp-js');
amqpManager = new amqpjs.AMQPManager(conf);
amqpManager.connectToCluster();
amqpManager.on("error", function(err){
cb(err);
});
//all good set up object
amqpManager.on("connection", function(){
var retObj = {};
retObj["createErrorMessage"] = function (err){
err = err || {};
return {
"uid": process.env.FH_INSTANCE,
"timestamp": new Date().getTime(),
"eventType": "CRASHED",
"eventClass": "APP_STATE",
"eventLevel": "ERROR",
"domain": process.env.FH_DOMAIN,
"appName":process.env.FH_APPNAME,
"env": process.env.FH_ENV || "",
"updatedBy":"System",
"dyno": "",
"details": {"message": "app crashed", "error": err.message, "stackTrace": err.stack}
};
};
retObj["sendErrorMessage"] = function (err , cb){
var message = retObj.createErrorMessage(err);
amqpManager.publishTopic(messaging_exchange, "fh.events.nodeapp.app.crashed", message, function (err) {
//ignore errors
cb();
});
};
cb(undefined,retObj);
});
}else if(messaging_enabled && messaging_enabled !== "false" && retAmqp){
cb(undefined,retAmqp);
}else{
cb({"message":"messaging not enabled","code":503});
}
};
connect(function (er, conn){
retAmqp = conn;
});
return {
"getAmqp": function (cb){
connect(cb);
},
"getAmqpManager": function (){
return amqpManager;
}
};
};
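// Usage sketch (hypothetical caller, not part of the original module; assumes the
// FH_AMQP_* environment variables above are set and messaging is enabled):
//
// require('./message')().getAmqp(function (err, messenger) {
//   if (err) { return console.error('messaging unavailable:', err.message); }
//   messenger.sendErrorMessage(new Error('boom'), function () {
//     // the crash event has been published to the "fh-events" exchange
//   });
// });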
|
node = "amqp://"+messaging_user+":"+message_pass+"@"+node+vhost;
clusterNodes.push(node);
}
|
random_line_split
|
message.js
|
var messaging_enabled = process.env.FH_AMQP_APP_ENABLED;
var messaging_user = process.env.FH_AMQP_USER;
var message_pass = process.env.FH_AMQP_PASS;
var messaging_nodes = process.env.FH_AMQP_NODES;
var messaging_max_cons = process.env.FH_AMQP_CONN_MAX || 10;
var messaging_vhost = process.env.FH_AMQP_VHOST;
var messaging_exchange = "fh-events";
var amqpManager;
var retAmqp;
module.exports = function (){
//first time the module is required, set up the singletons
function
|
(cb){
if (messaging_enabled && messaging_enabled !== "false" && ! retAmqp) {
var clusterNodes = [];
var nodes = (messaging_nodes && messaging_nodes.split) ? messaging_nodes.split(",") : [];
var vhost = "";
if(messaging_vhost && messaging_vhost.trim() !== "/"){
vhost = messaging_vhost.trim();
if(vhost.indexOf("/") !== 0){
vhost = "/" + vhost;
}
}
for(var i=0; i < nodes.length; i++){
var node = nodes[i].trim();
node = "amqp://"+messaging_user+":"+message_pass+"@"+node+vhost;
clusterNodes.push(node);
}
var conf = {
"enabled": messaging_enabled,
"clusterNodes": clusterNodes,
"maxReconnectAttempts": messaging_max_cons
};
var amqpjs = require('fh-amqp-js');
amqpManager = new amqpjs.AMQPManager(conf);
amqpManager.connectToCluster();
amqpManager.on("error", function(err){
cb(err);
});
//all good set up object
amqpManager.on("connection", function(){
var retObj = {};
retObj["createErrorMessage"] = function (err){
err = err || {};
return {
"uid": process.env.FH_INSTANCE,
"timestamp": new Date().getTime(),
"eventType": "CRASHED",
"eventClass": "APP_STATE",
"eventLevel": "ERROR",
"domain": process.env.FH_DOMAIN,
"appName":process.env.FH_APPNAME,
"env": process.env.FH_ENV || "",
"updatedBy":"System",
"dyno": "",
"details": {"message": "app crashed", "error": err.message, "stackTrace": err.stack}
};
};
retObj["sendErrorMessage"] = function (err , cb){
var message = retObj.createErrorMessage(err);
amqpManager.publishTopic(messaging_exchange, "fh.events.nodeapp.app.crashed", message, function (err) {
//ignore errors
cb();
});
};
cb(undefined,retObj);
});
}else if(messaging_enabled && messaging_enabled !== "false" && retAmqp){
cb(undefined,retAmqp);
}else{
cb({"message":"messaging not enabled","code":503});
}
};
connect(function (er, conn){
retAmqp = conn;
});
return {
"getAmqp": function (cb){
connect(cb);
},
"getAmqpManager": function (){
return amqpManager;
}
};
};
|
connect
|
identifier_name
|
message.js
|
var messaging_enabled = process.env.FH_AMQP_APP_ENABLED;
var messaging_user = process.env.FH_AMQP_USER;
var message_pass = process.env.FH_AMQP_PASS;
var messaging_nodes = process.env.FH_AMQP_NODES;
var messaging_max_cons = process.env.FH_AMQP_CONN_MAX || 10;
var messaging_vhost = process.env.FH_AMQP_VHOST;
var messaging_exchange = "fh-events";
var amqpManager;
var retAmqp;
module.exports = function (){
//first time the module is required, set up the singletons
function connect(cb){
if (messaging_enabled && messaging_enabled !== "false" && ! retAmqp) {
var clusterNodes = [];
var nodes = (messaging_nodes && messaging_nodes.split) ? messaging_nodes.split(",") : [];
var vhost = "";
if(messaging_vhost && messaging_vhost.trim() !== "/"){
vhost = messaging_vhost.trim();
if(vhost.indexOf("/") !== 0){
vhost = "/" + vhost;
}
}
for(var i=0; i < nodes.length; i++){
var node = nodes[i].trim();
node = "amqp://"+messaging_user+":"+message_pass+"@"+node+vhost;
clusterNodes.push(node);
}
var conf = {
"enabled": messaging_enabled,
"clusterNodes": clusterNodes,
"maxReconnectAttempts": messaging_max_cons
};
var amqpjs = require('fh-amqp-js');
amqpManager = new amqpjs.AMQPManager(conf);
amqpManager.connectToCluster();
amqpManager.on("error", function(err){
cb(err);
});
//all good set up object
amqpManager.on("connection", function(){
var retObj = {};
retObj["createErrorMessage"] = function (err){
err = err || {};
return {
"uid": process.env.FH_INSTANCE,
"timestamp": new Date().getTime(),
"eventType": "CRASHED",
"eventClass": "APP_STATE",
"eventLevel": "ERROR",
"domain": process.env.FH_DOMAIN,
"appName":process.env.FH_APPNAME,
"env": process.env.FH_ENV || "",
"updatedBy":"System",
"dyno": "",
"details": {"message": "app crashed", "error": err.message, "stackTrace": err.stack}
};
};
retObj["sendErrorMessage"] = function (err , cb){
var message = retObj.createErrorMessage(err);
amqpManager.publishTopic(messaging_exchange, "fh.events.nodeapp.app.crashed", message, function (err) {
//ignore errors
cb();
});
};
cb(undefined,retObj);
});
}else if(messaging_enabled && messaging_enabled !== "false" && retAmqp){
cb(undefined,retAmqp);
}else
|
};
connect(function (er, conn){
retAmqp = conn;
});
return {
"getAmqp": function (cb){
connect(cb);
},
"getAmqpManager": function (){
return amqpManager;
}
};
};
|
{
cb({"message":"messaging not enabled","code":503});
}
|
conditional_block
|
network-records-to-devtools-log.js
|
// @ts-nocheck
/**
* @license Copyright 2018 The Lighthouse Authors. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
'use strict';
const NetworkRecorder = require('../../lighthouse-core/lib/network-recorder.js');
/** @typedef {import('../../lighthouse-core/lib/network-request.js')} NetworkRequest */
const idBase = '127122';
const exampleUrl = 'https://testingurl.com/';
const redirectSuffix = ':redirect';
/* eslint-env jest */
/**
* Extract requestId without any `:redirect` strings.
* @param {Partial<NetworkRequest>} record
*/
function getBaseRequestId(record) {
if (!record.requestId) return;
const match = /^([\w.]+)(?::redirect)*$/.exec(record.requestId);
return match?.[1];
}
/**
* @param {Array<HeaderEntry>=} headersArray
* @return {LH.Crdp.Network.Headers}
*/
function headersArrayToHeadersDict(headersArray = []) {
const headersDict = {};
headersArray.forEach(headerItem => {
const value = headersDict[headerItem.name] !== undefined ?
headersDict[headerItem.name] + '\n' : '';
headersDict[headerItem.name] = value + headerItem.value;
});
return headersDict;
}
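// Usage sketch (hypothetical header values, not part of the original helper):
// duplicate header names are folded into a single newline-joined entry, which is
// how repeated response headers typically appear in a CDP Headers object.
//
// headersArrayToHeadersDict([
//   {name: 'set-cookie', value: 'a=1'},
//   {name: 'set-cookie', value: 'b=2'},
// ]) // => {'set-cookie': 'a=1\nb=2'}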
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function
|
(networkRecord, index) {
let initiator = {type: 'other'};
if (networkRecord.initiator) {
initiator = {...networkRecord.initiator};
}
return {
method: 'Network.requestWillBeSent',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
documentURL: networkRecord.documentURL || exampleUrl,
request: {
url: networkRecord.url || exampleUrl,
method: networkRecord.requestMethod || 'GET',
headers: {},
initialPriority: networkRecord.priority || 'Low',
isLinkPreload: networkRecord.isLinkPreload,
},
timestamp: networkRecord.redirectResponseTimestamp || networkRecord.startTime || 0,
wallTime: 0,
initiator,
type: networkRecord.resourceType || 'Document',
frameId: networkRecord.frameId || `${idBase}.1`,
redirectResponse: networkRecord.redirectResponse,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getRequestServedFromCacheEvent(networkRecord, index) {
return {
method: 'Network.requestServedFromCache',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getResponseReceivedEvent(networkRecord, index) {
const headers = headersArrayToHeadersDict(networkRecord.responseHeaders);
let timing;
if (networkRecord.timing) {
timing = {...networkRecord.timing};
if (timing.requestTime === undefined) {
timing.requestTime = networkRecord.startTime || 0;
}
}
return {
method: 'Network.responseReceived',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
timestamp: networkRecord.responseReceivedTime || 1,
type: networkRecord.resourceType || undefined,
response: {
url: networkRecord.url || exampleUrl,
status: networkRecord.statusCode || 200,
headers,
mimeType: typeof networkRecord.mimeType === 'string' ? networkRecord.mimeType : 'text/html',
connectionReused: networkRecord.connectionReused || false,
connectionId: networkRecord.connectionId || 140,
fromDiskCache: networkRecord.fromDiskCache || false,
fromServiceWorker: networkRecord.fetchedViaServiceWorker || false,
encodedDataLength: networkRecord.transferSize === undefined ?
0 : networkRecord.transferSize,
timing,
protocol: networkRecord.protocol || 'http/1.1',
},
frameId: networkRecord.frameId || `${idBase}.1`,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getDataReceivedEvent(networkRecord, index) {
return {
method: 'Network.dataReceived',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
dataLength: networkRecord.resourceSize || 0,
encodedDataLength: networkRecord.transferSize === undefined ?
0 : networkRecord.transferSize,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getLoadingFinishedEvent(networkRecord, index) {
return {
method: 'Network.loadingFinished',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
timestamp: networkRecord.endTime || 3,
encodedDataLength: networkRecord.transferSize === undefined ?
0 : networkRecord.transferSize,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getLoadingFailedEvent(networkRecord, index) {
return {
method: 'Network.loadingFailed',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
timestamp: networkRecord.endTime || 3,
errorText: networkRecord.localizedFailDescription || 'Request failed',
},
};
}
/**
* Returns true if `record` is redirected by another record.
* @param {Array<Partial<NetworkRequest>>} networkRecords
* @param {Partial<NetworkRequest>} record
* @return {boolean}
*/
function willBeRedirected(networkRecords, record) {
if (!record.requestId) {
return false;
}
const redirectId = record.requestId + redirectSuffix;
return networkRecords.some(otherRecord => otherRecord.requestId === redirectId);
}
/**
 * If `record` is a redirect of another record, create a fake redirect response
* to keep the original request defined correctly.
* @param {Array<Partial<NetworkRequest>>} networkRecords
* @param {Partial<NetworkRequest>} record
* @return {Partial<NetworkRequest>}
*/
function addRedirectResponseIfNeeded(networkRecords, record) {
if (!record.requestId || !record.requestId.endsWith(redirectSuffix)) {
return record;
}
const originalId = record.requestId.slice(0, -redirectSuffix.length);
const originalRecord = networkRecords.find(record => record.requestId === originalId);
if (!originalRecord) {
throw new Error(`redirect with id ${record.requestId} has no original request`);
}
// populate `redirectResponse` with original's data, more or less.
const originalResponse = getResponseReceivedEvent(originalRecord).params.response;
originalResponse.status = originalRecord.statusCode || 302;
return {
...record,
redirectResponseTimestamp: originalRecord.endTime,
redirectResponse: originalResponse,
};
}
/**
* Generate a devtoolsLog that can regenerate the passed-in `networkRecords`.
* Generally best at replicating artificial or pruned networkRecords used for
* testing. If run from a test runner, verifies that everything in
* `networkRecords` will be in any network records generated from the output
* (use `skipVerification` to manually skip this assertion).
* @param {Array<Partial<NetworkRequest>>} networkRecords
* @param {{skipVerification?: boolean}=} options
* @return {LH.DevtoolsLog}
*/
function networkRecordsToDevtoolsLog(networkRecords, options = {}) {
const devtoolsLog = [];
networkRecords.forEach((networkRecord, index) => {
networkRecord = addRedirectResponseIfNeeded(networkRecords, networkRecord);
devtoolsLog.push(getRequestWillBeSentEvent(networkRecord, index));
if (willBeRedirected(networkRecords, networkRecord)) {
// If record is going to redirect, only issue the first event.
return;
}
if (networkRecord.fromMemoryCache) {
devtoolsLog.push(getRequestServedFromCacheEvent(networkRecord, index));
}
if (networkRecord.failed) {
devtoolsLog.push(getLoadingFailedEvent(networkRecord, index));
return;
}
devtoolsLog.push(getResponseReceivedEvent(networkRecord, index));
devtoolsLog.push(getDataReceivedEvent(networkRecord, index));
devtoolsLog.push(getLoadingFinishedEvent(networkRecord, index));
});
// If in a test, assert that the log will turn into an equivalent networkRecords.
if (global.expect && !options.skipVerification) {
const roundTrippedNetworkRecords = NetworkRecorder.recordsFromLogs(devtoolsLog);
expect(roundTrippedNetworkRecords).toMatchObject(networkRecords);
}
return devtoolsLog;
}
module.exports = networkRecordsToDevtoolsLog;
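// Usage sketch (hypothetical partial records; any omitted field falls back to the
// defaults above):
//
// const devtoolsLog = networkRecordsToDevtoolsLog([
//   {url: 'https://example.com/', statusCode: 200, transferSize: 1024},
//   {url: 'https://example.com/app.js', resourceType: 'Script'},
// ]);
// // `devtoolsLog` now holds Network.* protocol events and round-trips through
// // NetworkRecorder.recordsFromLogs(devtoolsLog).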
|
getRequestWillBeSentEvent
|
identifier_name
|
network-records-to-devtools-log.js
|
// @ts-nocheck
/**
* @license Copyright 2018 The Lighthouse Authors. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
'use strict';
const NetworkRecorder = require('../../lighthouse-core/lib/network-recorder.js');
/** @typedef {import('../../lighthouse-core/lib/network-request.js')} NetworkRequest */
const idBase = '127122';
const exampleUrl = 'https://testingurl.com/';
const redirectSuffix = ':redirect';
/* eslint-env jest */
/**
* Extract requestId without any `:redirect` strings.
* @param {Partial<NetworkRequest>} record
*/
function getBaseRequestId(record) {
if (!record.requestId) return;
const match = /^([\w.]+)(?::redirect)*$/.exec(record.requestId);
return match?.[1];
}
/**
* @param {Array<HeaderEntry>=} headersArray
* @return {LH.Crdp.Network.Headers}
*/
function headersArrayToHeadersDict(headersArray = []) {
const headersDict = {};
headersArray.forEach(headerItem => {
const value = headersDict[headerItem.name] !== undefined ?
headersDict[headerItem.name] + '\n' : '';
headersDict[headerItem.name] = value + headerItem.value;
});
return headersDict;
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getRequestWillBeSentEvent(networkRecord, index) {
let initiator = {type: 'other'};
if (networkRecord.initiator) {
initiator = {...networkRecord.initiator};
}
return {
method: 'Network.requestWillBeSent',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
documentURL: networkRecord.documentURL || exampleUrl,
request: {
url: networkRecord.url || exampleUrl,
method: networkRecord.requestMethod || 'GET',
headers: {},
initialPriority: networkRecord.priority || 'Low',
isLinkPreload: networkRecord.isLinkPreload,
},
timestamp: networkRecord.redirectResponseTimestamp || networkRecord.startTime || 0,
wallTime: 0,
initiator,
type: networkRecord.resourceType || 'Document',
frameId: networkRecord.frameId || `${idBase}.1`,
redirectResponse: networkRecord.redirectResponse,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getRequestServedFromCacheEvent(networkRecord, index) {
return {
method: 'Network.requestServedFromCache',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getResponseReceivedEvent(networkRecord, index) {
const headers = headersArrayToHeadersDict(networkRecord.responseHeaders);
let timing;
if (networkRecord.timing) {
timing = {...networkRecord.timing};
if (timing.requestTime === undefined) {
timing.requestTime = networkRecord.startTime || 0;
}
}
return {
method: 'Network.responseReceived',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
timestamp: networkRecord.responseReceivedTime || 1,
type: networkRecord.resourceType || undefined,
response: {
url: networkRecord.url || exampleUrl,
status: networkRecord.statusCode || 200,
headers,
mimeType: typeof networkRecord.mimeType === 'string' ? networkRecord.mimeType : 'text/html',
connectionReused: networkRecord.connectionReused || false,
connectionId: networkRecord.connectionId || 140,
fromDiskCache: networkRecord.fromDiskCache || false,
fromServiceWorker: networkRecord.fetchedViaServiceWorker || false,
encodedDataLength: networkRecord.transferSize === undefined ?
0 : networkRecord.transferSize,
timing,
protocol: networkRecord.protocol || 'http/1.1',
},
frameId: networkRecord.frameId || `${idBase}.1`,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getDataReceivedEvent(networkRecord, index) {
return {
method: 'Network.dataReceived',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
dataLength: networkRecord.resourceSize || 0,
encodedDataLength: networkRecord.transferSize === undefined ?
0 : networkRecord.transferSize,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getLoadingFinishedEvent(networkRecord, index)
|
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getLoadingFailedEvent(networkRecord, index) {
return {
method: 'Network.loadingFailed',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
timestamp: networkRecord.endTime || 3,
errorText: networkRecord.localizedFailDescription || 'Request failed',
},
};
}
/**
* Returns true if `record` is redirected by another record.
* @param {Array<Partial<NetworkRequest>>} networkRecords
* @param {Partial<NetworkRequest>} record
* @return {boolean}
*/
function willBeRedirected(networkRecords, record) {
if (!record.requestId) {
return false;
}
const redirectId = record.requestId + redirectSuffix;
return networkRecords.some(otherRecord => otherRecord.requestId === redirectId);
}
/**
 * If `record` is a redirect of another record, create a fake redirect response
* to keep the original request defined correctly.
* @param {Array<Partial<NetworkRequest>>} networkRecords
* @param {Partial<NetworkRequest>} record
* @return {Partial<NetworkRequest>}
*/
function addRedirectResponseIfNeeded(networkRecords, record) {
if (!record.requestId || !record.requestId.endsWith(redirectSuffix)) {
return record;
}
const originalId = record.requestId.slice(0, -redirectSuffix.length);
const originalRecord = networkRecords.find(record => record.requestId === originalId);
if (!originalRecord) {
throw new Error(`redirect with id ${record.requestId} has no original request`);
}
// populate `redirectResponse` with original's data, more or less.
const originalResponse = getResponseReceivedEvent(originalRecord).params.response;
originalResponse.status = originalRecord.statusCode || 302;
return {
...record,
redirectResponseTimestamp: originalRecord.endTime,
redirectResponse: originalResponse,
};
}
/**
* Generate a devtoolsLog that can regenerate the passed-in `networkRecords`.
* Generally best at replicating artificial or pruned networkRecords used for
* testing. If run from a test runner, verifies that everything in
* `networkRecords` will be in any network records generated from the output
* (use `skipVerification` to manually skip this assertion).
* @param {Array<Partial<NetworkRequest>>} networkRecords
* @param {{skipVerification?: boolean}=} options
* @return {LH.DevtoolsLog}
*/
function networkRecordsToDevtoolsLog(networkRecords, options = {}) {
const devtoolsLog = [];
networkRecords.forEach((networkRecord, index) => {
networkRecord = addRedirectResponseIfNeeded(networkRecords, networkRecord);
devtoolsLog.push(getRequestWillBeSentEvent(networkRecord, index));
if (willBeRedirected(networkRecords, networkRecord)) {
// If record is going to redirect, only issue the first event.
return;
}
if (networkRecord.fromMemoryCache) {
devtoolsLog.push(getRequestServedFromCacheEvent(networkRecord, index));
}
if (networkRecord.failed) {
devtoolsLog.push(getLoadingFailedEvent(networkRecord, index));
return;
}
devtoolsLog.push(getResponseReceivedEvent(networkRecord, index));
devtoolsLog.push(getDataReceivedEvent(networkRecord, index));
devtoolsLog.push(getLoadingFinishedEvent(networkRecord, index));
});
// If in a test, assert that the log will turn into an equivalent networkRecords.
if (global.expect && !options.skipVerification) {
const roundTrippedNetworkRecords = NetworkRecorder.recordsFromLogs(devtoolsLog);
expect(roundTrippedNetworkRecords).toMatchObject(networkRecords);
}
return devtoolsLog;
}
module.exports = networkRecordsToDevtoolsLog;
|
{
return {
method: 'Network.loadingFinished',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
timestamp: networkRecord.endTime || 3,
encodedDataLength: networkRecord.transferSize === undefined ?
0 : networkRecord.transferSize,
},
};
}
|
identifier_body
|
network-records-to-devtools-log.js
|
// @ts-nocheck
/**
* @license Copyright 2018 The Lighthouse Authors. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
'use strict';
const NetworkRecorder = require('../../lighthouse-core/lib/network-recorder.js');
/** @typedef {import('../../lighthouse-core/lib/network-request.js')} NetworkRequest */
const idBase = '127122';
const exampleUrl = 'https://testingurl.com/';
const redirectSuffix = ':redirect';
/* eslint-env jest */
/**
* Extract requestId without any `:redirect` strings.
* @param {Partial<NetworkRequest>} record
*/
function getBaseRequestId(record) {
if (!record.requestId) return;
const match = /^([\w.]+)(?::redirect)*$/.exec(record.requestId);
return match?.[1];
}
/**
* @param {Array<HeaderEntry>=} headersArray
* @return {LH.Crdp.Network.Headers}
*/
function headersArrayToHeadersDict(headersArray = []) {
const headersDict = {};
headersArray.forEach(headerItem => {
const value = headersDict[headerItem.name] !== undefined ?
headersDict[headerItem.name] + '\n' : '';
headersDict[headerItem.name] = value + headerItem.value;
});
return headersDict;
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getRequestWillBeSentEvent(networkRecord, index) {
let initiator = {type: 'other'};
if (networkRecord.initiator) {
initiator = {...networkRecord.initiator};
}
return {
method: 'Network.requestWillBeSent',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
documentURL: networkRecord.documentURL || exampleUrl,
request: {
url: networkRecord.url || exampleUrl,
method: networkRecord.requestMethod || 'GET',
headers: {},
initialPriority: networkRecord.priority || 'Low',
isLinkPreload: networkRecord.isLinkPreload,
},
timestamp: networkRecord.redirectResponseTimestamp || networkRecord.startTime || 0,
wallTime: 0,
initiator,
type: networkRecord.resourceType || 'Document',
frameId: networkRecord.frameId || `${idBase}.1`,
redirectResponse: networkRecord.redirectResponse,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getRequestServedFromCacheEvent(networkRecord, index) {
return {
method: 'Network.requestServedFromCache',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getResponseReceivedEvent(networkRecord, index) {
const headers = headersArrayToHeadersDict(networkRecord.responseHeaders);
let timing;
if (networkRecord.timing) {
timing = {...networkRecord.timing};
if (timing.requestTime === undefined) {
timing.requestTime = networkRecord.startTime || 0;
}
}
return {
method: 'Network.responseReceived',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
timestamp: networkRecord.responseReceivedTime || 1,
type: networkRecord.resourceType || undefined,
response: {
url: networkRecord.url || exampleUrl,
status: networkRecord.statusCode || 200,
headers,
mimeType: typeof networkRecord.mimeType === 'string' ? networkRecord.mimeType : 'text/html',
connectionReused: networkRecord.connectionReused || false,
connectionId: networkRecord.connectionId || 140,
fromDiskCache: networkRecord.fromDiskCache || false,
fromServiceWorker: networkRecord.fetchedViaServiceWorker || false,
encodedDataLength: networkRecord.transferSize === undefined ?
0 : networkRecord.transferSize,
timing,
protocol: networkRecord.protocol || 'http/1.1',
},
frameId: networkRecord.frameId || `${idBase}.1`,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getDataReceivedEvent(networkRecord, index) {
return {
method: 'Network.dataReceived',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
dataLength: networkRecord.resourceSize || 0,
encodedDataLength: networkRecord.transferSize === undefined ?
0 : networkRecord.transferSize,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getLoadingFinishedEvent(networkRecord, index) {
return {
method: 'Network.loadingFinished',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
timestamp: networkRecord.endTime || 3,
encodedDataLength: networkRecord.transferSize === undefined ?
0 : networkRecord.transferSize,
},
};
}
/**
* @param {Partial<NetworkRequest>} networkRecord
* @return {LH.Protocol.RawEventMessage}
*/
function getLoadingFailedEvent(networkRecord, index) {
return {
method: 'Network.loadingFailed',
params: {
requestId: getBaseRequestId(networkRecord) || `${idBase}.${index}`,
timestamp: networkRecord.endTime || 3,
errorText: networkRecord.localizedFailDescription || 'Request failed',
},
};
}
/**
* Returns true if `record` is redirected by another record.
* @param {Array<Partial<NetworkRequest>>} networkRecords
* @param {Partial<NetworkRequest>} record
* @return {boolean}
*/
function willBeRedirected(networkRecords, record) {
if (!record.requestId) {
return false;
}
const redirectId = record.requestId + redirectSuffix;
return networkRecords.some(otherRecord => otherRecord.requestId === redirectId);
}
/**
 * If `record` is a redirect of another record, create a fake redirect response
* to keep the original request defined correctly.
* @param {Array<Partial<NetworkRequest>>} networkRecords
* @param {Partial<NetworkRequest>} record
* @return {Partial<NetworkRequest>}
*/
function addRedirectResponseIfNeeded(networkRecords, record) {
if (!record.requestId || !record.requestId.endsWith(redirectSuffix)) {
return record;
}
const originalId = record.requestId.slice(0, -redirectSuffix.length);
const originalRecord = networkRecords.find(record => record.requestId === originalId);
if (!originalRecord) {
throw new Error(`redirect with id ${record.requestId} has no original request`);
}
// populate `redirectResponse` with original's data, more or less.
const originalResponse = getResponseReceivedEvent(originalRecord).params.response;
originalResponse.status = originalRecord.statusCode || 302;
return {
...record,
redirectResponseTimestamp: originalRecord.endTime,
redirectResponse: originalResponse,
};
}
/**
* Generate a devtoolsLog that can regenerate the passed-in `networkRecords`.
* Generally best at replicating artificial or pruned networkRecords used for
* testing. If run from a test runner, verifies that everything in
* `networkRecords` will be in any network records generated from the output
* (use `skipVerification` to manually skip this assertion).
* @param {Array<Partial<NetworkRequest>>} networkRecords
* @param {{skipVerification?: boolean}=} options
* @return {LH.DevtoolsLog}
*/
function networkRecordsToDevtoolsLog(networkRecords, options = {}) {
const devtoolsLog = [];
networkRecords.forEach((networkRecord, index) => {
networkRecord = addRedirectResponseIfNeeded(networkRecords, networkRecord);
devtoolsLog.push(getRequestWillBeSentEvent(networkRecord, index));
if (willBeRedirected(networkRecords, networkRecord)) {
// If record is going to redirect, only issue the first event.
|
}
if (networkRecord.fromMemoryCache) {
devtoolsLog.push(getRequestServedFromCacheEvent(networkRecord, index));
}
if (networkRecord.failed) {
devtoolsLog.push(getLoadingFailedEvent(networkRecord, index));
return;
}
devtoolsLog.push(getResponseReceivedEvent(networkRecord, index));
devtoolsLog.push(getDataReceivedEvent(networkRecord, index));
devtoolsLog.push(getLoadingFinishedEvent(networkRecord, index));
});
// If in a test, assert that the log will turn into an equivalent networkRecords.
if (global.expect && !options.skipVerification) {
const roundTrippedNetworkRecords = NetworkRecorder.recordsFromLogs(devtoolsLog);
expect(roundTrippedNetworkRecords).toMatchObject(networkRecords);
}
return devtoolsLog;
}
module.exports = networkRecordsToDevtoolsLog;
|
return;
|
random_line_split
|
testsuite.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
def create_op(scope, op_type, inputs, outputs, attrs):
kwargs = dict()
op_maker = core.op_proto_and_checker_maker
op_role_attr_name = op_maker.kOpRoleAttrName()
if op_role_attr_name not in attrs:
attrs[op_role_attr_name] = int(op_maker.OpRole.Forward)
def __create_var__(name, var_name):
scope.var(var_name).get_tensor()
kwargs[name].append(var_name)
for in_name, in_dup in Operator.get_op_inputs(op_type):
if in_name in inputs:
kwargs[in_name] = []
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, _ = item[0], item[1]
__create_var__(in_name, sub_in_name)
else:
__create_var__(in_name, in_name)
for out_name, out_dup in Operator.get_op_outputs(op_type):
if out_name in outputs:
kwargs[out_name] = []
if out_dup:
sub_out = outputs[out_name]
for item in sub_out:
sub_out_name, _ = item[0], item[1]
__create_var__(out_name, sub_out_name)
else:
|
for attr_name in Operator.get_op_attr_names(op_type):
if attr_name in attrs:
kwargs[attr_name] = attrs[attr_name]
return Operator(op_type, **kwargs)
def set_input(scope, op, inputs, place):
def np_value_to_fluid_value(input):
if input.dtype == np.float16:
input = input.view(np.uint16)
return input
def __set_input__(var_name, var):
if isinstance(var, tuple) or isinstance(var, np.ndarray):
tensor = scope.find_var(var_name).get_tensor()
if isinstance(var, tuple):
tensor.set_recursive_sequence_lengths(var[1])
var = var[0]
tensor._set_dims(var.shape)
tensor.set(np_value_to_fluid_value(var), place)
elif isinstance(var, float):
scope.find_var(var_name).set_float(var)
elif isinstance(var, int):
scope.find_var(var_name).set_int(var)
for in_name, in_dup in Operator.get_op_inputs(op.type()):
if in_name in inputs:
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, sub_in_val = item[0], item[1]
__set_input__(sub_in_name, sub_in_val)
else:
__set_input__(in_name, inputs[in_name])
def append_input_output(block, op_proto, np_list, is_input, dtype):
'''Insert VarDesc and generate Python variable instance'''
proto_list = op_proto.inputs if is_input else op_proto.outputs
def create_var(block, name, np_list, var_proto):
dtype = None
shape = None
lod_level = None
if name not in np_list:
assert var_proto.intermediate, "{} not found".format(name)
else:
# infer the dtype from the numpy value.
np_value = np_list[name]
if isinstance(np_value, tuple):
dtype = np_value[0].dtype
# output shape and lod should be inferred from the input.
if is_input:
shape = list(np_value[0].shape)
lod_level = len(np_value[1])
else:
dtype = np_value.dtype
if is_input:
shape = list(np_value.shape)
lod_level = 0
# NOTE(dzhwinter): type hacking.
# numpy float16 is bound to paddle::platform::float16 in tensor_py.h with the
# help of the uint16 datatype, because the internal memory representation of
# float16 in paddle is actually uint16_t. So we use np.uint16 in numpy for the
# raw memory, and it can pass through pybind. In the test case we therefore
# feed data as data.view(uint16): the view does not cast the data, it simply
# reinterprets the same bytes as uint16, while the dtype is in fact float16.
if dtype == np.uint16:
dtype = np.float16
return block.create_var(
dtype=dtype, shape=shape, lod_level=lod_level, name=name)
var_dict = {}
for var_proto in proto_list:
var_name = str(var_proto.name)
if is_input:
if (var_name not in np_list) and var_proto.dispensable:
continue
assert (var_name in np_list) or (var_proto.dispensable), \
"Missing {} as input".format(var_name)
if var_proto.duplicable:
assert isinstance(np_list[var_name], list), \
"Duplicable {} should be set as list".format(var_name)
var_list = []
for (name, np_value) in np_list[var_name]:
var_list.append(
create_var(block, name, {name: np_value}, var_proto))
var_dict[var_name] = var_list
else:
var_dict[var_name] = create_var(block, var_name, np_list, var_proto)
return var_dict
def append_loss_ops(block, output_names):
mean_inputs = list(map(block.var, output_names))
if len(mean_inputs) == 1:
loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1])
op = block.append_op(
inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean')
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
else:
avg_sum = []
for cur_loss in mean_inputs:
cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1])
op = block.append_op(
inputs={"X": [cur_loss]},
outputs={"Out": [cur_avg_loss]},
type="mean")
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
avg_sum.append(cur_avg_loss)
loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1])
op_sum = block.append_op(
inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum')
op_sum.desc.infer_var_type(block.desc)
op_sum.desc.infer_shape(block.desc)
loss = block.create_var(dtype=loss_sum.dtype, shape=[1])
op_loss = block.append_op(
inputs={"X": loss_sum},
outputs={"Out": loss},
type='scale',
attrs={'scale': 1.0 / float(len(avg_sum))})
op_loss.desc.infer_var_type(block.desc)
op_loss.desc.infer_shape(block.desc)
return loss
|
__create_var__(out_name, out_name)
|
conditional_block
|
testsuite.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
def create_op(scope, op_type, inputs, outputs, attrs):
kwargs = dict()
op_maker = core.op_proto_and_checker_maker
op_role_attr_name = op_maker.kOpRoleAttrName()
if op_role_attr_name not in attrs:
attrs[op_role_attr_name] = int(op_maker.OpRole.Forward)
def __create_var__(name, var_name):
scope.var(var_name).get_tensor()
kwargs[name].append(var_name)
for in_name, in_dup in Operator.get_op_inputs(op_type):
if in_name in inputs:
kwargs[in_name] = []
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, _ = item[0], item[1]
__create_var__(in_name, sub_in_name)
else:
__create_var__(in_name, in_name)
for out_name, out_dup in Operator.get_op_outputs(op_type):
if out_name in outputs:
kwargs[out_name] = []
if out_dup:
sub_out = outputs[out_name]
for item in sub_out:
sub_out_name, _ = item[0], item[1]
__create_var__(out_name, sub_out_name)
else:
__create_var__(out_name, out_name)
for attr_name in Operator.get_op_attr_names(op_type):
if attr_name in attrs:
kwargs[attr_name] = attrs[attr_name]
return Operator(op_type, **kwargs)
def set_input(scope, op, inputs, place):
def np_value_to_fluid_value(input):
if input.dtype == np.float16:
input = input.view(np.uint16)
return input
def __set_input__(var_name, var):
if isinstance(var, tuple) or isinstance(var, np.ndarray):
tensor = scope.find_var(var_name).get_tensor()
if isinstance(var, tuple):
tensor.set_recursive_sequence_lengths(var[1])
var = var[0]
tensor._set_dims(var.shape)
tensor.set(np_value_to_fluid_value(var), place)
elif isinstance(var, float):
scope.find_var(var_name).set_float(var)
elif isinstance(var, int):
scope.find_var(var_name).set_int(var)
for in_name, in_dup in Operator.get_op_inputs(op.type()):
if in_name in inputs:
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, sub_in_val = item[0], item[1]
__set_input__(sub_in_name, sub_in_val)
else:
__set_input__(in_name, inputs[in_name])
def append_input_output(block, op_proto, np_list, is_input, dtype):
'''Insert VarDesc and generate Python variable instance'''
proto_list = op_proto.inputs if is_input else op_proto.outputs
def create_var(block, name, np_list, var_proto):
dtype = None
shape = None
lod_level = None
if name not in np_list:
assert var_proto.intermediate, "{} not found".format(name)
else:
# infer the dtype from the numpy value.
np_value = np_list[name]
if isinstance(np_value, tuple):
dtype = np_value[0].dtype
# output shape and lod should be inferred from the input.
if is_input:
shape = list(np_value[0].shape)
lod_level = len(np_value[1])
else:
dtype = np_value.dtype
if is_input:
shape = list(np_value.shape)
lod_level = 0
# NOTE(dzhwinter): type hacking.
# numpy float16 is bound to paddle::platform::float16 in tensor_py.h with the
# help of the uint16 datatype, because the internal memory representation of
# float16 in paddle is actually uint16_t. So we use np.uint16 in numpy for the
# raw memory, and it can pass through pybind. In the test case we therefore
# feed data as data.view(uint16): the view does not cast the data, it simply
# reinterprets the same bytes as uint16, while the dtype is in fact float16.
if dtype == np.uint16:
dtype = np.float16
return block.create_var(
dtype=dtype, shape=shape, lod_level=lod_level, name=name)
var_dict = {}
for var_proto in proto_list:
var_name = str(var_proto.name)
if is_input:
if (var_name not in np_list) and var_proto.dispensable:
continue
assert (var_name in np_list) or (var_proto.dispensable), \
"Missing {} as input".format(var_name)
if var_proto.duplicable:
assert isinstance(np_list[var_name], list), \
"Duplicable {} should be set as list".format(var_name)
var_list = []
for (name, np_value) in np_list[var_name]:
var_list.append(
create_var(block, name, {name: np_value}, var_proto))
var_dict[var_name] = var_list
else:
var_dict[var_name] = create_var(block, var_name, np_list, var_proto)
return var_dict
def
|
(block, output_names):
mean_inputs = list(map(block.var, output_names))
if len(mean_inputs) == 1:
loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1])
op = block.append_op(
inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean')
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
else:
avg_sum = []
for cur_loss in mean_inputs:
cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1])
op = block.append_op(
inputs={"X": [cur_loss]},
outputs={"Out": [cur_avg_loss]},
type="mean")
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
avg_sum.append(cur_avg_loss)
loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1])
op_sum = block.append_op(
inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum')
op_sum.desc.infer_var_type(block.desc)
op_sum.desc.infer_shape(block.desc)
loss = block.create_var(dtype=loss_sum.dtype, shape=[1])
op_loss = block.append_op(
inputs={"X": loss_sum},
outputs={"Out": loss},
type='scale',
attrs={'scale': 1.0 / float(len(avg_sum))})
op_loss.desc.infer_var_type(block.desc)
op_loss.desc.infer_shape(block.desc)
return loss
|
append_loss_ops
|
identifier_name
|
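The NOTE(dzhwinter) comment inside create_var above turns on numpy's view() semantics: float16 and uint16 are both two bytes wide, so viewing one as the other reinterprets the same buffer without converting any values. A minimal, self-contained sketch of that behaviour using plain numpy only (independent of Paddle; the variable names are illustrative):

import numpy as np

# A float16 array and its raw-bits view share the same underlying buffer.
x = np.array([1.0, 2.5, -3.0], dtype=np.float16)
raw = x.view(np.uint16)                          # reinterpret, do not convert

assert raw.dtype == np.uint16
assert raw.tobytes() == x.tobytes()              # identical raw bytes
assert np.array_equal(raw.view(np.float16), x)   # round-trips losslessly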
testsuite.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
def create_op(scope, op_type, inputs, outputs, attrs):
kwargs = dict()
op_maker = core.op_proto_and_checker_maker
op_role_attr_name = op_maker.kOpRoleAttrName()
if op_role_attr_name not in attrs:
attrs[op_role_attr_name] = int(op_maker.OpRole.Forward)
def __create_var__(name, var_name):
|
for in_name, in_dup in Operator.get_op_inputs(op_type):
if in_name in inputs:
kwargs[in_name] = []
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, _ = item[0], item[1]
__create_var__(in_name, sub_in_name)
else:
__create_var__(in_name, in_name)
for out_name, out_dup in Operator.get_op_outputs(op_type):
if out_name in outputs:
kwargs[out_name] = []
if out_dup:
sub_out = outputs[out_name]
for item in sub_out:
sub_out_name, _ = item[0], item[1]
__create_var__(out_name, sub_out_name)
else:
__create_var__(out_name, out_name)
for attr_name in Operator.get_op_attr_names(op_type):
if attr_name in attrs:
kwargs[attr_name] = attrs[attr_name]
return Operator(op_type, **kwargs)
def set_input(scope, op, inputs, place):
def np_value_to_fluid_value(input):
if input.dtype == np.float16:
input = input.view(np.uint16)
return input
def __set_input__(var_name, var):
if isinstance(var, tuple) or isinstance(var, np.ndarray):
tensor = scope.find_var(var_name).get_tensor()
if isinstance(var, tuple):
tensor.set_recursive_sequence_lengths(var[1])
var = var[0]
tensor._set_dims(var.shape)
tensor.set(np_value_to_fluid_value(var), place)
elif isinstance(var, float):
scope.find_var(var_name).set_float(var)
elif isinstance(var, int):
scope.find_var(var_name).set_int(var)
for in_name, in_dup in Operator.get_op_inputs(op.type()):
if in_name in inputs:
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, sub_in_val = item[0], item[1]
__set_input__(sub_in_name, sub_in_val)
else:
__set_input__(in_name, inputs[in_name])
def append_input_output(block, op_proto, np_list, is_input, dtype):
'''Insert VarDesc and generate Python variable instance'''
proto_list = op_proto.inputs if is_input else op_proto.outputs
def create_var(block, name, np_list, var_proto):
dtype = None
shape = None
lod_level = None
if name not in np_list:
assert var_proto.intermediate, "{} not found".format(name)
else:
            # infer the dtype from the numpy value.
np_value = np_list[name]
if isinstance(np_value, tuple):
dtype = np_value[0].dtype
                # output shape and lod should be inferred from the input.
if is_input:
shape = list(np_value[0].shape)
lod_level = len(np_value[1])
else:
dtype = np_value.dtype
if is_input:
shape = list(np_value.shape)
lod_level = 0
            # NOTE(dzhwinter): type hacking
            # numpy float16 is bound to paddle::platform::float16 in
            # tensor_py.h with the help of the uint16 datatype, because the
            # internal memory representation of float16 in paddle is actually
            # uint16_t. We therefore use np.uint16 for the raw memory so that
            # it can pass through pybind. In the test case we feed data as
            # data.view(np.uint16), but the dtype is in fact float16:
            # view(np.uint16) does not cast the data, it reinterprets the
            # same bytes as uint16.
if dtype == np.uint16:
dtype = np.float16
return block.create_var(
dtype=dtype, shape=shape, lod_level=lod_level, name=name)
var_dict = {}
for var_proto in proto_list:
var_name = str(var_proto.name)
if is_input:
if (var_name not in np_list) and var_proto.dispensable:
continue
assert (var_name in np_list) or (var_proto.dispensable), \
"Missing {} as input".format(var_name)
if var_proto.duplicable:
assert isinstance(np_list[var_name], list), \
"Duplicable {} should be set as list".format(var_name)
var_list = []
for (name, np_value) in np_list[var_name]:
var_list.append(
create_var(block, name, {name: np_value}, var_proto))
var_dict[var_name] = var_list
else:
var_dict[var_name] = create_var(block, var_name, np_list, var_proto)
return var_dict
def append_loss_ops(block, output_names):
mean_inputs = list(map(block.var, output_names))
if len(mean_inputs) == 1:
loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1])
op = block.append_op(
inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean')
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
else:
avg_sum = []
for cur_loss in mean_inputs:
cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1])
op = block.append_op(
inputs={"X": [cur_loss]},
outputs={"Out": [cur_avg_loss]},
type="mean")
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
avg_sum.append(cur_avg_loss)
loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1])
op_sum = block.append_op(
inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum')
op_sum.desc.infer_var_type(block.desc)
op_sum.desc.infer_shape(block.desc)
loss = block.create_var(dtype=loss_sum.dtype, shape=[1])
op_loss = block.append_op(
inputs={"X": loss_sum},
outputs={"Out": loss},
type='scale',
attrs={'scale': 1.0 / float(len(avg_sum))})
op_loss.desc.infer_var_type(block.desc)
op_loss.desc.infer_shape(block.desc)
return loss
|
scope.var(var_name).get_tensor()
kwargs[name].append(var_name)
|
identifier_body
|
testsuite.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
def create_op(scope, op_type, inputs, outputs, attrs):
kwargs = dict()
op_maker = core.op_proto_and_checker_maker
op_role_attr_name = op_maker.kOpRoleAttrName()
if op_role_attr_name not in attrs:
attrs[op_role_attr_name] = int(op_maker.OpRole.Forward)
def __create_var__(name, var_name):
scope.var(var_name).get_tensor()
kwargs[name].append(var_name)
for in_name, in_dup in Operator.get_op_inputs(op_type):
if in_name in inputs:
kwargs[in_name] = []
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, _ = item[0], item[1]
__create_var__(in_name, sub_in_name)
else:
__create_var__(in_name, in_name)
for out_name, out_dup in Operator.get_op_outputs(op_type):
if out_name in outputs:
kwargs[out_name] = []
if out_dup:
sub_out = outputs[out_name]
for item in sub_out:
sub_out_name, _ = item[0], item[1]
__create_var__(out_name, sub_out_name)
else:
__create_var__(out_name, out_name)
for attr_name in Operator.get_op_attr_names(op_type):
if attr_name in attrs:
kwargs[attr_name] = attrs[attr_name]
return Operator(op_type, **kwargs)
def set_input(scope, op, inputs, place):
def np_value_to_fluid_value(input):
if input.dtype == np.float16:
input = input.view(np.uint16)
return input
def __set_input__(var_name, var):
if isinstance(var, tuple) or isinstance(var, np.ndarray):
tensor = scope.find_var(var_name).get_tensor()
if isinstance(var, tuple):
tensor.set_recursive_sequence_lengths(var[1])
var = var[0]
tensor._set_dims(var.shape)
tensor.set(np_value_to_fluid_value(var), place)
elif isinstance(var, float):
scope.find_var(var_name).set_float(var)
elif isinstance(var, int):
scope.find_var(var_name).set_int(var)
for in_name, in_dup in Operator.get_op_inputs(op.type()):
if in_name in inputs:
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, sub_in_val = item[0], item[1]
__set_input__(sub_in_name, sub_in_val)
|
'''Insert VarDesc and generate Python variable instance'''
proto_list = op_proto.inputs if is_input else op_proto.outputs
def create_var(block, name, np_list, var_proto):
dtype = None
shape = None
lod_level = None
if name not in np_list:
assert var_proto.intermediate, "{} not found".format(name)
else:
            # infer the dtype from the numpy value.
np_value = np_list[name]
if isinstance(np_value, tuple):
dtype = np_value[0].dtype
                # output shape and lod should be inferred from the input.
if is_input:
shape = list(np_value[0].shape)
lod_level = len(np_value[1])
else:
dtype = np_value.dtype
if is_input:
shape = list(np_value.shape)
lod_level = 0
            # NOTE(dzhwinter): type hacking
            # numpy float16 is bound to paddle::platform::float16 in
            # tensor_py.h with the help of the uint16 datatype, because the
            # internal memory representation of float16 in paddle is actually
            # uint16_t. We therefore use np.uint16 for the raw memory so that
            # it can pass through pybind. In the test case we feed data as
            # data.view(np.uint16), but the dtype is in fact float16:
            # view(np.uint16) does not cast the data, it reinterprets the
            # same bytes as uint16.
if dtype == np.uint16:
dtype = np.float16
return block.create_var(
dtype=dtype, shape=shape, lod_level=lod_level, name=name)
var_dict = {}
for var_proto in proto_list:
var_name = str(var_proto.name)
if is_input:
if (var_name not in np_list) and var_proto.dispensable:
continue
assert (var_name in np_list) or (var_proto.dispensable), \
"Missing {} as input".format(var_name)
if var_proto.duplicable:
assert isinstance(np_list[var_name], list), \
"Duplicable {} should be set as list".format(var_name)
var_list = []
for (name, np_value) in np_list[var_name]:
var_list.append(
create_var(block, name, {name: np_value}, var_proto))
var_dict[var_name] = var_list
else:
var_dict[var_name] = create_var(block, var_name, np_list, var_proto)
return var_dict
def append_loss_ops(block, output_names):
mean_inputs = list(map(block.var, output_names))
if len(mean_inputs) == 1:
loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1])
op = block.append_op(
inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean')
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
else:
avg_sum = []
for cur_loss in mean_inputs:
cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1])
op = block.append_op(
inputs={"X": [cur_loss]},
outputs={"Out": [cur_avg_loss]},
type="mean")
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
avg_sum.append(cur_avg_loss)
loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1])
op_sum = block.append_op(
inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum')
op_sum.desc.infer_var_type(block.desc)
op_sum.desc.infer_shape(block.desc)
loss = block.create_var(dtype=loss_sum.dtype, shape=[1])
op_loss = block.append_op(
inputs={"X": loss_sum},
outputs={"Out": loss},
type='scale',
attrs={'scale': 1.0 / float(len(avg_sum))})
op_loss.desc.infer_var_type(block.desc)
op_loss.desc.infer_shape(block.desc)
return loss
|
else:
__set_input__(in_name, inputs[in_name])
def append_input_output(block, op_proto, np_list, is_input, dtype):
|
random_line_split
|
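As a reading aid for append_loss_ops above: each listed output is reduced to a scalar with a mean op, the scalars are summed, and the sum is scaled by 1.0 / len(avg_sum), so the final loss is the mean of the per-output means. A plain-numpy sketch of the same arithmetic (illustrative only; scalar_loss is not part of the test suite):

import numpy as np

def scalar_loss(outputs):
    # Mean of each output, then the mean of those means.
    per_output_means = [np.asarray(o, dtype=np.float64).mean() for o in outputs]
    return float(np.sum(per_output_means)) / len(per_output_means)

# Two "outputs" of different shapes collapse to a single scalar loss.
print(scalar_loss([np.ones((2, 3)), np.arange(4, dtype=np.float64)]))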
progress.rs
|
//! A simple progress meter.
//!
//! Records updates of number of files visited, and number of bytes
//! processed. When given an estimate, prints a simple periodic report of
//! how far along we think we are.
use env_logger::Builder;
use lazy_static::lazy_static;
use log::Log;
use std::{
io::{stdout, Write},
sync::Mutex,
};
use time::{Duration, OffsetDateTime};
// The Rust logging system (log crate) only allows a single logger to be
// installed, and only once. If we want to capture this, it has to be done before any
// logger is initialized. Globally, within a mutex, we keep this simple
// state of what is happening.
struct State {
    // The last message printed. Since an empty string and no message are
// the same thing, we don't worry about having an option here.
message: String,
// When we next expect to update the message.
next_update: OffsetDateTime,
// Set to true if the logging system has been initialized.
is_logging: bool,
}
// The SafeLogger wraps another logger, coordinating the logging with the
// state to properly interleave logs and messages.
struct SafeLogger {
inner: Box<dyn Log>,
}
/// Initialize the standard logger, based on `env_logger::init()`, but
/// coordinated with any progress meters. Like `init`, this will panic if
/// the logging system has already been initialized.
pub fn log_init() {
let mut st = STATE.lock().unwrap();
let inner = Builder::from_default_env().build();
let max_level = inner.filter();
let logger = SafeLogger {
inner: Box::new(inner),
};
log::set_boxed_logger(Box::new(logger)).expect("Set Logger");
log::set_max_level(max_level);
st.is_logging = true;
st.next_update = update_interval(true);
}
// There are two update intervals, depending on whether we are logging.
fn update_interval(is_logging: bool) -> OffsetDateTime {
if is_logging {
OffsetDateTime::now_utc() + Duration::milliseconds(250)
} else {
OffsetDateTime::now_utc() + Duration::seconds(5)
}
}
lazy_static! {
// The current global state.
static ref STATE: Mutex<State> = Mutex::new(State {
message: String::new(),
next_update: update_interval(false),
is_logging: false,
});
}
impl State {
/// Called to advance to the next message, sets the update time
/// appropriately.
fn next(&mut self) {
self.next_update = update_interval(self.is_logging);
}
/// Clears the visual text of the current message (but not the message
/// buffer itself, so that it can be redisplayed if needed).
fn clear(&self) {
for ch in self.message.chars() {
if ch == '\n' {
print!("\x1b[1A\x1b[2K");
}
}
stdout().flush().expect("safe stdout write");
}
/// Update the current message.
fn update(&mut self, message: String) {
self.clear();
self.message = message;
print!("{}", self.message);
stdout().flush().expect("safe stdout write");
self.next();
}
/// Indicates if the time has expired and another update should be
/// done. This can be used where the formatting/allocation of the
/// update message would be slower than the possible system call needed
/// to determine the current time.
fn need_update(&self) -> bool {
OffsetDateTime::now_utc() >= self.next_update
}
}
impl Log for SafeLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
self.inner.enabled(metadata)
}
fn log(&self, record: &log::Record) {
let enabled = self.inner.enabled(record.metadata());
if enabled {
let st = STATE.lock().unwrap();
st.clear();
self.inner.log(record);
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
fn flush(&self) {
let st = STATE.lock().unwrap();
st.clear();
self.inner.flush();
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
pub struct Progress {
cur_files: u64,
total_files: u64,
cur_bytes: u64,
total_bytes: u64,
}
impl Progress {
/// Construct a progress meter, with the given number of files and
/// bytes as an estimate.
pub fn new(files: u64, bytes: u64) -> Progress {
Progress {
cur_files: 0,
total_files: files,
cur_bytes: 0,
total_bytes: bytes,
}
}
/// Update the progress meter.
pub fn update(&mut self, files: u64, bytes: u64) {
self.cur_files += files;
self.cur_bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
    /// Flush the output, regardless of whether an update is needed.
pub fn flush(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
// Clear the current message so that we don't clear out the shown
// message.
st.message.clear();
}
pub fn message(&self) -> String {
format!(
"{:7}/{:7} ({:5.1}%) files, {}/{} ({:5.1}%) bytes\n",
self.cur_files,
self.total_files,
(self.cur_files as f64 * 100.0) / self.total_files as f64,
humanize(self.cur_bytes),
humanize(self.total_bytes),
(self.cur_bytes as f64 * 100.0) / self.total_bytes as f64
)
}
}
/// A progress meter used when initially scanning.
pub struct ScanProgress {
dirs: u64,
files: u64,
bytes: u64,
}
impl ScanProgress {
/// Construct a new scanning progress meter.
pub fn new() -> ScanProgress {
ScanProgress {
dirs: 0,
files: 0,
bytes: 0,
}
}
/// Update the meter.
pub fn update(&mut self, dirs: u64, files: u64, bytes: u64) {
self.dirs += dirs;
self.files += files;
self.bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
fn message(&self) -> String {
format!(
"scan: {} dirs {} files, {} bytes\n",
self.dirs,
self.files,
humanize(self.bytes)
)
}
}
impl Drop for ScanProgress {
fn drop(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
st.message.clear();
}
}
/// Print a size in a more human-friendly format.
pub fn humanize(value: u64) -> String
|
{
let mut value = value as f64;
let mut unit = 0;
while value > 1024.0 {
value /= 1024.0;
unit += 1;
}
static UNITS: [&str; 9] = [
"B ", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
];
let precision = if value < 10.0 {
3
} else if value < 100.0 {
2
} else {
1
};
format!("{:6.*}{}", precision, value, UNITS[unit])
}
|
identifier_body
|
|
progress.rs
|
//! A simple progress meter.
//!
//! Records updates of number of files visited, and number of bytes
//! processed. When given an estimate, prints a simple periodic report of
//! how far along we think we are.
use env_logger::Builder;
use lazy_static::lazy_static;
use log::Log;
use std::{
io::{stdout, Write},
sync::Mutex,
};
use time::{Duration, OffsetDateTime};
// The Rust logging system (log crate) only allows a single logger to be
// installed, and only once. If we want to capture this, it has to be done before any
// logger is initialized. Globally, within a mutex, we keep this simple
// state of what is happening.
struct State {
    // The last message printed. Since an empty string and no message are
// the same thing, we don't worry about having an option here.
message: String,
// When we next expect to update the message.
next_update: OffsetDateTime,
// Set to true if the logging system has been initialized.
is_logging: bool,
}
// The SafeLogger wraps another logger, coordinating the logging with the
// state to properly interleave logs and messages.
struct SafeLogger {
inner: Box<dyn Log>,
}
/// Initialize the standard logger, based on `env_logger::init()`, but
/// coordinated with any progress meters. Like `init`, this will panic if
/// the logging system has already been initialized.
pub fn log_init() {
let mut st = STATE.lock().unwrap();
let inner = Builder::from_default_env().build();
let max_level = inner.filter();
let logger = SafeLogger {
inner: Box::new(inner),
};
log::set_boxed_logger(Box::new(logger)).expect("Set Logger");
log::set_max_level(max_level);
st.is_logging = true;
st.next_update = update_interval(true);
}
// There are two update intervals, depending on whether we are logging.
fn update_interval(is_logging: bool) -> OffsetDateTime {
if is_logging {
OffsetDateTime::now_utc() + Duration::milliseconds(250)
} else {
OffsetDateTime::now_utc() + Duration::seconds(5)
}
}
lazy_static! {
// The current global state.
static ref STATE: Mutex<State> = Mutex::new(State {
|
next_update: update_interval(false),
is_logging: false,
});
}
impl State {
/// Called to advance to the next message, sets the update time
/// appropriately.
fn next(&mut self) {
self.next_update = update_interval(self.is_logging);
}
/// Clears the visual text of the current message (but not the message
/// buffer itself, so that it can be redisplayed if needed).
fn clear(&self) {
for ch in self.message.chars() {
if ch == '\n' {
print!("\x1b[1A\x1b[2K");
}
}
stdout().flush().expect("safe stdout write");
}
/// Update the current message.
fn update(&mut self, message: String) {
self.clear();
self.message = message;
print!("{}", self.message);
stdout().flush().expect("safe stdout write");
self.next();
}
/// Indicates if the time has expired and another update should be
/// done. This can be used where the formatting/allocation of the
/// update message would be slower than the possible system call needed
/// to determine the current time.
fn need_update(&self) -> bool {
OffsetDateTime::now_utc() >= self.next_update
}
}
impl Log for SafeLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
self.inner.enabled(metadata)
}
fn log(&self, record: &log::Record) {
let enabled = self.inner.enabled(record.metadata());
if enabled {
let st = STATE.lock().unwrap();
st.clear();
self.inner.log(record);
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
fn flush(&self) {
let st = STATE.lock().unwrap();
st.clear();
self.inner.flush();
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
pub struct Progress {
cur_files: u64,
total_files: u64,
cur_bytes: u64,
total_bytes: u64,
}
impl Progress {
/// Construct a progress meter, with the given number of files and
/// bytes as an estimate.
pub fn new(files: u64, bytes: u64) -> Progress {
Progress {
cur_files: 0,
total_files: files,
cur_bytes: 0,
total_bytes: bytes,
}
}
/// Update the progress meter.
pub fn update(&mut self, files: u64, bytes: u64) {
self.cur_files += files;
self.cur_bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
    /// Flush the output, regardless of whether an update is needed.
pub fn flush(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
// Clear the current message so that we don't clear out the shown
// message.
st.message.clear();
}
pub fn message(&self) -> String {
format!(
"{:7}/{:7} ({:5.1}%) files, {}/{} ({:5.1}%) bytes\n",
self.cur_files,
self.total_files,
(self.cur_files as f64 * 100.0) / self.total_files as f64,
humanize(self.cur_bytes),
humanize(self.total_bytes),
(self.cur_bytes as f64 * 100.0) / self.total_bytes as f64
)
}
}
/// A progress meter used when initially scanning.
pub struct ScanProgress {
dirs: u64,
files: u64,
bytes: u64,
}
impl ScanProgress {
/// Construct a new scanning progress meter.
pub fn new() -> ScanProgress {
ScanProgress {
dirs: 0,
files: 0,
bytes: 0,
}
}
/// Update the meter.
pub fn update(&mut self, dirs: u64, files: u64, bytes: u64) {
self.dirs += dirs;
self.files += files;
self.bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
fn message(&self) -> String {
format!(
"scan: {} dirs {} files, {} bytes\n",
self.dirs,
self.files,
humanize(self.bytes)
)
}
}
impl Drop for ScanProgress {
fn drop(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
st.message.clear();
}
}
/// Print a size in a more human-friendly format.
pub fn humanize(value: u64) -> String {
let mut value = value as f64;
let mut unit = 0;
while value > 1024.0 {
value /= 1024.0;
unit += 1;
}
static UNITS: [&str; 9] = [
"B ", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
];
let precision = if value < 10.0 {
3
} else if value < 100.0 {
2
} else {
1
};
format!("{:6.*}{}", precision, value, UNITS[unit])
}
|
message: String::new(),
|
random_line_split
|
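For reference, humanize above divides the byte count by 1024 until it falls under the next unit, then picks a display precision from the magnitude. A Python transcription of the same logic (a sketch for illustration; it is not part of the Rust crate):

UNITS = ["B  ", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"]

def humanize(value: int) -> str:
    v = float(value)
    unit = 0
    while v > 1024.0:
        v /= 1024.0
        unit += 1
    precision = 3 if v < 10.0 else 2 if v < 100.0 else 1
    return f"{v:6.{precision}f}{UNITS[unit]}"

print(humanize(1536))   # ' 1.500KiB'
print(humanize(532))    # ' 532.0B  '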
progress.rs
|
//! A simple progress meter.
//!
//! Records updates of number of files visited, and number of bytes
//! processed. When given an estimate, prints a simple periodic report of
//! how far along we think we are.
use env_logger::Builder;
use lazy_static::lazy_static;
use log::Log;
use std::{
io::{stdout, Write},
sync::Mutex,
};
use time::{Duration, OffsetDateTime};
// The Rust logging system (log crate) only allows a single logger to be
// installed, and only once. If we want to capture this, it has to be done before any
// logger is initialized. Globally, within a mutex, we keep this simple
// state of what is happening.
struct State {
    // The last message printed. Since an empty string and no message are
// the same thing, we don't worry about having an option here.
message: String,
// When we next expect to update the message.
next_update: OffsetDateTime,
// Set to true if the logging system has been initialized.
is_logging: bool,
}
// The SafeLogger wraps another logger, coordinating the logging with the
// state to properly interleave logs and messages.
struct SafeLogger {
inner: Box<dyn Log>,
}
/// Initialize the standard logger, based on `env_logger::init()`, but
/// coordinated with any progress meters. Like `init`, this will panic if
/// the logging system has already been initialized.
pub fn log_init() {
let mut st = STATE.lock().unwrap();
let inner = Builder::from_default_env().build();
let max_level = inner.filter();
let logger = SafeLogger {
inner: Box::new(inner),
};
log::set_boxed_logger(Box::new(logger)).expect("Set Logger");
log::set_max_level(max_level);
st.is_logging = true;
st.next_update = update_interval(true);
}
// There are two update intervals, depending on whether we are logging.
fn update_interval(is_logging: bool) -> OffsetDateTime {
if is_logging {
OffsetDateTime::now_utc() + Duration::milliseconds(250)
} else {
OffsetDateTime::now_utc() + Duration::seconds(5)
}
}
lazy_static! {
// The current global state.
static ref STATE: Mutex<State> = Mutex::new(State {
message: String::new(),
next_update: update_interval(false),
is_logging: false,
});
}
impl State {
/// Called to advance to the next message, sets the update time
/// appropriately.
fn
|
(&mut self) {
self.next_update = update_interval(self.is_logging);
}
/// Clears the visual text of the current message (but not the message
/// buffer itself, so that it can be redisplayed if needed).
fn clear(&self) {
for ch in self.message.chars() {
if ch == '\n' {
print!("\x1b[1A\x1b[2K");
}
}
stdout().flush().expect("safe stdout write");
}
/// Update the current message.
fn update(&mut self, message: String) {
self.clear();
self.message = message;
print!("{}", self.message);
stdout().flush().expect("safe stdout write");
self.next();
}
/// Indicates if the time has expired and another update should be
/// done. This can be used where the formatting/allocation of the
/// update message would be slower than the possible system call needed
/// to determine the current time.
fn need_update(&self) -> bool {
OffsetDateTime::now_utc() >= self.next_update
}
}
impl Log for SafeLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
self.inner.enabled(metadata)
}
fn log(&self, record: &log::Record) {
let enabled = self.inner.enabled(record.metadata());
if enabled {
let st = STATE.lock().unwrap();
st.clear();
self.inner.log(record);
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
fn flush(&self) {
let st = STATE.lock().unwrap();
st.clear();
self.inner.flush();
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
pub struct Progress {
cur_files: u64,
total_files: u64,
cur_bytes: u64,
total_bytes: u64,
}
impl Progress {
/// Construct a progress meter, with the given number of files and
/// bytes as an estimate.
pub fn new(files: u64, bytes: u64) -> Progress {
Progress {
cur_files: 0,
total_files: files,
cur_bytes: 0,
total_bytes: bytes,
}
}
/// Update the progress meter.
pub fn update(&mut self, files: u64, bytes: u64) {
self.cur_files += files;
self.cur_bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
    /// Flush the output, regardless of whether an update is needed.
pub fn flush(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
// Clear the current message so that we don't clear out the shown
// message.
st.message.clear();
}
pub fn message(&self) -> String {
format!(
"{:7}/{:7} ({:5.1}%) files, {}/{} ({:5.1}%) bytes\n",
self.cur_files,
self.total_files,
(self.cur_files as f64 * 100.0) / self.total_files as f64,
humanize(self.cur_bytes),
humanize(self.total_bytes),
(self.cur_bytes as f64 * 100.0) / self.total_bytes as f64
)
}
}
/// A progress meter used when initially scanning.
pub struct ScanProgress {
dirs: u64,
files: u64,
bytes: u64,
}
impl ScanProgress {
/// Construct a new scanning progress meter.
pub fn new() -> ScanProgress {
ScanProgress {
dirs: 0,
files: 0,
bytes: 0,
}
}
/// Update the meter.
pub fn update(&mut self, dirs: u64, files: u64, bytes: u64) {
self.dirs += dirs;
self.files += files;
self.bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
fn message(&self) -> String {
format!(
"scan: {} dirs {} files, {} bytes\n",
self.dirs,
self.files,
humanize(self.bytes)
)
}
}
impl Drop for ScanProgress {
fn drop(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
st.message.clear();
}
}
/// Print a size in a more human-friendly format.
pub fn humanize(value: u64) -> String {
let mut value = value as f64;
let mut unit = 0;
while value > 1024.0 {
value /= 1024.0;
unit += 1;
}
static UNITS: [&str; 9] = [
"B ", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
];
let precision = if value < 10.0 {
3
} else if value < 100.0 {
2
} else {
1
};
format!("{:6.*}{}", precision, value, UNITS[unit])
}
|
next
|
identifier_name
|
progress.rs
|
//! A simple progress meter.
//!
//! Records updates of number of files visited, and number of bytes
//! processed. When given an estimate, prints a simple periodic report of
//! how far along we think we are.
use env_logger::Builder;
use lazy_static::lazy_static;
use log::Log;
use std::{
io::{stdout, Write},
sync::Mutex,
};
use time::{Duration, OffsetDateTime};
// The Rust logging system (log crate) only allows a single logger to be
// installed, and only once. If we want to capture this, it has to be done before any
// logger is initialized. Globally, within a mutex, we keep this simple
// state of what is happening.
struct State {
    // The last message printed. Since an empty string and no message are
// the same thing, we don't worry about having an option here.
message: String,
// When we next expect to update the message.
next_update: OffsetDateTime,
// Set to true if the logging system has been initialized.
is_logging: bool,
}
// The SafeLogger wraps another logger, coordinating the logging with the
// state to properly interleave logs and messages.
struct SafeLogger {
inner: Box<dyn Log>,
}
/// Initialize the standard logger, based on `env_logger::init()`, but
/// coordinated with any progress meters. Like `init`, this will panic if
/// the logging system has already been initialized.
pub fn log_init() {
let mut st = STATE.lock().unwrap();
let inner = Builder::from_default_env().build();
let max_level = inner.filter();
let logger = SafeLogger {
inner: Box::new(inner),
};
log::set_boxed_logger(Box::new(logger)).expect("Set Logger");
log::set_max_level(max_level);
st.is_logging = true;
st.next_update = update_interval(true);
}
// There are two update intervals, depending on whether we are logging.
fn update_interval(is_logging: bool) -> OffsetDateTime {
if is_logging {
OffsetDateTime::now_utc() + Duration::milliseconds(250)
} else
|
}
lazy_static! {
// The current global state.
static ref STATE: Mutex<State> = Mutex::new(State {
message: String::new(),
next_update: update_interval(false),
is_logging: false,
});
}
impl State {
/// Called to advance to the next message, sets the update time
/// appropriately.
fn next(&mut self) {
self.next_update = update_interval(self.is_logging);
}
/// Clears the visual text of the current message (but not the message
/// buffer itself, so that it can be redisplayed if needed).
fn clear(&self) {
for ch in self.message.chars() {
if ch == '\n' {
print!("\x1b[1A\x1b[2K");
}
}
stdout().flush().expect("safe stdout write");
}
/// Update the current message.
fn update(&mut self, message: String) {
self.clear();
self.message = message;
print!("{}", self.message);
stdout().flush().expect("safe stdout write");
self.next();
}
/// Indicates if the time has expired and another update should be
/// done. This can be used where the formatting/allocation of the
/// update message would be slower than the possible system call needed
/// to determine the current time.
fn need_update(&self) -> bool {
OffsetDateTime::now_utc() >= self.next_update
}
}
impl Log for SafeLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
self.inner.enabled(metadata)
}
fn log(&self, record: &log::Record) {
let enabled = self.inner.enabled(record.metadata());
if enabled {
let st = STATE.lock().unwrap();
st.clear();
self.inner.log(record);
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
fn flush(&self) {
let st = STATE.lock().unwrap();
st.clear();
self.inner.flush();
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
pub struct Progress {
cur_files: u64,
total_files: u64,
cur_bytes: u64,
total_bytes: u64,
}
impl Progress {
/// Construct a progress meter, with the given number of files and
/// bytes as an estimate.
pub fn new(files: u64, bytes: u64) -> Progress {
Progress {
cur_files: 0,
total_files: files,
cur_bytes: 0,
total_bytes: bytes,
}
}
/// Update the progress meter.
pub fn update(&mut self, files: u64, bytes: u64) {
self.cur_files += files;
self.cur_bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
    /// Flush the output, regardless of whether an update is needed.
pub fn flush(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
// Clear the current message so that we don't clear out the shown
// message.
st.message.clear();
}
pub fn message(&self) -> String {
format!(
"{:7}/{:7} ({:5.1}%) files, {}/{} ({:5.1}%) bytes\n",
self.cur_files,
self.total_files,
(self.cur_files as f64 * 100.0) / self.total_files as f64,
humanize(self.cur_bytes),
humanize(self.total_bytes),
(self.cur_bytes as f64 * 100.0) / self.total_bytes as f64
)
}
}
/// A progress meter used when initially scanning.
pub struct ScanProgress {
dirs: u64,
files: u64,
bytes: u64,
}
impl ScanProgress {
/// Construct a new scanning progress meter.
pub fn new() -> ScanProgress {
ScanProgress {
dirs: 0,
files: 0,
bytes: 0,
}
}
/// Update the meter.
pub fn update(&mut self, dirs: u64, files: u64, bytes: u64) {
self.dirs += dirs;
self.files += files;
self.bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
fn message(&self) -> String {
format!(
"scan: {} dirs {} files, {} bytes\n",
self.dirs,
self.files,
humanize(self.bytes)
)
}
}
impl Drop for ScanProgress {
fn drop(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
st.message.clear();
}
}
/// Print a size in a more human-friendly format.
pub fn humanize(value: u64) -> String {
let mut value = value as f64;
let mut unit = 0;
while value > 1024.0 {
value /= 1024.0;
unit += 1;
}
static UNITS: [&str; 9] = [
"B ", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
];
let precision = if value < 10.0 {
3
} else if value < 100.0 {
2
} else {
1
};
format!("{:6.*}{}", precision, value, UNITS[unit])
}
|
{
OffsetDateTime::now_utc() + Duration::seconds(5)
}
|
conditional_block
|
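Both Progress and ScanProgress above throttle their output with the same pattern: repaint only once the clock passes next_update, then push next_update forward by the configured interval (250 ms when logging is active, 5 s otherwise). A compact sketch of that pattern (hedged: illustrative Python, not the crate's API):

import time

class Throttle:
    """Allow an action at most once per `interval` seconds."""

    def __init__(self, interval: float):
        self.interval = interval
        self.next_update = time.monotonic()

    def ready(self) -> bool:
        return time.monotonic() >= self.next_update

    def mark(self) -> None:
        self.next_update = time.monotonic() + self.interval

throttle = Throttle(0.25)
for i in range(5):
    if throttle.ready():
        print(f"progress update {i}")   # only fires once the interval has elapsed
        throttle.mark()
    time.sleep(0.1)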
project.config.js
|
const NODE_ENV = process.env.NODE_ENV || 'development'
module.exports = {
/** The environment to use when building the project */
env: NODE_ENV,
/** The full path to the project's root directory */
basePath: __dirname,
/** The name of the directory containing the application source code */
srcDir: 'src',
/** The file name of the application's entry point */
main: 'main',
/** The name of the directory in which to emit compiled assets */
outDir: 'dist',
  /** The base path for all project assets (relative to the website root) */
publicPath: '/admin/',
/** The root path for web */
urlRoot: 'admin',
/** Whether to generate sourcemaps */
sourcemaps: true,
/** A hash map of keys that the compiler should treat as external to the project */
externals: {},
/** A hash map of variables and their values to expose globally */
globals: {},
/** Whether to enable verbose logging */
verbose: false,
/** The list of modules to bundle separately from the core application code */
vendors: [
'react',
|
'redux',
'react-redux',
'redux-thunk',
'react-router',
],
apiProxy: {
url: 'http://47.94.172.79'
}
}
|
'react-dom',
|
random_line_split
|
processor.ts
|
import path from "path";
import {
CoreConfig,
WorkingDirectoryInfo,
PluginCreateOptions,
KeyGeneratorPlugin,
PublisherPlugin,
NotifierPlugin,
NotifyParams,
PluginLogger,
ComparisonResult,
} from "reg-suit-interface";
import { EventEmitter } from "events";
const compare = require("reg-cli");
const rimraf = require("rimraf");
const cpx = require("cpx");
export interface ProcessorOptions {
keyGenerator?: KeyGeneratorPlugin<any>;
publisher?: PublisherPlugin<any>;
notifiers: NotifierPlugin<any>[];
userDirs: {
actualDir: string;
};
}
export interface StepResultAfterExpectedKey {
expectedKey: string | null;
}
export interface StepResultAfterComparison extends StepResultAfterExpectedKey {
comparisonResult: ComparisonResult;
}
export interface StepResultAfterActualKey extends StepResultAfterComparison {
actualKey: string;
}
export interface StepResultAfterPublish extends StepResultAfterActualKey {
reportUrl?: string;
}
export class RegProcessor {
private _logger: PluginLogger;
private _config: CoreConfig;
private _directoryInfo: {
workingDirs: WorkingDirectoryInfo;
userDirs: {
actualDir: string;
};
};
private _keyGenerator?: KeyGeneratorPlugin<any>;
private _publisher?: PublisherPlugin<any>;
private _notifiers: NotifierPlugin<any>[];
constructor(opt: PluginCreateOptions<ProcessorOptions>) {
this._logger = opt.logger;
this._config = opt.coreConfig;
this._directoryInfo = {
workingDirs: opt.workingDirs,
userDirs: opt.options.userDirs,
};
this._keyGenerator = opt.options.keyGenerator;
this._publisher = opt.options.publisher;
this._notifiers = opt.options.notifiers;
}
runAll() {
return this.getExpectedKey()
.then(ctx => this.syncExpected(ctx))
.then(ctx => this.compare(ctx))
.then(ctx => this.getActualKey(ctx))
.then(ctx => this.publish(ctx))
.then(ctx => this.notify(ctx));
}
getExpectedKey(): Promise<StepResultAfterExpectedKey> {
if (this._keyGenerator) {
return this._keyGenerator
.getExpectedKey()
.then(key => {
this._logger.info(`Detected the previous snapshot key: '${key}'`);
return { expectedKey: key };
})
.catch(reason => {
this._logger.warn("Failed to detect the previous snapshot key");
if (reason) this._logger.error(reason);
return Promise.resolve({ expectedKey: null });
});
} else {
this._logger.info("Skipped to detect the previous snapshot key because key generator plugin is not set up.");
return Promise.resolve({ expectedKey: null });
}
}
compare(ctx: StepResultAfterExpectedKey): Promise<StepResultAfterComparison> {
const { actualDir, expectedDir, diffDir } = this._directoryInfo.workingDirs;
const json = path.join(this._directoryInfo.workingDirs.base, "out.json");
const report = path.join(this._directoryInfo.workingDirs.base, "index.html");
const ximgdiffConf = this._config.ximgdiff || { invocationType: "cli" };
rimraf.sync(actualDir);
cpx.copySync(`${this._directoryInfo.userDirs.actualDir}/**/*.{png,jpg,jpeg,tiff,bmp,gif}`, actualDir);
const emitter = compare({
actualDir,
expectedDir,
diffDir,
json,
report,
update: false,
ignoreChange: true,
urlPrefix: "",
threshold: this._config.threshold,
thresholdPixel: this._config.thresholdPixel,
thresholdRate: this._config.thresholdRate,
matchingThreshold: this._config.matchingThreshold ?? 0, // matchingThreshold should not be undefined
enableAntialias: this._config.enableAntialias,
enableCliAdditionalDetection: ximgdiffConf.invocationType === "cli",
enableClientAdditionalDetection: ximgdiffConf.invocationType !== "none",
}) as EventEmitter;
emitter.on("compare", (compareItem: { type: string; path: string }) => {
this._logger.verbose(
`${this._logger.colors.red(compareItem.type)}: ${this._logger.colors.magenta(compareItem.path)}`,
);
});
const comparisonResult = new Promise<ComparisonResult>((resolve, reject) => {
emitter.once("complete", (result: ComparisonResult) => resolve(result));
emitter.once("error", (reason: any) => reject(reason));
});
return comparisonResult
.then(result => {
this._logger.info("Comparison Complete");
this._logger.info(this._logger.colors.red(" Changed items: " + result.failedItems.length));
this._logger.info(this._logger.colors.cyan(" New items: " + result.newItems.length));
this._logger.info(this._logger.colors.redBright(" Deleted items: " + result.deletedItems.length));
this._logger.info(this._logger.colors.green(" Passed items: " + result.passedItems.length));
this._logger.verbose("Comparison details:", result);
return { ...ctx, comparisonResult: result };
})
.catch(reason => {
        // re-throw because a comparison failure is fatal.
        this._logger.error("An error occurred while comparing images:");
if (reason) this._logger.error(reason);
return Promise.reject<StepResultAfterComparison>(reason);
});
}
getActualKey(ctx: StepResultAfterComparison): Promise<StepResultAfterActualKey> {
const fallbackFn = () => "snapshot_" + ~~(new Date().getTime() / 1000);
if (this._keyGenerator) {
return this._keyGenerator
.getActualKey()
.then(key => {
if (!key) {
this._logger.warn("Failed to generate the current snapshot key.");
return { ...ctx, actualKey: fallbackFn() };
}
this._logger.info(`The current snapshot key: '${key}'`);
return { ...ctx, actualKey: key };
})
.catch(reason => {
this._logger.warn("Failed to gerenate the current snapshot key.");
if (reason) this._logger.error(reason);
return Promise.resolve({ ...ctx, actualKey: fallbackFn() });
});
} else {
const fallbackKey = fallbackFn();
this._logger.info(`Use '${fallbackKey}' as the current snapshot key because key generator plugin is not set up.`);
return Promise.resolve({ ...ctx, actualKey: fallbackKey });
}
}
syncExpected(ctx: StepResultAfterExpectedKey): Promise<StepResultAfterExpectedKey>
|
publish(ctx: StepResultAfterActualKey): Promise<StepResultAfterPublish> {
if (this._publisher) {
return this._publisher
.publish(ctx.actualKey)
.then(result => {
this._logger.info(`Published snapshot '${ctx.actualKey}' successfully.`);
if (result.reportUrl) {
this._logger.info(`Report URL: ${result.reportUrl}`);
}
this._logger.verbose("Publish result:", result);
return { ...ctx, reportUrl: result.reportUrl };
})
.catch(reason => {
        // re-throw because a publishing failure is fatal.
        this._logger.error("An error occurred while publishing the snapshot:");
if (reason.code === "CredentialsError") {
this._logger.error("Failed to read AWS credentials.");
this._logger.error(
`Create ${this._logger.colors.magenta("~/.aws/credentials")} or export ${this._logger.colors.green(
"$AWS_ACCESS_KEY_ID",
)} and ${this._logger.colors.green("$AWS_SECRET_ACCESS_KEY")}.`,
);
}
if (reason) this._logger.error(reason);
return Promise.reject<StepResultAfterPublish>(reason);
});
} else {
this._logger.info("Skipped to publish the snapshot data because publisher plugin is not set up.");
return Promise.resolve(ctx);
}
}
notify(ctx: StepResultAfterPublish): Promise<StepResultAfterPublish> {
const notifyParams: NotifyParams = {
...ctx,
};
if (!this._notifiers.length) {
this._logger.info("Skipped to notify result because notifier plugins are not set up.");
}
this._logger.verbose("Notify parameters:", notifyParams);
return this._notifiers
.reduce((queue, notifier) => {
return queue
.then(() => notifier.notify(notifyParams))
.catch(reason => {
// Don't re-throw notifiers error because it's not fatal.
this._logger.error("An error occurs during notify:");
this._logger.error(reason);
return Promise.resolve();
});
}, Promise.resolve())
.then(() => ctx);
}
}
|
{
const keyForExpected = ctx.expectedKey;
if (this._publisher && keyForExpected) {
return this._publisher.fetch(keyForExpected);
} else if (!keyForExpected) {
this._logger.info("Skipped to fetch the expected data because expected key is null.");
return Promise.resolve(ctx);
} else if (!this._publisher) {
this._logger.info("Skipped to fetch the expected data because publisher plugin is not set up.");
return Promise.resolve(ctx);
} else {
return Promise.resolve(ctx);
}
}
|
identifier_body
|
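notify() above folds the configured notifiers into one sequential chain and converts every failure into a logged, already-resolved step, so a broken notifier can never abort the run. The same control flow as a minimal Python sketch (hedged: notify_all and the callables in the usage note are illustrative names, not reg-suit's API):

def notify_all(notifiers, params, log_error=print):
    # Run each notifier in order; log failures but never re-raise them.
    for notifier in notifiers:
        try:
            notifier(params)
        except Exception as reason:   # non-fatal by design
            log_error("An error occurred while notifying:", reason)
    return params

# Usage: notify_all([slack_notify, github_notify], {"reportUrl": "..."}),
# where slack_notify and github_notify stand in for configured notifier plugins.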
processor.ts
|
import path from "path";
import {
CoreConfig,
WorkingDirectoryInfo,
PluginCreateOptions,
KeyGeneratorPlugin,
PublisherPlugin,
NotifierPlugin,
NotifyParams,
PluginLogger,
ComparisonResult,
} from "reg-suit-interface";
import { EventEmitter } from "events";
const compare = require("reg-cli");
const rimraf = require("rimraf");
const cpx = require("cpx");
export interface ProcessorOptions {
keyGenerator?: KeyGeneratorPlugin<any>;
publisher?: PublisherPlugin<any>;
notifiers: NotifierPlugin<any>[];
userDirs: {
actualDir: string;
};
}
export interface StepResultAfterExpectedKey {
expectedKey: string | null;
}
export interface StepResultAfterComparison extends StepResultAfterExpectedKey {
comparisonResult: ComparisonResult;
}
export interface StepResultAfterActualKey extends StepResultAfterComparison {
actualKey: string;
}
export interface StepResultAfterPublish extends StepResultAfterActualKey {
reportUrl?: string;
}
export class RegProcessor {
private _logger: PluginLogger;
private _config: CoreConfig;
private _directoryInfo: {
workingDirs: WorkingDirectoryInfo;
userDirs: {
actualDir: string;
};
};
private _keyGenerator?: KeyGeneratorPlugin<any>;
private _publisher?: PublisherPlugin<any>;
private _notifiers: NotifierPlugin<any>[];
constructor(opt: PluginCreateOptions<ProcessorOptions>) {
this._logger = opt.logger;
this._config = opt.coreConfig;
this._directoryInfo = {
workingDirs: opt.workingDirs,
userDirs: opt.options.userDirs,
};
this._keyGenerator = opt.options.keyGenerator;
this._publisher = opt.options.publisher;
this._notifiers = opt.options.notifiers;
}
runAll() {
return this.getExpectedKey()
.then(ctx => this.syncExpected(ctx))
.then(ctx => this.compare(ctx))
.then(ctx => this.getActualKey(ctx))
.then(ctx => this.publish(ctx))
.then(ctx => this.notify(ctx));
}
getExpectedKey(): Promise<StepResultAfterExpectedKey> {
if (this._keyGenerator) {
return this._keyGenerator
.getExpectedKey()
.then(key => {
this._logger.info(`Detected the previous snapshot key: '${key}'`);
return { expectedKey: key };
})
.catch(reason => {
this._logger.warn("Failed to detect the previous snapshot key");
if (reason) this._logger.error(reason);
return Promise.resolve({ expectedKey: null });
});
} else {
this._logger.info("Skipped to detect the previous snapshot key because key generator plugin is not set up.");
return Promise.resolve({ expectedKey: null });
}
}
compare(ctx: StepResultAfterExpectedKey): Promise<StepResultAfterComparison> {
const { actualDir, expectedDir, diffDir } = this._directoryInfo.workingDirs;
const json = path.join(this._directoryInfo.workingDirs.base, "out.json");
const report = path.join(this._directoryInfo.workingDirs.base, "index.html");
const ximgdiffConf = this._config.ximgdiff || { invocationType: "cli" };
rimraf.sync(actualDir);
cpx.copySync(`${this._directoryInfo.userDirs.actualDir}/**/*.{png,jpg,jpeg,tiff,bmp,gif}`, actualDir);
const emitter = compare({
actualDir,
expectedDir,
diffDir,
json,
report,
update: false,
ignoreChange: true,
urlPrefix: "",
threshold: this._config.threshold,
thresholdPixel: this._config.thresholdPixel,
thresholdRate: this._config.thresholdRate,
matchingThreshold: this._config.matchingThreshold ?? 0, // matchingThreshold should not be undefined
enableAntialias: this._config.enableAntialias,
enableCliAdditionalDetection: ximgdiffConf.invocationType === "cli",
enableClientAdditionalDetection: ximgdiffConf.invocationType !== "none",
}) as EventEmitter;
emitter.on("compare", (compareItem: { type: string; path: string }) => {
this._logger.verbose(
`${this._logger.colors.red(compareItem.type)}: ${this._logger.colors.magenta(compareItem.path)}`,
);
});
const comparisonResult = new Promise<ComparisonResult>((resolve, reject) => {
emitter.once("complete", (result: ComparisonResult) => resolve(result));
emitter.once("error", (reason: any) => reject(reason));
});
return comparisonResult
.then(result => {
this._logger.info("Comparison Complete");
this._logger.info(this._logger.colors.red(" Changed items: " + result.failedItems.length));
this._logger.info(this._logger.colors.cyan(" New items: " + result.newItems.length));
this._logger.info(this._logger.colors.redBright(" Deleted items: " + result.deletedItems.length));
this._logger.info(this._logger.colors.green(" Passed items: " + result.passedItems.length));
this._logger.verbose("Comparison details:", result);
return { ...ctx, comparisonResult: result };
})
.catch(reason => {
        // re-throw because a comparison failure is fatal.
        this._logger.error("An error occurred while comparing images:");
if (reason) this._logger.error(reason);
return Promise.reject<StepResultAfterComparison>(reason);
});
}
getActualKey(ctx: StepResultAfterComparison): Promise<StepResultAfterActualKey> {
const fallbackFn = () => "snapshot_" + ~~(new Date().getTime() / 1000);
if (this._keyGenerator) {
return this._keyGenerator
.getActualKey()
.then(key => {
if (!key) {
this._logger.warn("Failed to generate the current snapshot key.");
return { ...ctx, actualKey: fallbackFn() };
}
this._logger.info(`The current snapshot key: '${key}'`);
return { ...ctx, actualKey: key };
})
.catch(reason => {
this._logger.warn("Failed to gerenate the current snapshot key.");
if (reason) this._logger.error(reason);
return Promise.resolve({ ...ctx, actualKey: fallbackFn() });
});
} else {
const fallbackKey = fallbackFn();
this._logger.info(`Use '${fallbackKey}' as the current snapshot key because key generator plugin is not set up.`);
return Promise.resolve({ ...ctx, actualKey: fallbackKey });
}
}
syncExpected(ctx: StepResultAfterExpectedKey): Promise<StepResultAfterExpectedKey> {
const keyForExpected = ctx.expectedKey;
if (this._publisher && keyForExpected) {
return this._publisher.fetch(keyForExpected);
} else if (!keyForExpected) {
this._logger.info("Skipped to fetch the expected data because expected key is null.");
return Promise.resolve(ctx);
} else if (!this._publisher) {
this._logger.info("Skipped to fetch the expected data because publisher plugin is not set up.");
return Promise.resolve(ctx);
} else {
return Promise.resolve(ctx);
}
}
|
(ctx: StepResultAfterActualKey): Promise<StepResultAfterPublish> {
if (this._publisher) {
return this._publisher
.publish(ctx.actualKey)
.then(result => {
this._logger.info(`Published snapshot '${ctx.actualKey}' successfully.`);
if (result.reportUrl) {
this._logger.info(`Report URL: ${result.reportUrl}`);
}
this._logger.verbose("Publish result:", result);
return { ...ctx, reportUrl: result.reportUrl };
})
.catch(reason => {
        // re-throw because a publishing failure is fatal.
        this._logger.error("An error occurred while publishing the snapshot:");
if (reason.code === "CredentialsError") {
this._logger.error("Failed to read AWS credentials.");
this._logger.error(
`Create ${this._logger.colors.magenta("~/.aws/credentials")} or export ${this._logger.colors.green(
"$AWS_ACCESS_KEY_ID",
)} and ${this._logger.colors.green("$AWS_SECRET_ACCESS_KEY")}.`,
);
}
if (reason) this._logger.error(reason);
return Promise.reject<StepResultAfterPublish>(reason);
});
} else {
this._logger.info("Skipped to publish the snapshot data because publisher plugin is not set up.");
return Promise.resolve(ctx);
}
}
notify(ctx: StepResultAfterPublish): Promise<StepResultAfterPublish> {
const notifyParams: NotifyParams = {
...ctx,
};
if (!this._notifiers.length) {
this._logger.info("Skipped to notify result because notifier plugins are not set up.");
}
this._logger.verbose("Notify parameters:", notifyParams);
return this._notifiers
.reduce((queue, notifier) => {
return queue
.then(() => notifier.notify(notifyParams))
.catch(reason => {
// Don't re-throw notifiers error because it's not fatal.
this._logger.error("An error occurs during notify:");
this._logger.error(reason);
return Promise.resolve();
});
}, Promise.resolve())
.then(() => ctx);
}
}
|
publish
|
identifier_name
|
processor.ts
|
import path from "path";
import {
CoreConfig,
WorkingDirectoryInfo,
PluginCreateOptions,
KeyGeneratorPlugin,
PublisherPlugin,
NotifierPlugin,
NotifyParams,
PluginLogger,
ComparisonResult,
} from "reg-suit-interface";
import { EventEmitter } from "events";
const compare = require("reg-cli");
const rimraf = require("rimraf");
const cpx = require("cpx");
export interface ProcessorOptions {
keyGenerator?: KeyGeneratorPlugin<any>;
publisher?: PublisherPlugin<any>;
notifiers: NotifierPlugin<any>[];
userDirs: {
actualDir: string;
};
}
export interface StepResultAfterExpectedKey {
expectedKey: string | null;
}
export interface StepResultAfterComparison extends StepResultAfterExpectedKey {
comparisonResult: ComparisonResult;
}
export interface StepResultAfterActualKey extends StepResultAfterComparison {
actualKey: string;
}
export interface StepResultAfterPublish extends StepResultAfterActualKey {
reportUrl?: string;
}
export class RegProcessor {
private _logger: PluginLogger;
private _config: CoreConfig;
private _directoryInfo: {
workingDirs: WorkingDirectoryInfo;
userDirs: {
actualDir: string;
};
};
private _keyGenerator?: KeyGeneratorPlugin<any>;
private _publisher?: PublisherPlugin<any>;
private _notifiers: NotifierPlugin<any>[];
constructor(opt: PluginCreateOptions<ProcessorOptions>) {
this._logger = opt.logger;
this._config = opt.coreConfig;
this._directoryInfo = {
workingDirs: opt.workingDirs,
userDirs: opt.options.userDirs,
};
this._keyGenerator = opt.options.keyGenerator;
this._publisher = opt.options.publisher;
this._notifiers = opt.options.notifiers;
}
runAll() {
return this.getExpectedKey()
.then(ctx => this.syncExpected(ctx))
.then(ctx => this.compare(ctx))
.then(ctx => this.getActualKey(ctx))
.then(ctx => this.publish(ctx))
|
getExpectedKey(): Promise<StepResultAfterExpectedKey> {
if (this._keyGenerator) {
return this._keyGenerator
.getExpectedKey()
.then(key => {
this._logger.info(`Detected the previous snapshot key: '${key}'`);
return { expectedKey: key };
})
.catch(reason => {
this._logger.warn("Failed to detect the previous snapshot key");
if (reason) this._logger.error(reason);
return Promise.resolve({ expectedKey: null });
});
} else {
this._logger.info("Skipped to detect the previous snapshot key because key generator plugin is not set up.");
return Promise.resolve({ expectedKey: null });
}
}
compare(ctx: StepResultAfterExpectedKey): Promise<StepResultAfterComparison> {
const { actualDir, expectedDir, diffDir } = this._directoryInfo.workingDirs;
const json = path.join(this._directoryInfo.workingDirs.base, "out.json");
const report = path.join(this._directoryInfo.workingDirs.base, "index.html");
const ximgdiffConf = this._config.ximgdiff || { invocationType: "cli" };
rimraf.sync(actualDir);
cpx.copySync(`${this._directoryInfo.userDirs.actualDir}/**/*.{png,jpg,jpeg,tiff,bmp,gif}`, actualDir);
const emitter = compare({
actualDir,
expectedDir,
diffDir,
json,
report,
update: false,
ignoreChange: true,
urlPrefix: "",
threshold: this._config.threshold,
thresholdPixel: this._config.thresholdPixel,
thresholdRate: this._config.thresholdRate,
matchingThreshold: this._config.matchingThreshold ?? 0, // matchingThreshold should not be undefined
enableAntialias: this._config.enableAntialias,
enableCliAdditionalDetection: ximgdiffConf.invocationType === "cli",
enableClientAdditionalDetection: ximgdiffConf.invocationType !== "none",
}) as EventEmitter;
emitter.on("compare", (compareItem: { type: string; path: string }) => {
this._logger.verbose(
`${this._logger.colors.red(compareItem.type)}: ${this._logger.colors.magenta(compareItem.path)}`,
);
});
const comparisonResult = new Promise<ComparisonResult>((resolve, reject) => {
emitter.once("complete", (result: ComparisonResult) => resolve(result));
emitter.once("error", (reason: any) => reject(reason));
});
return comparisonResult
.then(result => {
this._logger.info("Comparison Complete");
this._logger.info(this._logger.colors.red(" Changed items: " + result.failedItems.length));
this._logger.info(this._logger.colors.cyan(" New items: " + result.newItems.length));
this._logger.info(this._logger.colors.redBright(" Deleted items: " + result.deletedItems.length));
this._logger.info(this._logger.colors.green(" Passed items: " + result.passedItems.length));
this._logger.verbose("Comparison details:", result);
return { ...ctx, comparisonResult: result };
})
.catch(reason => {
// Re-throw the comparison error because it's fatal.
this._logger.error("An error occurred while comparing images:");
if (reason) this._logger.error(reason);
return Promise.reject<StepResultAfterComparison>(reason);
});
}
getActualKey(ctx: StepResultAfterComparison): Promise<StepResultAfterActualKey> {
const fallbackFn = () => "snapshot_" + ~~(new Date().getTime() / 1000);
if (this._keyGenerator) {
return this._keyGenerator
.getActualKey()
.then(key => {
if (!key) {
this._logger.warn("Failed to generate the current snapshot key.");
return { ...ctx, actualKey: fallbackFn() };
}
this._logger.info(`The current snapshot key: '${key}'`);
return { ...ctx, actualKey: key };
})
.catch(reason => {
this._logger.warn("Failed to gerenate the current snapshot key.");
if (reason) this._logger.error(reason);
return Promise.resolve({ ...ctx, actualKey: fallbackFn() });
});
} else {
const fallbackKey = fallbackFn();
this._logger.info(`Use '${fallbackKey}' as the current snapshot key because the key generator plugin is not set up.`);
return Promise.resolve({ ...ctx, actualKey: fallbackKey });
}
}
syncExpected(ctx: StepResultAfterExpectedKey): Promise<StepResultAfterExpectedKey> {
const keyForExpected = ctx.expectedKey;
if (this._publisher && keyForExpected) {
return this._publisher.fetch(keyForExpected);
} else if (!keyForExpected) {
this._logger.info("Skipped to fetch the expected data because expected key is null.");
return Promise.resolve(ctx);
} else if (!this._publisher) {
this._logger.info("Skipped to fetch the expected data because publisher plugin is not set up.");
return Promise.resolve(ctx);
} else {
return Promise.resolve(ctx);
}
}
publish(ctx: StepResultAfterActualKey): Promise<StepResultAfterPublish> {
if (this._publisher) {
return this._publisher
.publish(ctx.actualKey)
.then(result => {
this._logger.info(`Published snapshot '${ctx.actualKey}' successfully.`);
if (result.reportUrl) {
this._logger.info(`Report URL: ${result.reportUrl}`);
}
this._logger.verbose("Publish result:", result);
return { ...ctx, reportUrl: result.reportUrl };
})
.catch(reason => {
// Re-throw the publish error because it's fatal.
this._logger.error("An error occurred while publishing the snapshot:");
if (reason.code === "CredentialsError") {
this._logger.error("Failed to read AWS credentials.");
this._logger.error(
`Create ${this._logger.colors.magenta("~/.aws/credentials")} or export ${this._logger.colors.green(
"$AWS_ACCESS_KEY_ID",
)} and ${this._logger.colors.green("$AWS_SECRET_ACCESS_KEY")}.`,
);
}
if (reason) this._logger.error(reason);
return Promise.reject<StepResultAfterPublish>(reason);
});
} else {
this._logger.info("Skipped to publish the snapshot data because publisher plugin is not set up.");
return Promise.resolve(ctx);
}
}
notify(ctx: StepResultAfterPublish): Promise<StepResultAfterPublish> {
const notifyParams: NotifyParams = {
...ctx,
};
if (!this._notifiers.length) {
this._logger.info("Skipped to notify result because notifier plugins are not set up.");
}
this._logger.verbose("Notify parameters:", notifyParams);
return this._notifiers
.reduce((queue, notifier) => {
return queue
.then(() => notifier.notify(notifyParams))
.catch(reason => {
// Don't re-throw notifier errors because they're not fatal.
this._logger.error("An error occurred during notification:");
this._logger.error(reason);
return Promise.resolve();
});
}, Promise.resolve())
.then(() => ctx);
}
}
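// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original file). It
// wires up a RegProcessor with a minimal hypothetical logger and no plugins;
// real reg-suit runs construct this through the plugin loader, and
// PluginCreateOptions may carry additional fields.
// ---------------------------------------------------------------------------
const sketchLogger: any = {
  info: console.log,
  warn: console.warn,
  error: console.error,
  verbose: () => undefined,
  colors: {
    red: (s: string) => s,
    cyan: (s: string) => s,
    redBright: (s: string) => s,
    green: (s: string) => s,
    magenta: (s: string) => s,
  },
};
const sketchProcessor = new RegProcessor({
  coreConfig: { workingDir: ".reg", actualDir: "screenshots" },
  workingDirs: {
    base: ".reg",
    actualDir: ".reg/actual",
    expectedDir: ".reg/expected",
    diffDir: ".reg/diff",
  },
  logger: sketchLogger,
  options: {
    notifiers: [], // no notifier plugins in this sketch
    userDirs: { actualDir: "screenshots" }, // where the user's screenshots live
  },
} as any);
// Runs: expected key -> sync -> compare -> actual key -> publish -> notify.
// sketchProcessor.runAll().then(ctx => console.log("Report URL:", ctx.reportUrl));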
|
.then(ctx => this.notify(ctx));
}
|
random_line_split
|
load.js
|
var loadState={
preload:function(){
//add a loading label on screen
var loadingLabel = game.add.text(game.width/2, 150,
'loading...', { font:'30px Arial', fill:'#ffffff'});
loadingLabel.anchor.setTo(0.5,0.5);
//display progress bar
var progressBar= game.add.sprite(game.width/2,200,'progressBar');
progressBar.anchor.setTo(0.5,0.5);
game.load.setPreloadSprite(progressBar);
//load all assets
game.load.spritesheet('player','assets/player2.png',20,20);
game.load.image('wallV', 'assets/wallVertical.png');
game.load.image('wallH', 'assets/wallHorizontal.png');
game.load.image('coin', 'assets/coin.png');
game.load.image('enemy', 'assets/enemy.png');
//load new assets for use in menu state
game.load.image('background','assets/background.png');
//jump sound
game.load.audio('jump',['assets/jump.ogg','assets/jump.mp3']);
//take coin sound
game.load.audio('coin',['assets/coin.ogg','assets/coin.mp3']);
//player die sound
game.load.audio('dead',['assets/dead.ogg','assets/dead.mp3']);
game.load.image('pixel','assets/pixel.png');
game.load.spritesheet('mute', 'assets/muteButton.png', 28, 22);
game.load.image('jumpButton', 'assets/jumpButton.png');
game.load.image('rightButton', 'assets/rightButton.png');
game.load.image('leftButton', 'assets/leftButton.png');
|
};
|
},
create:function(){
//go to menu state
game.state.start('menu');
}
|
random_line_split
|
serializers.py
|
"""
Serializer for user API
"""
from rest_framework import serializers
from rest_framework.reverse import reverse
from django.template import defaultfilters
from courseware.access import has_access
from student.models import CourseEnrollment, User
from certificates.models import certificate_status_for_student, CertificateStatuses
from xmodule.course_module import DEFAULT_START_DATE
class CourseOverviewField(serializers.RelatedField):
"""Custom field to wrap a CourseDescriptor object. Read-only."""
def to_representation(self, course_overview):
|
class CourseEnrollmentSerializer(serializers.ModelSerializer):
"""
Serializes CourseEnrollment models
"""
course = CourseOverviewField(source="course_overview", read_only=True)
certificate = serializers.SerializerMethodField()
def get_certificate(self, model):
"""Returns the information about the user's certificate in the course."""
certificate_info = certificate_status_for_student(model.user, model.course_id)
if certificate_info['status'] == CertificateStatuses.downloadable:
return {
"url": certificate_info['download_url'],
}
else:
return {}
class Meta(object): # pylint: disable=missing-docstring
model = CourseEnrollment
fields = ('created', 'mode', 'is_active', 'course', 'certificate')
lookup_field = 'username'
class UserSerializer(serializers.HyperlinkedModelSerializer):
"""
Serializes User models
"""
name = serializers.ReadOnlyField(source='profile.name')
course_enrollments = serializers.HyperlinkedIdentityField(
view_name='courseenrollment-detail',
lookup_field='username'
)
class Meta(object): # pylint: disable=missing-docstring
model = User
fields = ('id', 'username', 'email', 'name', 'course_enrollments')
lookup_field = 'username'
|
course_id = unicode(course_overview.id)
request = self.context.get('request', None)
if request:
video_outline_url = reverse(
'video-summary-list',
kwargs={'course_id': course_id},
request=request
)
course_updates_url = reverse(
'course-updates-list',
kwargs={'course_id': course_id},
request=request
)
course_handouts_url = reverse(
'course-handouts-list',
kwargs={'course_id': course_id},
request=request
)
else:
video_outline_url = None
course_updates_url = None
course_handouts_url = None
if course_overview.advertised_start is not None:
start_type = "string"
start_display = course_overview.advertised_start
elif course_overview.start != DEFAULT_START_DATE:
start_type = "timestamp"
start_display = defaultfilters.date(course_overview.start, "DATE_FORMAT")
else:
start_type = "empty"
start_display = None
return {
"id": course_id,
"name": course_overview.display_name,
"number": course_overview.display_number_with_default,
"org": course_overview.display_org_with_default,
"start": course_overview.start,
"start_display": start_display,
"start_type": start_type,
"end": course_overview.end,
"course_image": course_overview.course_image_url,
"social_urls": {
"facebook": course_overview.facebook_url,
},
"latest_updates": {
"video": None
},
"video_outline": video_outline_url,
"course_updates": course_updates_url,
"course_handouts": course_handouts_url,
"subscription_id": course_overview.clean_id(padding_char='_'),
"courseware_access": has_access(request.user, 'load_mobile', course_overview).to_json() if request else None
}
|
identifier_body
|
serializers.py
|
"""
Serializer for user API
"""
from rest_framework import serializers
from rest_framework.reverse import reverse
from django.template import defaultfilters
from courseware.access import has_access
from student.models import CourseEnrollment, User
from certificates.models import certificate_status_for_student, CertificateStatuses
from xmodule.course_module import DEFAULT_START_DATE
class CourseOverviewField(serializers.RelatedField):
"""Custom field to wrap a CourseDescriptor object. Read-only."""
def to_representation(self, course_overview):
course_id = unicode(course_overview.id)
request = self.context.get('request', None)
if request:
video_outline_url = reverse(
'video-summary-list',
kwargs={'course_id': course_id},
request=request
)
course_updates_url = reverse(
'course-updates-list',
kwargs={'course_id': course_id},
request=request
)
course_handouts_url = reverse(
'course-handouts-list',
kwargs={'course_id': course_id},
request=request
)
else:
video_outline_url = None
course_updates_url = None
course_handouts_url = None
if course_overview.advertised_start is not None:
start_type = "string"
start_display = course_overview.advertised_start
elif course_overview.start != DEFAULT_START_DATE:
start_type = "timestamp"
start_display = defaultfilters.date(course_overview.start, "DATE_FORMAT")
else:
start_type = "empty"
start_display = None
return {
"id": course_id,
"name": course_overview.display_name,
"number": course_overview.display_number_with_default,
"org": course_overview.display_org_with_default,
"start": course_overview.start,
"start_display": start_display,
"start_type": start_type,
"end": course_overview.end,
"course_image": course_overview.course_image_url,
"social_urls": {
"facebook": course_overview.facebook_url,
},
"latest_updates": {
"video": None
},
"video_outline": video_outline_url,
"course_updates": course_updates_url,
"course_handouts": course_handouts_url,
"subscription_id": course_overview.clean_id(padding_char='_'),
"courseware_access": has_access(request.user, 'load_mobile', course_overview).to_json() if request else None
}
class CourseEnrollmentSerializer(serializers.ModelSerializer):
"""
|
Serializes CourseEnrollment models
"""
course = CourseOverviewField(source="course_overview", read_only=True)
certificate = serializers.SerializerMethodField()
def get_certificate(self, model):
"""Returns the information about the user's certificate in the course."""
certificate_info = certificate_status_for_student(model.user, model.course_id)
if certificate_info['status'] == CertificateStatuses.downloadable:
return {
"url": certificate_info['download_url'],
}
else:
return {}
class Meta(object): # pylint: disable=missing-docstring
model = CourseEnrollment
fields = ('created', 'mode', 'is_active', 'course', 'certificate')
lookup_field = 'username'
class UserSerializer(serializers.HyperlinkedModelSerializer):
"""
Serializes User models
"""
name = serializers.ReadOnlyField(source='profile.name')
course_enrollments = serializers.HyperlinkedIdentityField(
view_name='courseenrollment-detail',
lookup_field='username'
)
class Meta(object): # pylint: disable=missing-docstring
model = User
fields = ('id', 'username', 'email', 'name', 'course_enrollments')
lookup_field = 'username'
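# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module). The
# helper below shows how these serializers would typically be driven from a
# DRF view; passing the request in the context lets the hyperlinked and
# reverse()'d fields resolve to absolute URLs.
# ---------------------------------------------------------------------------
def serialize_enrollments_sketch(request, user):
    """Hypothetical helper: serialize a user's active enrollments."""
    enrollments = CourseEnrollment.objects.filter(user=user, is_active=True)
    return CourseEnrollmentSerializer(
        enrollments, many=True, context={'request': request}
    ).data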
|
random_line_split
|
|
serializers.py
|
"""
Serializer for user API
"""
from rest_framework import serializers
from rest_framework.reverse import reverse
from django.template import defaultfilters
from courseware.access import has_access
from student.models import CourseEnrollment, User
from certificates.models import certificate_status_for_student, CertificateStatuses
from xmodule.course_module import DEFAULT_START_DATE
class CourseOverviewField(serializers.RelatedField):
"""Custom field to wrap a CourseDescriptor object. Read-only."""
def to_representation(self, course_overview):
course_id = unicode(course_overview.id)
request = self.context.get('request', None)
if request:
video_outline_url = reverse(
'video-summary-list',
kwargs={'course_id': course_id},
request=request
)
course_updates_url = reverse(
'course-updates-list',
kwargs={'course_id': course_id},
request=request
)
course_handouts_url = reverse(
'course-handouts-list',
kwargs={'course_id': course_id},
request=request
)
else:
video_outline_url = None
course_updates_url = None
course_handouts_url = None
if course_overview.advertised_start is not None:
start_type = "string"
start_display = course_overview.advertised_start
elif course_overview.start != DEFAULT_START_DATE:
start_type = "timestamp"
start_display = defaultfilters.date(course_overview.start, "DATE_FORMAT")
else:
start_type = "empty"
start_display = None
return {
"id": course_id,
"name": course_overview.display_name,
"number": course_overview.display_number_with_default,
"org": course_overview.display_org_with_default,
"start": course_overview.start,
"start_display": start_display,
"start_type": start_type,
"end": course_overview.end,
"course_image": course_overview.course_image_url,
"social_urls": {
"facebook": course_overview.facebook_url,
},
"latest_updates": {
"video": None
},
"video_outline": video_outline_url,
"course_updates": course_updates_url,
"course_handouts": course_handouts_url,
"subscription_id": course_overview.clean_id(padding_char='_'),
"courseware_access": has_access(request.user, 'load_mobile', course_overview).to_json() if request else None
}
class CourseEnrollmentSerializer(serializers.ModelSerializer):
"""
Serializes CourseEnrollment models
"""
course = CourseOverviewField(source="course_overview", read_only=True)
certificate = serializers.SerializerMethodField()
def get_certificate(self, model):
"""Returns the information about the user's certificate in the course."""
certificate_info = certificate_status_for_student(model.user, model.course_id)
if certificate_info['status'] == CertificateStatuses.downloadable:
|
else:
return {}
class Meta(object): # pylint: disable=missing-docstring
model = CourseEnrollment
fields = ('created', 'mode', 'is_active', 'course', 'certificate')
lookup_field = 'username'
class UserSerializer(serializers.HyperlinkedModelSerializer):
"""
Serializes User models
"""
name = serializers.ReadOnlyField(source='profile.name')
course_enrollments = serializers.HyperlinkedIdentityField(
view_name='courseenrollment-detail',
lookup_field='username'
)
class Meta(object): # pylint: disable=missing-docstring
model = User
fields = ('id', 'username', 'email', 'name', 'course_enrollments')
lookup_field = 'username'
|
return {
"url": certificate_info['download_url'],
}
|
conditional_block
|
serializers.py
|
"""
Serializer for user API
"""
from rest_framework import serializers
from rest_framework.reverse import reverse
from django.template import defaultfilters
from courseware.access import has_access
from student.models import CourseEnrollment, User
from certificates.models import certificate_status_for_student, CertificateStatuses
from xmodule.course_module import DEFAULT_START_DATE
class
|
(serializers.RelatedField):
"""Custom field to wrap a CourseDescriptor object. Read-only."""
def to_representation(self, course_overview):
course_id = unicode(course_overview.id)
request = self.context.get('request', None)
if request:
video_outline_url = reverse(
'video-summary-list',
kwargs={'course_id': course_id},
request=request
)
course_updates_url = reverse(
'course-updates-list',
kwargs={'course_id': course_id},
request=request
)
course_handouts_url = reverse(
'course-handouts-list',
kwargs={'course_id': course_id},
request=request
)
else:
video_outline_url = None
course_updates_url = None
course_handouts_url = None
if course_overview.advertised_start is not None:
start_type = "string"
start_display = course_overview.advertised_start
elif course_overview.start != DEFAULT_START_DATE:
start_type = "timestamp"
start_display = defaultfilters.date(course_overview.start, "DATE_FORMAT")
else:
start_type = "empty"
start_display = None
return {
"id": course_id,
"name": course_overview.display_name,
"number": course_overview.display_number_with_default,
"org": course_overview.display_org_with_default,
"start": course_overview.start,
"start_display": start_display,
"start_type": start_type,
"end": course_overview.end,
"course_image": course_overview.course_image_url,
"social_urls": {
"facebook": course_overview.facebook_url,
},
"latest_updates": {
"video": None
},
"video_outline": video_outline_url,
"course_updates": course_updates_url,
"course_handouts": course_handouts_url,
"subscription_id": course_overview.clean_id(padding_char='_'),
"courseware_access": has_access(request.user, 'load_mobile', course_overview).to_json() if request else None
}
class CourseEnrollmentSerializer(serializers.ModelSerializer):
"""
Serializes CourseEnrollment models
"""
course = CourseOverviewField(source="course_overview", read_only=True)
certificate = serializers.SerializerMethodField()
def get_certificate(self, model):
"""Returns the information about the user's certificate in the course."""
certificate_info = certificate_status_for_student(model.user, model.course_id)
if certificate_info['status'] == CertificateStatuses.downloadable:
return {
"url": certificate_info['download_url'],
}
else:
return {}
class Meta(object): # pylint: disable=missing-docstring
model = CourseEnrollment
fields = ('created', 'mode', 'is_active', 'course', 'certificate')
lookup_field = 'username'
class UserSerializer(serializers.HyperlinkedModelSerializer):
"""
Serializes User models
"""
name = serializers.ReadOnlyField(source='profile.name')
course_enrollments = serializers.HyperlinkedIdentityField(
view_name='courseenrollment-detail',
lookup_field='username'
)
class Meta(object): # pylint: disable=missing-docstring
model = User
fields = ('id', 'username', 'email', 'name', 'course_enrollments')
lookup_field = 'username'
|
CourseOverviewField
|
identifier_name
|
shared_lock.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Different objects protected by the same lock
use crate::str::{CssString, CssStringWriter};
use crate::stylesheets::Origin;
#[cfg(feature = "gecko")]
use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut};
#[cfg(feature = "servo")]
use parking_lot::RwLock;
use servo_arc::Arc;
use std::cell::UnsafeCell;
use std::fmt;
#[cfg(feature = "servo")]
use std::mem;
#[cfg(feature = "gecko")]
use std::ptr;
use to_shmem::{SharedMemoryBuilder, ToShmem};
/// A shared read/write lock that can protect multiple objects.
///
/// In Gecko builds, we don't need the blocking behavior, just the safety. As
/// such we implement this with an AtomicRefCell instead in Gecko builds,
/// which is ~2x as fast, and panics (rather than deadlocking) when things go
/// wrong (which is much easier to debug on CI).
///
/// Servo needs the blocking behavior for its unsynchronized animation setup,
/// but that may not be web-compatible and may need to be changed (at which
/// point Servo could use AtomicRefCell too).
///
/// Gecko also needs the ability to have "read only" SharedRwLocks, which are
/// used for objects stored in (read only) shared memory. Attempting to acquire
/// write access to objects protected by a read only SharedRwLock will panic.
#[derive(Clone)]
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
pub struct SharedRwLock {
#[cfg(feature = "servo")]
#[cfg_attr(feature = "servo", ignore_malloc_size_of = "Arc")]
arc: Arc<RwLock<()>>,
#[cfg(feature = "gecko")]
cell: Option<Arc<AtomicRefCell<SomethingZeroSizedButTyped>>>,
}
#[cfg(feature = "gecko")]
struct SomethingZeroSizedButTyped;
impl fmt::Debug for SharedRwLock {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("SharedRwLock")
}
}
impl SharedRwLock {
/// Create a new shared lock (servo).
#[cfg(feature = "servo")]
pub fn new() -> Self {
SharedRwLock {
arc: Arc::new(RwLock::new(())),
}
}
/// Create a new shared lock (gecko).
#[cfg(feature = "gecko")]
pub fn new() -> Self {
SharedRwLock {
cell: Some(Arc::new(AtomicRefCell::new(SomethingZeroSizedButTyped))),
}
}
/// Create a new global shared lock (servo).
#[cfg(feature = "servo")]
pub fn new_leaked() -> Self {
SharedRwLock {
arc: Arc::new_leaked(RwLock::new(())),
}
}
/// Create a new global shared lock (gecko).
#[cfg(feature = "gecko")]
pub fn new_leaked() -> Self {
SharedRwLock {
cell: Some(Arc::new_leaked(AtomicRefCell::new(
SomethingZeroSizedButTyped,
))),
}
}
/// Create a new read-only shared lock (gecko).
#[cfg(feature = "gecko")]
pub fn read_only() -> Self {
SharedRwLock { cell: None }
}
/// Wrap the given data to make its access protected by this lock.
pub fn wrap<T>(&self, data: T) -> Locked<T> {
Locked {
shared_lock: self.clone(),
data: UnsafeCell::new(data),
}
}
/// Obtain the lock for reading (servo).
#[cfg(feature = "servo")]
pub fn read(&self) -> SharedRwLockReadGuard {
mem::forget(self.arc.read());
SharedRwLockReadGuard(self)
}
/// Obtain the lock for reading (gecko).
#[cfg(feature = "gecko")]
pub fn read(&self) -> SharedRwLockReadGuard {
SharedRwLockReadGuard(self.cell.as_ref().map(|cell| cell.borrow()))
}
/// Obtain the lock for writing (servo).
#[cfg(feature = "servo")]
pub fn write(&self) -> SharedRwLockWriteGuard {
mem::forget(self.arc.write());
SharedRwLockWriteGuard(self)
}
/// Obtain the lock for writing (gecko).
#[cfg(feature = "gecko")]
pub fn write(&self) -> SharedRwLockWriteGuard {
SharedRwLockWriteGuard(self.cell.as_ref().unwrap().borrow_mut())
}
}
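// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original file): wrap a
// value with the lock, then read and mutate it through guards obtained from
// the same lock. The values are arbitrary.
// ---------------------------------------------------------------------------
#[cfg(test)]
mod usage_sketch {
    use super::SharedRwLock;

    #[test]
    fn wrap_read_write() {
        let lock = SharedRwLock::new();
        let value = lock.wrap(1u32);

        // Reading requires a read guard from the same lock.
        let guard = lock.read();
        assert_eq!(*value.read_with(&guard), 1);
        drop(guard);

        // Writing requires a mutable borrow of a write guard from the same lock.
        let mut guard = lock.write();
        *value.write_with(&mut guard) = 2;
        drop(guard);

        let guard = lock.read();
        assert_eq!(*value.read_with(&guard), 2);
    }
}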
/// Proof that a shared lock was obtained for reading (servo).
#[cfg(feature = "servo")]
pub struct SharedRwLockReadGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for reading (gecko).
#[cfg(feature = "gecko")]
pub struct SharedRwLockReadGuard<'a>(Option<AtomicRef<'a, SomethingZeroSizedButTyped>>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockReadGuard<'a> {
fn drop(&mut self) {
// Unsafe: self.lock is private to this module, only ever set after `read()`,
// and never copied or cloned (see `compile_time_assert` below).
unsafe { self.0.arc.force_unlock_read() }
}
}
/// Proof that a shared lock was obtained for writing (servo).
#[cfg(feature = "servo")]
pub struct SharedRwLockWriteGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for writing (gecko).
#[cfg(feature = "gecko")]
pub struct SharedRwLockWriteGuard<'a>(AtomicRefMut<'a, SomethingZeroSizedButTyped>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockWriteGuard<'a> {
fn drop(&mut self) {
// Unsafe: self.lock is private to this module, only ever set after `write()`,
// and never copied or cloned (see `compile_time_assert` below).
unsafe { self.0.arc.force_unlock_write() }
}
}
/// Data protected by a shared lock.
pub struct Locked<T> {
shared_lock: SharedRwLock,
data: UnsafeCell<T>,
}
// Unsafe: the data inside `UnsafeCell` is only accessed in `read_with` and `write_with`,
// where guards ensure synchronization.
unsafe impl<T: Send> Send for Locked<T> {}
unsafe impl<T: Send + Sync> Sync for Locked<T> {}
impl<T: fmt::Debug> fmt::Debug for Locked<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let guard = self.shared_lock.read();
self.read_with(&guard).fmt(f)
}
}
impl<T> Locked<T> {
#[cfg(feature = "gecko")]
#[inline]
fn is_read_only_lock(&self) -> bool {
self.shared_lock.cell.is_none()
}
#[cfg(feature = "servo")]
fn same_lock_as(&self, lock: &SharedRwLock) -> bool {
Arc::ptr_eq(&self.shared_lock.arc, &lock.arc)
}
#[cfg(feature = "gecko")]
fn same_lock_as(&self, derefed_guard: Option<&SomethingZeroSizedButTyped>) -> bool {
ptr::eq(
self.shared_lock
.cell
.as_ref()
.map(|cell| cell.as_ptr())
.unwrap_or(ptr::null_mut()),
derefed_guard
.map(|guard| guard as *const _ as *mut _)
.unwrap_or(ptr::null_mut()),
)
}
/// Access the data for reading.
pub fn read_with<'a>(&'a self, guard: &'a SharedRwLockReadGuard) -> &'a T {
#[cfg(feature = "gecko")]
assert!(
self.is_read_only_lock() || self.same_lock_as(guard.0.as_ref().map(|r| &**r)),
"Locked::read_with called with a guard from an unrelated SharedRwLock"
);
#[cfg(not(feature = "gecko"))]
assert!(self.same_lock_as(&guard.0));
let ptr = self.data.get();
// Unsafe:
//
// * The guard guarantees that the lock is taken for reading,
// and we’ve checked that it’s the correct lock.
// * The returned reference borrows *both* the data and the guard,
// so that it can outlive neither.
unsafe { &*ptr }
}
/// Access the data for reading without verifying the lock. Use with caution.
#[cfg(feature = "gecko")]
pub unsafe fn read_unchecked<'a>(&'a self) -> &'a T {
let ptr = self.data.get();
&*ptr
}
/// Access the data for writing.
pub fn write_with<'a>(&'a self, guard: &'a mut SharedRwLockWriteGuard) -> &'a mut T {
#[cfg(feature = "gecko")]
assert!(
!self.is_read_only_lock() && self.same_lock_as(Some(&guard.0)),
"Locked::write_with called with a guard from a read only or unrelated SharedRwLock"
);
#[cfg(not(feature = "gecko"))]
assert!(self.same_lock_as(&guard.0));
let ptr = self.data.get();
// Unsafe:
//
// * The guard guarantees that the lock is taken for writing,
// and we’ve checked that it’s the correct lock.
// * The returned reference borrows *both* the data and the guard,
// so that it can outlive neither.
// * We require a mutable borrow of the guard,
// so that one write guard can only be used once at a time.
unsafe { &mut *ptr }
}
}
#[cfg(feature = "gecko")]
impl<T: ToShmem> ToShmem for Locked<T> {
fn to_shmem(&self, builder: &mut SharedMemoryBuilder) -> to_shmem::Result<Self> {
use std::mem::ManuallyDrop;
let guard = self.shared_lock.read();
Ok(ManuallyDrop::new(Locked {
shared_lock: SharedRwLock::read_only(),
data: UnsafeCell::new(ManuallyDrop::into_inner(
self.read_with(&guard).to_shmem(builder)?,
)),
}))
}
}
#[cfg(feature = "servo")]
impl<T: ToShmem> ToShmem for Locked<T> {
fn to_shmem(&self, _builder: &mut SharedMemoryBuilder) -> to_shmem::Result<Self> {
panic!("ToShmem not supported in Servo currently")
}
}
#[allow(dead_code)]
mod compile_time_assert {
use super::{SharedRwLockReadGuard, SharedRwLockWriteGuard};
trait Marker1 {}
impl<T: Clone> Marker1 for T {}
impl<'a> Marker1 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard: !Clone
impl<'a> Marker1 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard: !Clone
trait Marker2 {}
impl<T: Copy> Marker2 for T {}
impl<'a> Marker2 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard: !Copy
impl<'a> Marker2 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard: !Copy
}
/// Like ToCss, but with a lock guard given by the caller, and with the writer specified
/// concretely rather than with a parameter.
pub trait ToCssWithGuard {
/// Serialize `self` in CSS syntax, writing to `dest`, using the given lock guard.
fn to_css(&self, guard: &SharedRwLockReadGuard, dest: &mut CssStringWriter) -> fmt::Result;
/// Serialize `self` in CSS syntax using the given lock guard and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self, guard: &SharedRwLockReadGuard) -> CssString {
let mut s = CssString::new();
self.to_css(guard, &mut s).unwrap();
s
}
}
/// Parameters needed for deep clones.
#[cfg(feature = "gecko")]
pub struct DeepCloneParams {
/// The new sheet we're cloning rules into.
pub reference_sheet: *const crate::gecko_bindings::structs::StyleSheet,
}
/// Parameters needed for deep clones.
#[cfg(feature = "servo")]
|
/// A trait to do a deep clone of a given CSS type. Gets a lock and a read
/// guard, in order to be able to read and clone nested structures.
pub trait DeepCloneWithLock: Sized {
/// Deep clones this object.
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard,
params: &DeepCloneParams,
) -> Self;
}
/// Guards for a document
#[derive(Clone)]
pub struct StylesheetGuards<'a> {
/// For author-origin stylesheets.
pub author: &'a SharedRwLockReadGuard<'a>,
/// For user-agent-origin and user-origin stylesheets
pub ua_or_user: &'a SharedRwLockReadGuard<'a>,
}
impl<'a> StylesheetGuards<'a> {
/// Get the guard for a given stylesheet origin.
pub fn for_origin(&self, origin: Origin) -> &SharedRwLockReadGuard<'a> {
match origin {
Origin::Author => &self.author,
_ => &self.ua_or_user,
}
}
/// Same guard for all origins
pub fn same(guard: &'a SharedRwLockReadGuard<'a>) -> Self {
StylesheetGuards {
author: guard,
ua_or_user: guard,
}
}
}
|
pub struct DeepCloneParams;
|
random_line_split
|
shared_lock.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Different objects protected by the same lock
use crate::str::{CssString, CssStringWriter};
use crate::stylesheets::Origin;
#[cfg(feature = "gecko")]
use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut};
#[cfg(feature = "servo")]
use parking_lot::RwLock;
use servo_arc::Arc;
use std::cell::UnsafeCell;
use std::fmt;
#[cfg(feature = "servo")]
use std::mem;
#[cfg(feature = "gecko")]
use std::ptr;
use to_shmem::{SharedMemoryBuilder, ToShmem};
/// A shared read/write lock that can protect multiple objects.
///
/// In Gecko builds, we don't need the blocking behavior, just the safety. As
/// such we implement this with an AtomicRefCell instead in Gecko builds,
/// which is ~2x as fast, and panics (rather than deadlocking) when things go
/// wrong (which is much easier to debug on CI).
///
/// Servo needs the blocking behavior for its unsynchronized animation setup,
/// but that may not be web-compatible and may need to be changed (at which
/// point Servo could use AtomicRefCell too).
///
/// Gecko also needs the ability to have "read only" SharedRwLocks, which are
/// used for objects stored in (read only) shared memory. Attempting to acquire
/// write access to objects protected by a read only SharedRwLock will panic.
#[derive(Clone)]
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
pub struct SharedRwLock {
#[cfg(feature = "servo")]
#[cfg_attr(feature = "servo", ignore_malloc_size_of = "Arc")]
arc: Arc<RwLock<()>>,
#[cfg(feature = "gecko")]
cell: Option<Arc<AtomicRefCell<SomethingZeroSizedButTyped>>>,
}
#[cfg(feature = "gecko")]
struct SomethingZeroSizedButTyped;
impl fmt::Debug for SharedRwLock {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("SharedRwLock")
}
}
impl SharedRwLock {
/// Create a new shared lock (servo).
#[cfg(feature = "servo")]
pub fn new() -> Self {
SharedRwLock {
arc: Arc::new(RwLock::new(())),
}
}
/// Create a new shared lock (gecko).
#[cfg(feature = "gecko")]
pub fn new() -> Self {
SharedRwLock {
cell: Some(Arc::new(AtomicRefCell::new(SomethingZeroSizedButTyped))),
}
}
/// Create a new global shared lock (servo).
#[cfg(feature = "servo")]
pub fn new_leaked() -> Self {
SharedRwLock {
arc: Arc::new_leaked(RwLock::new(())),
}
}
/// Create a new global shared lock (gecko).
#[cfg(feature = "gecko")]
pub fn new_leaked() -> Self {
SharedRwLock {
cell: Some(Arc::new_leaked(AtomicRefCell::new(
SomethingZeroSizedButTyped,
))),
}
}
/// Create a new read-only shared lock (gecko).
#[cfg(feature = "gecko")]
pub fn read_only() -> Self {
SharedRwLock { cell: None }
}
/// Wrap the given data to make its access protected by this lock.
pub fn wrap<T>(&self, data: T) -> Locked<T> {
Locked {
shared_lock: self.clone(),
data: UnsafeCell::new(data),
}
}
/// Obtain the lock for reading (servo).
#[cfg(feature = "servo")]
pub fn read(&self) -> SharedRwLockReadGuard {
mem::forget(self.arc.read());
SharedRwLockReadGuard(self)
}
/// Obtain the lock for reading (gecko).
#[cfg(feature = "gecko")]
pub fn read(&self) -> SharedRwLockReadGuard {
SharedRwLockReadGuard(self.cell.as_ref().map(|cell| cell.borrow()))
}
/// Obtain the lock for writing (servo).
#[cfg(feature = "servo")]
pub fn write(&self) -> SharedRwLockWriteGuard {
mem::forget(self.arc.write());
SharedRwLockWriteGuard(self)
}
/// Obtain the lock for writing (gecko).
#[cfg(feature = "gecko")]
pub fn write(&self) -> SharedRwLockWriteGuard {
SharedRwLockWriteGuard(self.cell.as_ref().unwrap().borrow_mut())
}
}
/// Proof that a shared lock was obtained for reading (servo).
#[cfg(feature = "servo")]
pub struct SharedRwLockReadGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for reading (gecko).
#[cfg(feature = "gecko")]
pub struct SharedRwLockReadGuard<'a>(Option<AtomicRef<'a, SomethingZeroSizedButTyped>>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockReadGuard<'a> {
fn drop(&mut self) {
// Unsafe: self.lock is private to this module, only ever set after `read()`,
// and never copied or cloned (see `compile_time_assert` below).
unsafe { self.0.arc.force_unlock_read() }
}
}
/// Proof that a shared lock was obtained for writing (servo).
#[cfg(feature = "servo")]
pub struct SharedRwLockWriteGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for writing (gecko).
#[cfg(feature = "gecko")]
pub struct SharedRwLockWriteGuard<'a>(AtomicRefMut<'a, SomethingZeroSizedButTyped>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockWriteGuard<'a> {
fn drop(&mut self) {
// Unsafe: self.lock is private to this module, only ever set after `write()`,
// and never copied or cloned (see `compile_time_assert` below).
unsafe { self.0.arc.force_unlock_write() }
}
}
/// Data protected by a shared lock.
pub struct Locked<T> {
shared_lock: SharedRwLock,
data: UnsafeCell<T>,
}
// Unsafe: the data inside `UnsafeCell` is only accessed in `read_with` and `write_with`,
// where guards ensure synchronization.
unsafe impl<T: Send> Send for Locked<T> {}
unsafe impl<T: Send + Sync> Sync for Locked<T> {}
impl<T: fmt::Debug> fmt::Debug for Locked<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let guard = self.shared_lock.read();
self.read_with(&guard).fmt(f)
}
}
impl<T> Locked<T> {
#[cfg(feature = "gecko")]
#[inline]
fn is_read_only_lock(&self) -> bool {
self.shared_lock.cell.is_none()
}
#[cfg(feature = "servo")]
fn same_lock_as(&self, lock: &SharedRwLock) -> bool {
Arc::ptr_eq(&self.shared_lock.arc, &lock.arc)
}
#[cfg(feature = "gecko")]
fn same_lock_as(&self, derefed_guard: Option<&SomethingZeroSizedButTyped>) -> bool {
ptr::eq(
self.shared_lock
.cell
.as_ref()
.map(|cell| cell.as_ptr())
.unwrap_or(ptr::null_mut()),
derefed_guard
.map(|guard| guard as *const _ as *mut _)
.unwrap_or(ptr::null_mut()),
)
}
/// Access the data for reading.
pub fn read_with<'a>(&'a self, guard: &'a SharedRwLockReadGuard) -> &'a T {
#[cfg(feature = "gecko")]
assert!(
self.is_read_only_lock() || self.same_lock_as(guard.0.as_ref().map(|r| &**r)),
"Locked::read_with called with a guard from an unrelated SharedRwLock"
);
#[cfg(not(feature = "gecko"))]
assert!(self.same_lock_as(&guard.0));
let ptr = self.data.get();
// Unsafe:
//
// * The guard guarantees that the lock is taken for reading,
// and we’ve checked that it’s the correct lock.
// * The returned reference borrows *both* the data and the guard,
// so that it can outlive neither.
unsafe { &*ptr }
}
/// Access the data for reading without verifying the lock. Use with caution.
#[cfg(feature = "gecko")]
pub unsafe fn read_unchecked<'a>(&'a self) -> &'a T {
let ptr = self.data.get();
&*ptr
}
/// Access the data for writing.
pub fn write_with<'a>(&'a self, guard: &'a mut SharedRwLockWriteGuard) -> &'a mut T {
#[cfg(feature = "gecko")]
assert!(
!self.is_read_only_lock() && self.same_lock_as(Some(&guard.0)),
"Locked::write_with called with a guard from a read only or unrelated SharedRwLock"
);
#[cfg(not(feature = "gecko"))]
assert!(self.same_lock_as(&guard.0));
let ptr = self.data.get();
// Unsafe:
//
// * The guard guarantees that the lock is taken for writing,
// and we’ve checked that it’s the correct lock.
// * The returned reference borrows *both* the data and the guard,
// so that it can outlive neither.
// * We require a mutable borrow of the guard,
// so that one write guard can only be used once at a time.
unsafe { &mut *ptr }
}
}
#[cfg(feature = "gecko")]
impl<T: ToShmem> ToShmem for Locked<T> {
fn to_shmem(&self, builder: &mut SharedMemoryBuilder) -> to_shmem::Result<Self> {
use std::mem::ManuallyDrop;
let guard = self.shared_lock.read();
Ok(ManuallyDrop::new(Locked {
shared_lock: SharedRwLock::read_only(),
data: UnsafeCell::new(ManuallyDrop::into_inner(
self.read_with(&guard).to_shmem(builder)?,
)),
}))
}
}
#[cfg(feature = "servo")]
impl<T: ToShmem> ToShmem for Locked<T> {
fn to_shmem(&self, _builder: &mut SharedMemoryBuilder) -> to_shmem::Result<Self> {
panic!("ToShmem not supported in Servo currently")
}
}
#[allow(dead_code)]
mod compile_time_assert {
use super::{SharedRwLockReadGuard, SharedRwLockWriteGuard};
trait Marker1 {}
impl<T: Clone> Marker1 for T {}
impl<'a> Marker1 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard: !Clone
impl<'a> Marker1 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard: !Clone
trait Marker2 {}
impl<T: Copy> Marker2 for T {}
impl<'a> Marker2 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard: !Copy
impl<'a> Marker2 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard: !Copy
}
/// Like ToCss, but with a lock guard given by the caller, and with the writer specified
/// concretely rather than with a parameter.
pub trait ToCssWithGuard {
/// Serialize `self` in CSS syntax, writing to `dest`, using the given lock guard.
fn to_css(&self, guard: &SharedRwLockReadGuard, dest: &mut CssStringWriter) -> fmt::Result;
/// Serialize `self` in CSS syntax using the given lock guard and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self, guard: &SharedRwLockReadGuard) -> CssString {
let mut s = CssString::new();
self.to_css(guard, &mut s).unwrap();
s
}
}
/// Parameters needed for deep clones.
#[cfg(feature = "gecko")]
pub struct DeepCloneParams {
/// The new sheet we're cloning rules into.
pub reference_sheet: *const crate::gecko_bindings::structs::StyleSheet,
}
/// Parameters needed for deep clones.
#[cfg(feature = "servo")]
pub struct DeepClon
|
trait to do a deep clone of a given CSS type. Gets a lock and a read
/// guard, in order to be able to read and clone nested structures.
pub trait DeepCloneWithLock: Sized {
/// Deep clones this object.
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard,
params: &DeepCloneParams,
) -> Self;
}
/// Guards for a document
#[derive(Clone)]
pub struct StylesheetGuards<'a> {
/// For author-origin stylesheets.
pub author: &'a SharedRwLockReadGuard<'a>,
/// For user-agent-origin and user-origin stylesheets
pub ua_or_user: &'a SharedRwLockReadGuard<'a>,
}
impl<'a> StylesheetGuards<'a> {
/// Get the guard for a given stylesheet origin.
pub fn for_origin(&self, origin: Origin) -> &SharedRwLockReadGuard<'a> {
match origin {
Origin::Author => &self.author,
_ => &self.ua_or_user,
}
}
/// Same guard for all origins
pub fn same(guard: &'a SharedRwLockReadGuard<'a>) -> Self {
StylesheetGuards {
author: guard,
ua_or_user: guard,
}
}
}
|
eParams;
/// A
|
identifier_name
|
mta-vs-native.js
|
// 2016-08-22
//
// This is the wrapper for the native side
/*
prelim: start a server from the lib dir:
python -m SimpleHTTPServer (python 2.x) (linux)
python -m http.server (python 3.x) (windows)
1a) load jquery (note: may need a real active file open for this to work)
Note: get message 'VM148:52 Uncaught (in promise) TypeError: Failed to execute 'observe' on 'MutationObserver': parameter 1 is not of type 'Node'.(…)'
if a real file is not open and active when loading the second file.
Note: refer to the yaml library as 'jsyaml', e.g. 'jsyaml.load()'
fetch('http://code.jquery.com/jquery-latest.min.js').then(r => r.text()).then(r => {eval(r); eval(r);});
1b) make sure you're editing a real file.
test: make sure:
document.getElementById('workbench.editors.files.textFileEditor')
1c) load all the other files
Note: you'll get this message if not in a split-panel
'editor-theme-change-listener.js:39 Uncaught (in promise) TypeError: Failed to execute 'observe' on 'MutationObserver': parameter 1 is not of type 'Node''
document.MTA_VS = {};
$.when(
fetch('http://localhost:8000/editor-theme-change-listener.js').then(r => r.text()).then(r => eval(r)),
fetch('http://localhost:8000/local-theme-manager-native.js').then(r => r.text()).then(r => eval(r)),
fetch('http://localhost:8000/mta-vs-native.js').then(r => r.text()).then(r => eval(r))
)
|
.done(function(first_call, second_call, third_call){
console.log('all loaded');
})
.fail(function(){
console.log('load failed');
});
// Note: the server now starts automatically when you run 'mta-vs'
3)
// then do a word setup so a "Theme:" appears in the status line
4) then change the theme with 'mta-vs'
*/
fetch('http://localhost:8000/cmd-channel-listener-native.js')
.then(r => r.text())
.then(r => eval(r))
.then(() => {
console.log('mta-vs-native: Now in final then for cmd-channel-listener setup.')
var cmdChannelListenerNative = new document.CmdChannelListenerNative();
cmdChannelListenerNative.observe();
});
//@ sourceURL=mta-vs-native.js
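// ---------------------------------------------------------------------------
// Sketch (added for illustration; not part of the original file). The setup
// described in the comment block above boils down to fetching each helper
// script from the local server and eval'ing it in order; the helper below
// captures that pattern. The URL list in the commented-out call is an
// assumption based on the files mentioned above.
// ---------------------------------------------------------------------------
function loadScriptsSequentially(urls) {
  // Chain the fetch/eval of each script so they execute strictly in order.
  return urls.reduce(
    (chain, url) =>
      chain
        .then(() => fetch(url))
        .then(r => r.text())
        .then(src => eval(src)),
    Promise.resolve()
  );
}
// loadScriptsSequentially([
//   'http://localhost:8000/editor-theme-change-listener.js',
//   'http://localhost:8000/local-theme-manager-native.js',
//   'http://localhost:8000/mta-vs-native.js',
// ]);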
|
random_line_split
|
|
rotation.rs
|
// Copyright 2015 The CGMath Developers. For a full listing of the authors,
// refer to the Cargo.toml file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate cgmath;
use cgmath::*;
mod rotation {
use super::cgmath::*;
pub fn a2<R: Rotation2<f64>>() -> R {
Rotation2::from_angle(deg(30.0).to_rad())
}
pub fn a3<R: Rotation3<f64>>() -> R {
let axis = Vector3::new(1.0, 1.0, 0.0).normalize();
Rotation3::from_axis_angle(&axis, deg(30.0).to_rad())
}
}
#[test]
fn test_invert_basis2() {
let a: Basis2<_> = rotation::a2();
assert!(a.concat(&a.invert()).as_matrix2().is_identity());
}
#[test]
fn
|
() {
let a: Basis3<_> = rotation::a3();
assert!(a.concat(&a.invert()).as_matrix3().is_identity());
}
|
test_invert_basis3
|
identifier_name
|
rotation.rs
|
// Copyright 2015 The CGMath Developers. For a full listing of the authors,
// refer to the Cargo.toml file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate cgmath;
use cgmath::*;
mod rotation {
use super::cgmath::*;
pub fn a2<R: Rotation2<f64>>() -> R {
Rotation2::from_angle(deg(30.0).to_rad())
}
pub fn a3<R: Rotation3<f64>>() -> R {
let axis = Vector3::new(1.0, 1.0, 0.0).normalize();
Rotation3::from_axis_angle(&axis, deg(30.0).to_rad())
}
}
#[test]
fn test_invert_basis2() {
|
}
#[test]
fn test_invert_basis3() {
let a: Basis3<_> = rotation::a3();
assert!(a.concat(&a.invert()).as_matrix3().is_identity());
}
|
let a: Basis2<_> = rotation::a2();
assert!(a.concat(&a.invert()).as_matrix2().is_identity());
|
random_line_split
|
rotation.rs
|
// Copyright 2015 The CGMath Developers. For a full listing of the authors,
// refer to the Cargo.toml file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate cgmath;
use cgmath::*;
mod rotation {
use super::cgmath::*;
pub fn a2<R: Rotation2<f64>>() -> R {
Rotation2::from_angle(deg(30.0).to_rad())
}
pub fn a3<R: Rotation3<f64>>() -> R {
let axis = Vector3::new(1.0, 1.0, 0.0).normalize();
Rotation3::from_axis_angle(&axis, deg(30.0).to_rad())
}
}
#[test]
fn test_invert_basis2()
|
#[test]
fn test_invert_basis3() {
let a: Basis3<_> = rotation::a3();
assert!(a.concat(&a.invert()).as_matrix3().is_identity());
}
|
{
let a: Basis2<_> = rotation::a2();
assert!(a.concat(&a.invert()).as_matrix2().is_identity());
}
|
identifier_body
|
kde.js
|
// Based on http://bl.ocks.org/900762 by John Firebaugh
d3.json("../data/faithful.json", function(faithful) {
data = faithful;
var w = 800,
h = 400,
x = d3.scale.linear().domain([30, 110]).range([0, w]);
bins = d3.layout.histogram().frequency(false).bins(x.ticks(60))(data),
max = d3.max(bins, function(d) { return d.y; }),
y = d3.scale.linear().domain([0, .1]).range([0, h]),
kde = science.stats.kde().sample(data);
var vis = d3.select("body")
.append("svg")
.attr("width", w)
.attr("height", h);
var bars = vis.selectAll("g.bar")
.data(bins)
.enter().append("g")
.attr("class", "bar")
.attr("transform", function(d, i) {
return "translate(" + x(d.x) + "," + (h - y(d.y)) + ")";
});
bars.append("rect")
.attr("fill", "steelblue")
.attr("width", function(d) { return x(d.dx + 30) - 1; })
.attr("height", function(d) { return y(d.y); });
var line = d3.svg.line()
.x(function(d) { return x(d[0]); })
.y(function(d) { return h - y(d[1]); });
vis.selectAll("path")
.data(d3.values(science.stats.bandwidth))
.enter().append("path")
|
.attr("d", function(h) {
return line(kde.bandwidth(h)(d3.range(30, 110, .1)));
});
});
|
random_line_split
|
|
sample_cli.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
from cffi import FFI
from pprint import pprint
ffi = FFI()
ffi.cdef('''
typedef struct {
int cmd;
int version;
} ProtoHelo;
''')
ffi.cdef('''
typedef struct {
int cmd;
int msgLen;
char msg[10];
} ProtoEcho;
''')
@asyncio.coroutine
def sample_cli(loop):
|
loop = asyncio.get_event_loop()
loop.run_until_complete(sample_cli(loop))
loop.close()
|
reader, writer = yield from asyncio.open_connection(
'127.0.0.1', 8888, loop=loop
)
print('Connected.')
helo = ffi.new('ProtoHelo[]', 1)
ffi.buffer(helo)[:] = yield from reader.read(ffi.sizeof(helo))
print('Received Helo: {}, {}'.format(
helo[0].cmd, helo[0].version
))
for i in range(0, 100+1):
sendMsg = 'msg_{}'.format(i)
sendEcho = ffi.new('ProtoEcho[]', [(i, len(sendMsg), sendMsg.encode('utf-8'))])
writer.write(bytes(ffi.buffer(sendEcho)))
yield from writer.drain()
recvEcho = ffi.new('ProtoEcho[]', 1)
try:
ffi.buffer(recvEcho)[:] = yield from reader.read(ffi.sizeof(recvEcho))
except ValueError as e:
print('ValueError: ', e)
break
print('Received {}, {}, {}'.format(
recvEcho[0].cmd,
recvEcho[0].msgLen,
ffi.string(recvEcho[0].msg).decode('utf-8')
))
writer.close()
|
identifier_body
|
sample_cli.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
from cffi import FFI
from pprint import pprint
ffi = FFI()
ffi.cdef('''
typedef struct {
int cmd;
int version;
} ProtoHelo;
''')
ffi.cdef('''
typedef struct {
int cmd;
int msgLen;
char msg[10];
} ProtoEcho;
''')
@asyncio.coroutine
def sample_cli(loop):
reader, writer = yield from asyncio.open_connection(
'127.0.0.1', 8888, loop=loop
)
print('Connected.')
helo = ffi.new('ProtoHelo[]', 1)
ffi.buffer(helo)[:] = yield from reader.read(ffi.sizeof(helo))
print('Received Helo: {}, {}'.format(
helo[0].cmd, helo[0].version
))
for i in range(0, 100+1):
|
writer.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(sample_cli(loop))
loop.close()
|
sendMsg = 'msg_{}'.format(i)
sendEcho = ffi.new('ProtoEcho[]', [(i, len(sendMsg), sendMsg.encode('utf-8'))])
writer.write(bytes(ffi.buffer(sendEcho)))
yield from writer.drain()
recvEcho = ffi.new('ProtoEcho[]', 1)
try:
ffi.buffer(recvEcho)[:] = yield from reader.read(ffi.sizeof(recvEcho))
except ValueError as e:
print('ValueError: ', e)
break
print('Received {}, {}, {}'.format(
recvEcho[0].cmd,
recvEcho[0].msgLen,
ffi.string(recvEcho[0].msg).decode('utf-8')
))
|
conditional_block
|
sample_cli.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
from cffi import FFI
from pprint import pprint
ffi = FFI()
ffi.cdef('''
typedef struct {
int cmd;
int version;
} ProtoHelo;
''')
|
} ProtoEcho;
''')
@asyncio.coroutine
def sample_cli(loop):
reader, writer = yield from asyncio.open_connection(
'127.0.0.1', 8888, loop=loop
)
print('Connected.')
helo = ffi.new('ProtoHelo[]', 1)
ffi.buffer(helo)[:] = yield from reader.read(ffi.sizeof(helo))
print('Received Helo: {}, {}'.format(
helo[0].cmd, helo[0].version
))
for i in range(0, 100+1):
sendMsg = 'msg_{}'.format(i)
sendEcho = ffi.new('ProtoEcho[]', [(i, len(sendMsg), sendMsg.encode('utf-8'))])
writer.write(bytes(ffi.buffer(sendEcho)))
yield from writer.drain()
recvEcho = ffi.new('ProtoEcho[]', 1)
try:
ffi.buffer(recvEcho)[:] = yield from reader.read(ffi.sizeof(recvEcho))
except ValueError as e:
print('ValueError: ', e)
break
print('Received {}, {}, {}'.format(
recvEcho[0].cmd,
recvEcho[0].msgLen,
ffi.string(recvEcho[0].msg).decode('utf-8')
))
writer.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(sample_cli(loop))
loop.close()
|
ffi.cdef('''
typedef struct {
int cmd;
int msgLen;
char msg[10];
|
random_line_split
|
sample_cli.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
from cffi import FFI
from pprint import pprint
ffi = FFI()
ffi.cdef('''
typedef struct {
int cmd;
int version;
} ProtoHelo;
''')
ffi.cdef('''
typedef struct {
int cmd;
int msgLen;
char msg[10];
} ProtoEcho;
''')
@asyncio.coroutine
def
|
(loop):
reader, writer = yield from asyncio.open_connection(
'127.0.0.1', 8888, loop=loop
)
print('Connected.')
helo = ffi.new('ProtoHelo[]', 1)
ffi.buffer(helo)[:] = yield from reader.read(ffi.sizeof(helo))
print('Received Helo: {}, {}'.format(
helo[0].cmd, helo[0].version
))
for i in range(0, 100+1):
sendMsg = 'msg_{}'.format(i)
sendEcho = ffi.new('ProtoEcho[]', [(i, len(sendMsg), sendMsg.encode('utf-8'))])
writer.write(bytes(ffi.buffer(sendEcho)))
yield from writer.drain()
recvEcho = ffi.new('ProtoEcho[]', 1)
try:
ffi.buffer(recvEcho)[:] = yield from reader.read(ffi.sizeof(recvEcho))
except ValueError as e:
print('ValueError: ', e)
break
print('Received {}, {}, {}'.format(
recvEcho[0].cmd,
recvEcho[0].msgLen,
ffi.string(recvEcho[0].msg).decode('utf-8')
))
writer.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(sample_cli(loop))
loop.close()
|
sample_cli
|
identifier_name
|
doc_test_lints.rs
|
//! This pass is overloaded and runs two different lints.
//!
//! - MISSING_DOC_CODE_EXAMPLES: this lint is **UNSTABLE** and looks for public items missing doctests
//! - PRIVATE_DOC_TESTS: this lint is **STABLE** and looks for private items with doctests.
use super::Pass;
use crate::clean;
use crate::clean::*;
use crate::core::DocContext;
use crate::fold::DocFolder;
use crate::html::markdown::{find_testable_code, ErrorCodes, Ignore, LangString};
use crate::visit_ast::inherits_doc_hidden;
use rustc_middle::lint::LintLevelSource;
use rustc_session::lint;
use rustc_span::symbol::sym;
crate const CHECK_PRIVATE_ITEMS_DOC_TESTS: Pass = Pass {
name: "check-private-items-doc-tests",
run: check_private_items_doc_tests,
description: "check private items doc tests",
};
struct PrivateItemDocTestLinter<'a, 'tcx> {
cx: &'a mut DocContext<'tcx>,
}
crate fn check_private_items_doc_tests(krate: Crate, cx: &mut DocContext<'_>) -> Crate {
let mut coll = PrivateItemDocTestLinter { cx };
coll.fold_crate(krate)
}
impl<'a, 'tcx> DocFolder for PrivateItemDocTestLinter<'a, 'tcx> {
fn fold_item(&mut self, item: Item) -> Option<Item> {
let dox = item.attrs.collapsed_doc_value().unwrap_or_else(String::new);
look_for_tests(self.cx, &dox, &item);
Some(self.fold_item_recur(item))
}
|
}
pub(crate) struct Tests {
pub(crate) found_tests: usize,
}
impl crate::doctest::Tester for Tests {
fn add_test(&mut self, _: String, config: LangString, _: usize) {
if config.rust && config.ignore == Ignore::None {
self.found_tests += 1;
}
}
}
crate fn should_have_doc_example(cx: &DocContext<'_>, item: &clean::Item) -> bool {
if !cx.cache.access_levels.is_public(item.def_id.expect_def_id())
|| matches!(
*item.kind,
clean::StructFieldItem(_)
| clean::VariantItem(_)
| clean::AssocConstItem(_, _)
| clean::AssocTypeItem(_, _)
| clean::TypedefItem(_, _)
| clean::StaticItem(_)
| clean::ConstantItem(_)
| clean::ExternCrateItem { .. }
| clean::ImportItem(_)
| clean::PrimitiveItem(_)
| clean::KeywordItem(_)
)
{
return false;
}
// The `expect_def_id()` should be okay because `local_def_id_to_hir_id`
// would presumably panic if a fake `DefIndex` were passed.
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(item.def_id.expect_def_id().expect_local());
if cx.tcx.hir().attrs(hir_id).lists(sym::doc).has_word(sym::hidden)
|| inherits_doc_hidden(cx.tcx, hir_id)
{
return false;
}
let (level, source) = cx.tcx.lint_level_at_node(crate::lint::MISSING_DOC_CODE_EXAMPLES, hir_id);
level != lint::Level::Allow || matches!(source, LintLevelSource::Default)
}
crate fn look_for_tests<'tcx>(cx: &DocContext<'tcx>, dox: &str, item: &Item) {
let hir_id = match DocContext::as_local_hir_id(cx.tcx, item.def_id) {
Some(hir_id) => hir_id,
None => {
// If non-local, no need to check anything.
return;
}
};
let mut tests = Tests { found_tests: 0 };
find_testable_code(&dox, &mut tests, ErrorCodes::No, false, None);
if tests.found_tests == 0 && cx.tcx.sess.is_nightly_build() {
if should_have_doc_example(cx, &item) {
debug!("reporting error for {:?} (hir_id={:?})", item, hir_id);
let sp = item.attr_span(cx.tcx);
cx.tcx.struct_span_lint_hir(
crate::lint::MISSING_DOC_CODE_EXAMPLES,
hir_id,
sp,
|lint| lint.build("missing code example in this documentation").emit(),
);
}
} else if tests.found_tests > 0
&& !cx.cache.access_levels.is_public(item.def_id.expect_def_id())
{
cx.tcx.struct_span_lint_hir(
crate::lint::PRIVATE_DOC_TESTS,
hir_id,
item.attr_span(cx.tcx),
|lint| lint.build("documentation test in private item").emit(),
);
}
}
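// ---------------------------------------------------------------------------
// Illustration (added; not part of the original pass). From a user's crate,
// the two lints roughly behave like this (crate and item names are
// hypothetical):
//
//   /// Adds one.
//   /// ```
//   /// assert_eq!(my_crate::add_one(1), 2);
//   /// ```
//   pub fn add_one(x: i32) -> i32 { x + 1 }   // public item with a code
//                                             // example: satisfies
//                                             // MISSING_DOC_CODE_EXAMPLES
//
//   /// ```
//   /// assert!(true);
//   /// ```
//   fn private_helper() {}                    // doctest on a private item:
//                                             // PRIVATE_DOC_TESTS fires
// ---------------------------------------------------------------------------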
|
random_line_split
|
|
doc_test_lints.rs
|
//! This pass is overloaded and runs two different lints.
//!
//! - MISSING_DOC_CODE_EXAMPLES: this lint is **UNSTABLE** and looks for public items missing doctests
//! - PRIVATE_DOC_TESTS: this lint is **STABLE** and looks for private items with doctests.
use super::Pass;
use crate::clean;
use crate::clean::*;
use crate::core::DocContext;
use crate::fold::DocFolder;
use crate::html::markdown::{find_testable_code, ErrorCodes, Ignore, LangString};
use crate::visit_ast::inherits_doc_hidden;
use rustc_middle::lint::LintLevelSource;
use rustc_session::lint;
use rustc_span::symbol::sym;
crate const CHECK_PRIVATE_ITEMS_DOC_TESTS: Pass = Pass {
name: "check-private-items-doc-tests",
run: check_private_items_doc_tests,
description: "check private items doc tests",
};
struct PrivateItemDocTestLinter<'a, 'tcx> {
cx: &'a mut DocContext<'tcx>,
}
crate fn check_private_items_doc_tests(krate: Crate, cx: &mut DocContext<'_>) -> Crate {
let mut coll = PrivateItemDocTestLinter { cx };
coll.fold_crate(krate)
}
impl<'a, 'tcx> DocFolder for PrivateItemDocTestLinter<'a, 'tcx> {
fn fold_item(&mut self, item: Item) -> Option<Item> {
let dox = item.attrs.collapsed_doc_value().unwrap_or_else(String::new);
look_for_tests(self.cx, &dox, &item);
Some(self.fold_item_recur(item))
}
}
pub(crate) struct Tests {
pub(crate) found_tests: usize,
}
impl crate::doctest::Tester for Tests {
fn add_test(&mut self, _: String, config: LangString, _: usize) {
if config.rust && config.ignore == Ignore::None {
self.found_tests += 1;
}
}
}
crate fn should_have_doc_example(cx: &DocContext<'_>, item: &clean::Item) -> bool {
if !cx.cache.access_levels.is_public(item.def_id.expect_def_id())
|| matches!(
*item.kind,
clean::StructFieldItem(_)
| clean::VariantItem(_)
| clean::AssocConstItem(_, _)
| clean::AssocTypeItem(_, _)
| clean::TypedefItem(_, _)
| clean::StaticItem(_)
| clean::ConstantItem(_)
| clean::ExternCrateItem { .. }
| clean::ImportItem(_)
| clean::PrimitiveItem(_)
| clean::KeywordItem(_)
)
{
return false;
}
// The `expect_def_id()` should be okay because `local_def_id_to_hir_id`
// would presumably panic if a fake `DefIndex` were passed.
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(item.def_id.expect_def_id().expect_local());
if cx.tcx.hir().attrs(hir_id).lists(sym::doc).has_word(sym::hidden)
|| inherits_doc_hidden(cx.tcx, hir_id)
{
return false;
}
let (level, source) = cx.tcx.lint_level_at_node(crate::lint::MISSING_DOC_CODE_EXAMPLES, hir_id);
level != lint::Level::Allow || matches!(source, LintLevelSource::Default)
}
crate fn
|
<'tcx>(cx: &DocContext<'tcx>, dox: &str, item: &Item) {
let hir_id = match DocContext::as_local_hir_id(cx.tcx, item.def_id) {
Some(hir_id) => hir_id,
None => {
// If non-local, no need to check anything.
return;
}
};
let mut tests = Tests { found_tests: 0 };
find_testable_code(&dox, &mut tests, ErrorCodes::No, false, None);
if tests.found_tests == 0 && cx.tcx.sess.is_nightly_build() {
if should_have_doc_example(cx, &item) {
debug!("reporting error for {:?} (hir_id={:?})", item, hir_id);
let sp = item.attr_span(cx.tcx);
cx.tcx.struct_span_lint_hir(
crate::lint::MISSING_DOC_CODE_EXAMPLES,
hir_id,
sp,
|lint| lint.build("missing code example in this documentation").emit(),
);
}
} else if tests.found_tests > 0
&& !cx.cache.access_levels.is_public(item.def_id.expect_def_id())
{
cx.tcx.struct_span_lint_hir(
crate::lint::PRIVATE_DOC_TESTS,
hir_id,
item.attr_span(cx.tcx),
|lint| lint.build("documentation test in private item").emit(),
);
}
}
|
look_for_tests
|
identifier_name
|
doc_test_lints.rs
|
//! This pass is overloaded and runs two different lints.
//!
//! - MISSING_DOC_CODE_EXAMPLES: this lint is **UNSTABLE** and looks for public items missing doctests
//! - PRIVATE_DOC_TESTS: this lint is **STABLE** and looks for private items with doctests.
use super::Pass;
use crate::clean;
use crate::clean::*;
use crate::core::DocContext;
use crate::fold::DocFolder;
use crate::html::markdown::{find_testable_code, ErrorCodes, Ignore, LangString};
use crate::visit_ast::inherits_doc_hidden;
use rustc_middle::lint::LintLevelSource;
use rustc_session::lint;
use rustc_span::symbol::sym;
crate const CHECK_PRIVATE_ITEMS_DOC_TESTS: Pass = Pass {
name: "check-private-items-doc-tests",
run: check_private_items_doc_tests,
description: "check private items doc tests",
};
struct PrivateItemDocTestLinter<'a, 'tcx> {
cx: &'a mut DocContext<'tcx>,
}
crate fn check_private_items_doc_tests(krate: Crate, cx: &mut DocContext<'_>) -> Crate {
let mut coll = PrivateItemDocTestLinter { cx };
coll.fold_crate(krate)
}
impl<'a, 'tcx> DocFolder for PrivateItemDocTestLinter<'a, 'tcx> {
fn fold_item(&mut self, item: Item) -> Option<Item>
|
}
pub(crate) struct Tests {
pub(crate) found_tests: usize,
}
impl crate::doctest::Tester for Tests {
fn add_test(&mut self, _: String, config: LangString, _: usize) {
if config.rust && config.ignore == Ignore::None {
self.found_tests += 1;
}
}
}
crate fn should_have_doc_example(cx: &DocContext<'_>, item: &clean::Item) -> bool {
if !cx.cache.access_levels.is_public(item.def_id.expect_def_id())
|| matches!(
*item.kind,
clean::StructFieldItem(_)
| clean::VariantItem(_)
| clean::AssocConstItem(_, _)
| clean::AssocTypeItem(_, _)
| clean::TypedefItem(_, _)
| clean::StaticItem(_)
| clean::ConstantItem(_)
| clean::ExternCrateItem { .. }
| clean::ImportItem(_)
| clean::PrimitiveItem(_)
| clean::KeywordItem(_)
)
{
return false;
}
// The `expect_def_id()` should be okay because `local_def_id_to_hir_id`
// would presumably panic if a fake `DefIndex` were passed.
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(item.def_id.expect_def_id().expect_local());
if cx.tcx.hir().attrs(hir_id).lists(sym::doc).has_word(sym::hidden)
|| inherits_doc_hidden(cx.tcx, hir_id)
{
return false;
}
let (level, source) = cx.tcx.lint_level_at_node(crate::lint::MISSING_DOC_CODE_EXAMPLES, hir_id);
level != lint::Level::Allow || matches!(source, LintLevelSource::Default)
}
crate fn look_for_tests<'tcx>(cx: &DocContext<'tcx>, dox: &str, item: &Item) {
let hir_id = match DocContext::as_local_hir_id(cx.tcx, item.def_id) {
Some(hir_id) => hir_id,
None => {
// If non-local, no need to check anything.
return;
}
};
let mut tests = Tests { found_tests: 0 };
find_testable_code(&dox, &mut tests, ErrorCodes::No, false, None);
if tests.found_tests == 0 && cx.tcx.sess.is_nightly_build() {
if should_have_doc_example(cx, &item) {
debug!("reporting error for {:?} (hir_id={:?})", item, hir_id);
let sp = item.attr_span(cx.tcx);
cx.tcx.struct_span_lint_hir(
crate::lint::MISSING_DOC_CODE_EXAMPLES,
hir_id,
sp,
|lint| lint.build("missing code example in this documentation").emit(),
);
}
} else if tests.found_tests > 0
&& !cx.cache.access_levels.is_public(item.def_id.expect_def_id())
{
cx.tcx.struct_span_lint_hir(
crate::lint::PRIVATE_DOC_TESTS,
hir_id,
item.attr_span(cx.tcx),
|lint| lint.build("documentation test in private item").emit(),
);
}
}
|
{
let dox = item.attrs.collapsed_doc_value().unwrap_or_else(String::new);
look_for_tests(self.cx, &dox, &item);
Some(self.fold_item_recur(item))
}
|
identifier_body
|
doc_test_lints.rs
|
//! This pass is overloaded and runs two different lints.
//!
//! - MISSING_DOC_CODE_EXAMPLES: this lint is **UNSTABLE** and looks for public items missing doctests
//! - PRIVATE_DOC_TESTS: this lint is **STABLE** and looks for private items with doctests.
use super::Pass;
use crate::clean;
use crate::clean::*;
use crate::core::DocContext;
use crate::fold::DocFolder;
use crate::html::markdown::{find_testable_code, ErrorCodes, Ignore, LangString};
use crate::visit_ast::inherits_doc_hidden;
use rustc_middle::lint::LintLevelSource;
use rustc_session::lint;
use rustc_span::symbol::sym;
crate const CHECK_PRIVATE_ITEMS_DOC_TESTS: Pass = Pass {
name: "check-private-items-doc-tests",
run: check_private_items_doc_tests,
description: "check private items doc tests",
};
struct PrivateItemDocTestLinter<'a, 'tcx> {
cx: &'a mut DocContext<'tcx>,
}
crate fn check_private_items_doc_tests(krate: Crate, cx: &mut DocContext<'_>) -> Crate {
let mut coll = PrivateItemDocTestLinter { cx };
coll.fold_crate(krate)
}
impl<'a, 'tcx> DocFolder for PrivateItemDocTestLinter<'a, 'tcx> {
fn fold_item(&mut self, item: Item) -> Option<Item> {
let dox = item.attrs.collapsed_doc_value().unwrap_or_else(String::new);
look_for_tests(self.cx, &dox, &item);
Some(self.fold_item_recur(item))
}
}
pub(crate) struct Tests {
pub(crate) found_tests: usize,
}
impl crate::doctest::Tester for Tests {
fn add_test(&mut self, _: String, config: LangString, _: usize) {
if config.rust && config.ignore == Ignore::None
|
}
}
crate fn should_have_doc_example(cx: &DocContext<'_>, item: &clean::Item) -> bool {
if !cx.cache.access_levels.is_public(item.def_id.expect_def_id())
|| matches!(
*item.kind,
clean::StructFieldItem(_)
| clean::VariantItem(_)
| clean::AssocConstItem(_, _)
| clean::AssocTypeItem(_, _)
| clean::TypedefItem(_, _)
| clean::StaticItem(_)
| clean::ConstantItem(_)
| clean::ExternCrateItem { .. }
| clean::ImportItem(_)
| clean::PrimitiveItem(_)
| clean::KeywordItem(_)
)
{
return false;
}
// The `expect_def_id()` should be okay because `local_def_id_to_hir_id`
// would presumably panic if a fake `DefIndex` were passed.
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(item.def_id.expect_def_id().expect_local());
if cx.tcx.hir().attrs(hir_id).lists(sym::doc).has_word(sym::hidden)
|| inherits_doc_hidden(cx.tcx, hir_id)
{
return false;
}
let (level, source) = cx.tcx.lint_level_at_node(crate::lint::MISSING_DOC_CODE_EXAMPLES, hir_id);
level != lint::Level::Allow || matches!(source, LintLevelSource::Default)
}
crate fn look_for_tests<'tcx>(cx: &DocContext<'tcx>, dox: &str, item: &Item) {
let hir_id = match DocContext::as_local_hir_id(cx.tcx, item.def_id) {
Some(hir_id) => hir_id,
None => {
// If non-local, no need to check anything.
return;
}
};
let mut tests = Tests { found_tests: 0 };
find_testable_code(&dox, &mut tests, ErrorCodes::No, false, None);
if tests.found_tests == 0 && cx.tcx.sess.is_nightly_build() {
if should_have_doc_example(cx, &item) {
debug!("reporting error for {:?} (hir_id={:?})", item, hir_id);
let sp = item.attr_span(cx.tcx);
cx.tcx.struct_span_lint_hir(
crate::lint::MISSING_DOC_CODE_EXAMPLES,
hir_id,
sp,
|lint| lint.build("missing code example in this documentation").emit(),
);
}
} else if tests.found_tests > 0
&& !cx.cache.access_levels.is_public(item.def_id.expect_def_id())
{
cx.tcx.struct_span_lint_hir(
crate::lint::PRIVATE_DOC_TESTS,
hir_id,
item.attr_span(cx.tcx),
|lint| lint.build("documentation test in private item").emit(),
);
}
}
|
{
self.found_tests += 1;
}
|
conditional_block
|
engine.py
|
"""
The "engine room" of django mailer.
Methods here actually handle the sending of queued messages.
"""
from django_mailer import constants, models, settings
from lockfile import FileLock, AlreadyLocked, LockTimeout
from socket import error as SocketError
import logging
import os
import smtplib
import tempfile
import time
if constants.EMAIL_BACKEND_SUPPORT:
from django.core.mail import get_connection
else:
from django.core.mail import SMTPConnection as get_connection
LOCK_PATH = settings.LOCK_PATH or os.path.join(tempfile.gettempdir(),
'send_mail')
logger = logging.getLogger('django_mailer.engine')
def _message_queue(block_size, exclude_messages=[]):
"""
A generator which iterates queued messages in blocks so that new
prioritised messages can be inserted during iteration of a large number of
queued messages.
To avoid an infinite loop, yielded messages *must* be deleted or deferred.
"""
def get_block():
|
queue = get_block()
while queue:
for message in queue:
yield message
queue = get_block()
def send_all(block_size=500, backend=None):
"""
Send all non-deferred messages in the queue.
A lock file is used to ensure that this process can not be started again
while it is already running.
The ``block_size`` argument allows for queued messages to be iterated in
blocks, allowing new prioritised messages to be inserted during iteration
of a large number of queued messages.
"""
lock = FileLock(LOCK_PATH)
logger.debug("Acquiring lock...")
try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (which
        # is the default if it's not provided) on systems which use a
        # LinkFileLock, so ensure that it is never a negative number.
lock.acquire(settings.LOCK_WAIT_TIMEOUT or 0)
#lock.acquire(settings.LOCK_WAIT_TIMEOUT)
except AlreadyLocked:
logger.debug("Lock already in place. Exiting.")
return
except LockTimeout:
logger.debug("Waiting for the lock timed out. Exiting.")
return
logger.debug("Lock acquired.")
start_time = time.time()
sent = deferred = skipped = 0
    # Primary keys of messages to exclude for the rest of this run (usually
    # ones that have already failed and been deferred)
exclude_messages = []
try:
if constants.EMAIL_BACKEND_SUPPORT:
connection = get_connection(backend=backend)
else:
connection = get_connection()
blacklist = models.Blacklist.objects.values_list('email', flat=True)
connection.open()
for message in _message_queue(block_size, exclude_messages=exclude_messages):
result = send_queued_message(message, connection=connection,
blacklist=blacklist)
if result == constants.RESULT_SENT:
sent += 1
elif result == constants.RESULT_FAILED:
deferred += 1
# Don't try to send this message again for now
exclude_messages.append(message.pk)
elif result == constants.RESULT_SKIPPED:
skipped += 1
connection.close()
finally:
logger.debug("Releasing lock...")
lock.release()
logger.debug("Lock released.")
logger.debug("")
if sent or deferred or skipped:
log = logger.warning
else:
log = logger.info
log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
def send_loop(empty_queue_sleep=None):
"""
    Loop indefinitely, checking the queue at intervals and sending any queued
    messages.
    The interval (in seconds) can be provided as the ``empty_queue_sleep``
    argument. The default is taken from the ``MAILER_EMPTY_QUEUE_SLEEP``
    setting (30s if not set).
"""
empty_queue_sleep = empty_queue_sleep or settings.EMPTY_QUEUE_SLEEP
while True:
while not models.QueuedMessage.objects.all():
logger.debug("Sleeping for %s seconds before checking queue "
"again." % empty_queue_sleep)
time.sleep(empty_queue_sleep)
send_all()
def send_queued_message(queued_message, connection=None, blacklist=None,
log=True):
"""
Send a queued message, returning a response code as to the action taken.
The response codes can be found in ``django_mailer.constants``. The
response will be either ``RESULT_SKIPPED`` for a blacklisted email,
``RESULT_FAILED`` for a deferred message or ``RESULT_SENT`` for a
    successfully sent message.
To allow optimizations if multiple messages are to be sent, a
connection can be provided and a list of blacklisted email addresses.
Otherwise a new connection will be opened to send this message and the
email recipient address checked against the ``Blacklist`` table.
If the message recipient is blacklisted, the message will be removed from
    the queue without being sent. Otherwise, an attempt is made to send the
    message; an SMTP failure results in the message being flagged as deferred
    so it can be tried again later.
By default, a log is created as to the action. Either way, the original
message is not deleted.
"""
message = queued_message.message
if connection is None:
connection = get_connection()
connection.open()
arg_connection = False
else:
arg_connection = True
if blacklist is None:
blacklisted = models.Blacklist.objects.filter(email=message.to_address)
else:
blacklisted = message.to_address in blacklist
if blacklisted:
logger.info("Not sending to blacklisted email: %s" %
message.to_address.encode("utf-8"))
queued_message.delete()
result = constants.RESULT_SKIPPED
else:
result = send_message(message, connection=connection)
if not arg_connection:
connection.close()
return result
def send_message(message, connection=None):
"""
Send an EmailMessage, returning a response code as to the action taken.
The response codes can be found in ``django_mailer.constants``. The
response will be either ``RESULT_FAILED`` for a failed send or
``RESULT_SENT`` for a successfully sent message.
To allow optimizations if multiple messages are to be sent, a
connection can be provided. Otherwise a new connection will be opened
to send this message.
This function does not perform any logging or queueing.
"""
    if connection is None:
        connection = get_connection()
        opened_connection = True
    else:
        opened_connection = False
try:
logger.info("Sending message to %s: %s" %
(message.to_address.encode("utf-8"),
message.subject.encode("utf-8")))
message.email_message(connection=connection).send()
message.queuedmessage.delete()
result = constants.RESULT_SENT
log_message = 'Sent'
except Exception, err:
if isinstance(err, settings.DEFER_ON_ERRORS):
message.queuedmessage.defer()
logger.warning("Message to %s deferred due to failure: %s" %
(message.to_address.encode("utf-8"), err))
log_message = unicode(err)
result = constants.RESULT_FAILED
models.Log.objects.create(message=message, result=result,
log_message=log_message)
if opened_connection:
connection.close()
return result
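# Illustrative sketch (assumption: DEFER_ON_ERRORS is a tuple of exception
# classes such as SMTP or socket errors -- the exact contents live in
# django_mailer.settings). Errors matching it cause the message to be deferred
# for a later retry instead of only being logged as a failure.
def _example_defer_decision(err):
    defer_on = (smtplib.SMTPException, SocketError)  # stand-in for settings.DEFER_ON_ERRORS
    return isinstance(err, defer_on)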
|
queue = models.QueuedMessage.objects.non_deferred() \
.exclude(pk__in=exclude_messages).select_related()
if block_size:
queue = queue[:block_size]
return queue
|
identifier_body
|
engine.py
|
"""
The "engine room" of django mailer.
Methods here actually handle the sending of queued messages.
"""
from django_mailer import constants, models, settings
from lockfile import FileLock, AlreadyLocked, LockTimeout
from socket import error as SocketError
import logging
import os
import smtplib
import tempfile
import time
if constants.EMAIL_BACKEND_SUPPORT:
from django.core.mail import get_connection
else:
from django.core.mail import SMTPConnection as get_connection
LOCK_PATH = settings.LOCK_PATH or os.path.join(tempfile.gettempdir(),
'send_mail')
logger = logging.getLogger('django_mailer.engine')
def _message_queue(block_size, exclude_messages=[]):
"""
A generator which iterates queued messages in blocks so that new
prioritised messages can be inserted during iteration of a large number of
queued messages.
To avoid an infinite loop, yielded messages *must* be deleted or deferred.
"""
def get_block():
queue = models.QueuedMessage.objects.non_deferred() \
.exclude(pk__in=exclude_messages).select_related()
if block_size:
queue = queue[:block_size]
return queue
queue = get_block()
while queue:
for message in queue:
yield message
queue = get_block()
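# Illustrative sketch (not part of django-mailer): the generator above only
# terminates because every yielded message is deleted or deferred by its
# consumer. The same pattern, modelled with a plain list standing in for the
# ``QueuedMessage`` queryset:
def _example_block_iteration(block_size=3):
    pending = list(range(7))            # stands in for queued messages
    def get_block():
        return pending[:block_size]     # re-query the first block each pass
    block = get_block()
    while block:
        for item in block:
            pending.remove(item)        # a yielded item *must* leave the queue
        block = get_block()
    return pending                      # [] once iteration has finished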
def send_all(block_size=500, backend=None):
"""
Send all non-deferred messages in the queue.
A lock file is used to ensure that this process can not be started again
while it is already running.
The ``block_size`` argument allows for queued messages to be iterated in
blocks, allowing new prioritised messages to be inserted during iteration
of a large number of queued messages.
"""
lock = FileLock(LOCK_PATH)
logger.debug("Acquiring lock...")
try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (which
        # is the default if it's not provided) on systems which use a
        # LinkFileLock, so ensure that it is never a negative number.
lock.acquire(settings.LOCK_WAIT_TIMEOUT or 0)
#lock.acquire(settings.LOCK_WAIT_TIMEOUT)
except AlreadyLocked:
logger.debug("Lock already in place. Exiting.")
return
except LockTimeout:
logger.debug("Waiting for the lock timed out. Exiting.")
return
logger.debug("Lock acquired.")
start_time = time.time()
sent = deferred = skipped = 0
    # Primary keys of messages to exclude for the rest of this run (usually
    # ones that have already failed and been deferred)
exclude_messages = []
try:
if constants.EMAIL_BACKEND_SUPPORT:
connection = get_connection(backend=backend)
else:
connection = get_connection()
blacklist = models.Blacklist.objects.values_list('email', flat=True)
connection.open()
for message in _message_queue(block_size, exclude_messages=exclude_messages):
result = send_queued_message(message, connection=connection,
blacklist=blacklist)
if result == constants.RESULT_SENT:
sent += 1
elif result == constants.RESULT_FAILED:
deferred += 1
# Don't try to send this message again for now
exclude_messages.append(message.pk)
elif result == constants.RESULT_SKIPPED:
skipped += 1
connection.close()
finally:
logger.debug("Releasing lock...")
lock.release()
logger.debug("Lock released.")
logger.debug("")
if sent or deferred or skipped:
log = logger.warning
else:
log = logger.info
log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
def send_loop(empty_queue_sleep=None):
"""
    Loop indefinitely, checking the queue at intervals and sending any queued
    messages.
    The interval (in seconds) can be provided as the ``empty_queue_sleep``
    argument. The default is taken from the ``MAILER_EMPTY_QUEUE_SLEEP``
    setting (30s if not set).
"""
empty_queue_sleep = empty_queue_sleep or settings.EMPTY_QUEUE_SLEEP
while True:
while not models.QueuedMessage.objects.all():
logger.debug("Sleeping for %s seconds before checking queue "
"again." % empty_queue_sleep)
time.sleep(empty_queue_sleep)
send_all()
def send_queued_message(queued_message, connection=None, blacklist=None,
log=True):
"""
Send a queued message, returning a response code as to the action taken.
The response codes can be found in ``django_mailer.constants``. The
response will be either ``RESULT_SKIPPED`` for a blacklisted email,
``RESULT_FAILED`` for a deferred message or ``RESULT_SENT`` for a
    successfully sent message.
To allow optimizations if multiple messages are to be sent, a
connection can be provided and a list of blacklisted email addresses.
Otherwise a new connection will be opened to send this message and the
email recipient address checked against the ``Blacklist`` table.
If the message recipient is blacklisted, the message will be removed from
    the queue without being sent. Otherwise, an attempt is made to send the
    message; an SMTP failure results in the message being flagged as deferred
    so it can be tried again later.
By default, a log is created as to the action. Either way, the original
message is not deleted.
"""
message = queued_message.message
if connection is None:
connection = get_connection()
connection.open()
arg_connection = False
else:
arg_connection = True
if blacklist is None:
blacklisted = models.Blacklist.objects.filter(email=message.to_address)
else:
blacklisted = message.to_address in blacklist
if blacklisted:
logger.info("Not sending to blacklisted email: %s" %
message.to_address.encode("utf-8"))
queued_message.delete()
result = constants.RESULT_SKIPPED
else:
result = send_message(message, connection=connection)
if not arg_connection:
connection.close()
return result
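# Hypothetical caller (illustration only, not part of django-mailer): how the
# ``RESULT_*`` codes documented above might be interpreted when sending a
# single queued message outside of send_all().
def _example_handle_result(queued_message):
    result = send_queued_message(queued_message)
    if result == constants.RESULT_SENT:
        return 'delivered'
    elif result == constants.RESULT_FAILED:
        return 'deferred for a later retry'
    return 'skipped (recipient blacklisted)'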
def send_message(message, connection=None):
"""
Send an EmailMessage, returning a response code as to the action taken.
The response codes can be found in ``django_mailer.constants``. The
response will be either ``RESULT_FAILED`` for a failed send or
``RESULT_SENT`` for a successfully sent message.
To allow optimizations if multiple messages are to be sent, a
connection can be provided. Otherwise a new connection will be opened
to send this message.
This function does not perform any logging or queueing.
"""
    if connection is None:
        connection = get_connection()
        opened_connection = True
    else:
        opened_connection = False
try:
logger.info("Sending message to %s: %s" %
(message.to_address.encode("utf-8"),
message.subject.encode("utf-8")))
message.email_message(connection=connection).send()
message.queuedmessage.delete()
result = constants.RESULT_SENT
log_message = 'Sent'
except Exception, err:
if isinstance(err, settings.DEFER_ON_ERRORS):
message.queuedmessage.defer()
logger.warning("Message to %s deferred due to failure: %s" %
(message.to_address.encode("utf-8"), err))
log_message = unicode(err)
result = constants.RESULT_FAILED
models.Log.objects.create(message=message, result=result,
log_message=log_message)
if opened_connection:
connection.close()
|
return result
|
random_line_split
|
|
engine.py
|
"""
The "engine room" of django mailer.
Methods here actually handle the sending of queued messages.
"""
from django_mailer import constants, models, settings
from lockfile import FileLock, AlreadyLocked, LockTimeout
from socket import error as SocketError
import logging
import os
import smtplib
import tempfile
import time
if constants.EMAIL_BACKEND_SUPPORT:
from django.core.mail import get_connection
else:
from django.core.mail import SMTPConnection as get_connection
LOCK_PATH = settings.LOCK_PATH or os.path.join(tempfile.gettempdir(),
'send_mail')
logger = logging.getLogger('django_mailer.engine')
def _message_queue(block_size, exclude_messages=[]):
"""
A generator which iterates queued messages in blocks so that new
prioritised messages can be inserted during iteration of a large number of
queued messages.
To avoid an infinite loop, yielded messages *must* be deleted or deferred.
"""
def get_block():
queue = models.QueuedMessage.objects.non_deferred() \
.exclude(pk__in=exclude_messages).select_related()
if block_size:
queue = queue[:block_size]
return queue
queue = get_block()
while queue:
for message in queue:
yield message
queue = get_block()
def send_all(block_size=500, backend=None):
"""
Send all non-deferred messages in the queue.
A lock file is used to ensure that this process can not be started again
while it is already running.
The ``block_size`` argument allows for queued messages to be iterated in
blocks, allowing new prioritised messages to be inserted during iteration
of a large number of queued messages.
"""
lock = FileLock(LOCK_PATH)
logger.debug("Acquiring lock...")
try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (which
        # is the default if it's not provided) on systems which use a
        # LinkFileLock, so ensure that it is never a negative number.
lock.acquire(settings.LOCK_WAIT_TIMEOUT or 0)
#lock.acquire(settings.LOCK_WAIT_TIMEOUT)
except AlreadyLocked:
logger.debug("Lock already in place. Exiting.")
return
except LockTimeout:
logger.debug("Waiting for the lock timed out. Exiting.")
return
logger.debug("Lock acquired.")
start_time = time.time()
sent = deferred = skipped = 0
    # Primary keys of messages to exclude for the rest of this run (usually
    # ones that have already failed and been deferred)
exclude_messages = []
try:
if constants.EMAIL_BACKEND_SUPPORT:
|
else:
connection = get_connection()
blacklist = models.Blacklist.objects.values_list('email', flat=True)
connection.open()
for message in _message_queue(block_size, exclude_messages=exclude_messages):
result = send_queued_message(message, connection=connection,
blacklist=blacklist)
if result == constants.RESULT_SENT:
sent += 1
elif result == constants.RESULT_FAILED:
deferred += 1
# Don't try to send this message again for now
exclude_messages.append(message.pk)
elif result == constants.RESULT_SKIPPED:
skipped += 1
connection.close()
finally:
logger.debug("Releasing lock...")
lock.release()
logger.debug("Lock released.")
logger.debug("")
if sent or deferred or skipped:
log = logger.warning
else:
log = logger.info
log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
def send_loop(empty_queue_sleep=None):
"""
    Loop indefinitely, checking the queue at intervals and sending any queued
    messages.
    The interval (in seconds) can be provided as the ``empty_queue_sleep``
    argument. The default is taken from the ``MAILER_EMPTY_QUEUE_SLEEP``
    setting (30s if not set).
"""
empty_queue_sleep = empty_queue_sleep or settings.EMPTY_QUEUE_SLEEP
while True:
while not models.QueuedMessage.objects.all():
logger.debug("Sleeping for %s seconds before checking queue "
"again." % empty_queue_sleep)
time.sleep(empty_queue_sleep)
send_all()
def send_queued_message(queued_message, connection=None, blacklist=None,
log=True):
"""
Send a queued message, returning a response code as to the action taken.
The response codes can be found in ``django_mailer.constants``. The
response will be either ``RESULT_SKIPPED`` for a blacklisted email,
``RESULT_FAILED`` for a deferred message or ``RESULT_SENT`` for a
    successfully sent message.
To allow optimizations if multiple messages are to be sent, a
connection can be provided and a list of blacklisted email addresses.
Otherwise a new connection will be opened to send this message and the
email recipient address checked against the ``Blacklist`` table.
If the message recipient is blacklisted, the message will be removed from
    the queue without being sent. Otherwise, an attempt is made to send the
    message; an SMTP failure results in the message being flagged as deferred
    so it can be tried again later.
By default, a log is created as to the action. Either way, the original
message is not deleted.
"""
message = queued_message.message
if connection is None:
connection = get_connection()
connection.open()
arg_connection = False
else:
arg_connection = True
if blacklist is None:
blacklisted = models.Blacklist.objects.filter(email=message.to_address)
else:
blacklisted = message.to_address in blacklist
if blacklisted:
logger.info("Not sending to blacklisted email: %s" %
message.to_address.encode("utf-8"))
queued_message.delete()
result = constants.RESULT_SKIPPED
else:
result = send_message(message, connection=connection)
if not arg_connection:
connection.close()
return result
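# Illustration (not part of django-mailer): the blacklist check above evaluates
# either a queryset (truthy when a matching row exists) or a plain bool when a
# pre-fetched list of addresses is supplied -- both work in the ``if`` test.
def _example_blacklist_check(to_address, blacklist=None):
    if blacklist is None:
        return models.Blacklist.objects.filter(email=to_address)
    return to_address in blacklist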
def send_message(message, connection=None):
"""
Send an EmailMessage, returning a response code as to the action taken.
The response codes can be found in ``django_mailer.constants``. The
response will be either ``RESULT_FAILED`` for a failed send or
``RESULT_SENT`` for a successfully sent message.
To allow optimizations if multiple messages are to be sent, a
connection can be provided. Otherwise a new connection will be opened
to send this message.
This function does not perform any logging or queueing.
"""
    if connection is None:
        connection = get_connection()
        opened_connection = True
    else:
        opened_connection = False
try:
logger.info("Sending message to %s: %s" %
(message.to_address.encode("utf-8"),
message.subject.encode("utf-8")))
message.email_message(connection=connection).send()
message.queuedmessage.delete()
result = constants.RESULT_SENT
log_message = 'Sent'
except Exception, err:
if isinstance(err, settings.DEFER_ON_ERRORS):
message.queuedmessage.defer()
logger.warning("Message to %s deferred due to failure: %s" %
(message.to_address.encode("utf-8"), err))
log_message = unicode(err)
result = constants.RESULT_FAILED
models.Log.objects.create(message=message, result=result,
log_message=log_message)
if opened_connection:
connection.close()
return result
|
connection = get_connection(backend=backend)
|
conditional_block
|
engine.py
|
"""
The "engine room" of django mailer.
Methods here actually handle the sending of queued messages.
"""
from django_mailer import constants, models, settings
from lockfile import FileLock, AlreadyLocked, LockTimeout
from socket import error as SocketError
import logging
import os
import smtplib
import tempfile
import time
if constants.EMAIL_BACKEND_SUPPORT:
from django.core.mail import get_connection
else:
from django.core.mail import SMTPConnection as get_connection
LOCK_PATH = settings.LOCK_PATH or os.path.join(tempfile.gettempdir(),
'send_mail')
logger = logging.getLogger('django_mailer.engine')
def _message_queue(block_size, exclude_messages=[]):
"""
A generator which iterates queued messages in blocks so that new
prioritised messages can be inserted during iteration of a large number of
queued messages.
To avoid an infinite loop, yielded messages *must* be deleted or deferred.
"""
def get_block():
queue = models.QueuedMessage.objects.non_deferred() \
.exclude(pk__in=exclude_messages).select_related()
if block_size:
queue = queue[:block_size]
return queue
queue = get_block()
while queue:
for message in queue:
yield message
queue = get_block()
def send_all(block_size=500, backend=None):
"""
Send all non-deferred messages in the queue.
A lock file is used to ensure that this process can not be started again
while it is already running.
The ``block_size`` argument allows for queued messages to be iterated in
blocks, allowing new prioritised messages to be inserted during iteration
of a large number of queued messages.
"""
lock = FileLock(LOCK_PATH)
logger.debug("Acquiring lock...")
try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (which
        # is the default if it's not provided) on systems which use a
        # LinkFileLock, so ensure that it is never a negative number.
lock.acquire(settings.LOCK_WAIT_TIMEOUT or 0)
#lock.acquire(settings.LOCK_WAIT_TIMEOUT)
except AlreadyLocked:
logger.debug("Lock already in place. Exiting.")
return
except LockTimeout:
logger.debug("Waiting for the lock timed out. Exiting.")
return
logger.debug("Lock acquired.")
start_time = time.time()
sent = deferred = skipped = 0
    # Primary keys of messages to exclude for the rest of this run (usually
    # ones that have already failed and been deferred)
exclude_messages = []
try:
if constants.EMAIL_BACKEND_SUPPORT:
connection = get_connection(backend=backend)
else:
connection = get_connection()
blacklist = models.Blacklist.objects.values_list('email', flat=True)
connection.open()
for message in _message_queue(block_size, exclude_messages=exclude_messages):
result = send_queued_message(message, connection=connection,
blacklist=blacklist)
if result == constants.RESULT_SENT:
sent += 1
elif result == constants.RESULT_FAILED:
deferred += 1
# Don't try to send this message again for now
exclude_messages.append(message.pk)
elif result == constants.RESULT_SKIPPED:
skipped += 1
connection.close()
finally:
logger.debug("Releasing lock...")
lock.release()
logger.debug("Lock released.")
logger.debug("")
if sent or deferred or skipped:
log = logger.warning
else:
log = logger.info
log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
def send_loop(empty_queue_sleep=None):
"""
    Loop indefinitely, checking the queue at intervals and sending any queued
    messages.
    The interval (in seconds) can be provided as the ``empty_queue_sleep``
    argument. The default is taken from the ``MAILER_EMPTY_QUEUE_SLEEP``
    setting (30s if not set).
"""
empty_queue_sleep = empty_queue_sleep or settings.EMPTY_QUEUE_SLEEP
while True:
while not models.QueuedMessage.objects.all():
logger.debug("Sleeping for %s seconds before checking queue "
"again." % empty_queue_sleep)
time.sleep(empty_queue_sleep)
send_all()
def
|
(queued_message, connection=None, blacklist=None,
log=True):
"""
Send a queued message, returning a response code as to the action taken.
The response codes can be found in ``django_mailer.constants``. The
response will be either ``RESULT_SKIPPED`` for a blacklisted email,
``RESULT_FAILED`` for a deferred message or ``RESULT_SENT`` for a
    successfully sent message.
To allow optimizations if multiple messages are to be sent, a
connection can be provided and a list of blacklisted email addresses.
Otherwise a new connection will be opened to send this message and the
email recipient address checked against the ``Blacklist`` table.
If the message recipient is blacklisted, the message will be removed from
    the queue without being sent. Otherwise, an attempt is made to send the
    message; an SMTP failure results in the message being flagged as deferred
    so it can be tried again later.
By default, a log is created as to the action. Either way, the original
message is not deleted.
"""
message = queued_message.message
if connection is None:
connection = get_connection()
connection.open()
arg_connection = False
else:
arg_connection = True
if blacklist is None:
blacklisted = models.Blacklist.objects.filter(email=message.to_address)
else:
blacklisted = message.to_address in blacklist
if blacklisted:
logger.info("Not sending to blacklisted email: %s" %
message.to_address.encode("utf-8"))
queued_message.delete()
result = constants.RESULT_SKIPPED
else:
result = send_message(message, connection=connection)
if not arg_connection:
connection.close()
return result
def send_message(message, connection=None):
"""
Send an EmailMessage, returning a response code as to the action taken.
The response codes can be found in ``django_mailer.constants``. The
response will be either ``RESULT_FAILED`` for a failed send or
``RESULT_SENT`` for a successfully sent message.
To allow optimizations if multiple messages are to be sent, a
connection can be provided. Otherwise a new connection will be opened
to send this message.
This function does not perform any logging or queueing.
"""
    if connection is None:
        connection = get_connection()
        opened_connection = True
    else:
        opened_connection = False
try:
logger.info("Sending message to %s: %s" %
(message.to_address.encode("utf-8"),
message.subject.encode("utf-8")))
message.email_message(connection=connection).send()
message.queuedmessage.delete()
result = constants.RESULT_SENT
log_message = 'Sent'
except Exception, err:
if isinstance(err, settings.DEFER_ON_ERRORS):
message.queuedmessage.defer()
logger.warning("Message to %s deferred due to failure: %s" %
(message.to_address.encode("utf-8"), err))
log_message = unicode(err)
result = constants.RESULT_FAILED
models.Log.objects.create(message=message, result=result,
log_message=log_message)
if opened_connection:
connection.close()
return result
|
send_queued_message
|
identifier_name
|
index.d.ts
|
// Type definitions for sinon-chai 2.7.0
// Project: https://github.com/domenic/sinon-chai
// Definitions by: Kazi Manzur Rashid <https://github.com/kazimanzurrashid/>, Jed Mao <https://github.com/jedmao/>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.3
/// <reference types="chai" />
/// <reference types="sinon" />
import * as Sinon from 'sinon';
declare global {
export namespace Chai {
interface LanguageChains {
always: Assertion;
}
interface Assertion {
/**
* true if the spy was called at least once.
*/
called: Assertion;
/**
* @param count The number of recorded calls.
*/
callCount(count: number): Assertion;
/**
* true if the spy was called exactly once.
*/
calledOnce: Assertion;
/**
* true if the spy was called exactly twice.
*/
calledTwice: Assertion;
/**
* true if the spy was called exactly thrice.
*/
calledThrice: Assertion;
/**
* Returns true if the spy was called before anotherSpy.
*/
calledBefore(anotherSpy: Sinon.SinonSpy): Assertion;
/**
* Returns true if the spy was called after anotherSpy.
*/
calledAfter(anotherSpy: Sinon.SinonSpy): Assertion;
/**
* Returns true if spy/stub was called with the new operator. Beware that
* this is inferred based on the value of the this object and the spy
* function's prototype, so it may give false positives if you actively
* return the right kind of object.
*/
calledWithNew: Assertion;
/**
* Returns true if context was this for this call.
*/
calledOn(context: any): Assertion;
/**
* Returns true if call received provided arguments (and possibly others).
|
*/
calledWith(...args: any[]): Assertion;
/**
* Returns true if call received provided arguments and no others.
*/
calledWithExactly(...args: any[]): Assertion;
/**
* Returns true if call received matching arguments (and possibly others).
* This behaves the same as spyCall.calledWith(sinon.match(arg1), sinon.match(arg2), ...).
*/
calledWithMatch(...args: any[]): Assertion;
/**
* Returns true if spy returned the provided value at least once. Uses
* deep comparison for objects and arrays. Use spy.returned(sinon.match.same(obj))
* for strict comparison (see matchers).
*/
returned(obj: any): Assertion;
/**
* Returns true if spy threw the provided exception object at least once.
*/
thrown(obj?: Error | typeof Error | string): Assertion;
}
}
}
declare function sinonChai(chai: any, utils: any): void;
declare namespace sinonChai { }
export = sinonChai;
|
random_line_split
|
|
speedcd.py
|
# coding=utf-8
"""Provider code for Speed.cd."""
from __future__ import unicode_literals
import logging
from medusa import tv
from medusa.bs4_parser import BS4Parser
from medusa.helper.common import (
convert_size,
try_int,
)
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class SpeedCDProvider(TorrentProvider):
"""SpeedCD Torrent provider."""
def __init__(self):
"""Initialize the class."""
super(SpeedCDProvider, self).__init__('Speedcd')
# Credentials
self.username = None
self.password = None
# URLs
self.url = 'https://speed.cd'
self.urls = {
'login': urljoin(self.url, 'login.php'),
'search': urljoin(self.url, 'browse.php'),
'login_post': None,
}
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK', 'REAL']
# Miscellaneous Options
self.freeleech = False
# Torrent Stats
self.minseed = None
self.minleech = None
# Cache
self.cache = tv.Cache(self)
def search(self, search_strings, age=0, ep_obj=None, **kwargs):
"""
Search a provider and parse the results.
:param search_strings: A dict with mode (key) and the search value (value)
:param age: Not used
:param ep_obj: Not used
:returns: A list of search results (structure)
"""
results = []
if not self.login():
return results
# http://speed.cd/browse.php?c49=1&c50=1&c52=1&c41=1&c55=1&c2=1&c30=1&freeleech=on&search=arrow&d=on
# Search Params
search_params = {
'c2': 1, # TV/Episodes
'c30': 1, # Anime
'c41': 1, # TV/Packs
'c49': 1, # TV/HD
'c50': 1, # TV/Sports
'c52': 1, # TV/B-Ray
'c55': 1, # TV/Kids
'search': '',
'freeleech': 'on' if self.freeleech else None
}
for mode in search_strings:
log.debug('Search mode: {0}', mode)
for search_string in search_strings[mode]:
if mode != 'RSS':
log.debug('Search string: {search}',
{'search': search_string})
search_params['search'] = search_string
response = self.session.get(self.urls['search'], params=search_params)
if not response or not response.text:
log.debug('No data returned from provider')
continue
results += self.parse(response.text, mode)
return results
def parse(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
# Units
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
|
torrent_table = torrent_table.find('table') if torrent_table else None
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
log.debug('Data returned from provider does not contain any torrents')
return items
# Skip column headers
for row in torrent_rows[1:]:
cells = row('td')
try:
                    title = cells[1].find('a', class_='torrent').get_text() \
                        if cells[1].find('a', class_='torrent') else None
download_url = urljoin(self.url,
cells[2].find(title='Download').parent['href'])
if not all([title, download_url]):
continue
seeders = try_int(cells[5].get_text(strip=True))
leechers = try_int(cells[6].get_text(strip=True))
# Filter unseeded torrent
if seeders < min(self.minseed, 1):
if mode != 'RSS':
log.debug("Discarding torrent because it doesn't meet the"
" minimum seeders: {0}. Seeders: {1}",
title, seeders)
continue
torrent_size = cells[4].get_text()
torrent_size = torrent_size[:-2] + ' ' + torrent_size[-2:]
size = convert_size(torrent_size, units=units) or -1
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': None,
}
if mode != 'RSS':
log.debug('Found result: {0} with {1} seeders and {2} leechers',
title, seeders, leechers)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
log.exception('Failed parsing provider.')
return items
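    def _example_seeder_threshold(self, seeders, minseed=5):
        # Illustration (not part of the provider, minseed=5 is an assumed example
        # configuration): because the filter above uses ``min(minseed, 1)``, the
        # effective cut-off is never more than one seeder, so only completely
        # unseeded torrents are discarded even when minseed is set higher.
        return seeders < min(minseed, 1)  # True only when seeders == 0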
def login(self):
"""Login method used for logging in before doing search and torrent downloads."""
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
}
if not self.urls['login_post'] and not self.login_url():
log.debug('Unable to get login URL')
return False
response = self.session.post(self.urls['login_post'], data=login_params)
if not response or not response.text:
log.debug('Unable to connect to provider using login URL: {url}',
{'url': self.urls['login_post']})
return False
if 'incorrect username or password. please try again' in response.text.lower():
log.warning('Invalid username or password. Check your settings')
return False
return True
log.warning('Unable to connect to provider')
return
def login_url(self):
"""Get the login url (post) as speed.cd keeps changing it."""
response = self.session.get(self.urls['login'])
if not response or not response.text:
log.debug('Unable to connect to provider to get login URL')
return
data = BS4Parser(response.text, 'html5lib')
login_post = data.soup.find('form', id='loginform').get('action')
if login_post:
self.urls['login_post'] = urljoin(self.url, login_post)
return True
provider = SpeedCDProvider()
|
items = []
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('div', class_='boxContent')
|
random_line_split
|
speedcd.py
|
# coding=utf-8
"""Provider code for Speed.cd."""
from __future__ import unicode_literals
import logging
from medusa import tv
from medusa.bs4_parser import BS4Parser
from medusa.helper.common import (
convert_size,
try_int,
)
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class SpeedCDProvider(TorrentProvider):
"""SpeedCD Torrent provider."""
def __init__(self):
"""Initialize the class."""
super(SpeedCDProvider, self).__init__('Speedcd')
# Credentials
self.username = None
self.password = None
# URLs
self.url = 'https://speed.cd'
self.urls = {
'login': urljoin(self.url, 'login.php'),
'search': urljoin(self.url, 'browse.php'),
'login_post': None,
}
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK', 'REAL']
# Miscellaneous Options
self.freeleech = False
# Torrent Stats
self.minseed = None
self.minleech = None
# Cache
self.cache = tv.Cache(self)
def search(self, search_strings, age=0, ep_obj=None, **kwargs):
"""
Search a provider and parse the results.
:param search_strings: A dict with mode (key) and the search value (value)
:param age: Not used
:param ep_obj: Not used
:returns: A list of search results (structure)
"""
results = []
if not self.login():
return results
# http://speed.cd/browse.php?c49=1&c50=1&c52=1&c41=1&c55=1&c2=1&c30=1&freeleech=on&search=arrow&d=on
# Search Params
search_params = {
'c2': 1, # TV/Episodes
'c30': 1, # Anime
'c41': 1, # TV/Packs
'c49': 1, # TV/HD
'c50': 1, # TV/Sports
'c52': 1, # TV/B-Ray
'c55': 1, # TV/Kids
'search': '',
'freeleech': 'on' if self.freeleech else None
}
for mode in search_strings:
log.debug('Search mode: {0}', mode)
for search_string in search_strings[mode]:
if mode != 'RSS':
log.debug('Search string: {search}',
{'search': search_string})
search_params['search'] = search_string
response = self.session.get(self.urls['search'], params=search_params)
if not response or not response.text:
log.debug('No data returned from provider')
continue
results += self.parse(response.text, mode)
return results
def parse(self, data, mode):
|
def login(self):
"""Login method used for logging in before doing search and torrent downloads."""
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
}
if not self.urls['login_post'] and not self.login_url():
log.debug('Unable to get login URL')
return False
response = self.session.post(self.urls['login_post'], data=login_params)
if not response or not response.text:
log.debug('Unable to connect to provider using login URL: {url}',
{'url': self.urls['login_post']})
return False
if 'incorrect username or password. please try again' in response.text.lower():
log.warning('Invalid username or password. Check your settings')
return False
return True
log.warning('Unable to connect to provider')
return
def login_url(self):
"""Get the login url (post) as speed.cd keeps changing it."""
response = self.session.get(self.urls['login'])
if not response or not response.text:
log.debug('Unable to connect to provider to get login URL')
return
data = BS4Parser(response.text, 'html5lib')
login_post = data.soup.find('form', id='loginform').get('action')
if login_post:
self.urls['login_post'] = urljoin(self.url, login_post)
return True
provider = SpeedCDProvider()
|
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
# Units
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
items = []
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('div', class_='boxContent')
torrent_table = torrent_table.find('table') if torrent_table else None
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
log.debug('Data returned from provider does not contain any torrents')
return items
# Skip column headers
for row in torrent_rows[1:]:
cells = row('td')
try:
                    title = cells[1].find('a', class_='torrent').get_text() \
                        if cells[1].find('a', class_='torrent') else None
download_url = urljoin(self.url,
cells[2].find(title='Download').parent['href'])
if not all([title, download_url]):
continue
seeders = try_int(cells[5].get_text(strip=True))
leechers = try_int(cells[6].get_text(strip=True))
# Filter unseeded torrent
if seeders < min(self.minseed, 1):
if mode != 'RSS':
log.debug("Discarding torrent because it doesn't meet the"
" minimum seeders: {0}. Seeders: {1}",
title, seeders)
continue
torrent_size = cells[4].get_text()
torrent_size = torrent_size[:-2] + ' ' + torrent_size[-2:]
size = convert_size(torrent_size, units=units) or -1
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': None,
}
if mode != 'RSS':
log.debug('Found result: {0} with {1} seeders and {2} leechers',
title, seeders, leechers)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
log.exception('Failed parsing provider.')
return items
|
identifier_body
|
speedcd.py
|
# coding=utf-8
"""Provider code for Speed.cd."""
from __future__ import unicode_literals
import logging
from medusa import tv
from medusa.bs4_parser import BS4Parser
from medusa.helper.common import (
convert_size,
try_int,
)
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class SpeedCDProvider(TorrentProvider):
"""SpeedCD Torrent provider."""
def __init__(self):
"""Initialize the class."""
super(SpeedCDProvider, self).__init__('Speedcd')
# Credentials
self.username = None
self.password = None
# URLs
self.url = 'https://speed.cd'
self.urls = {
'login': urljoin(self.url, 'login.php'),
'search': urljoin(self.url, 'browse.php'),
'login_post': None,
}
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK', 'REAL']
# Miscellaneous Options
self.freeleech = False
# Torrent Stats
self.minseed = None
self.minleech = None
# Cache
self.cache = tv.Cache(self)
def search(self, search_strings, age=0, ep_obj=None, **kwargs):
"""
Search a provider and parse the results.
:param search_strings: A dict with mode (key) and the search value (value)
:param age: Not used
:param ep_obj: Not used
:returns: A list of search results (structure)
"""
results = []
if not self.login():
return results
# http://speed.cd/browse.php?c49=1&c50=1&c52=1&c41=1&c55=1&c2=1&c30=1&freeleech=on&search=arrow&d=on
# Search Params
search_params = {
'c2': 1, # TV/Episodes
'c30': 1, # Anime
'c41': 1, # TV/Packs
'c49': 1, # TV/HD
'c50': 1, # TV/Sports
'c52': 1, # TV/B-Ray
'c55': 1, # TV/Kids
'search': '',
'freeleech': 'on' if self.freeleech else None
}
for mode in search_strings:
log.debug('Search mode: {0}', mode)
for search_string in search_strings[mode]:
if mode != 'RSS':
log.debug('Search string: {search}',
{'search': search_string})
search_params['search'] = search_string
response = self.session.get(self.urls['search'], params=search_params)
if not response or not response.text:
log.debug('No data returned from provider')
continue
results += self.parse(response.text, mode)
return results
def parse(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
# Units
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
items = []
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('div', class_='boxContent')
torrent_table = torrent_table.find('table') if torrent_table else None
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
log.debug('Data returned from provider does not contain any torrents')
return items
# Skip column headers
for row in torrent_rows[1:]:
cells = row('td')
try:
                    title = cells[1].find('a', class_='torrent').get_text() \
                        if cells[1].find('a', class_='torrent') else None
download_url = urljoin(self.url,
cells[2].find(title='Download').parent['href'])
if not all([title, download_url]):
continue
seeders = try_int(cells[5].get_text(strip=True))
leechers = try_int(cells[6].get_text(strip=True))
# Filter unseeded torrent
if seeders < min(self.minseed, 1):
if mode != 'RSS':
log.debug("Discarding torrent because it doesn't meet the"
" minimum seeders: {0}. Seeders: {1}",
title, seeders)
continue
torrent_size = cells[4].get_text()
torrent_size = torrent_size[:-2] + ' ' + torrent_size[-2:]
size = convert_size(torrent_size, units=units) or -1
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': None,
}
if mode != 'RSS':
|
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
log.exception('Failed parsing provider.')
return items
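    def _example_size_normalisation(self):
        # Illustration (not part of the provider, '723.8MB' is an assumed example
        # cell value): the parser above re-inserts the space the site omits
        # between number and unit before handing the string to convert_size().
        raw = '723.8MB'
        spaced = raw[:-2] + ' ' + raw[-2:]  # -> '723.8 MB'
        return convert_size(spaced, units=['B', 'KB', 'MB', 'GB', 'TB', 'PB'])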
def login(self):
"""Login method used for logging in before doing search and torrent downloads."""
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
}
if not self.urls['login_post'] and not self.login_url():
log.debug('Unable to get login URL')
return False
response = self.session.post(self.urls['login_post'], data=login_params)
if not response or not response.text:
log.debug('Unable to connect to provider using login URL: {url}',
{'url': self.urls['login_post']})
return False
if 'incorrect username or password. please try again' in response.text.lower():
log.warning('Invalid username or password. Check your settings')
return False
return True
log.warning('Unable to connect to provider')
return
def login_url(self):
"""Get the login url (post) as speed.cd keeps changing it."""
response = self.session.get(self.urls['login'])
if not response or not response.text:
log.debug('Unable to connect to provider to get login URL')
return
data = BS4Parser(response.text, 'html5lib')
login_post = data.soup.find('form', id='loginform').get('action')
if login_post:
self.urls['login_post'] = urljoin(self.url, login_post)
return True
provider = SpeedCDProvider()
|
log.debug('Found result: {0} with {1} seeders and {2} leechers',
title, seeders, leechers)
|
conditional_block
|
speedcd.py
|
# coding=utf-8
"""Provider code for Speed.cd."""
from __future__ import unicode_literals
import logging
from medusa import tv
from medusa.bs4_parser import BS4Parser
from medusa.helper.common import (
convert_size,
try_int,
)
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class SpeedCDProvider(TorrentProvider):
"""SpeedCD Torrent provider."""
def __init__(self):
"""Initialize the class."""
super(SpeedCDProvider, self).__init__('Speedcd')
# Credentials
self.username = None
self.password = None
# URLs
self.url = 'https://speed.cd'
self.urls = {
'login': urljoin(self.url, 'login.php'),
'search': urljoin(self.url, 'browse.php'),
'login_post': None,
}
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK', 'REAL']
# Miscellaneous Options
self.freeleech = False
# Torrent Stats
self.minseed = None
self.minleech = None
# Cache
self.cache = tv.Cache(self)
def search(self, search_strings, age=0, ep_obj=None, **kwargs):
"""
Search a provider and parse the results.
:param search_strings: A dict with mode (key) and the search value (value)
:param age: Not used
:param ep_obj: Not used
:returns: A list of search results (structure)
"""
results = []
if not self.login():
return results
# http://speed.cd/browse.php?c49=1&c50=1&c52=1&c41=1&c55=1&c2=1&c30=1&freeleech=on&search=arrow&d=on
# Search Params
search_params = {
'c2': 1, # TV/Episodes
'c30': 1, # Anime
'c41': 1, # TV/Packs
'c49': 1, # TV/HD
'c50': 1, # TV/Sports
'c52': 1, # TV/B-Ray
'c55': 1, # TV/Kids
'search': '',
'freeleech': 'on' if self.freeleech else None
}
for mode in search_strings:
log.debug('Search mode: {0}', mode)
for search_string in search_strings[mode]:
if mode != 'RSS':
log.debug('Search string: {search}',
{'search': search_string})
search_params['search'] = search_string
response = self.session.get(self.urls['search'], params=search_params)
if not response or not response.text:
log.debug('No data returned from provider')
continue
results += self.parse(response.text, mode)
return results
def parse(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
# Units
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
items = []
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('div', class_='boxContent')
torrent_table = torrent_table.find('table') if torrent_table else None
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
log.debug('Data returned from provider does not contain any torrents')
return items
# Skip column headers
for row in torrent_rows[1:]:
cells = row('td')
try:
title = cells[1].find('a', class_='torrent').get_text() if cells[1].find('a', class_='torrent') else None
download_url = urljoin(self.url,
cells[2].find(title='Download').parent['href'])
if not all([title, download_url]):
continue
seeders = try_int(cells[5].get_text(strip=True))
leechers = try_int(cells[6].get_text(strip=True))
# Filter unseeded torrent
if seeders < min(self.minseed, 1):
if mode != 'RSS':
log.debug("Discarding torrent because it doesn't meet the"
" minimum seeders: {0}. Seeders: {1}",
title, seeders)
continue
torrent_size = cells[4].get_text()
torrent_size = torrent_size[:-2] + ' ' + torrent_size[-2:]
size = convert_size(torrent_size, units=units) or -1
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': None,
}
if mode != 'RSS':
log.debug('Found result: {0} with {1} seeders and {2} leechers',
title, seeders, leechers)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
log.exception('Failed parsing provider.')
return items
def login(self):
"""Login method used for logging in before doing search and torrent downloads."""
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
}
if not self.urls['login_post'] and not self.login_url():
log.debug('Unable to get login URL')
return False
response = self.session.post(self.urls['login_post'], data=login_params)
if not response or not response.text:
log.debug('Unable to connect to provider using login URL: {url}',
{'url': self.urls['login_post']})
return False
if 'incorrect username or password. please try again' in response.text.lower():
log.warning('Invalid username or password. Check your settings')
return False
return True
def
|
(self):
"""Get the login url (post) as speed.cd keeps changing it."""
response = self.session.get(self.urls['login'])
if not response or not response.text:
log.debug('Unable to connect to provider to get login URL')
return
data = BS4Parser(response.text, 'html5lib')
login_post = data.soup.find('form', id='loginform').get('action')
if login_post:
self.urls['login_post'] = urljoin(self.url, login_post)
return True
provider = SpeedCDProvider()
|
login_url
|
identifier_name
|
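parse() in the prefix above rewrites Speed.cd's size strings (for example '1.2GB') by inserting a space before the two-character unit so that convert_size can interpret them, then filters out releases that fall below the seeder threshold. A rough, self-contained sketch of the size step follows; to_bytes is a simplified stand-in for medusa.helper.common.convert_size, not the real helper.

# Sketch of the size normalization in parse(); to_bytes is a simplified
# stand-in for medusa's convert_size and is an assumption for illustration.
def to_bytes(size, units=('B', 'KB', 'MB', 'GB', 'TB', 'PB')):
    try:
        value, unit = size.split()
        return int(float(value) * 1024 ** units.index(unit.upper()))
    except ValueError:
        return -1

raw = '1.2GB'                            # as rendered in the size cell
normalized = raw[:-2] + ' ' + raw[-2:]   # '1.2 GB'
print(to_bytes(normalized))              # 1288490188 (about 1.2 GiB)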
worker-context.ts
|
/**
* @license
* Copyright 2021 Google LLC
* SPDX-License-Identifier: BSD-3-Clause
*/
import {LanguageServiceContext} from './language-service-context.js';
import {CachingCdn} from './caching-cdn.js';
|
/**
* Acquire the existing worker instance, or create a fresh one if missing.
* If the config differs from the existing instance's config, a new WorkerContext is
* instantiated and made the new instance.
*/
export function getWorkerContext(config: WorkerConfig) {
const configCacheKey = JSON.stringify(config);
if (workerContext && cacheKey === configCacheKey) {
return workerContext;
}
cacheKey = configCacheKey;
workerContext = new WorkerContext(config);
return workerContext;
}
export class WorkerContext {
readonly cdn: CachingCdn;
readonly importMapResolver: ImportMapResolver;
readonly languageServiceContext: LanguageServiceContext;
constructor(config: WorkerConfig) {
this.importMapResolver = new ImportMapResolver(config.importMap);
this.cdn = new CachingCdn(config.cdnBaseUrl ?? 'https://unpkg.com/');
this.languageServiceContext = new LanguageServiceContext();
}
}
|
import {ImportMapResolver} from './import-map-resolver.js';
import {WorkerConfig} from '../shared/worker-api.js';
let workerContext: WorkerContext | undefined;
let cacheKey = '';
|
random_line_split
|
worker-context.ts
|
/**
* @license
* Copyright 2021 Google LLC
* SPDX-License-Identifier: BSD-3-Clause
*/
import {LanguageServiceContext} from './language-service-context.js';
import {CachingCdn} from './caching-cdn.js';
import {ImportMapResolver} from './import-map-resolver.js';
import {WorkerConfig} from '../shared/worker-api.js';
let workerContext: WorkerContext | undefined;
let cacheKey = '';
/**
* Acquire the existing worker instance, or create a fresh one if missing.
* If the config differs from the existing instance's config, a new WorkerContext is
* instantiated and made the new instance.
*/
export function getWorkerContext(config: WorkerConfig) {
const configCacheKey = JSON.stringify(config);
if (workerContext && cacheKey === configCacheKey) {
return workerContext;
}
cacheKey = configCacheKey;
workerContext = new WorkerContext(config);
return workerContext;
}
export class WorkerContext {
readonly cdn: CachingCdn;
readonly importMapResolver: ImportMapResolver;
readonly languageServiceContext: LanguageServiceContext;
constructor(config: WorkerConfig)
|
}
|
{
this.importMapResolver = new ImportMapResolver(config.importMap);
this.cdn = new CachingCdn(config.cdnBaseUrl ?? 'https://unpkg.com/');
this.languageServiceContext = new LanguageServiceContext();
}
|
identifier_body
|
worker-context.ts
|
/**
* @license
* Copyright 2021 Google LLC
* SPDX-License-Identifier: BSD-3-Clause
*/
import {LanguageServiceContext} from './language-service-context.js';
import {CachingCdn} from './caching-cdn.js';
import {ImportMapResolver} from './import-map-resolver.js';
import {WorkerConfig} from '../shared/worker-api.js';
let workerContext: WorkerContext | undefined;
let cacheKey = '';
/**
* Acquire the existing worker instance, or create a fresh one if missing.
* If the config differs from the existing instance's config, a new WorkerContext is
* instantiated and made the new instance.
*/
export function getWorkerContext(config: WorkerConfig) {
const configCacheKey = JSON.stringify(config);
if (workerContext && cacheKey === configCacheKey) {
return workerContext;
}
cacheKey = configCacheKey;
workerContext = new WorkerContext(config);
return workerContext;
}
export class WorkerContext {
readonly cdn: CachingCdn;
readonly importMapResolver: ImportMapResolver;
readonly languageServiceContext: LanguageServiceContext;
|
(config: WorkerConfig) {
this.importMapResolver = new ImportMapResolver(config.importMap);
this.cdn = new CachingCdn(config.cdnBaseUrl ?? 'https://unpkg.com/');
this.languageServiceContext = new LanguageServiceContext();
}
}
|
constructor
|
identifier_name
|
worker-context.ts
|
/**
* @license
* Copyright 2021 Google LLC
* SPDX-License-Identifier: BSD-3-Clause
*/
import {LanguageServiceContext} from './language-service-context.js';
import {CachingCdn} from './caching-cdn.js';
import {ImportMapResolver} from './import-map-resolver.js';
import {WorkerConfig} from '../shared/worker-api.js';
let workerContext: WorkerContext | undefined;
let cacheKey = '';
/**
* Acquire the existing worker instance, or create a fresh one if missing.
* If the config differs from the existing instance's config, a new WorkerContext is
* instantiated and made the new instance.
*/
export function getWorkerContext(config: WorkerConfig) {
const configCacheKey = JSON.stringify(config);
if (workerContext && cacheKey === configCacheKey)
|
cacheKey = configCacheKey;
workerContext = new WorkerContext(config);
return workerContext;
}
export class WorkerContext {
readonly cdn: CachingCdn;
readonly importMapResolver: ImportMapResolver;
readonly languageServiceContext: LanguageServiceContext;
constructor(config: WorkerConfig) {
this.importMapResolver = new ImportMapResolver(config.importMap);
this.cdn = new CachingCdn(config.cdnBaseUrl ?? 'https://unpkg.com/');
this.languageServiceContext = new LanguageServiceContext();
}
}
|
{
return workerContext;
}
|
conditional_block
|
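The worker-context.ts rows above all center on the same config-keyed memoization: getWorkerContext() serializes the config and rebuilds the WorkerContext only when that key changes. A rough Python analogue of the pattern follows, kept in the language of the other rows; the WorkerContext class here is a placeholder, not the playground-elements API.

# Python analogue of the config-keyed caching in getWorkerContext(); the
# WorkerContext below is a placeholder class, not the real worker context.
import json

_worker_context = None
_cache_key = ''


class WorkerContext(object):
    def __init__(self, config):
        self.config = config


def get_worker_context(config):
    global _worker_context, _cache_key
    config_cache_key = json.dumps(config, sort_keys=True)
    if _worker_context is not None and _cache_key == config_cache_key:
        return _worker_context
    _cache_key = config_cache_key
    _worker_context = WorkerContext(config)
    return _worker_context


# Same config yields the cached instance; a changed config yields a fresh one.
a = get_worker_context({'cdnBaseUrl': 'https://unpkg.com/'})
b = get_worker_context({'cdnBaseUrl': 'https://unpkg.com/'})
print(a is b)   # True
c = get_worker_context({'cdnBaseUrl': 'https://example.com/cdn/'})
print(a is c)   # False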