Columns:
  file_name  large_string, lengths 4 to 140
  prefix     large_string, lengths 0 to 39k
  suffix     large_string, lengths 0 to 36.1k
  middle     large_string, lengths 0 to 29.4k
  fim_type   large_string, 4 distinct values
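Each row below is a fill-in-the-middle (FIM) example: a model is given prefix and suffix and asked to produce middle, and the three columns concatenate back to the original source file. A minimal sketch of that invariant in Rust, with column names taken from the schema above:

/// Reassemble a source file from one FIM row: prefix + middle + suffix.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut file = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    file.push_str(prefix);
    file.push_str(middle);
    file.push_str(suffix);
    file
}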
file_name: transaction_map.rs
prefix:
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use primitive::{UInt256, Transaction};

#[derive(PartialEq)]
pub enum TransactionIndexStatus {
    Init = 0,
    Get = 1,
}

pub struct TransactionIndex {
    status: TransactionIndexStatus,
    hash: UInt256,
    transaction: Option<Transaction>,
    waiters: Vec<UInt256>,
}

impl TransactionIndex {
    pub fn new(hash: &UInt256) -> TransactionIndex {
        TransactionIndex {
            status: TransactionIndexStatus::Init,
            hash: hash.clone(),
            transaction: None,
            waiters: Vec::new(),
        }
    }
    pub fn is_init(&self) -> bool {
        self.status == TransactionIndexStatus::Init
    }
    pub fn get_hash(&self) -> &UInt256 {
        &self.hash
    }
    pub fn get_transaction(&self) -> &Option<Transaction> {
        &self.transaction
    }
    pub fn set_transaction(&mut self, transaction: Transaction) {
        self.transaction = Some(transaction);
        self.status = TransactionIndexStatus::Get;
    }
    pub fn add_waiter(&mut self, next: UInt256) {
        self.waiters.push(next);
    }
    pub fn move_waiters(&mut self, v: &mut Vec<UInt256>) {
        v.append(&mut self.waiters);
    }
}

#[derive(Default)]
pub struct TransactionMap {
    map: HashMap<UInt256, TransactionIndex>,
}

impl TransactionMap {
    pub fn
suffix:
(&self, hash: &UInt256) -> Option<&TransactionIndex> {
        self.map.get(hash)
    }
    pub fn get_mut(&mut self, hash: &UInt256) -> Option<&mut TransactionIndex> {
        self.map.get_mut(hash)
    }
    pub fn insert(&mut self, hash: &UInt256) -> Result<&mut TransactionIndex, &mut TransactionIndex> {
        match self.map.entry(hash.clone()) {
            Vacant(v) => Ok(v.insert(TransactionIndex::new(hash))),
            Occupied(o) => Err(o.into_mut()),
        }
    }
}
middle: get
fim_type: identifier_name
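A brief usage sketch of this map's contract, assuming the UInt256 type from the crate's unseen primitive module (the caller request_transaction is hypothetical): insert returns Ok with a fresh Init-state index for an unseen hash, and Err with the existing index otherwise.

fn request_transaction(map: &mut TransactionMap, hash: &UInt256, waiter: UInt256) {
    match map.insert(hash) {
        // First sighting of this hash: a new index in the Init state.
        Ok(index) => index.add_waiter(waiter),
        // Already tracked: queue the waiter only while the fetch is still pending.
        Err(index) => {
            if index.is_init() {
                index.add_waiter(waiter);
            }
        }
    }
}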
file_name: simd_add.rs
prefix:
#![feature(test)]
#![feature(core)]

use std::simd::f32x4;

macro_rules! assert_equal_len {
    ($a:ident, $b:ident) => {
        assert!($a.len() == $b.len(),
                "add_assign: dimension mismatch: {:?} += {:?}",
                ($a.len(),), ($b.len(),));
    }
}

// element-wise addition
fn add_assign(xs: &mut Vec<f32>, ys: &Vec<f32>) {
    assert_equal_len!(xs, ys);

    for (x, y) in xs.iter_mut().zip(ys.iter()) {
        *x += *y;
    }
}

// simd accelerated addition
fn
suffix:
(xs: &mut Vec<f32>, ys: &Vec<f32>) {
    assert_equal_len!(xs, ys);

    let size = xs.len() as isize;
    let chunks = size / 4;

    // pointer to the start of the vector data
    let p_x: *mut f32 = xs.as_mut_ptr();
    let p_y: *const f32 = ys.as_ptr();

    // sum excess elements that don't fit in the simd vector
    for i in (4 * chunks)..size {
        // dereferencing a raw pointer requires an unsafe block
        unsafe {
            // offset by i elements
            *p_x.offset(i) += *p_y.offset(i);
        }
    }

    // treat the f32 vector as a simd f32x4 vector
    let simd_p_x = p_x as *mut f32x4;
    let simd_p_y = p_y as *const f32x4;

    // sum "simd vector"
    for i in 0..chunks {
        unsafe {
            *simd_p_x.offset(i) += *simd_p_y.offset(i);
        }
    }
}

mod bench {
    extern crate test;
    use self::test::Bencher;
    use std::iter;

    static BENCH_SIZE: usize = 10_000;

    macro_rules! bench {
        ($name:ident, $func:ident) => {
            #[bench]
            fn $name(b: &mut Bencher) {
                let mut x: Vec<_> = iter::repeat(1.0f32).take(BENCH_SIZE).collect();
                let y: Vec<_> = iter::repeat(1.0f32).take(BENCH_SIZE).collect();

                b.iter(|| {
                    super::$func(&mut x, &y);
                })
            }
        }
    }

    bench!(vanilla, add_assign);
    bench!(simd, simd_add_assign);
}
middle: simd_add_assign
fim_type: identifier_name
file_name: simd_add.rs
prefix/suffix: the same simd_add.rs source as in the previous example, this time masking the body of add_assign
middle:
{
    assert_equal_len!(xs, ys);

    for (x, y) in xs.iter_mut().zip(ys.iter()) {
        *x += *y;
    }
}
fim_type: identifier_body
file_name: simd_add.rs
prefix/suffix: the same simd_add.rs source again, split at a random line inside simd_add_assign
middle:
    // sum "simd vector"
    for i in 0..chunks {
        unsafe {
            *simd_p_x.offset(i) += *simd_p_y.offset(i);
fim_type: random_line_split
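Note that #![feature(core)] and the std::simd::f32x4 used above date from pre-1.0 nightly Rust and no longer compile. On a current nightly, the same element-wise add can be written without raw pointers via portable_simd; this is a sketch of the idea under that assumption, not the original benchmark code:

#![feature(portable_simd)] // nightly-only; the API may still shift

use std::simd::f32x4;

// Element-wise `xs[i] += ys[i]`, four lanes at a time.
fn simd_add_assign(xs: &mut [f32], ys: &[f32]) {
    assert_eq!(xs.len(), ys.len(), "dimension mismatch");
    let mut x_chunks = xs.chunks_exact_mut(4);
    let mut y_chunks = ys.chunks_exact(4);
    for (xc, yc) in (&mut x_chunks).zip(&mut y_chunks) {
        let sum = f32x4::from_slice(xc) + f32x4::from_slice(yc);
        xc.copy_from_slice(&sum.to_array());
    }
    // scalar tail for the up-to-three leftover elements
    for (x, y) in x_chunks.into_remainder().iter_mut().zip(y_chunks.remainder()) {
        *x += *y;
    }
}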
file_name: item_store.ts
// item_store contains the core interfaces and types for // encrypted items and storage of them import clone = require('clone'); import sprintf = require('sprintf'); import agile_keychain_crypto = require('./agile_keychain_crypto'); import asyncutil = require('./base/asyncutil'); import collectionutil = require('./base/collectionutil'); import dateutil = require('./base/dateutil'); import err_util = require('./base/err_util'); import event_stream = require('./base/event_stream'); import key_agent = require('./key_agent'); import sha1 = require('./crypto/sha1'); import stringutil = require('./base/stringutil'); // typedef for item type codes export interface ItemType extends String {} /** Constants for the different types of item * that a vault may contain. * * Item type codes are taken from 1Password v4 */ export class ItemTypes { // The most common type, for logins and other web forms static LOGIN = <ItemType>'webforms.WebForm'; // Other item types static CREDIT_CARD = <ItemType>'wallet.financial.CreditCard'; static ROUTER = <ItemType>'wallet.computer.Router'; static SECURE_NOTE = <ItemType>'securenotes.SecureNote'; static PASSWORD = <ItemType>'passwords.Password'; static EMAIL_ACCOUNT = <ItemType>'wallet.onlineservices.Email.v2'; static BANK_ACCOUNT = <ItemType>'wallet.financial.BankAccountUS'; static DATABASE = <ItemType>'wallet.computer.Database'; static DRIVERS_LICENSE = <ItemType>'wallet.government.DriversLicense'; static MEMBERSHIP = <ItemType>'wallet.membership.Membership'; static HUNTING_LICENSE = <ItemType>'wallet.government.HuntingLicense'; static PASSPORT = <ItemType>'wallet.government.Passport'; static REWARD_PROGRAM = <ItemType>'wallet.membership.RewardProgram'; static SERVER = <ItemType>'wallet.computer.UnixServer'; static SOCIAL_SECURITY = <ItemType>'wallet.government.SsnUS'; static SOFTWARE_LICENSE = <ItemType>'wallet.computer.License'; static IDENTITY = <ItemType>'identities.Identity'; // Non-item types static FOLDER = <ItemType>'system.folder.Regular'; static SAVED_SEARCH = <ItemType>'system.folder.SavedSearch'; // Marker type used to deleted items. 
The ID is preserved // but the type is set to Tombstone and all other data // is removed static TOMBSTONE = <ItemType>'system.Tombstone'; } /** Map of item type codes to human-readable item type names */ export var ITEM_TYPES: ItemTypeMap = { 'webforms.WebForm': { name: 'Login', shortAlias: 'login', }, 'wallet.financial.CreditCard': { name: 'Credit Card', shortAlias: 'card', }, 'wallet.computer.Router': { name: 'Wireless Router', shortAlias: 'router', }, 'securenotes.SecureNote': { name: 'Secure Note', shortAlias: 'note', }, 'passwords.Password': { name: 'Password', shortAlias: 'pass', }, 'wallet.onlineservices.Email.v2': { name: 'Email Account', shortAlias: 'email', }, 'system.folder.Regular': { name: 'Folder', shortAlias: 'folder', }, 'system.folder.SavedSearch': { name: 'Smart Folder', shortAlias: 'smart-folder', }, 'wallet.financial.BankAccountUS': { name: 'Bank Account', shortAlias: 'bank', }, 'wallet.computer.Database': { name: 'Database', shortAlias: 'db', }, 'wallet.government.DriversLicense': { name: "Driver's License", shortAlias: 'driver', }, 'wallet.membership.Membership': { name: 'Membership', shortAlias: 'membership', }, 'wallet.government.HuntingLicense': { name: 'Outdoor License', shortAlias: 'outdoor', }, 'wallet.government.Passport': { name: 'Passport', shortAlias: 'passport', }, 'wallet.membership.RewardProgram': { name: 'Reward Program', shortAlias: 'reward', }, 'wallet.computer.UnixServer': { name: 'Unix Server', shortAlias: 'server', }, 'wallet.government.SsnUS': { name: 'Social Security Number', shortAlias: 'social', }, 'wallet.computer.License': { name: 'Software License', shortAlias: 'software', }, 'identities.Identity': { name: 'Identity', shortAlias: 'id', }, // internal entry type created for items // that have been removed from the trash 'system.Tombstone': { name: 'Tombstone', shortAlias: 'tombstone', }, }; export interface ItemState { uuid: string; revision: string; deleted: boolean; } /** A convenience interface for passing around an item * and its contents together. */ export interface ItemAndContent { item: Item; content: ItemContent; } export interface ItemTypeInfo { name: string; shortAlias: string; } export interface ItemTypeMap { // map of ItemType -> ItemTypeInfo [index: string]: ItemTypeInfo; } export class UnsavedItemError extends err_util.BaseError { constructor() { super('Item has not been saved to a store'); } } /** Represents the content of an item, usually stored * encrypted in a vault. * * ItemContent and its dependent fields are plain interfaces * to facilitate easy (de-)serialization. */ export interface ItemContent { sections: ItemSection[]; urls: ItemUrl[]; notes: string; formFields: WebFormField[]; htmlMethod: string; htmlAction: string; htmlId: string; } /** Utility functions for creating and extracting * data from ItemContent instances. */ export let ContentUtil = { /** Creates a new ItemContent instance with all * fields set to default values. */ empty(): ItemContent { return { sections: [], urls: [], notes: '', formFields: [], htmlMethod: '', htmlAction: '', htmlId: '', }; }, /** Returns the account name associated with this item. * * The field used for the account name depends on the item * type. For logins, this is the 'username' field. * * Returns an empty string if the item has no associated account. */ account(content: ItemContent): string { let field = ContentUtil.accountField(content); return field ? 
field.value : ''; }, accountField(content: ItemContent): WebFormField { let accountFields = content.formFields.filter( field => field.designation === 'username' ); return accountFields.length > 0 ? accountFields[0] : null; }, /** Returns the primary password associated with this item. * * This depends upon the item type. For logins, this is * the 'password' field. * * Returns an empty password if the item has no associated * account. */ password(content: ItemContent): string { let field = ContentUtil.passwordField(content); return field ? field.value : ''; }, passwordField(content: ItemContent): WebFormField { var passFields = content.formFields.filter( field => field.designation === 'password' ); return passFields.length > 0 ? passFields[0] : null; }, }; /** Represents a single item in a 1Password vault. */ export class Item { // store which this item belongs to, or null // if the item has not yet been saved private store: Store; /** Identifies the version of an item. This is an opaque * string which is set when an item is saved to a store. * It will change each time an item is saved. */ revision: string; /** Identifies the previous version of an item. This is * an opaque string which is set to the current revision * just prior to a new version being saved to a store * which supports item history. It will be updated * each time an item is saved. */ parentRevision: string; /** Unique ID for this item within the vault */ uuid: string; /** ID of the folder that this item currently belongs to */ folderUuid: string; faveIndex: number; trashed: boolean; updatedAt: Date; createdAt: Date; /** Item type code for this item. This is one of the values * in the ItemTypes class. */ typeName: ItemType; /** Main title for this item. */ title: string; /** Additional metadata (eg. tags) * which is stored unencrypted for this item. */ openContents: ItemOpenContents; /** List of URLs that this item is associated with. */ locations: string[]; /** The account name or number that this item is associated with */ account: string; /** The decrypted content of the item, either set
/** Create a new item. @p store is the store * to associate the new item with. This can * be changed later via saveTo(). * * When importing an existing item or loading * an existing item from the store, @p uuid may be non-null. * Otherwise a random new UUID will be allocated for * the item. */ constructor(store?: Store, uuid?: string) { this.store = store; this.uuid = uuid || agile_keychain_crypto.newUUID(); this.trashed = false; this.typeName = ItemTypes.LOGIN; this.folderUuid = ''; this.locations = []; this.title = ''; } /** Retrieves and decrypts the content of a 1Password item. * * In the Agile Keychain format, items are stored in two parts. * The overview data is stored in both contents.js and replicated * in the <UUID>.1password file for the item and is unencrypted. * * The item content is stored in the <UUID>.1password file and * is encrypted using the store's master key. * * The item's store must be unlocked using Store.unlock() before * item content can be retrieved. */ getContent(): Promise<ItemContent> { if (this.content) { return Promise.resolve(this.content); } else if (!this.store) { this.content = ContentUtil.empty(); return Promise.resolve(this.content); } return this.store.getContent(this); } setContent(content: ItemContent) { this.content = content; } /** Return the raw decrypted JSON data for an item. * This is only available for saved items. */ getRawDecryptedData(): Promise<string> { if (!this.store) { return Promise.reject<string>(new UnsavedItemError()); } return this.store.getRawDecryptedData(this); } /** Save this item to its associated store */ save(): Promise<void> { if (!this.store) { return Promise.reject<void>(new UnsavedItemError()); } return this.saveTo(this.store); } /** Save this item to the specified store */ saveTo(store: Store): Promise<void> { if (!this.content && !this.isSaved()) { return Promise.reject<void>( new Error('Unable to save new item, no content set') ); } this.store = store; return this.store.saveItem(this); } /** Remove the item from the store. * This erases all of the item's data and leaves behind a 'tombstone' * entry for syncing purposes. */ remove(): Promise<void> { if (!this.store) { return Promise.reject<void>(new UnsavedItemError()); } this.typeName = ItemTypes.TOMBSTONE; this.title = 'Unnamed'; this.trashed = true; this.setContent(ContentUtil.empty()); this.folderUuid = ''; this.locations = []; this.faveIndex = null; this.openContents = null; return this.store.saveItem(this); } /** Returns true if this is a 'tombstone' entry remaining from * a deleted item. When an item is deleted, all of the properties except * the UUID are erased and the item's type is changed to 'system.Tombstone'. * * These 'tombstone' markers are preserved so that deletions are synced between * different 1Password clients. */ isTombstone(): boolean { return this.typeName == ItemTypes.TOMBSTONE; } /** Returns true if this is a regular item - ie. not a folder, * tombstone or saved search. */ isRegularItem(): boolean { return !stringutil.startsWith(<string>this.typeName, 'system.'); } /** Returns a shortened version of the item's UUID, suitable for disambiguation * between different items with the same type and title. */ shortID(): string { return this.uuid.slice(0, 4); } /** Returns the human-readable type name for this item's type. */ typeDescription(): string { if (ITEM_TYPES[<string>this.typeName]) { return ITEM_TYPES[<string>this.typeName].name; } else { return <string>this.typeName; } } /** Returns true if this item has been saved to a store. 
*/ isSaved(): boolean { return this.store && this.updatedAt != null; } /** Set the last-modified time for the item to the current time. * If the created time for the item has not been initialized, it * is also set to the current time. */ updateTimestamps() { if (!this.createdAt) { this.createdAt = new Date(); } // update last-modified time var prevDate = this.updatedAt; this.updatedAt = new Date(); // ensure that last-modified time always advances by at least one // second from the previous time on save. // // This is required to ensure the 'updatedAt' time saved in contents.js // changes since it only stores second-level resolution if (prevDate && this.updatedAt.getTime() - prevDate.getTime() < 1000) { this.updatedAt = new Date(prevDate.getTime() + 1000); } } /** Returns the main URL associated with this item or an empty * string if there are no associated URLs. */ primaryLocation(): string { if (this.locations.length > 0) { return this.locations[0]; } else { return ''; } } /** Update item overview metadata to match the complete * content of an item. * * This updates the URL list for an item. */ updateOverviewFromContent(content: ItemContent) { this.locations = []; content.urls.forEach(url => { this.locations.push(url.url); }); this.account = ContentUtil.account(content); } } /** Content of an item which is usually stored unencrypted * as part of the overview data. */ export interface ItemOpenContents { tags: string[]; /** Indicates where this item will be displayed. * Known values are 'Always' (show everywhere) * and 'Never' (never shown in browser) */ scope: string; } /** A group of fields in an item. */ export interface ItemSection { /** Internal name of the section. */ name: string; /** User-visible title for the section. */ title: string; fields: ItemField[]; } /** A specific property/attribute of an item. * * Each field has a data type, an internal name/ID for the field, * a user-visible title and a current value. */ export interface ItemField { kind: FieldType; name: string; title: string; value: any; } export function fieldValueString(field: ItemField) { switch (field.kind) { case FieldType.Date: return dateutil.dateFromUnixTimestamp(field.value).toString(); case FieldType.MonthYear: var month = field.value % 100; var year = (field.value / 100) % 100; return sprintf('%02d/%d', month, year); default: return field.value; } } /** Type of input field in a web form. */ export enum FormFieldType { Text, Password, Email, Checkbox, Input, } /** Saved value of an input field in a web form. */ export interface WebFormField { value: string; /** 'id' attribute of the <input> element */ id: string; /** Name of the field. For web forms this is the 'name' * attribute of the <input> element. */ name: string; /** Type of input element used for this form field */ type: FormFieldType; /** Purpose of the field. Known values are 'username', 'password' */ designation: string; } /** Entry in an item's 'Websites' list. */ export interface ItemUrl { label: string; url: string; } /** Type of data stored in a field. * The set of types comes originally from those used * in the 1Password Agile Keychain format. */ export enum FieldType { Text, Password, Address, Date, MonthYear, URL, CreditCardType, PhoneNumber, Gender, Email, Menu, } export interface ListItemsOptions { /** Include 'tombstone' items which are left in the store * when an item is removed. */ includeTombstones?: boolean; } /** Specifies where an update came from when saving an item. 
*/ export enum ChangeSource { /** Indicates a change resulting from a sync with another store. */ Sync, /** Indicates a local change. */ Local, } /** Interface for a store of encrypted items. * * A Store consists of a set of Item(s), identified by unique ID, * plus a set of encryption keys used to encrypt the contents of * those items. * * Items are versioned with an implementation-specific revision. * Stores may keep only the last revision of an item or they * may keep previous revisions as well. */ export interface Store { /** Emits events when items are updated in the store. */ onItemUpdated: event_stream.EventStream<Item>; /** Emits events when keys are updated in the store. */ onKeysUpdated?: event_stream.EventStream<key_agent.Key[]>; /** Unlock the vault */ unlock(password: string): Promise<void>; /** List the states (ID, last update time and whether deleted) * of all items in the store. */ listItemStates(): Promise<ItemState[]>; /** List all of the items in the store */ listItems(opts?: ListItemsOptions): Promise<Item[]>; /** Load the item with a specific ID. * * If a revision is specified, load a specific version of an item, * otherwise load the current version of the item. * * loadItem() should report an error if the item has been deleted. * Deleted items are only available as tombstone entries in the * list returned by listItemStates(). */ loadItem(uuid: string, revision?: string): Promise<ItemAndContent>; /** Save changes to the overview data and item content * back to the store. The @p source specifies whether * this update is a result of syncing changes * with another store or a local modification. * * Saving an item assigns a new revision to it. */ saveItem(item: Item, source?: ChangeSource): Promise<void>; /** Fetch and decrypt the item's secure contents. */ getContent(item: Item): Promise<ItemContent>; /** Fetch and decrypt item's secure contents and return * as a raw string - ie. without parsing the data and converting * to an ItemContent instance. */ getRawDecryptedData(item: Item): Promise<string>; /** Retrieve the master encryption keys for this store. */ listKeys(): Promise<key_agent.Key[]>; /** Update the encryption keys in this store. */ saveKeys(keys: key_agent.Key[], hint: string): Promise<void>; /** Permanently delete all data from the store. */ clear(): Promise<void>; /** Return the user-provided password hint. */ passwordHint(): Promise<string>; } /** Represents a pair of revision strings for * the same revision of an item in the local and cloud * stores. * * Item revision formats are specific to the store * implementation, so the same revision of an item * that is synced between two stores (eg. a local * store in IndexedDB in the browser and a cloud store * in Dropbox) will have different revision strings. */ export interface RevisionPair { /** The revision of the item in the local store. */ local: string; /** The corresponding revision of the item in the * external store. */ external: string; } /** SyncableStore provides methods for storing metadata * to enable syncing this store with other stores. */ export interface SyncableStore extends Store { /** Stores which revision of an item in a store (identified by @p storeID) was * last synced with this store. */ setLastSyncedRevision( item: Item, storeID: string, revision?: RevisionPair ): Promise<void>; /** Retrieves the revision of an item in a store (identified by @p storeID) * which was last synced with this store. 
*/ getLastSyncedRevision(uuid: string, storeID: string): Promise<RevisionPair>; /** Retrieve a map of (item ID -> last-synced revision) for * all items in the store which have previously been synced with * @p storeID. */ lastSyncRevisions(storeID: string): Promise<Map<string, RevisionPair>>; } /** Copy an item and its contents, using @p uuid as the ID for * the new item. If new item is associated with @p store. * * The returned item will have {itemAndContent.item.revision} as * its parentRevision and a null revision property. */ export function cloneItem( itemAndContent: ItemAndContent, uuid: string, store?: Store ) { let item = itemAndContent.item; // item ID and sync data let clonedItem = new Item(store, uuid); clonedItem.parentRevision = item.revision; // core metadata clonedItem.folderUuid = item.uuid; clonedItem.faveIndex = item.faveIndex; clonedItem.trashed = item.trashed; clonedItem.updatedAt = item.updatedAt; clonedItem.createdAt = item.createdAt; clonedItem.typeName = item.typeName; clonedItem.title = item.title; clonedItem.openContents = item.openContents; clonedItem.locations = <string[]>clone(item.locations); clonedItem.account = item.account; // item content let clonedContent = <ItemContent>clone(itemAndContent.content); clonedItem.setContent(clonedContent); return { item: clonedItem, content: clonedContent }; } /** Generate a content-based revision ID for an item. * Revision IDs are a hash of the item's parent revision, * plus all of its current content. */ export function generateRevisionId(item: ItemAndContent) { var contentMetadata = { uuid: item.item.uuid, parentRevision: item.item.parentRevision, title: item.item.title, updatedAt: item.item.updatedAt, createdAt: item.item.createdAt, typeName: item.item.typeName, openContents: item.item.openContents, folderUuid: item.item.folderUuid, faveIndex: item.item.faveIndex, trashed: item.item.trashed, content: item.content, }; var contentString = JSON.stringify(contentMetadata); var hasher = new sha1.SHA1(); var srcBuf = collectionutil.bufferFromString(contentString); var digest = new Int32Array(5); hasher.hash(srcBuf, digest); return collectionutil.hexlify(digest); } /** Provides a default implementation of ItemStore.listItemStates() using * ItemStore.listItems(). Since listItemStates() returns a subset of * the information returned by listItems(), stores may be able to * provide more efficient implementations. */ export function itemStates(store: Store): Promise<ItemState[]> { return store.listItems({ includeTombstones: true }).then(items => items.map(item => ({ uuid: item.uuid, revision: item.revision, deleted: item.isTombstone(), })) ); } /** Decrypt the encryption keys for @p store and add * the keys to @p agent. */ export function unlockStore( store: Store, agent: key_agent.KeyAgent, password: string ): Promise<void> { return store .listKeys() .then(keys => { if (keys.length == 0) { throw new Error( 'Unable to unlock store: No encryption keys have been saved' ); } return key_agent.decryptKeys(keys, password); }) .then(keys => { let savedKeys: Promise<void>[] = []; keys.forEach(key => { savedKeys.push(agent.addKey(key.id, key.key)); }); return asyncutil.eraseResult(Promise.all(savedKeys)); }); }
middle:
     * via setContent() or decrypted on-demand by
     * getContent()
     */
    private content: ItemContent;
fim_type: random_line_split
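One detail worth pulling out of the flattened source above: updateTimestamps forces updatedAt to advance by at least one second on each save, because contents.js stores timestamps at second resolution and an unchanged value would hide the edit. The same guard, sketched in Rust with std::time (the touch helper is hypothetical):

use std::time::{Duration, SystemTime};

// Advance `updated_at` to now, but by at least one second past its previous
// value, so second-resolution serializations still observe a change.
fn touch(updated_at: &mut SystemTime) {
    let prev = *updated_at;
    let now = SystemTime::now();
    *updated_at = match now.duration_since(prev) {
        Ok(elapsed) if elapsed >= Duration::from_secs(1) => now,
        _ => prev + Duration::from_secs(1), // too soon, or clock went backwards
    };
}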
file_name: item_store.ts
prefix/suffix: the same item_store.ts source as in the previous example, masking the identifier of the method documented as "Returns the human-readable type name for this item's type."
middle: typeDescription
fim_type: identifier_name
file_name: item_store.ts
prefix: the same item_store.ts source once more (truncated)
     *
     * If a revision is specified, load a specific version of an item,
     * otherwise load the current version of the item.
     *
     * loadItem() should report an error if the item has been deleted.
     * Deleted items are only available as tombstone entries in the
     * list returned by listItemStates().
     */
    loadItem(uuid: string, revision?: string): Promise<ItemAndContent>;

    /** Save changes to the overview data and item content
     * back to the store. The @p source specifies whether
     * this update is a result of syncing changes
     * with another store or a local modification.
     *
     * Saving an item assigns a new revision to it.
     */
    saveItem(item: Item, source?: ChangeSource): Promise<void>;

    /** Fetch and decrypt the item's secure contents. */
    getContent(item: Item): Promise<ItemContent>;

    /** Fetch and decrypt item's secure contents and return
     * as a raw string - ie. without parsing the data and converting
     * to an ItemContent instance.
     */
    getRawDecryptedData(item: Item): Promise<string>;

    /** Retrieve the master encryption keys for this store. */
    listKeys(): Promise<key_agent.Key[]>;

    /** Update the encryption keys in this store. */
    saveKeys(keys: key_agent.Key[], hint: string): Promise<void>;

    /** Permanently delete all data from the store. */
    clear(): Promise<void>;

    /** Return the user-provided password hint. */
    passwordHint(): Promise<string>;
}

/** Represents a pair of revision strings for
 * the same revision of an item in the local and cloud
 * stores.
 *
 * Item revision formats are specific to the store
 * implementation, so the same revision of an item
 * that is synced between two stores (eg. a local
 * store in IndexedDB in the browser and a cloud store
 * in Dropbox) will have different revision strings.
 */
export interface RevisionPair {
    /** The revision of the item in the local store. */
    local: string;
    /** The corresponding revision of the item in the
     * external store.
     */
    external: string;
}

/** SyncableStore provides methods for storing metadata
 * to enable syncing this store with other stores.
 */
export interface SyncableStore extends Store {
    /** Stores which revision of an item in a store (identified by @p storeID) was
     * last synced with this store.
     */
    setLastSyncedRevision(
        item: Item,
        storeID: string,
        revision?: RevisionPair
    ): Promise<void>;

    /** Retrieves the revision of an item in a store (identified by @p storeID)
     * which was last synced with this store.
     */
    getLastSyncedRevision(uuid: string, storeID: string): Promise<RevisionPair>;

    /** Retrieve a map of (item ID -> last-synced revision) for
     * all items in the store which have previously been synced with
     * @p storeID.
     */
    lastSyncRevisions(storeID: string): Promise<Map<string, RevisionPair>>;
}

/** Copy an item and its contents, using @p uuid as the ID for
 * the new item. The new item is associated with @p store, if provided.
 *
 * The returned item will have {itemAndContent.item.revision} as
 * its parentRevision and a null revision property.
 */
export function cloneItem(
    itemAndContent: ItemAndContent,
    uuid: string,
    store?: Store
) {
    let item = itemAndContent.item;

    // item ID and sync data
    let clonedItem = new Item(store, uuid);
    clonedItem.parentRevision = item.revision;

    // core metadata
    clonedItem.folderUuid = item.folderUuid;
    clonedItem.faveIndex = item.faveIndex;
    clonedItem.trashed = item.trashed;
    clonedItem.updatedAt = item.updatedAt;
    clonedItem.createdAt = item.createdAt;
    clonedItem.typeName = item.typeName;
    clonedItem.title = item.title;
    clonedItem.openContents = item.openContents;
    clonedItem.locations = <string[]>clone(item.locations);
    clonedItem.account = item.account;

    // item content
    let clonedContent = <ItemContent>clone(itemAndContent.content);
    clonedItem.setContent(clonedContent);

    return { item: clonedItem, content: clonedContent };
}

/** Generate a content-based revision ID for an item.
 * Revision IDs are a hash of the item's parent revision,
 * plus all of its current content.
 */
export function generateRevisionId(item: ItemAndContent) {
    var contentMetadata = {
        uuid: item.item.uuid,
        parentRevision: item.item.parentRevision,
        title: item.item.title,
        updatedAt: item.item.updatedAt,
        createdAt: item.item.createdAt,
        typeName: item.item.typeName,
        openContents: item.item.openContents,
        folderUuid: item.item.folderUuid,
        faveIndex: item.item.faveIndex,
        trashed: item.item.trashed,
        content: item.content,
    };
    var contentString = JSON.stringify(contentMetadata);
    var hasher = new sha1.SHA1();
    var srcBuf = collectionutil.bufferFromString(contentString);
    var digest = new Int32Array(5);
    hasher.hash(srcBuf, digest);
    return collectionutil.hexlify(digest);
}

/** Provides a default implementation of ItemStore.listItemStates() using
 * ItemStore.listItems(). Since listItemStates() returns a subset of
 * the information returned by listItems(), stores may be able to
 * provide more efficient implementations.
 */
export function itemStates(store: Store): Promise<ItemState[]> {
    return store.listItems({ includeTombstones: true }).then(items =>
        items.map(item => ({
            uuid: item.uuid,
            revision: item.revision,
            deleted: item.isTombstone(),
        }))
    );
}

/** Decrypt the encryption keys for @p store and add
 * the keys to @p agent.
 */
export function unlockStore(
    store: Store,
    agent: key_agent.KeyAgent,
    password: string
): Promise<void> {
    return store
        .listKeys()
        .then(keys => {
            if (keys.length == 0) {
                throw new Error(
                    'Unable to unlock store: No encryption keys have been saved'
                );
            }
            return key_agent.decryptKeys(keys, password);
        })
        .then(keys => {
            let savedKeys: Promise<void>[] = [];
            keys.forEach(key => {
                savedKeys.push(agent.addKey(key.id, key.key));
            });
            return asyncutil.eraseResult(Promise.all(savedKeys));
        });
}
{ return Promise.resolve(this.content); }
conditional_block
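The record above is a fill-in-the-middle (FIM) example of type `conditional_block`: the masked middle is the body of the `if (this.content)` branch in `getContent()`, and the three spans of the record concatenate back into the original TypeScript source. A minimal round-trip check in Python (the `reassemble` helper and the `prefix`/`middle`/`suffix` labels are mine, naming the three code spans visible in each record):

```python
def reassemble(record: dict) -> str:
    """Rebuild the original file from a FIM record.

    Assumes the masked span is stored verbatim, so the three fields
    concatenate byte-for-byte into the source text.
    """
    return record["prefix"] + record["middle"] + record["suffix"]


# Abbreviated version of the TypeScript record above:
record = {
    "prefix": "getContent(): Promise<ItemContent> {\n        if (this.content)",
    "middle": " {\n            return Promise.resolve(this.content);\n        }",
    "suffix": " else if (!this.store) { /* ... */ }",
    "fim_type": "conditional_block",
}
assert "if (this.content) {" in reassemble(record)
```

The same round-trip property should hold for every record below; the `fim_type` label only describes how the middle span was chosen.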
payment.py
import json from collections import OrderedDict from django import forms from django.template.loader import get_template from django.utils.translation import ugettext_lazy as _ from pretix.base.payment import BasePaymentProvider class BankTransfer(BasePaymentProvider):
identifier = 'banktransfer' verbose_name = _('Bank transfer') @property def settings_form_fields(self): return OrderedDict( list(super().settings_form_fields.items()) + [ ('bank_details', forms.CharField( widget=forms.Textarea, label=_('Bank account details'), )) ] ) def payment_form_render(self, request) -> str: template = get_template('pretixplugins/banktransfer/checkout_payment_form.html') ctx = {'request': request, 'event': self.event, 'settings': self.settings} return template.render(ctx) def checkout_prepare(self, request, total): return True def payment_is_valid_session(self, request): return True def checkout_confirm_render(self, request): form = self.payment_form(request) template = get_template('pretixplugins/banktransfer/checkout_payment_confirm.html') ctx = {'request': request, 'form': form, 'settings': self.settings} return template.render(ctx) def order_pending_mail_render(self, order) -> str: template = get_template('pretixplugins/banktransfer/email/order_pending.txt') ctx = {'event': self.event, 'order': order, 'settings': self.settings} return template.render(ctx) def order_pending_render(self, request, order) -> str: template = get_template('pretixplugins/banktransfer/pending.html') ctx = {'request': request, 'order': order, 'settings': self.settings} return template.render(ctx) def order_control_render(self, request, order) -> str: if order.payment_info: payment_info = json.loads(order.payment_info) else: payment_info = None template = get_template('pretixplugins/banktransfer/control.html') ctx = {'request': request, 'event': self.event, 'settings': self.settings, 'payment_info': payment_info, 'order': order} return template.render(ctx)
identifier_body
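This record's mask type is `identifier_body`: the prefix stops right after the `class BankTransfer(BasePaymentProvider):` header, the whole class body is the middle, and the suffix appears to be empty. A sketch of how such a split could be computed for Python sources with the standard `ast` module (the helper name and exact boundary rules are my assumptions; real dataset tooling may handle decorators or one-line bodies differently):

```python
import ast


def split_identifier_body(source: str, name: str):
    """Split source into (prefix, body, suffix) around the body of the
    top-level def/class named `name`. Needs Python 3.8+ for end_lineno."""
    for node in ast.parse(source).body:
        if isinstance(node, (ast.ClassDef, ast.FunctionDef)) and node.name == name:
            lines = source.splitlines(keepends=True)
            start = node.body[0].lineno - 1  # first statement of the body
            end = node.end_lineno            # last line of the definition
            return "".join(lines[:start]), "".join(lines[start:end]), "".join(lines[end:])
    raise ValueError(f"no top-level def/class named {name!r}")


src = "class BankTransfer:\n    identifier = 'banktransfer'\n\nrest = True\n"
prefix, body, suffix = split_identifier_body(src, "BankTransfer")
assert body == "    identifier = 'banktransfer'\n"
assert prefix + body + suffix == src
```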
payment.py
import json from collections import OrderedDict from django import forms from django.template.loader import get_template from django.utils.translation import ugettext_lazy as _ from pretix.base.payment import BasePaymentProvider class BankTransfer(BasePaymentProvider): identifier = 'banktransfer' verbose_name = _('Bank transfer') @property def settings_form_fields(self): return OrderedDict( list(super().settings_form_fields.items()) + [ ('bank_details', forms.CharField( widget=forms.Textarea, label=_('Bank account details'), )) ] ) def payment_form_render(self, request) -> str: template = get_template('pretixplugins/banktransfer/checkout_payment_form.html') ctx = {'request': request, 'event': self.event, 'settings': self.settings} return template.render(ctx) def checkout_prepare(self, request, total): return True def payment_is_valid_session(self, request): return True def checkout_confirm_render(self, request): form = self.payment_form(request) template = get_template('pretixplugins/banktransfer/checkout_payment_confirm.html') ctx = {'request': request, 'form': form, 'settings': self.settings} return template.render(ctx) def order_pending_mail_render(self, order) -> str: template = get_template('pretixplugins/banktransfer/email/order_pending.txt') ctx = {'event': self.event, 'order': order, 'settings': self.settings} return template.render(ctx) def order_pending_render(self, request, order) -> str: template = get_template('pretixplugins/banktransfer/pending.html') ctx = {'request': request, 'order': order, 'settings': self.settings} return template.render(ctx) def
(self, request, order) -> str: if order.payment_info: payment_info = json.loads(order.payment_info) else: payment_info = None template = get_template('pretixplugins/banktransfer/control.html') ctx = {'request': request, 'event': self.event, 'settings': self.settings, 'payment_info': payment_info, 'order': order} return template.render(ctx)
order_control_render
identifier_name
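Here the mask type is `identifier_name`: the middle is nothing but the method name `order_control_render`, with the prefix ending at the `def` keyword and the suffix starting at the parameter list. A small sanity check for records of this shape (a hypothetical helper, not part of any published tooling; classes without an explicit base list would need a looser check on the suffix):

```python
def check_identifier_name(record: dict) -> None:
    """Spot-check an 'identifier_name' record: the masked span must be a
    valid identifier sitting between a def/class keyword and its signature."""
    assert record["fim_type"] == "identifier_name"
    assert record["middle"].strip().isidentifier()
    assert record["prefix"].rstrip().endswith(("def", "class"))
    assert record["suffix"].lstrip().startswith("(")


check_identifier_name({
    "prefix": "    def",  # prefix truncated for the example
    "middle": "order_control_render",
    "suffix": "(self, request, order) -> str:",
    "fim_type": "identifier_name",
})
```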
payment.py
import json from collections import OrderedDict from django import forms from django.template.loader import get_template from django.utils.translation import ugettext_lazy as _ from pretix.base.payment import BasePaymentProvider class BankTransfer(BasePaymentProvider): identifier = 'banktransfer' verbose_name = _('Bank transfer') @property def settings_form_fields(self): return OrderedDict( list(super().settings_form_fields.items()) + [ ('bank_details', forms.CharField( widget=forms.Textarea, label=_('Bank account details'), )) ] ) def payment_form_render(self, request) -> str: template = get_template('pretixplugins/banktransfer/checkout_payment_form.html') ctx = {'request': request, 'event': self.event, 'settings': self.settings} return template.render(ctx) def checkout_prepare(self, request, total): return True def payment_is_valid_session(self, request): return True def checkout_confirm_render(self, request): form = self.payment_form(request) template = get_template('pretixplugins/banktransfer/checkout_payment_confirm.html') ctx = {'request': request, 'form': form, 'settings': self.settings} return template.render(ctx) def order_pending_mail_render(self, order) -> str: template = get_template('pretixplugins/banktransfer/email/order_pending.txt') ctx = {'event': self.event, 'order': order, 'settings': self.settings} return template.render(ctx) def order_pending_render(self, request, order) -> str: template = get_template('pretixplugins/banktransfer/pending.html') ctx = {'request': request, 'order': order, 'settings': self.settings} return template.render(ctx) def order_control_render(self, request, order) -> str: if order.payment_info: payment_info = json.loads(order.payment_info)
payment_info = None template = get_template('pretixplugins/banktransfer/control.html') ctx = {'request': request, 'event': self.event, 'settings': self.settings, 'payment_info': payment_info, 'order': order} return template.render(ctx)
else:
random_line_split
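The `random_line_split` type cuts at line boundaries rather than syntactic units: here the middle is the single line `else:`. A sketch of how such splits could be generated; the span length and its distribution are arbitrary choices of mine:

```python
import random


def random_line_split(source: str, rng: random.Random, max_span: int = 8) -> dict:
    """Mask a random contiguous run of lines as the FIM middle."""
    lines = source.splitlines(keepends=True)
    start = rng.randrange(len(lines))
    end = min(len(lines), start + rng.randint(1, max_span))
    return {
        "prefix": "".join(lines[:start]),
        "middle": "".join(lines[start:end]),
        "suffix": "".join(lines[end:]),
        "fim_type": "random_line_split",
    }


src = "a = 1\nif a:\n    pass\nelse:\n    a = 2\n"
rec = random_line_split(src, random.Random(0))
assert rec["prefix"] + rec["middle"] + rec["suffix"] == src
```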
payment.py
import json from collections import OrderedDict from django import forms from django.template.loader import get_template from django.utils.translation import ugettext_lazy as _ from pretix.base.payment import BasePaymentProvider class BankTransfer(BasePaymentProvider): identifier = 'banktransfer' verbose_name = _('Bank transfer') @property def settings_form_fields(self): return OrderedDict( list(super().settings_form_fields.items()) + [ ('bank_details', forms.CharField( widget=forms.Textarea, label=_('Bank account details'), )) ] ) def payment_form_render(self, request) -> str: template = get_template('pretixplugins/banktransfer/checkout_payment_form.html') ctx = {'request': request, 'event': self.event, 'settings': self.settings} return template.render(ctx) def checkout_prepare(self, request, total): return True def payment_is_valid_session(self, request): return True def checkout_confirm_render(self, request): form = self.payment_form(request) template = get_template('pretixplugins/banktransfer/checkout_payment_confirm.html') ctx = {'request': request, 'form': form, 'settings': self.settings} return template.render(ctx) def order_pending_mail_render(self, order) -> str: template = get_template('pretixplugins/banktransfer/email/order_pending.txt') ctx = {'event': self.event, 'order': order, 'settings': self.settings} return template.render(ctx) def order_pending_render(self, request, order) -> str: template = get_template('pretixplugins/banktransfer/pending.html') ctx = {'request': request, 'order': order, 'settings': self.settings} return template.render(ctx) def order_control_render(self, request, order) -> str: if order.payment_info: payment_info = json.loads(order.payment_info) else:
template = get_template('pretixplugins/banktransfer/control.html') ctx = {'request': request, 'event': self.event, 'settings': self.settings, 'payment_info': payment_info, 'order': order} return template.render(ctx)
payment_info = None
conditional_block
0110_add_default_contract_discount.py
# Generated by Django 2.2.14 on 2020-09-03 02:09 from django.db import migrations, models class Migration(migrations.Migration):
dependencies = [ ('enterprise', '0109_remove_use_enterprise_catalog_sample'), ] operations = [ migrations.AddField( model_name='enterprisecustomer', name='default_contract_discount', field=models.DecimalField(blank=True, decimal_places=5, help_text='Specifies the discount percent used for enrollments from the enrollment API where capturing the discount per order is not possible. This is passed to ecommerce when creating orders for financial data reporting.', max_digits=8, null=True), ), migrations.AddField( model_name='historicalenterprisecustomer', name='default_contract_discount', field=models.DecimalField(blank=True, decimal_places=5, help_text='Specifies the discount percent used for enrollments from the enrollment API where capturing the discount per order is not possible. This is passed to ecommerce when creating orders for financial data reporting.', max_digits=8, null=True), ), ]
identifier_body
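A note on the column this migration adds: `max_digits=8` with `decimal_places=5` leaves three digits before the decimal point, so the field can hold discount percents up to 999.99999. A quick illustration with Python's `decimal` module (the example values are mine, not from the migration):

```python
from decimal import Decimal

# max_digits=8, decimal_places=5 -> at most 3 integer digits
largest = Decimal("999.99999")
typical = Decimal("23.50000")  # e.g. a 23.5% contract discount

assert len(largest.as_tuple().digits) == 8
assert largest.as_tuple().exponent == -5
```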
0110_add_default_contract_discount.py
# Generated by Django 2.2.14 on 2020-09-03 02:09 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('enterprise', '0109_remove_use_enterprise_catalog_sample'), ] operations = [ migrations.AddField( model_name='enterprisecustomer', name='default_contract_discount',
migrations.AddField( model_name='historicalenterprisecustomer', name='default_contract_discount', field=models.DecimalField(blank=True, decimal_places=5, help_text='Specifies the discount percent used for enrollments from the enrollment API where capturing the discount per order is not possible. This is passed to ecommerce when creating orders for financial data reporting.', max_digits=8, null=True), ), ]
field=models.DecimalField(blank=True, decimal_places=5, help_text='Specifies the discount percent used for enrollments from the enrollment API where capturing the discount per order is not possible. This is passed to ecommerce when creating orders for financial data reporting.', max_digits=8, null=True), ),
random_line_split
0110_add_default_contract_discount.py
# Generated by Django 2.2.14 on 2020-09-03 02:09 from django.db import migrations, models class
(migrations.Migration): dependencies = [ ('enterprise', '0109_remove_use_enterprise_catalog_sample'), ] operations = [ migrations.AddField( model_name='enterprisecustomer', name='default_contract_discount', field=models.DecimalField(blank=True, decimal_places=5, help_text='Specifies the discount percent used for enrollments from the enrollment API where capturing the discount per order is not possible. This is passed to ecommerce when creating orders for financial data reporting.', max_digits=8, null=True), ), migrations.AddField( model_name='historicalenterprisecustomer', name='default_contract_discount', field=models.DecimalField(blank=True, decimal_places=5, help_text='Specifies the discount percent used for enrollments from the enrollment API where capturing the discount per order is not possible. This is passed to ecommerce when creating orders for financial data reporting.', max_digits=8, null=True), ), ]
Migration
identifier_name
send.py
# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA from __future__ import absolute_import import os import time from bzrlib import ( controldir, errors, osutils, registry, trace, ) from bzrlib.i18n import gettext from bzrlib.branch import ( Branch, ) from bzrlib.revision import ( NULL_REVISION, ) format_registry = registry.Registry() def
(target_branch, revision, public_branch, remember, format, no_bundle, no_patch, output, from_, mail_to, message, body, to_file, strict=None): possible_transports = [] tree, branch = controldir.ControlDir.open_containing_tree_or_branch( from_, possible_transports=possible_transports)[:2] # we may need to write data into branch's repository to calculate # the data to send. branch.lock_write() try: if output is None: config_stack = branch.get_config_stack() if mail_to is None: mail_to = config_stack.get('submit_to') mail_client = config_stack.get('mail_client')(config_stack) if (not getattr(mail_client, 'supports_body', False) and body is not None): raise errors.BzrCommandError(gettext( 'Mail client "%s" does not support specifying body') % mail_client.__class__.__name__) if remember and target_branch is None: raise errors.BzrCommandError(gettext( '--remember requires a branch to be specified.')) stored_target_branch = branch.get_submit_branch() remembered_target_branch = None if target_branch is None: target_branch = stored_target_branch remembered_target_branch = "submit" else: # Remembers if asked explicitly or no previous location is set if remember or ( remember is None and stored_target_branch is None): branch.set_submit_branch(target_branch) if target_branch is None: target_branch = branch.get_parent() remembered_target_branch = "parent" if target_branch is None: raise errors.BzrCommandError(gettext('No submit branch known or' ' specified')) if remembered_target_branch is not None: trace.note(gettext('Using saved {0} location "{1}" to determine ' 'what changes to submit.').format( remembered_target_branch, target_branch)) submit_branch = Branch.open(target_branch, possible_transports=possible_transports) possible_transports.append(submit_branch.bzrdir.root_transport) if mail_to is None or format is None: if mail_to is None: mail_to = submit_branch.get_config_stack().get( 'child_submit_to') if format is None: formatname = submit_branch.get_child_submit_format() try: format = format_registry.get(formatname) except KeyError: raise errors.BzrCommandError( gettext("No such send format '%s'.") % formatname) stored_public_branch = branch.get_public_branch() if public_branch is None: public_branch = stored_public_branch # Remembers if asked explicitly or no previous location is set elif (remember or (remember is None and stored_public_branch is None)): branch.set_public_branch(public_branch) if no_bundle and public_branch is None: raise errors.BzrCommandError(gettext('No public branch specified or' ' known')) base_revision_id = None revision_id = None if revision is not None: if len(revision) > 2: raise errors.BzrCommandError(gettext('bzr send takes ' 'at most two one revision identifiers')) revision_id = revision[-1].as_revision_id(branch) if len(revision) == 2: base_revision_id = revision[0].as_revision_id(branch) if revision_id is None: if tree is not None: tree.check_changed_or_out_of_date( strict, 'send_strict', more_error='Use --no-strict to force the send.', more_warning='Uncommitted changes will not be sent.') revision_id = branch.last_revision() if revision_id == NULL_REVISION: raise errors.BzrCommandError(gettext('No revisions to submit.')) if format is None: format = format_registry.get() directive = format(branch, revision_id, target_branch, public_branch, no_patch, no_bundle, message, base_revision_id, submit_branch) if output is None: directive.compose_merge_request(mail_client, mail_to, body, branch, tree) else: if directive.multiple_output_files: if output == '-': raise 
errors.BzrCommandError(gettext('- not supported for ' 'merge directives that use more than one output file.')) if not os.path.exists(output): os.mkdir(output, 0755) for (filename, lines) in directive.to_files(): path = os.path.join(output, filename) outfile = open(path, 'wb') try: outfile.writelines(lines) finally: outfile.close() else: if output == '-': outfile = to_file else: outfile = open(output, 'wb') try: outfile.writelines(directive.to_lines()) finally: if outfile is not to_file: outfile.close() finally: branch.unlock() def _send_4(branch, revision_id, target_branch, public_branch, no_patch, no_bundle, message, base_revision_id, local_target_branch=None): from bzrlib import merge_directive return merge_directive.MergeDirective2.from_objects( branch.repository, revision_id, time.time(), osutils.local_time_offset(), target_branch, public_branch=public_branch, include_patch=not no_patch, include_bundle=not no_bundle, message=message, base_revision_id=base_revision_id, local_target_branch=local_target_branch) def _send_0_9(branch, revision_id, submit_branch, public_branch, no_patch, no_bundle, message, base_revision_id, local_target_branch=None): if not no_bundle: if not no_patch: patch_type = 'bundle' else: raise errors.BzrCommandError(gettext('Format 0.9 does not' ' permit bundle with no patch')) else: if not no_patch: patch_type = 'diff' else: patch_type = None from bzrlib import merge_directive return merge_directive.MergeDirective.from_objects( branch.repository, revision_id, time.time(), osutils.local_time_offset(), submit_branch, public_branch=public_branch, patch_type=patch_type, message=message, local_target_branch=local_target_branch) format_registry.register('4', _send_4, 'Bundle format 4, Merge Directive 2 (default)') format_registry.register('0.9', _send_0_9, 'Bundle format 0.9, Merge Directive 1') format_registry.default_key = '4'
send
identifier_name
send.py
# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA from __future__ import absolute_import import os import time from bzrlib import ( controldir, errors, osutils, registry, trace, ) from bzrlib.i18n import gettext from bzrlib.branch import ( Branch, ) from bzrlib.revision import ( NULL_REVISION, ) format_registry = registry.Registry() def send(target_branch, revision, public_branch, remember, format, no_bundle, no_patch, output, from_, mail_to, message, body, to_file, strict=None): possible_transports = [] tree, branch = controldir.ControlDir.open_containing_tree_or_branch( from_, possible_transports=possible_transports)[:2] # we may need to write data into branch's repository to calculate # the data to send. branch.lock_write() try: if output is None: config_stack = branch.get_config_stack() if mail_to is None: mail_to = config_stack.get('submit_to') mail_client = config_stack.get('mail_client')(config_stack) if (not getattr(mail_client, 'supports_body', False) and body is not None): raise errors.BzrCommandError(gettext( 'Mail client "%s" does not support specifying body') % mail_client.__class__.__name__) if remember and target_branch is None: raise errors.BzrCommandError(gettext( '--remember requires a branch to be specified.')) stored_target_branch = branch.get_submit_branch() remembered_target_branch = None if target_branch is None: target_branch = stored_target_branch remembered_target_branch = "submit" else: # Remembers if asked explicitly or no previous location is set if remember or ( remember is None and stored_target_branch is None): branch.set_submit_branch(target_branch) if target_branch is None: target_branch = branch.get_parent() remembered_target_branch = "parent" if target_branch is None: raise errors.BzrCommandError(gettext('No submit branch known or' ' specified')) if remembered_target_branch is not None: trace.note(gettext('Using saved {0} location "{1}" to determine ' 'what changes to submit.').format( remembered_target_branch, target_branch)) submit_branch = Branch.open(target_branch, possible_transports=possible_transports) possible_transports.append(submit_branch.bzrdir.root_transport) if mail_to is None or format is None: if mail_to is None: mail_to = submit_branch.get_config_stack().get( 'child_submit_to') if format is None: formatname = submit_branch.get_child_submit_format() try: format = format_registry.get(formatname) except KeyError: raise errors.BzrCommandError( gettext("No such send format '%s'.") % formatname) stored_public_branch = branch.get_public_branch() if public_branch is None: public_branch = stored_public_branch # Remembers if asked explicitly or no previous location is set elif (remember or (remember is None and stored_public_branch is None)): branch.set_public_branch(public_branch) if no_bundle and public_branch is None: raise errors.BzrCommandError(gettext('No 
public branch specified or' ' known')) base_revision_id = None revision_id = None if revision is not None: if len(revision) > 2: raise errors.BzrCommandError(gettext('bzr send takes ' 'at most two one revision identifiers')) revision_id = revision[-1].as_revision_id(branch) if len(revision) == 2: base_revision_id = revision[0].as_revision_id(branch) if revision_id is None: if tree is not None: tree.check_changed_or_out_of_date( strict, 'send_strict', more_error='Use --no-strict to force the send.', more_warning='Uncommitted changes will not be sent.') revision_id = branch.last_revision() if revision_id == NULL_REVISION: raise errors.BzrCommandError(gettext('No revisions to submit.')) if format is None: format = format_registry.get() directive = format(branch, revision_id, target_branch, public_branch, no_patch, no_bundle, message, base_revision_id, submit_branch) if output is None: directive.compose_merge_request(mail_client, mail_to, body, branch, tree) else: if directive.multiple_output_files: if output == '-': raise errors.BzrCommandError(gettext('- not supported for ' 'merge directives that use more than one output file.')) if not os.path.exists(output): os.mkdir(output, 0755) for (filename, lines) in directive.to_files(): path = os.path.join(output, filename) outfile = open(path, 'wb') try: outfile.writelines(lines) finally: outfile.close() else: if output == '-': outfile = to_file else: outfile = open(output, 'wb') try: outfile.writelines(directive.to_lines()) finally: if outfile is not to_file: outfile.close() finally: branch.unlock() def _send_4(branch, revision_id, target_branch, public_branch, no_patch, no_bundle, message, base_revision_id, local_target_branch=None):
def _send_0_9(branch, revision_id, submit_branch, public_branch, no_patch, no_bundle, message, base_revision_id, local_target_branch=None): if not no_bundle: if not no_patch: patch_type = 'bundle' else: raise errors.BzrCommandError(gettext('Format 0.9 does not' ' permit bundle with no patch')) else: if not no_patch: patch_type = 'diff' else: patch_type = None from bzrlib import merge_directive return merge_directive.MergeDirective.from_objects( branch.repository, revision_id, time.time(), osutils.local_time_offset(), submit_branch, public_branch=public_branch, patch_type=patch_type, message=message, local_target_branch=local_target_branch) format_registry.register('4', _send_4, 'Bundle format 4, Merge Directive 2 (default)') format_registry.register('0.9', _send_0_9, 'Bundle format 0.9, Merge Directive 1') format_registry.default_key = '4'
from bzrlib import merge_directive return merge_directive.MergeDirective2.from_objects( branch.repository, revision_id, time.time(), osutils.local_time_offset(), target_branch, public_branch=public_branch, include_patch=not no_patch, include_bundle=not no_bundle, message=message, base_revision_id=base_revision_id, local_target_branch=local_target_branch)
identifier_body
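The suffix above shows bzrlib's pluggable send-format pattern: each format registers a factory in `format_registry` under a version key, and `send()` resolves the requested key (or the default) with `format_registry.get()`. A stripped-down sketch of the same pattern; the class is mine, and bzrlib's real `registry.Registry` offers more, such as lazy imports and help-text lookup:

```python
class Registry:
    """Minimal name -> factory registry with a default key."""

    def __init__(self):
        self._items = {}
        self.default_key = None

    def register(self, key, obj, help_text=""):
        self._items[key] = obj  # help_text ignored in this sketch

    def get(self, key=None):
        return self._items[self.default_key if key is None else key]


format_registry = Registry()
format_registry.register("4", lambda: "directive-v2", "Bundle format 4")
format_registry.register("0.9", lambda: "directive-v1", "Bundle format 0.9")
format_registry.default_key = "4"

assert format_registry.get()() == "directive-v2"       # default format
assert format_registry.get("0.9")() == "directive-v1"  # explicit format
```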
send.py
# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA from __future__ import absolute_import import os import time from bzrlib import ( controldir, errors, osutils, registry, trace, ) from bzrlib.i18n import gettext from bzrlib.branch import ( Branch, ) from bzrlib.revision import ( NULL_REVISION, ) format_registry = registry.Registry() def send(target_branch, revision, public_branch, remember, format, no_bundle, no_patch, output, from_, mail_to, message, body, to_file, strict=None): possible_transports = [] tree, branch = controldir.ControlDir.open_containing_tree_or_branch( from_, possible_transports=possible_transports)[:2] # we may need to write data into branch's repository to calculate # the data to send. branch.lock_write() try:
if (not getattr(mail_client, 'supports_body', False) and body is not None): raise errors.BzrCommandError(gettext( 'Mail client "%s" does not support specifying body') % mail_client.__class__.__name__) if remember and target_branch is None: raise errors.BzrCommandError(gettext( '--remember requires a branch to be specified.')) stored_target_branch = branch.get_submit_branch() remembered_target_branch = None if target_branch is None: target_branch = stored_target_branch remembered_target_branch = "submit" else: # Remembers if asked explicitly or no previous location is set if remember or ( remember is None and stored_target_branch is None): branch.set_submit_branch(target_branch) if target_branch is None: target_branch = branch.get_parent() remembered_target_branch = "parent" if target_branch is None: raise errors.BzrCommandError(gettext('No submit branch known or' ' specified')) if remembered_target_branch is not None: trace.note(gettext('Using saved {0} location "{1}" to determine ' 'what changes to submit.').format( remembered_target_branch, target_branch)) submit_branch = Branch.open(target_branch, possible_transports=possible_transports) possible_transports.append(submit_branch.bzrdir.root_transport) if mail_to is None or format is None: if mail_to is None: mail_to = submit_branch.get_config_stack().get( 'child_submit_to') if format is None: formatname = submit_branch.get_child_submit_format() try: format = format_registry.get(formatname) except KeyError: raise errors.BzrCommandError( gettext("No such send format '%s'.") % formatname) stored_public_branch = branch.get_public_branch() if public_branch is None: public_branch = stored_public_branch # Remembers if asked explicitly or no previous location is set elif (remember or (remember is None and stored_public_branch is None)): branch.set_public_branch(public_branch) if no_bundle and public_branch is None: raise errors.BzrCommandError(gettext('No public branch specified or' ' known')) base_revision_id = None revision_id = None if revision is not None: if len(revision) > 2: raise errors.BzrCommandError(gettext('bzr send takes ' 'at most two one revision identifiers')) revision_id = revision[-1].as_revision_id(branch) if len(revision) == 2: base_revision_id = revision[0].as_revision_id(branch) if revision_id is None: if tree is not None: tree.check_changed_or_out_of_date( strict, 'send_strict', more_error='Use --no-strict to force the send.', more_warning='Uncommitted changes will not be sent.') revision_id = branch.last_revision() if revision_id == NULL_REVISION: raise errors.BzrCommandError(gettext('No revisions to submit.')) if format is None: format = format_registry.get() directive = format(branch, revision_id, target_branch, public_branch, no_patch, no_bundle, message, base_revision_id, submit_branch) if output is None: directive.compose_merge_request(mail_client, mail_to, body, branch, tree) else: if directive.multiple_output_files: if output == '-': raise errors.BzrCommandError(gettext('- not supported for ' 'merge directives that use more than one output file.')) if not os.path.exists(output): os.mkdir(output, 0755) for (filename, lines) in directive.to_files(): path = os.path.join(output, filename) outfile = open(path, 'wb') try: outfile.writelines(lines) finally: outfile.close() else: if output == '-': outfile = to_file else: outfile = open(output, 'wb') try: outfile.writelines(directive.to_lines()) finally: if outfile is not to_file: outfile.close() finally: branch.unlock() def _send_4(branch, revision_id, target_branch, 
public_branch, no_patch, no_bundle, message, base_revision_id, local_target_branch=None): from bzrlib import merge_directive return merge_directive.MergeDirective2.from_objects( branch.repository, revision_id, time.time(), osutils.local_time_offset(), target_branch, public_branch=public_branch, include_patch=not no_patch, include_bundle=not no_bundle, message=message, base_revision_id=base_revision_id, local_target_branch=local_target_branch) def _send_0_9(branch, revision_id, submit_branch, public_branch, no_patch, no_bundle, message, base_revision_id, local_target_branch=None): if not no_bundle: if not no_patch: patch_type = 'bundle' else: raise errors.BzrCommandError(gettext('Format 0.9 does not' ' permit bundle with no patch')) else: if not no_patch: patch_type = 'diff' else: patch_type = None from bzrlib import merge_directive return merge_directive.MergeDirective.from_objects( branch.repository, revision_id, time.time(), osutils.local_time_offset(), submit_branch, public_branch=public_branch, patch_type=patch_type, message=message, local_target_branch=local_target_branch) format_registry.register('4', _send_4, 'Bundle format 4, Merge Directive 2 (default)') format_registry.register('0.9', _send_0_9, 'Bundle format 0.9, Merge Directive 1') format_registry.default_key = '4'
if output is None: config_stack = branch.get_config_stack() if mail_to is None: mail_to = config_stack.get('submit_to') mail_client = config_stack.get('mail_client')(config_stack)
random_line_split
send.py
# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA from __future__ import absolute_import import os import time from bzrlib import ( controldir, errors, osutils, registry, trace, ) from bzrlib.i18n import gettext from bzrlib.branch import ( Branch, ) from bzrlib.revision import ( NULL_REVISION, ) format_registry = registry.Registry() def send(target_branch, revision, public_branch, remember, format, no_bundle, no_patch, output, from_, mail_to, message, body, to_file, strict=None): possible_transports = [] tree, branch = controldir.ControlDir.open_containing_tree_or_branch( from_, possible_transports=possible_transports)[:2] # we may need to write data into branch's repository to calculate # the data to send. branch.lock_write() try: if output is None: config_stack = branch.get_config_stack() if mail_to is None: mail_to = config_stack.get('submit_to') mail_client = config_stack.get('mail_client')(config_stack) if (not getattr(mail_client, 'supports_body', False) and body is not None): raise errors.BzrCommandError(gettext( 'Mail client "%s" does not support specifying body') % mail_client.__class__.__name__) if remember and target_branch is None: raise errors.BzrCommandError(gettext( '--remember requires a branch to be specified.')) stored_target_branch = branch.get_submit_branch() remembered_target_branch = None if target_branch is None: target_branch = stored_target_branch remembered_target_branch = "submit" else: # Remembers if asked explicitly or no previous location is set if remember or ( remember is None and stored_target_branch is None): branch.set_submit_branch(target_branch) if target_branch is None: target_branch = branch.get_parent() remembered_target_branch = "parent" if target_branch is None: raise errors.BzrCommandError(gettext('No submit branch known or' ' specified')) if remembered_target_branch is not None: trace.note(gettext('Using saved {0} location "{1}" to determine ' 'what changes to submit.').format( remembered_target_branch, target_branch)) submit_branch = Branch.open(target_branch, possible_transports=possible_transports) possible_transports.append(submit_branch.bzrdir.root_transport) if mail_to is None or format is None: if mail_to is None: mail_to = submit_branch.get_config_stack().get( 'child_submit_to') if format is None: formatname = submit_branch.get_child_submit_format() try: format = format_registry.get(formatname) except KeyError: raise errors.BzrCommandError( gettext("No such send format '%s'.") % formatname) stored_public_branch = branch.get_public_branch() if public_branch is None: public_branch = stored_public_branch # Remembers if asked explicitly or no previous location is set elif (remember or (remember is None and stored_public_branch is None)): branch.set_public_branch(public_branch) if no_bundle and public_branch is None: raise errors.BzrCommandError(gettext('No 
public branch specified or' ' known')) base_revision_id = None revision_id = None if revision is not None: if len(revision) > 2: raise errors.BzrCommandError(gettext('bzr send takes ' 'at most two one revision identifiers')) revision_id = revision[-1].as_revision_id(branch) if len(revision) == 2: base_revision_id = revision[0].as_revision_id(branch) if revision_id is None: if tree is not None: tree.check_changed_or_out_of_date( strict, 'send_strict', more_error='Use --no-strict to force the send.', more_warning='Uncommitted changes will not be sent.') revision_id = branch.last_revision() if revision_id == NULL_REVISION: raise errors.BzrCommandError(gettext('No revisions to submit.')) if format is None: format = format_registry.get() directive = format(branch, revision_id, target_branch, public_branch, no_patch, no_bundle, message, base_revision_id, submit_branch) if output is None: directive.compose_merge_request(mail_client, mail_to, body, branch, tree) else: if directive.multiple_output_files: if output == '-': raise errors.BzrCommandError(gettext('- not supported for ' 'merge directives that use more than one output file.')) if not os.path.exists(output):
for (filename, lines) in directive.to_files(): path = os.path.join(output, filename) outfile = open(path, 'wb') try: outfile.writelines(lines) finally: outfile.close() else: if output == '-': outfile = to_file else: outfile = open(output, 'wb') try: outfile.writelines(directive.to_lines()) finally: if outfile is not to_file: outfile.close() finally: branch.unlock() def _send_4(branch, revision_id, target_branch, public_branch, no_patch, no_bundle, message, base_revision_id, local_target_branch=None): from bzrlib import merge_directive return merge_directive.MergeDirective2.from_objects( branch.repository, revision_id, time.time(), osutils.local_time_offset(), target_branch, public_branch=public_branch, include_patch=not no_patch, include_bundle=not no_bundle, message=message, base_revision_id=base_revision_id, local_target_branch=local_target_branch) def _send_0_9(branch, revision_id, submit_branch, public_branch, no_patch, no_bundle, message, base_revision_id, local_target_branch=None): if not no_bundle: if not no_patch: patch_type = 'bundle' else: raise errors.BzrCommandError(gettext('Format 0.9 does not' ' permit bundle with no patch')) else: if not no_patch: patch_type = 'diff' else: patch_type = None from bzrlib import merge_directive return merge_directive.MergeDirective.from_objects( branch.repository, revision_id, time.time(), osutils.local_time_offset(), submit_branch, public_branch=public_branch, patch_type=patch_type, message=message, local_target_branch=local_target_branch) format_registry.register('4', _send_4, 'Bundle format 4, Merge Directive 2 (default)') format_registry.register('0.9', _send_0_9, 'Bundle format 0.9, Merge Directive 1') format_registry.default_key = '4'
os.mkdir(output, 0755)
conditional_block
0005_auto_20160905_1853.py
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-09-05 16:53 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class
(migrations.Migration): dependencies = [ ('organization_network', '0004_organizationaudio_organizationblock_organizationimage_organizationlink_organizationvideo'), ] operations = [ migrations.CreateModel( name='UMR', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=512, verbose_name='name')), ('description', models.TextField(blank=True, verbose_name='description')), ], options={ 'verbose_name': 'UMR', }, ), migrations.RemoveField( model_name='person', name='permanent', ), migrations.RemoveField( model_name='personactivity', name='function', ), migrations.RemoveField( model_name='personactivity', name='rd_quota', ), migrations.AddField( model_name='personactivity', name='is_permanent', field=models.BooleanField(default=False, verbose_name='permanent'), ), migrations.AddField( model_name='personactivity', name='rd_quota_float', field=models.IntegerField(blank=True, null=True, verbose_name='R&D quota (float)'), ), migrations.AddField( model_name='personactivity', name='rd_quota_text', field=models.CharField(blank=True, max_length=128, null=True, verbose_name='R&D quota (text)'), ), migrations.AlterField( model_name='person', name='user', field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='user'), ), migrations.AddField( model_name='personactivity', name='umr', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='organization_network.UMR', verbose_name='training type'), ), ]
Migration
identifier_name
0005_auto_20160905_1853.py
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-09-05 16:53 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):
dependencies = [ ('organization_network', '0004_organizationaudio_organizationblock_organizationimage_organizationlink_organizationvideo'), ] operations = [ migrations.CreateModel( name='UMR', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=512, verbose_name='name')), ('description', models.TextField(blank=True, verbose_name='description')), ], options={ 'verbose_name': 'UMR', }, ), migrations.RemoveField( model_name='person', name='permanent', ), migrations.RemoveField( model_name='personactivity', name='function', ), migrations.RemoveField( model_name='personactivity', name='rd_quota', ), migrations.AddField( model_name='personactivity', name='is_permanent', field=models.BooleanField(default=False, verbose_name='permanent'), ), migrations.AddField( model_name='personactivity', name='rd_quota_float', field=models.IntegerField(blank=True, null=True, verbose_name='R&D quota (float)'), ), migrations.AddField( model_name='personactivity', name='rd_quota_text', field=models.CharField(blank=True, max_length=128, null=True, verbose_name='R&D quota (text)'), ), migrations.AlterField( model_name='person', name='user', field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='user'), ), migrations.AddField( model_name='personactivity', name='umr', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='organization_network.UMR', verbose_name='training type'), ), ]
identifier_body
0005_auto_20160905_1853.py
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-09-05 16:53 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('organization_network', '0004_organizationaudio_organizationblock_organizationimage_organizationlink_organizationvideo'), ] operations = [ migrations.CreateModel( name='UMR', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=512, verbose_name='name')), ('description', models.TextField(blank=True, verbose_name='description')), ], options={ 'verbose_name': 'UMR', }, ), migrations.RemoveField( model_name='person', name='permanent', ), migrations.RemoveField( model_name='personactivity', name='function', ), migrations.RemoveField( model_name='personactivity', name='rd_quota', ), migrations.AddField( model_name='personactivity', name='is_permanent', field=models.BooleanField(default=False, verbose_name='permanent'), ), migrations.AddField( model_name='personactivity', name='rd_quota_float', field=models.IntegerField(blank=True, null=True, verbose_name='R&D quota (float)'), ), migrations.AddField( model_name='personactivity', name='rd_quota_text',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='user'), ), migrations.AddField( model_name='personactivity', name='umr', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='organization_network.UMR', verbose_name='training type'), ), ]
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='R&D quota (text)'), ), migrations.AlterField( model_name='person', name='user',
random_line_split
index.story.js
import React from 'react'; import { header, tabs, tab, description, importExample, title, divider, example, playground, api, testkit, } from 'wix-storybook-utils/Sections'; import { storySettings } from '../test/storySettings'; import Star from 'wix-ui-icons-common/Star'; import * as examples from './examples'; import CounterBadge from '..'; export default { category: storySettings.category, storyName: 'CounterBadge', component: CounterBadge, componentPath: '..', componentProps: { size: 'small', children: 1, skin: 'general', showShadow: false, }, exampleProps: { children: [ { label: 'number', value: 1 }, { label: 'string', value: 'New!' }, { label: 'node', value: <Star /> }, ], }, sections: [ header({ component: <CounterBadge>1</CounterBadge>, }), tabs([ tab({ title: 'Description', sections: [ description({ title: 'Description', text: '`CounterBadge` gives you a quick preview to indicate more action is required.', }), importExample("import { CounterBadge } from 'wix-style-react';"), divider(), title('Examples'), example({ title: 'Number counter', text: 'The most common use of CounterBadge is with a number value truncated to 99. CounterBadge comes in two sizes `small` (default) and `medium`.', source: examples.numbers, }), example({ title: 'Skins', text: 'Background color can be one of the following: `general`, `danger`, `urgent`, `standard`, `warning`, `success` and `light`.', source: examples.skins, }), example({ title: 'Shadow', text: 'CounterBadge can add a shadow using `showShadow` prop', source: examples.shadow, }), example({ title: 'Custom node', text: 'CounterBadge can display a custom node, like an icon.', source: examples.custom, }), example({ title: 'Advanced', text: 'An example for a CounterBadge counting items in cart.', source: examples.advanced, }), ], }), ...[ { title: 'API', sections: [api()] },
], };
{ title: 'Testkit', sections: [testkit()] }, { title: 'Playground', sections: [playground()] }, ].map(tab), ]),
random_line_split
Test_db_BKTree_Compare.py
import unittest import time import pprint import logging import scanner.logSetup as logSetup import pyximport print("Have Cython") pyximport.install() import dbPhashApi class TestCompareDatabaseInterface(unittest.TestCase): def __init__(self, *args, **kwargs): logSetup.initLogging() super().__init__(*args, **kwargs)
def setUp(self): # We set up and tear down the tree a few times to validate the dropTree function self.log = logging.getLogger("Main.TestCompareDatabaseInterface") self.tree = dbPhashApi.PhashDbApi() self.tree.forceReload() def dist_check(self, distance, dbid, phash): qtime1 = time.time() have1 = self.tree.getWithinDistance_db(phash, distance=distance) qtime2 = time.time() qtime3 = time.time() have2 = self.tree.getIdsWithinDistance(phash, distance=distance) qtime4 = time.time() # print(dbid, have1) if have1 != have2: self.log.error("Mismatch!") for line in pprint.pformat(have1).split("\n"): self.log.error(line) for line in pprint.pformat(have2).split("\n"): self.log.error(line) self.assertTrue(dbid in have1) self.assertTrue(dbid in have2) self.assertEqual(have1, have2) self.log.info('Dist %s %s, %s', distance, qtime2-qtime1, qtime4-qtime3) def test_0(self): rand_r = self.tree.getRandomPhashRows(0.001) self.log.info("Have %s items to test with", len(rand_r)) stepno = 0 for dbid, phash in rand_r: self.dist_check(1, dbid, phash) self.dist_check(2, dbid, phash) self.dist_check(3, dbid, phash) self.dist_check(4, dbid, phash) self.dist_check(5, dbid, phash) self.dist_check(6, dbid, phash) self.dist_check(7, dbid, phash) self.dist_check(8, dbid, phash) stepno += 1 self.log.info("On step %s of %s", stepno, len(rand_r))
random_line_split
Test_db_BKTree_Compare.py
import unittest import time import pprint import logging import scanner.logSetup as logSetup import pyximport print("Have Cython") pyximport.install() import dbPhashApi class TestCompareDatabaseInterface(unittest.TestCase):
def __init__(self, *args, **kwargs): logSetup.initLogging() super().__init__(*args, **kwargs) def setUp(self): # We set up and tear down the tree a few times to validate the dropTree function self.log = logging.getLogger("Main.TestCompareDatabaseInterface") self.tree = dbPhashApi.PhashDbApi() self.tree.forceReload() def dist_check(self, distance, dbid, phash): qtime1 = time.time() have1 = self.tree.getWithinDistance_db(phash, distance=distance) qtime2 = time.time() qtime3 = time.time() have2 = self.tree.getIdsWithinDistance(phash, distance=distance) qtime4 = time.time() # print(dbid, have1) if have1 != have2: self.log.error("Mismatch!") for line in pprint.pformat(have1).split("\n"): self.log.error(line) for line in pprint.pformat(have2).split("\n"): self.log.error(line) self.assertTrue(dbid in have1) self.assertTrue(dbid in have2) self.assertEqual(have1, have2) self.log.info('Dist %s %s, %s', distance, qtime2-qtime1, qtime4-qtime3) def test_0(self): rand_r = self.tree.getRandomPhashRows(0.001) self.log.info("Have %s items to test with", len(rand_r)) stepno = 0 for dbid, phash in rand_r: self.dist_check(1, dbid, phash) self.dist_check(2, dbid, phash) self.dist_check(3, dbid, phash) self.dist_check(4, dbid, phash) self.dist_check(5, dbid, phash) self.dist_check(6, dbid, phash) self.dist_check(7, dbid, phash) self.dist_check(8, dbid, phash) stepno += 1 self.log.info("On step %s of %s", stepno, len(rand_r))
identifier_body
Test_db_BKTree_Compare.py
import unittest import time import pprint import logging import scanner.logSetup as logSetup import pyximport print("Have Cython") pyximport.install() import dbPhashApi class TestCompareDatabaseInterface(unittest.TestCase): def __init__(self, *args, **kwargs): logSetup.initLogging() super().__init__(*args, **kwargs) def setUp(self): # We set up and tear down the tree a few times to validate the dropTree function self.log = logging.getLogger("Main.TestCompareDatabaseInterface") self.tree = dbPhashApi.PhashDbApi() self.tree.forceReload() def dist_check(self, distance, dbid, phash): qtime1 = time.time() have1 = self.tree.getWithinDistance_db(phash, distance=distance) qtime2 = time.time() qtime3 = time.time() have2 = self.tree.getIdsWithinDistance(phash, distance=distance) qtime4 = time.time() # print(dbid, have1) if have1 != have2:
self.assertTrue(dbid in have1) self.assertTrue(dbid in have2) self.assertEqual(have1, have2) self.log.info('Dist %s %s, %s', distance, qtime2-qtime1, qtime4-qtime3) def test_0(self): rand_r = self.tree.getRandomPhashRows(0.001) self.log.info("Have %s items to test with", len(rand_r)) stepno = 0 for dbid, phash in rand_r: self.dist_check(1, dbid, phash) self.dist_check(2, dbid, phash) self.dist_check(3, dbid, phash) self.dist_check(4, dbid, phash) self.dist_check(5, dbid, phash) self.dist_check(6, dbid, phash) self.dist_check(7, dbid, phash) self.dist_check(8, dbid, phash) stepno += 1 self.log.info("On step %s of %s", stepno, len(rand_r))
self.log.error("Mismatch!") for line in pprint.pformat(have1).split("\n"): self.log.error(line) for line in pprint.pformat(have2).split("\n"): self.log.error(line)
conditional_block
Test_db_BKTree_Compare.py
import unittest import time import pprint import logging import scanner.logSetup as logSetup import pyximport print("Have Cython") pyximport.install() import dbPhashApi class TestCompareDatabaseInterface(unittest.TestCase): def __init__(self, *args, **kwargs): logSetup.initLogging() super().__init__(*args, **kwargs) def setUp(self): # We set up and tear down the tree a few times to validate the dropTree function self.log = logging.getLogger("Main.TestCompareDatabaseInterface") self.tree = dbPhashApi.PhashDbApi() self.tree.forceReload() def dist_check(self, distance, dbid, phash): qtime1 = time.time() have1 = self.tree.getWithinDistance_db(phash, distance=distance) qtime2 = time.time() qtime3 = time.time() have2 = self.tree.getIdsWithinDistance(phash, distance=distance) qtime4 = time.time() # print(dbid, have1) if have1 != have2: self.log.error("Mismatch!") for line in pprint.pformat(have1).split("\n"): self.log.error(line) for line in pprint.pformat(have2).split("\n"): self.log.error(line) self.assertTrue(dbid in have1) self.assertTrue(dbid in have2) self.assertEqual(have1, have2) self.log.info('Dist %s %s, %s', distance, qtime2-qtime1, qtime4-qtime3) def
(self): rand_r = self.tree.getRandomPhashRows(0.001) self.log.info("Have %s items to test with", len(rand_r)) stepno = 0 for dbid, phash in rand_r: self.dist_check(1, dbid, phash) self.dist_check(2, dbid, phash) self.dist_check(3, dbid, phash) self.dist_check(4, dbid, phash) self.dist_check(5, dbid, phash) self.dist_check(6, dbid, phash) self.dist_check(7, dbid, phash) self.dist_check(8, dbid, phash) stepno += 1 self.log.info("On step %s of %s", stepno, len(rand_r))
test_0
identifier_name
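The four Test_db_BKTree_Compare.py samples above all exercise dbPhashApi's lookups: a database-backed query (getWithinDistance_db) is checked against an in-memory BK-tree query (getIdsWithinDistance) over Hamming distance between perceptual hashes. As a rough sketch of how the in-memory side of such a comparison works, assuming integer phashes — the class and method names below are illustrative, not dbPhashApi's actual API:

def hamming(a, b):
    # Number of differing bits between two integer hashes.
    return bin(a ^ b).count("1")

class BKTree(object):
    def __init__(self):
        self.root = None  # nodes are (value, {edge_distance: child}) pairs

    def insert(self, value):
        if self.root is None:
            self.root = (value, {})
            return
        node = self.root
        while True:
            d = hamming(value, node[0])
            if d in node[1]:
                node = node[1][d]
            else:
                node[1][d] = (value, {})
                return

    def within(self, value, distance):
        # Collect every stored hash within `distance` of `value`. The
        # triangle inequality lets us skip children whose edge distance
        # falls outside [d - distance, d + distance], which is what makes
        # the tree cheaper than a linear scan.
        if self.root is None:
            return []
        found, stack = [], [self.root]
        while stack:
            val, children = stack.pop()
            d = hamming(value, val)
            if d <= distance:
                found.append(val)
            for k in range(d - distance, d + distance + 1):
                if k in children:
                    stack.append(children[k])
        return found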
calculator.py
#
# calculator.py : A calculator module for the deskbar applet.
#
# Copyright (C) 2008 by Johannes Buchner
# Copyright (C) 2007 by Michael Hofmann
# Copyright (C) 2006 by Callum McKenzie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
#    Callum McKenzie <[email protected]> - Original author
#    Michael Hofmann <[email protected]> - compatibility changes for deskbar 2.20
#    Johannes Buchner <[email protected]> - Made externally usable
#
# This version of calculator can be used with converter
# read how at http://twoday.tuwien.ac.at/jo/search?q=calculator+converter+deskbar
#

from __future__ import division

from deskbar.handlers.actions.CopyToClipboardAction import CopyToClipboardAction
from deskbar.defs import VERSION
from gettext import gettext as _

import deskbar.core.Utils
import deskbar.interfaces.Match
import deskbar.interfaces.Module

import logging
import math
import re

LOGGER = logging.getLogger(__name__)

HANDLERS = ["CalculatorModule"]

def bin (n):
    """A local binary equivalent of the hex and oct builtins."""
    if (n == 0):
        return "0b0"
    s = ""
    if (n < 0):
        while n != -1:
            s = str (n & 1) + s
            n >>= 1
        return "0b" + "...111" + s
    else:
        while n != 0:
            s = str (n & 1) + s
            n >>= 1
        return "0b" + s

# These next three make sure {hex, oct, bin} can handle floating point,
# by rounding. This makes sure things like hex(255/2) behave as a
# programmer would expect while allowing 255/2 to equal 127.5 for normal
# people. Abstracting out the body of these into a single function which
# takes hex, oct or bin as an argument seems to run into problems with
# those functions not being defined correctly in the restricted eval (?).

def lenient_hex (c):
    try:
        return hex (c)
    except TypeError:
        return hex (int (c))

def lenient_oct (c):
    try:
        return oct (c)
    except TypeError:
        return oct (int (c))

def lenient_bin (c):
    try:
        return bin (c)
    except TypeError:
        return bin (int (c))

class CalculatorAction (CopyToClipboardAction):
    def __init__ (self, text, answer):
        CopyToClipboardAction.__init__ (self, answer, answer)
        self.text = text

    def get_verb(self):
        return _("Copy <b>%(origtext)s = %(name)s</b> to clipboard")

    def
(self, text = None):
        """Because the text variable for history entries contains the text
        typed for the history search (and not the text of the original action),
        we store the original text separately."""
        result = CopyToClipboardAction.get_name (self, text)
        result["origtext"] = self.text
        return result

    def get_tooltip(self, text=None):
        return self._name

class CalculatorMatch (deskbar.interfaces.Match):
    def __init__ (self, text, answer, **kwargs):
        deskbar.interfaces.Match.__init__ (self, name = text,
                                           icon = "gtk-add",
                                           category = "calculator",
                                           **kwargs)
        self.answer = str (answer)
        self.add_action (CalculatorAction (text, self.answer))

    def get_hash (self):
        return self.answer

class CalculatorModule (deskbar.interfaces.Module):
    INFOS = {"icon": deskbar.core.Utils.load_icon ("gtk-add"),
             "name": _("Calculator"),
             "description": _("Calculate simple equations"),
             "version" : VERSION,
             "categories" : { "calculator" : { "name" : _("Calculator") }}}

    def __init__ (self):
        deskbar.interfaces.Module.__init__ (self)
        self.hexre = re.compile ("0[Xx][0-9a-fA-F_]*[0-9a-fA-F]")
        self.binre = re.compile ("0[bB][01_]*[01]")

    def _number_parser (self, match, base):
        """A generic number parser, regardless of base. It also ignores
        the '_' character so it can be used as a separator. Note how we
        skip the first two characters since we assume it is something like
        '0x' or '0b' and identifies the base."""
        table = { '0' : 0, '1' : 1, '2' : 2, '3' : 3, '4' : 4, '5' : 5,
                  '6' : 6, '7' : 7, '8' : 8, '9' : 9, 'a' : 10, 'b' : 11,
                  'c' : 12, 'd' : 13, 'e' : 14, 'f' : 15 }
        d = 0
        for c in match.group()[2:]:
            if c != "_":
                d = d * base + table[c]
        return str (d)

    def _binsub (self, match):
        """Because python doesn't handle binary literals, we parse it
        ourselves and replace it with a decimal representation."""
        return self._number_parser (match, 2)

    def _hexsub (self, match):
        """Parse the hex literal ourselves. We could let python do it,
        but since we have a generic parser we use that instead."""
        return self._number_parser (match, 16)

    def run_query (self, query):
        """We evaluate the equation by first replacing hex and binary
        literals with their decimal representation. (We need to check
        hex, so we can distinguish 0x10b1 as a hex number, not 0x1
        followed by 0b1.) We severely restrict the eval environment. Any
        errors are ignored."""
        restricted_dictionary = { "__builtins__" : None, "abs" : abs,
                                  "acos" : math.acos, "asin" : math.asin,
                                  "atan" : math.atan, "atan2" : math.atan2,
                                  "bin" : lenient_bin, "ceil" : math.ceil,
                                  "cos" : math.cos, "cosh" : math.cosh,
                                  "degrees" : math.degrees, "exp" : math.exp,
                                  "floor" : math.floor, "hex" : lenient_hex,
                                  "int" : int, "log" : math.log, "pow" : math.pow,
                                  "log10" : math.log10, "oct" : lenient_oct,
                                  "pi" : math.pi, "radians" : math.radians,
                                  "round": round, "sin" : math.sin,
                                  "sinh" : math.sinh, "sqrt" : math.sqrt,
                                  "tan" : math.tan, "tanh" : math.tanh}
        try:
            scrubbedquery = query.lower()
            scrubbedquery = self.hexre.sub (self._hexsub, scrubbedquery)
            scrubbedquery = self.binre.sub (self._binsub, scrubbedquery)
            for (c1, c2) in (("[", "("), ("{", "("), ("]", ")"), ("}", ")")):
                scrubbedquery = scrubbedquery.replace (c1, c2)
            answer = eval (scrubbedquery, restricted_dictionary)
            # Try and avoid echoing back simple numbers. Note that this
            # doesn't work well for floating point, e.g. '3.' behaves badly.
            if str (answer) == query:
                return None
            # We need this check because the eval can return function objects
            # when we are halfway through typing the expression.
if isinstance (answer, (float, int, long, str)): return answer else: return None except Exception, e: LOGGER.debug (str(e)) return None def query (self, query): answer = self.run_query(query) if answer != None: result = [CalculatorMatch (query, answer)] self._emit_query_ready (query, result) return answer else: return []
get_name
identifier_name
calculator.py
#
# calculator.py : A calculator module for the deskbar applet.
#
# Copyright (C) 2008 by Johannes Buchner
# Copyright (C) 2007 by Michael Hofmann
# Copyright (C) 2006 by Callum McKenzie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
#    Callum McKenzie <[email protected]> - Original author
#    Michael Hofmann <[email protected]> - compatibility changes for deskbar 2.20
#    Johannes Buchner <[email protected]> - Made externally usable
#
# This version of calculator can be used with converter
# read how at http://twoday.tuwien.ac.at/jo/search?q=calculator+converter+deskbar
#

from __future__ import division

from deskbar.handlers.actions.CopyToClipboardAction import CopyToClipboardAction
from deskbar.defs import VERSION
from gettext import gettext as _

import deskbar.core.Utils
import deskbar.interfaces.Match
import deskbar.interfaces.Module

import logging
import math
import re

LOGGER = logging.getLogger(__name__)

HANDLERS = ["CalculatorModule"]

def bin (n):
    """A local binary equivalent of the hex and oct builtins."""
    if (n == 0):
        return "0b0"
    s = ""
    if (n < 0):
        while n != -1:
            s = str (n & 1) + s
            n >>= 1
        return "0b" + "...111" + s
    else:
        while n != 0:
            s = str (n & 1) + s
            n >>= 1
        return "0b" + s

# These next three make sure {hex, oct, bin} can handle floating point,
# by rounding. This makes sure things like hex(255/2) behave as a
# programmer would expect while allowing 255/2 to equal 127.5 for normal
# people. Abstracting out the body of these into a single function which
# takes hex, oct or bin as an argument seems to run into problems with
# those functions not being defined correctly in the restricted eval (?).

def lenient_hex (c):
    except TypeError:
        return hex (int (c))

def lenient_oct (c):
    try:
        return oct (c)
    except TypeError:
        return oct (int (c))

def lenient_bin (c):
    try:
        return bin (c)
    except TypeError:
        return bin (int (c))

class CalculatorAction (CopyToClipboardAction):
    def __init__ (self, text, answer):
        CopyToClipboardAction.__init__ (self, answer, answer)
        self.text = text

    def get_verb(self):
        return _("Copy <b>%(origtext)s = %(name)s</b> to clipboard")

    def get_name(self, text = None):
        """Because the text variable for history entries contains the text
        typed for the history search (and not the text of the original action),
        we store the original text separately."""
        result = CopyToClipboardAction.get_name (self, text)
        result["origtext"] = self.text
        return result

    def get_tooltip(self, text=None):
        return self._name

class CalculatorMatch (deskbar.interfaces.Match):
    def __init__ (self, text, answer, **kwargs):
        deskbar.interfaces.Match.__init__ (self, name = text,
                                           icon = "gtk-add",
                                           category = "calculator",
                                           **kwargs)
        self.answer = str (answer)
        self.add_action (CalculatorAction (text, self.answer))

    def get_hash (self):
        return self.answer

class CalculatorModule (deskbar.interfaces.Module):
    INFOS = {"icon": deskbar.core.Utils.load_icon ("gtk-add"),
             "name": _("Calculator"),
             "description": _("Calculate simple equations"),
             "version" : VERSION,
             "categories" : { "calculator" : { "name" : _("Calculator") }}}

    def __init__ (self):
        deskbar.interfaces.Module.__init__ (self)
        self.hexre = re.compile ("0[Xx][0-9a-fA-F_]*[0-9a-fA-F]")
        self.binre = re.compile ("0[bB][01_]*[01]")

    def _number_parser (self, match, base):
        """A generic number parser, regardless of base. It also ignores
        the '_' character so it can be used as a separator. Note how we
        skip the first two characters since we assume it is something like
        '0x' or '0b' and identifies the base."""
        table = { '0' : 0, '1' : 1, '2' : 2, '3' : 3, '4' : 4, '5' : 5,
                  '6' : 6, '7' : 7, '8' : 8, '9' : 9, 'a' : 10, 'b' : 11,
                  'c' : 12, 'd' : 13, 'e' : 14, 'f' : 15 }
        d = 0
        for c in match.group()[2:]:
            if c != "_":
                d = d * base + table[c]
        return str (d)

    def _binsub (self, match):
        """Because python doesn't handle binary literals, we parse it
        ourselves and replace it with a decimal representation."""
        return self._number_parser (match, 2)

    def _hexsub (self, match):
        """Parse the hex literal ourselves. We could let python do it,
        but since we have a generic parser we use that instead."""
        return self._number_parser (match, 16)

    def run_query (self, query):
        """We evaluate the equation by first replacing hex and binary
        literals with their decimal representation. (We need to check
        hex, so we can distinguish 0x10b1 as a hex number, not 0x1
        followed by 0b1.) We severely restrict the eval environment.
Any errors are ignored.""" restricted_dictionary = { "__builtins__" : None, "abs" : abs, "acos" : math.acos, "asin" : math.asin, "atan" : math.atan, "atan2" : math.atan2, "bin" : lenient_bin,"ceil" : math.ceil, "cos" : math.cos, "cosh" : math.cosh, "degrees" : math.degrees, "exp" : math.exp, "floor" : math.floor, "hex" : lenient_hex, "int" : int, "log" : math.log, "pow" : math.pow, "log10" : math.log10, "oct" : lenient_oct, "pi" : math.pi, "radians" : math.radians, "round": round, "sin" : math.sin, "sinh" : math.sinh, "sqrt" : math.sqrt, "tan" : math.tan, "tanh" : math.tanh} try: scrubbedquery = query.lower() scrubbedquery = self.hexre.sub (self._hexsub, scrubbedquery) scrubbedquery = self.binre.sub (self._binsub, scrubbedquery) for (c1, c2) in (("[", "("), ("{", "("), ("]", ")"), ("}", ")")): scrubbedquery = scrubbedquery.replace (c1, c2) answer = eval (scrubbedquery, restricted_dictionary) # Try and avoid echoing back simple numbers. Note that this # doesn't work well for floating point, e.g. '3.' behaves badly. if str (answer) == query: return None # We need this check because the eval can return function objects # when we are halfway through typing the expression. if isinstance (answer, (float, int, long, str)): return answer else: return None except Exception, e: LOGGER.debug (str(e)) return None def query (self, query): answer = self.run_query(query) if answer != None: result = [CalculatorMatch (query, answer)] self._emit_query_ready (query, result) return answer else: return []
try: return hex (c)
random_line_split
calculator.py
#
# calculator.py : A calculator module for the deskbar applet.
#
# Copyright (C) 2008 by Johannes Buchner
# Copyright (C) 2007 by Michael Hofmann
# Copyright (C) 2006 by Callum McKenzie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
#    Callum McKenzie <[email protected]> - Original author
#    Michael Hofmann <[email protected]> - compatibility changes for deskbar 2.20
#    Johannes Buchner <[email protected]> - Made externally usable
#
# This version of calculator can be used with converter
# read how at http://twoday.tuwien.ac.at/jo/search?q=calculator+converter+deskbar
#

from __future__ import division

from deskbar.handlers.actions.CopyToClipboardAction import CopyToClipboardAction
from deskbar.defs import VERSION
from gettext import gettext as _

import deskbar.core.Utils
import deskbar.interfaces.Match
import deskbar.interfaces.Module

import logging
import math
import re

LOGGER = logging.getLogger(__name__)

HANDLERS = ["CalculatorModule"]

def bin (n):
    """A local binary equivalent of the hex and oct builtins."""
    if (n == 0):
        return "0b0"
    s = ""
    if (n < 0):
        while n != -1:
            s = str (n & 1) + s
            n >>= 1
        return "0b" + "...111" + s
    else:
        while n != 0:
            s = str (n & 1) + s
            n >>= 1
        return "0b" + s

# These next three make sure {hex, oct, bin} can handle floating point,
# by rounding. This makes sure things like hex(255/2) behave as a
# programmer would expect while allowing 255/2 to equal 127.5 for normal
# people. Abstracting out the body of these into a single function which
# takes hex, oct or bin as an argument seems to run into problems with
# those functions not being defined correctly in the restricted eval (?).

def lenient_hex (c):
    try:
        return hex (c)
    except TypeError:
        return hex (int (c))

def lenient_oct (c):
    try:
        return oct (c)
    except TypeError:
        return oct (int (c))

def lenient_bin (c):
    try:
        return bin (c)
    except TypeError:
        return bin (int (c))

class CalculatorAction (CopyToClipboardAction):
    def __init__ (self, text, answer):
        CopyToClipboardAction.__init__ (self, answer, answer)
        self.text = text

    def get_verb(self):
        return _("Copy <b>%(origtext)s = %(name)s</b> to clipboard")

    def get_name(self, text = None):
        """Because the text variable for history entries contains the text
        typed for the history search (and not the text of the original action),
        we store the original text separately."""
        result = CopyToClipboardAction.get_name (self, text)
        result["origtext"] = self.text
        return result

    def get_tooltip(self, text=None):
        return self._name

class CalculatorMatch (deskbar.interfaces.Match):
    def __init__ (self, text, answer, **kwargs):
        deskbar.interfaces.Match.__init__ (self, name = text,
                                           icon = "gtk-add",
                                           category = "calculator",
                                           **kwargs)
        self.answer = str (answer)
        self.add_action (CalculatorAction (text, self.answer))

    def get_hash (self):
        return self.answer

class CalculatorModule (deskbar.interfaces.Module):
    INFOS = {"icon": deskbar.core.Utils.load_icon ("gtk-add"),
             "name": _("Calculator"),
             "description": _("Calculate simple equations"),
             "version" : VERSION,
             "categories" : { "calculator" : { "name" : _("Calculator") }}}

    def __init__ (self):
        deskbar.interfaces.Module.__init__ (self)
        self.hexre = re.compile ("0[Xx][0-9a-fA-F_]*[0-9a-fA-F]")
        self.binre = re.compile ("0[bB][01_]*[01]")

    def _number_parser (self, match, base):
        """A generic number parser, regardless of base. It also ignores
        the '_' character so it can be used as a separator. Note how we
        skip the first two characters since we assume it is something like
        '0x' or '0b' and identifies the base."""
        table = { '0' : 0, '1' : 1, '2' : 2, '3' : 3, '4' : 4, '5' : 5,
                  '6' : 6, '7' : 7, '8' : 8, '9' : 9, 'a' : 10, 'b' : 11,
                  'c' : 12, 'd' : 13, 'e' : 14, 'f' : 15 }
        d = 0
        for c in match.group()[2:]:
            if c != "_":
                d = d * base + table[c]
        return str (d)

    def _binsub (self, match):
def _hexsub (self, match): """Parse the hex literal ourselves. We could let python do it, but since we have a generic parser we use that instead.""" return self._number_parser (match, 16) def run_query (self, query): """We evaluate the equation by first replacing hex and binary literals with their decimal representation. (We need to check hex, so we can distinguish 0x10b1 as a hex number, not 0x1 followed by 0b1.) We severely restrict the eval environment. Any errors are ignored.""" restricted_dictionary = { "__builtins__" : None, "abs" : abs, "acos" : math.acos, "asin" : math.asin, "atan" : math.atan, "atan2" : math.atan2, "bin" : lenient_bin,"ceil" : math.ceil, "cos" : math.cos, "cosh" : math.cosh, "degrees" : math.degrees, "exp" : math.exp, "floor" : math.floor, "hex" : lenient_hex, "int" : int, "log" : math.log, "pow" : math.pow, "log10" : math.log10, "oct" : lenient_oct, "pi" : math.pi, "radians" : math.radians, "round": round, "sin" : math.sin, "sinh" : math.sinh, "sqrt" : math.sqrt, "tan" : math.tan, "tanh" : math.tanh} try: scrubbedquery = query.lower() scrubbedquery = self.hexre.sub (self._hexsub, scrubbedquery) scrubbedquery = self.binre.sub (self._binsub, scrubbedquery) for (c1, c2) in (("[", "("), ("{", "("), ("]", ")"), ("}", ")")): scrubbedquery = scrubbedquery.replace (c1, c2) answer = eval (scrubbedquery, restricted_dictionary) # Try and avoid echoing back simple numbers. Note that this # doesn't work well for floating point, e.g. '3.' behaves badly. if str (answer) == query: return None # We need this check because the eval can return function objects # when we are halfway through typing the expression. if isinstance (answer, (float, int, long, str)): return answer else: return None except Exception, e: LOGGER.debug (str(e)) return None def query (self, query): answer = self.run_query(query) if answer != None: result = [CalculatorMatch (query, answer)] self._emit_query_ready (query, result) return answer else: return []
"""Because python doesn't handle binary literals, we parse it ourselves and replace it with a decimal representation.""" return self._number_parser (match, 2)
identifier_body
calculator.py
#
# calculator.py : A calculator module for the deskbar applet.
#
# Copyright (C) 2008 by Johannes Buchner
# Copyright (C) 2007 by Michael Hofmann
# Copyright (C) 2006 by Callum McKenzie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
#    Callum McKenzie <[email protected]> - Original author
#    Michael Hofmann <[email protected]> - compatibility changes for deskbar 2.20
#    Johannes Buchner <[email protected]> - Made externally usable
#
# This version of calculator can be used with converter
# read how at http://twoday.tuwien.ac.at/jo/search?q=calculator+converter+deskbar
#

from __future__ import division

from deskbar.handlers.actions.CopyToClipboardAction import CopyToClipboardAction
from deskbar.defs import VERSION
from gettext import gettext as _

import deskbar.core.Utils
import deskbar.interfaces.Match
import deskbar.interfaces.Module

import logging
import math
import re

LOGGER = logging.getLogger(__name__)

HANDLERS = ["CalculatorModule"]

def bin (n):
    """A local binary equivalent of the hex and oct builtins."""
    if (n == 0):
        return "0b0"
    s = ""
    if (n < 0):
        while n != -1:
            s = str (n & 1) + s
            n >>= 1
        return "0b" + "...111" + s
    else:
        while n != 0:
            s = str (n & 1) + s
            n >>= 1
        return "0b" + s

# These next three make sure {hex, oct, bin} can handle floating point,
# by rounding. This makes sure things like hex(255/2) behave as a
# programmer would expect while allowing 255/2 to equal 127.5 for normal
# people. Abstracting out the body of these into a single function which
# takes hex, oct or bin as an argument seems to run into problems with
# those functions not being defined correctly in the restricted eval (?).

def lenient_hex (c):
    try:
        return hex (c)
    except TypeError:
        return hex (int (c))

def lenient_oct (c):
    try:
        return oct (c)
    except TypeError:
        return oct (int (c))

def lenient_bin (c):
    try:
        return bin (c)
    except TypeError:
        return bin (int (c))

class CalculatorAction (CopyToClipboardAction):
    def __init__ (self, text, answer):
        CopyToClipboardAction.__init__ (self, answer, answer)
        self.text = text

    def get_verb(self):
        return _("Copy <b>%(origtext)s = %(name)s</b> to clipboard")

    def get_name(self, text = None):
        """Because the text variable for history entries contains the text
        typed for the history search (and not the text of the original action),
        we store the original text separately."""
        result = CopyToClipboardAction.get_name (self, text)
        result["origtext"] = self.text
        return result

    def get_tooltip(self, text=None):
        return self._name

class CalculatorMatch (deskbar.interfaces.Match):
    def __init__ (self, text, answer, **kwargs):
        deskbar.interfaces.Match.__init__ (self, name = text,
                                           icon = "gtk-add",
                                           category = "calculator",
                                           **kwargs)
        self.answer = str (answer)
        self.add_action (CalculatorAction (text, self.answer))

    def get_hash (self):
        return self.answer

class CalculatorModule (deskbar.interfaces.Module):
    INFOS = {"icon": deskbar.core.Utils.load_icon ("gtk-add"),
             "name": _("Calculator"),
             "description": _("Calculate simple equations"),
             "version" : VERSION,
             "categories" : { "calculator" : { "name" : _("Calculator") }}}

    def __init__ (self):
        deskbar.interfaces.Module.__init__ (self)
        self.hexre = re.compile ("0[Xx][0-9a-fA-F_]*[0-9a-fA-F]")
        self.binre = re.compile ("0[bB][01_]*[01]")

    def _number_parser (self, match, base):
        """A generic number parser, regardless of base. It also ignores
        the '_' character so it can be used as a separator. Note how we
        skip the first two characters since we assume it is something like
        '0x' or '0b' and identifies the base."""
        table = { '0' : 0, '1' : 1, '2' : 2, '3' : 3, '4' : 4, '5' : 5,
                  '6' : 6, '7' : 7, '8' : 8, '9' : 9, 'a' : 10, 'b' : 11,
                  'c' : 12, 'd' : 13, 'e' : 14, 'f' : 15 }
        d = 0
        for c in match.group()[2:]:
            if c != "_":
                d = d * base + table[c]
        return str (d)

    def _binsub (self, match):
        """Because python doesn't handle binary literals, we parse it
        ourselves and replace it with a decimal representation."""
        return self._number_parser (match, 2)

    def _hexsub (self, match):
        """Parse the hex literal ourselves. We could let python do it,
        but since we have a generic parser we use that instead."""
        return self._number_parser (match, 16)

    def run_query (self, query):
        """We evaluate the equation by first replacing hex and binary
        literals with their decimal representation. (We need to check
        hex, so we can distinguish 0x10b1 as a hex number, not 0x1
        followed by 0b1.) We severely restrict the eval environment.
Any errors are ignored.""" restricted_dictionary = { "__builtins__" : None, "abs" : abs, "acos" : math.acos, "asin" : math.asin, "atan" : math.atan, "atan2" : math.atan2, "bin" : lenient_bin,"ceil" : math.ceil, "cos" : math.cos, "cosh" : math.cosh, "degrees" : math.degrees, "exp" : math.exp, "floor" : math.floor, "hex" : lenient_hex, "int" : int, "log" : math.log, "pow" : math.pow, "log10" : math.log10, "oct" : lenient_oct, "pi" : math.pi, "radians" : math.radians, "round": round, "sin" : math.sin, "sinh" : math.sinh, "sqrt" : math.sqrt, "tan" : math.tan, "tanh" : math.tanh} try: scrubbedquery = query.lower() scrubbedquery = self.hexre.sub (self._hexsub, scrubbedquery) scrubbedquery = self.binre.sub (self._binsub, scrubbedquery) for (c1, c2) in (("[", "("), ("{", "("), ("]", ")"), ("}", ")")): scrubbedquery = scrubbedquery.replace (c1, c2) answer = eval (scrubbedquery, restricted_dictionary) # Try and avoid echoing back simple numbers. Note that this # doesn't work well for floating point, e.g. '3.' behaves badly. if str (answer) == query:
# We need this check because the eval can return function objects # when we are halfway through typing the expression. if isinstance (answer, (float, int, long, str)): return answer else: return None except Exception, e: LOGGER.debug (str(e)) return None def query (self, query): answer = self.run_query(query) if answer != None: result = [CalculatorMatch (query, answer)] self._emit_query_ready (query, result) return answer else: return []
return None
conditional_block
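All four calculator.py samples above center on the same technique: eval is called with a globals dictionary whose __builtins__ entry is None, so only the whitelisted math names are reachable from the expression. A stripped-down sketch of that pattern — the names below are illustrative, not the module's own:

import math

SAFE_NAMES = {"__builtins__": None,
              "abs": abs, "sqrt": math.sqrt, "pi": math.pi}

def safe_eval(expr):
    # Only names present in SAFE_NAMES resolve inside the expression;
    # anything else raises NameError, and the builtins are unreachable.
    return eval(expr, SAFE_NAMES)

print(safe_eval("sqrt(2) * pi"))   # -> 4.442882938158366
# safe_eval("__import__('os')")    # raises: builtins are blanked out

Worth noting: blanking __builtins__ blocks casual misuse but is not a real sandbox; run_query accordingly pairs it with regex scrubbing of the input and swallows all evaluation errors.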
models.py
# Define a custom User class to work with django-social-auth from django.db import models from django.contrib.auth.models import User class Task(models.Model): name = models.CharField(max_length=200) owner = models.ForeignKey(User) finished = models.BooleanField(default=False) shared = models.BooleanField(default=False) class Viewer(models.Model): name = models.ForeignKey(User) tasks = models.ForeignKey(Task) class Friends(models.Model): created = models.DateTimeField(auto_now_add=True, editable=False) creator = models.ForeignKey(User, related_name="friendship_creator_set") friend = models.ForeignKey(User, related_name="friend_set") class CustomUserManager(models.Manager): def create_user(self, username, email):
class CustomUser(models.Model): username = models.CharField(max_length=128) last_login = models.DateTimeField(blank=True, null=True) objects = CustomUserManager() def is_authenticated(self): return True
return self.model._default_manager.create(username=username)
identifier_body
models.py
# Define a custom User class to work with django-social-auth from django.db import models from django.contrib.auth.models import User class Task(models.Model): name = models.CharField(max_length=200) owner = models.ForeignKey(User) finished = models.BooleanField(default=False)
shared = models.BooleanField(default=False) class Viewer(models.Model): name = models.ForeignKey(User) tasks = models.ForeignKey(Task) class Friends(models.Model): created = models.DateTimeField(auto_now_add=True, editable=False) creator = models.ForeignKey(User, related_name="friendship_creator_set") friend = models.ForeignKey(User, related_name="friend_set") class CustomUserManager(models.Manager): def create_user(self, username, email): return self.model._default_manager.create(username=username) class CustomUser(models.Model): username = models.CharField(max_length=128) last_login = models.DateTimeField(blank=True, null=True) objects = CustomUserManager() def is_authenticated(self): return True
random_line_split
models.py
# Define a custom User class to work with django-social-auth from django.db import models from django.contrib.auth.models import User class Task(models.Model): name = models.CharField(max_length=200) owner = models.ForeignKey(User) finished = models.BooleanField(default=False) shared = models.BooleanField(default=False) class Viewer(models.Model): name = models.ForeignKey(User) tasks = models.ForeignKey(Task) class Friends(models.Model): created = models.DateTimeField(auto_now_add=True, editable=False) creator = models.ForeignKey(User, related_name="friendship_creator_set") friend = models.ForeignKey(User, related_name="friend_set") class
(models.Manager): def create_user(self, username, email): return self.model._default_manager.create(username=username) class CustomUser(models.Model): username = models.CharField(max_length=128) last_login = models.DateTimeField(blank=True, null=True) objects = CustomUserManager() def is_authenticated(self): return True
CustomUserManager
identifier_name
plugin.js
/* global editor */ /* global caselaw */ /* global _ */ /* global CKEDITOR */ /* global $ */ /* global moment */ /* global console */ (function() { "use strict"; var pluginName = 'basic'; CKEDITOR.plugins.add(pluginName, { init: function(editor) { var formats = [{ label: "Bold", command: "bold", element: "strong" }, { label: "Italic", command: "italic", element: "em" }, { label: "Underline", command: "underline", element: "u" }, { label: "Subscript", command: "subscript", element: "sub" }, { label: "Superscript", command: "superscript", element: "sup" }, { label: "Strikethrough", command: "strike", element: "s" }]; if (typeof editor.config.displayButtons !== "undefined") { $("#tools button").hide(); _.each(editor.config.displayButtons.split(","), function(button){ $("[rel='" + button.trim() + "']").show(); }); } for (var i = 0; i < formats.length; i++) { var format = formats[i]; var style = new CKEDITOR.style({ element: format.element, attributes: format.attributes }); var stateChange = function(format) {
this.style = style; this.allowedContent = style; this.requiredContent = style; this.contextSensitive = true; }; styleCommand.prototype = { exec: function(editor) { editor.focus(); if (this.state === CKEDITOR.TRISTATE_OFF) { editor.applyStyle(this.style); } else if (this.state === CKEDITOR.TRISTATE_ON) { editor.removeStyle(this.style); } if (format.command !== 'fakecommand') { editor.execCommand('fakecommand'); editor.execCommand('fakecommand'); } /* hack to change button state properly */ // force the save button activation and dirty state set to true editor.fire('change'); }, refresh: function(editor,path) { this.setState( path && this.style.checkApplicable(path) ? (this.style.checkActive(path)?CKEDITOR.TRISTATE_ON:CKEDITOR.TRISTATE_OFF):CKEDITOR.TRISTATE_DISABLED ); } }; editor.addCommand( format.command, new styleCommand(style) //jshint ignore:line ); var f = format; return function () { editor.attachStyleStateChange(style, function(state) { if (!editor.readOnly) { editor.getCommand(f.command).setState(state); } try { switch (editor.getCommand(f.command).state) { case CKEDITOR.TRISTATE_DISABLED: // $("[rel='" + editor.getCommand(f.command).name + "']").addClass("disabled"); break; case CKEDITOR.TRISTATE_OFF: $("[rel='" + editor.getCommand(f.command).name + "']").removeClass("disabled"); $("[rel='" + editor.getCommand(f.command).name + "']").removeClass("active"); break; default: $("[rel='" + editor.getCommand(f.command).name + "']").addClass("active"); break; } } catch(e) { console.log(editor.getCommand(f.command).name); console.log(e); } }); }; }; stateChange(format)(); } } }); })();
var styleCommand = function(style) {
random_line_split
plugin.js
/* global editor */ /* global caselaw */ /* global _ */ /* global CKEDITOR */ /* global $ */ /* global moment */ /* global console */ (function() { "use strict"; var pluginName = 'basic'; CKEDITOR.plugins.add(pluginName, { init: function(editor) { var formats = [{ label: "Bold", command: "bold", element: "strong" }, { label: "Italic", command: "italic", element: "em" }, { label: "Underline", command: "underline", element: "u" }, { label: "Subscript", command: "subscript", element: "sub" }, { label: "Superscript", command: "superscript", element: "sup" }, { label: "Strikethrough", command: "strike", element: "s" }]; if (typeof editor.config.displayButtons !== "undefined") { $("#tools button").hide(); _.each(editor.config.displayButtons.split(","), function(button){ $("[rel='" + button.trim() + "']").show(); }); } for (var i = 0; i < formats.length; i++) { var format = formats[i]; var style = new CKEDITOR.style({ element: format.element, attributes: format.attributes }); var stateChange = function(format) { var styleCommand = function(style) { this.style = style; this.allowedContent = style; this.requiredContent = style; this.contextSensitive = true; }; styleCommand.prototype = { exec: function(editor) { editor.focus(); if (this.state === CKEDITOR.TRISTATE_OFF) { editor.applyStyle(this.style); } else if (this.state === CKEDITOR.TRISTATE_ON) { editor.removeStyle(this.style); } if (format.command !== 'fakecommand') { editor.execCommand('fakecommand'); editor.execCommand('fakecommand'); } /* hack to change button state properly */ // force the save button activation and dirty state set to true editor.fire('change'); }, refresh: function(editor,path) { this.setState( path && this.style.checkApplicable(path) ? (this.style.checkActive(path)?CKEDITOR.TRISTATE_ON:CKEDITOR.TRISTATE_OFF):CKEDITOR.TRISTATE_DISABLED ); } }; editor.addCommand( format.command, new styleCommand(style) //jshint ignore:line ); var f = format; return function () { editor.attachStyleStateChange(style, function(state) { if (!editor.readOnly)
try { switch (editor.getCommand(f.command).state) { case CKEDITOR.TRISTATE_DISABLED: // $("[rel='" + editor.getCommand(f.command).name + "']").addClass("disabled"); break; case CKEDITOR.TRISTATE_OFF: $("[rel='" + editor.getCommand(f.command).name + "']").removeClass("disabled"); $("[rel='" + editor.getCommand(f.command).name + "']").removeClass("active"); break; default: $("[rel='" + editor.getCommand(f.command).name + "']").addClass("active"); break; } } catch(e) { console.log(editor.getCommand(f.command).name); console.log(e); } }); }; }; stateChange(format)(); } } }); })();
{ editor.getCommand(f.command).setState(state); }
conditional_block
inventory.py
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# |             ____ _               _        __  __ _  __           |
# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
# |                                                                  |
# | Copyright Mathias Kettner 2014             [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.

def paint_icon_inventory(what, row, tags, customer_vars):
    if (what == "host" or row.get("service_check_command","").startswith("check_mk_active-cmk_inv!")) \
        and inventory.has_inventory(row["host_name"]):
multisite_icons.append({ 'host_columns': [ "name" ], 'paint': paint_icon_inventory, })
return link_to_view(html.render_icon('inv', _("Show Hardware/Software-Inventory of this host")), row, 'inv_host' )
conditional_block
inventory.py
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# |             ____ _               _        __  __ _  __           |
# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
# |                                                                  |
# | Copyright Mathias Kettner 2014             [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.

def paint_icon_inventory(what, row, tags, customer_vars):
    if (what == "host" or row.get("service_check_command","").startswith("check_mk_active-cmk_inv!")) \
        and inventory.has_inventory(row["host_name"]):
        return link_to_view(html.render_icon('inv',
                _("Show Hardware/Software-Inventory of this host")),
                row, 'inv_host' )

multisite_icons.append({
    'host_columns': [ "name" ],
    'paint': paint_icon_inventory,
})
# under the terms of the GNU General Public License as published by
random_line_split
inventory.py
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# |             ____ _               _        __  __ _  __           |
# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
# |                                                                  |
# | Copyright Mathias Kettner 2014             [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.

def paint_icon_inventory(what, row, tags, customer_vars):
multisite_icons.append({ 'host_columns': [ "name" ], 'paint': paint_icon_inventory, })
if (what == "host" or row.get("service_check_command","").startswith("check_mk_active-cmk_inv!")) \ and inventory.has_inventory(row["host_name"]): return link_to_view(html.render_icon('inv', _("Show Hardware/Software-Inventory of this host")), row, 'inv_host' )
identifier_body
inventory.py
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# |             ____ _               _        __  __ _  __           |
# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
# |                                                                  |
# | Copyright Mathias Kettner 2014             [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.

def
(what, row, tags, customer_vars): if (what == "host" or row.get("service_check_command","").startswith("check_mk_active-cmk_inv!")) \ and inventory.has_inventory(row["host_name"]): return link_to_view(html.render_icon('inv', _("Show Hardware/Software-Inventory of this host")), row, 'inv_host' ) multisite_icons.append({ 'host_columns': [ "name" ], 'paint': paint_icon_inventory, })
paint_icon_inventory
identifier_name
context.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Context data structure used by rustpkg use std::{io, os}; use extra::workcache; use rustc::driver::session::{OptLevel, No}; #[deriving(Clone)] pub struct Context { // Config strings that the user passed in with --cfg cfgs: ~[~str], // Flags to pass to rustc rustc_flags: RustcFlags, // If use_rust_path_hack is true, rustpkg searches for sources // in *package* directories that are in the RUST_PATH (for example, // FOO/src/bar-0.1 instead of FOO). The flag doesn't affect where // rustpkg stores build artifacts. use_rust_path_hack: bool, // The root directory containing the Rust standard libraries sysroot: Path } #[deriving(Clone)] pub struct BuildContext { // Context for workcache workcache_context: workcache::Context, // Everything else context: Context } impl BuildContext { pub fn sysroot(&self) -> Path { self.context.sysroot.clone() } pub fn sysroot_to_use(&self) -> Path { self.context.sysroot_to_use() } /// Returns the flags to pass to rustc, as a vector of strings pub fn flag_strs(&self) -> ~[~str] { self.context.flag_strs() } pub fn compile_upto(&self) -> StopBefore { self.context.compile_upto() } } /* Deliberately unsupported rustc flags: --bin, --lib inferred from crate file names -L inferred from extern mods --out-dir inferred from RUST_PATH --test use `rustpkg test` -v -h --ls don't make sense with rustpkg -W -A -D -F - use pragmas instead rustc flags that aren't implemented yet: --passes --llvm-arg --target-feature --android-cross-path */ pub struct RustcFlags { compile_upto: StopBefore, // Linker to use with the --linker flag linker: Option<~str>, // Extra arguments to pass to rustc with the --link-args flag link_args: Option<~str>, // Optimization level. 0 = default. -O = 2. 
optimization_level: OptLevel, // True if the user passed in --save-temps save_temps: bool, // Target (defaults to rustc's default target) target: Option<~str>, // Target CPU (defaults to rustc's default target CPU) target_cpu: Option<~str>, // Any -Z features experimental_features: Option<~[~str]> } impl Clone for RustcFlags { fn clone(&self) -> RustcFlags { RustcFlags { compile_upto: self.compile_upto, linker: self.linker.clone(), link_args: self.link_args.clone(), optimization_level: self.optimization_level, save_temps: self.save_temps, target: self.target.clone(), target_cpu: self.target_cpu.clone(), experimental_features: self.experimental_features.clone() } } } #[deriving(Eq)] pub enum StopBefore { Nothing, // compile everything Link, // --no-link LLVMCompileBitcode, // --emit-llvm without -S LLVMAssemble, // -S --emit-llvm Assemble, // -S without --emit-llvm Trans, // --no-trans Pretty, // --pretty Analysis, // --parse-only } impl Context { pub fn sysroot(&self) -> Path { self.sysroot.clone() } /// Debugging pub fn sysroot_str(&self) -> ~str { self.sysroot.as_str().unwrap().to_owned() } // Hack so that rustpkg can run either out of a rustc target dir, // or the host dir pub fn sysroot_to_use(&self) -> Path { if !in_target(&self.sysroot) { self.sysroot.clone() } else { let mut p = self.sysroot.clone(); p.pop(); p.pop(); p.pop(); p } } /// Returns the flags to pass to rustc, as a vector of strings pub fn flag_strs(&self) -> ~[~str] { self.rustc_flags.flag_strs() } pub fn compile_upto(&self) -> StopBefore { self.rustc_flags.compile_upto } } /// We assume that if ../../rustc exists, then we're running /// rustpkg from a Rust target directory. This is part of a /// kludgy hack used to adjust the sysroot. pub fn in_target(sysroot: &Path) -> bool
impl RustcFlags { fn flag_strs(&self) -> ~[~str] { let linker_flag = match self.linker { Some(ref l) => ~[~"--linker", l.clone()], None => ~[] }; let link_args_flag = match self.link_args { Some(ref l) => ~[~"--link-args", l.clone()], None => ~[] }; let save_temps_flag = if self.save_temps { ~[~"--save-temps"] } else { ~[] }; let target_flag = match self.target { Some(ref l) => ~[~"--target", l.clone()], None => ~[] }; let target_cpu_flag = match self.target_cpu { Some(ref l) => ~[~"--target-cpu", l.clone()], None => ~[] }; let z_flags = match self.experimental_features { Some(ref ls) => ls.flat_map(|s| ~[~"-Z", s.clone()]), None => ~[] }; linker_flag + link_args_flag + save_temps_flag + target_flag + target_cpu_flag + z_flags + (match self.compile_upto { LLVMCompileBitcode => ~[~"--emit-llvm"], LLVMAssemble => ~[~"--emit-llvm", ~"-S"], Link => ~[~"-c"], Trans => ~[~"--no-trans"], Assemble => ~[~"-S"], // n.b. Doesn't support all flavors of --pretty (yet) Pretty => ~[~"--pretty"], Analysis => ~[~"--parse-only"], Nothing => ~[] }) } pub fn default() -> RustcFlags { RustcFlags { linker: None, link_args: None, compile_upto: Nothing, optimization_level: No, save_temps: false, target: None, target_cpu: None, experimental_features: None } } } /// Returns true if any of the flags given are incompatible with the cmd pub fn flags_forbidden_for_cmd(flags: &RustcFlags, cfgs: &[~str], cmd: &str, user_supplied_opt_level: bool) -> bool { let complain = |s| { println!("The {} option can only be used with the `build` command: rustpkg [options..] build {} [package-ID]", s, s); }; if flags.linker.is_some() && cmd != "build" && cmd != "install" { io::println("The --linker option can only be used with the build or install commands."); return true; } if flags.link_args.is_some() && cmd != "build" && cmd != "install" { io::println("The --link-args option can only be used with the build or install commands."); return true; } if !cfgs.is_empty() && cmd != "build" && cmd != "install" { io::println("The --cfg option can only be used with the build or install commands."); return true; } if user_supplied_opt_level && cmd != "build" && cmd != "install" { io::println("The -O and --opt-level options can only be used with the build \ or install commands."); return true; } if flags.save_temps && cmd != "build" && cmd != "install" { io::println("The --save-temps option can only be used with the build \ or install commands."); return true; } if flags.target.is_some() && cmd != "build" && cmd != "install" { io::println("The --target option can only be used with the build \ or install commands."); return true; } if flags.target_cpu.is_some() && cmd != "build" && cmd != "install" { io::println("The --target-cpu option can only be used with the build \ or install commands."); return true; } if flags.experimental_features.is_some() && cmd != "build" && cmd != "install" { io::println("The -Z option can only be used with the build or install commands."); return true; } match flags.compile_upto { Link if cmd != "build" => { complain("--no-link"); true } Trans if cmd != "build" => { complain("--no-trans"); true } Assemble if cmd != "build" => { complain("-S"); true } Pretty if cmd != "build" => { complain("--pretty"); true } Analysis if cmd != "build" => { complain("--parse-only"); true } LLVMCompileBitcode if cmd != "build" => { complain("--emit-llvm"); true } LLVMAssemble if cmd != "build" => { complain("--emit-llvm"); true } _ => false } }
{ debug2!("Checking whether {} is in target", sysroot.display()); let mut p = sysroot.dir_path(); p.set_filename("rustc"); os::path_is_dir(&p) }
identifier_body
context.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Context data structure used by rustpkg use std::{io, os}; use extra::workcache; use rustc::driver::session::{OptLevel, No}; #[deriving(Clone)] pub struct
{ // Config strings that the user passed in with --cfg cfgs: ~[~str], // Flags to pass to rustc rustc_flags: RustcFlags, // If use_rust_path_hack is true, rustpkg searches for sources // in *package* directories that are in the RUST_PATH (for example, // FOO/src/bar-0.1 instead of FOO). The flag doesn't affect where // rustpkg stores build artifacts. use_rust_path_hack: bool, // The root directory containing the Rust standard libraries sysroot: Path } #[deriving(Clone)] pub struct BuildContext { // Context for workcache workcache_context: workcache::Context, // Everything else context: Context } impl BuildContext { pub fn sysroot(&self) -> Path { self.context.sysroot.clone() } pub fn sysroot_to_use(&self) -> Path { self.context.sysroot_to_use() } /// Returns the flags to pass to rustc, as a vector of strings pub fn flag_strs(&self) -> ~[~str] { self.context.flag_strs() } pub fn compile_upto(&self) -> StopBefore { self.context.compile_upto() } } /* Deliberately unsupported rustc flags: --bin, --lib inferred from crate file names -L inferred from extern mods --out-dir inferred from RUST_PATH --test use `rustpkg test` -v -h --ls don't make sense with rustpkg -W -A -D -F - use pragmas instead rustc flags that aren't implemented yet: --passes --llvm-arg --target-feature --android-cross-path */ pub struct RustcFlags { compile_upto: StopBefore, // Linker to use with the --linker flag linker: Option<~str>, // Extra arguments to pass to rustc with the --link-args flag link_args: Option<~str>, // Optimization level. 0 = default. -O = 2. optimization_level: OptLevel, // True if the user passed in --save-temps save_temps: bool, // Target (defaults to rustc's default target) target: Option<~str>, // Target CPU (defaults to rustc's default target CPU) target_cpu: Option<~str>, // Any -Z features experimental_features: Option<~[~str]> } impl Clone for RustcFlags { fn clone(&self) -> RustcFlags { RustcFlags { compile_upto: self.compile_upto, linker: self.linker.clone(), link_args: self.link_args.clone(), optimization_level: self.optimization_level, save_temps: self.save_temps, target: self.target.clone(), target_cpu: self.target_cpu.clone(), experimental_features: self.experimental_features.clone() } } } #[deriving(Eq)] pub enum StopBefore { Nothing, // compile everything Link, // --no-link LLVMCompileBitcode, // --emit-llvm without -S LLVMAssemble, // -S --emit-llvm Assemble, // -S without --emit-llvm Trans, // --no-trans Pretty, // --pretty Analysis, // --parse-only } impl Context { pub fn sysroot(&self) -> Path { self.sysroot.clone() } /// Debugging pub fn sysroot_str(&self) -> ~str { self.sysroot.as_str().unwrap().to_owned() } // Hack so that rustpkg can run either out of a rustc target dir, // or the host dir pub fn sysroot_to_use(&self) -> Path { if !in_target(&self.sysroot) { self.sysroot.clone() } else { let mut p = self.sysroot.clone(); p.pop(); p.pop(); p.pop(); p } } /// Returns the flags to pass to rustc, as a vector of strings pub fn flag_strs(&self) -> ~[~str] { self.rustc_flags.flag_strs() } pub fn compile_upto(&self) -> StopBefore { self.rustc_flags.compile_upto } } /// We assume that if ../../rustc exists, then we're running /// rustpkg from a Rust target directory. This is part of a /// kludgy hack used to adjust the sysroot. 
pub fn in_target(sysroot: &Path) -> bool { debug2!("Checking whether {} is in target", sysroot.display()); let mut p = sysroot.dir_path(); p.set_filename("rustc"); os::path_is_dir(&p) } impl RustcFlags { fn flag_strs(&self) -> ~[~str] { let linker_flag = match self.linker { Some(ref l) => ~[~"--linker", l.clone()], None => ~[] }; let link_args_flag = match self.link_args { Some(ref l) => ~[~"--link-args", l.clone()], None => ~[] }; let save_temps_flag = if self.save_temps { ~[~"--save-temps"] } else { ~[] }; let target_flag = match self.target { Some(ref l) => ~[~"--target", l.clone()], None => ~[] }; let target_cpu_flag = match self.target_cpu { Some(ref l) => ~[~"--target-cpu", l.clone()], None => ~[] }; let z_flags = match self.experimental_features { Some(ref ls) => ls.flat_map(|s| ~[~"-Z", s.clone()]), None => ~[] }; linker_flag + link_args_flag + save_temps_flag + target_flag + target_cpu_flag + z_flags + (match self.compile_upto { LLVMCompileBitcode => ~[~"--emit-llvm"], LLVMAssemble => ~[~"--emit-llvm", ~"-S"], Link => ~[~"-c"], Trans => ~[~"--no-trans"], Assemble => ~[~"-S"], // n.b. Doesn't support all flavors of --pretty (yet) Pretty => ~[~"--pretty"], Analysis => ~[~"--parse-only"], Nothing => ~[] }) } pub fn default() -> RustcFlags { RustcFlags { linker: None, link_args: None, compile_upto: Nothing, optimization_level: No, save_temps: false, target: None, target_cpu: None, experimental_features: None } } } /// Returns true if any of the flags given are incompatible with the cmd pub fn flags_forbidden_for_cmd(flags: &RustcFlags, cfgs: &[~str], cmd: &str, user_supplied_opt_level: bool) -> bool { let complain = |s| { println!("The {} option can only be used with the `build` command: rustpkg [options..] build {} [package-ID]", s, s); }; if flags.linker.is_some() && cmd != "build" && cmd != "install" { io::println("The --linker option can only be used with the build or install commands."); return true; } if flags.link_args.is_some() && cmd != "build" && cmd != "install" { io::println("The --link-args option can only be used with the build or install commands."); return true; } if !cfgs.is_empty() && cmd != "build" && cmd != "install" { io::println("The --cfg option can only be used with the build or install commands."); return true; } if user_supplied_opt_level && cmd != "build" && cmd != "install" { io::println("The -O and --opt-level options can only be used with the build \ or install commands."); return true; } if flags.save_temps && cmd != "build" && cmd != "install" { io::println("The --save-temps option can only be used with the build \ or install commands."); return true; } if flags.target.is_some() && cmd != "build" && cmd != "install" { io::println("The --target option can only be used with the build \ or install commands."); return true; } if flags.target_cpu.is_some() && cmd != "build" && cmd != "install" { io::println("The --target-cpu option can only be used with the build \ or install commands."); return true; } if flags.experimental_features.is_some() && cmd != "build" && cmd != "install" { io::println("The -Z option can only be used with the build or install commands."); return true; } match flags.compile_upto { Link if cmd != "build" => { complain("--no-link"); true } Trans if cmd != "build" => { complain("--no-trans"); true } Assemble if cmd != "build" => { complain("-S"); true } Pretty if cmd != "build" => { complain("--pretty"); true } Analysis if cmd != "build" => { complain("--parse-only"); true } LLVMCompileBitcode if cmd != "build" => { 
complain("--emit-llvm"); true } LLVMAssemble if cmd != "build" => { complain("--emit-llvm"); true } _ => false } }
Context
identifier_name
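The record above exercises rustpkg's Context, whose flag_strs method maps the StopBefore stages onto rustc command-line flags. As a reading aid for anyone skimming the dump, here is a minimal sketch of that stage-to-flags mapping in present-day Rust; the flag strings are copied from the 2013 source (several no longer exist in modern rustc), while the Vec/match machinery replaces the original ~[] and ~str dialect.

/// Compilation stages, mirroring the StopBefore enum in the record.
#[derive(Clone, Copy)]
enum StopBefore {
    Nothing,
    Link,
    LlvmCompileBitcode,
    LlvmAssemble,
    Assemble,
    Trans,
    Pretty,
    Analysis,
}

/// The stage portion of flag_strs: which flags ask rustc to stop early.
fn stage_flags(stage: StopBefore) -> Vec<&'static str> {
    use StopBefore::*;
    match stage {
        LlvmCompileBitcode => vec!["--emit-llvm"],
        LlvmAssemble => vec!["--emit-llvm", "-S"],
        Link => vec!["-c"],
        Trans => vec!["--no-trans"],
        Assemble => vec!["-S"],
        Pretty => vec!["--pretty"],
        Analysis => vec!["--parse-only"],
        Nothing => vec![],
    }
}

fn main() {
    assert_eq!(stage_flags(StopBefore::LlvmAssemble), ["--emit-llvm", "-S"]);
    assert!(stage_flags(StopBefore::Nothing).is_empty());
    println!("{:?}", stage_flags(StopBefore::Link));
}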
context.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Context data structure used by rustpkg use std::{io, os};
#[deriving(Clone)] pub struct Context { // Config strings that the user passed in with --cfg cfgs: ~[~str], // Flags to pass to rustc rustc_flags: RustcFlags, // If use_rust_path_hack is true, rustpkg searches for sources // in *package* directories that are in the RUST_PATH (for example, // FOO/src/bar-0.1 instead of FOO). The flag doesn't affect where // rustpkg stores build artifacts. use_rust_path_hack: bool, // The root directory containing the Rust standard libraries sysroot: Path } #[deriving(Clone)] pub struct BuildContext { // Context for workcache workcache_context: workcache::Context, // Everything else context: Context } impl BuildContext { pub fn sysroot(&self) -> Path { self.context.sysroot.clone() } pub fn sysroot_to_use(&self) -> Path { self.context.sysroot_to_use() } /// Returns the flags to pass to rustc, as a vector of strings pub fn flag_strs(&self) -> ~[~str] { self.context.flag_strs() } pub fn compile_upto(&self) -> StopBefore { self.context.compile_upto() } } /* Deliberately unsupported rustc flags: --bin, --lib inferred from crate file names -L inferred from extern mods --out-dir inferred from RUST_PATH --test use `rustpkg test` -v -h --ls don't make sense with rustpkg -W -A -D -F - use pragmas instead rustc flags that aren't implemented yet: --passes --llvm-arg --target-feature --android-cross-path */ pub struct RustcFlags { compile_upto: StopBefore, // Linker to use with the --linker flag linker: Option<~str>, // Extra arguments to pass to rustc with the --link-args flag link_args: Option<~str>, // Optimization level. 0 = default. -O = 2. optimization_level: OptLevel, // True if the user passed in --save-temps save_temps: bool, // Target (defaults to rustc's default target) target: Option<~str>, // Target CPU (defaults to rustc's default target CPU) target_cpu: Option<~str>, // Any -Z features experimental_features: Option<~[~str]> } impl Clone for RustcFlags { fn clone(&self) -> RustcFlags { RustcFlags { compile_upto: self.compile_upto, linker: self.linker.clone(), link_args: self.link_args.clone(), optimization_level: self.optimization_level, save_temps: self.save_temps, target: self.target.clone(), target_cpu: self.target_cpu.clone(), experimental_features: self.experimental_features.clone() } } } #[deriving(Eq)] pub enum StopBefore { Nothing, // compile everything Link, // --no-link LLVMCompileBitcode, // --emit-llvm without -S LLVMAssemble, // -S --emit-llvm Assemble, // -S without --emit-llvm Trans, // --no-trans Pretty, // --pretty Analysis, // --parse-only } impl Context { pub fn sysroot(&self) -> Path { self.sysroot.clone() } /// Debugging pub fn sysroot_str(&self) -> ~str { self.sysroot.as_str().unwrap().to_owned() } // Hack so that rustpkg can run either out of a rustc target dir, // or the host dir pub fn sysroot_to_use(&self) -> Path { if !in_target(&self.sysroot) { self.sysroot.clone() } else { let mut p = self.sysroot.clone(); p.pop(); p.pop(); p.pop(); p } } /// Returns the flags to pass to rustc, as a vector of strings pub fn flag_strs(&self) -> ~[~str] { self.rustc_flags.flag_strs() } pub fn compile_upto(&self) -> StopBefore { self.rustc_flags.compile_upto } } /// We assume that if ../../rustc exists, then we're running /// rustpkg from a Rust target directory. This is part of a /// kludgy hack used to adjust the sysroot. 
pub fn in_target(sysroot: &Path) -> bool { debug2!("Checking whether {} is in target", sysroot.display()); let mut p = sysroot.dir_path(); p.set_filename("rustc"); os::path_is_dir(&p) } impl RustcFlags { fn flag_strs(&self) -> ~[~str] { let linker_flag = match self.linker { Some(ref l) => ~[~"--linker", l.clone()], None => ~[] }; let link_args_flag = match self.link_args { Some(ref l) => ~[~"--link-args", l.clone()], None => ~[] }; let save_temps_flag = if self.save_temps { ~[~"--save-temps"] } else { ~[] }; let target_flag = match self.target { Some(ref l) => ~[~"--target", l.clone()], None => ~[] }; let target_cpu_flag = match self.target_cpu { Some(ref l) => ~[~"--target-cpu", l.clone()], None => ~[] }; let z_flags = match self.experimental_features { Some(ref ls) => ls.flat_map(|s| ~[~"-Z", s.clone()]), None => ~[] }; linker_flag + link_args_flag + save_temps_flag + target_flag + target_cpu_flag + z_flags + (match self.compile_upto { LLVMCompileBitcode => ~[~"--emit-llvm"], LLVMAssemble => ~[~"--emit-llvm", ~"-S"], Link => ~[~"-c"], Trans => ~[~"--no-trans"], Assemble => ~[~"-S"], // n.b. Doesn't support all flavors of --pretty (yet) Pretty => ~[~"--pretty"], Analysis => ~[~"--parse-only"], Nothing => ~[] }) } pub fn default() -> RustcFlags { RustcFlags { linker: None, link_args: None, compile_upto: Nothing, optimization_level: No, save_temps: false, target: None, target_cpu: None, experimental_features: None } } } /// Returns true if any of the flags given are incompatible with the cmd pub fn flags_forbidden_for_cmd(flags: &RustcFlags, cfgs: &[~str], cmd: &str, user_supplied_opt_level: bool) -> bool { let complain = |s| { println!("The {} option can only be used with the `build` command: rustpkg [options..] build {} [package-ID]", s, s); }; if flags.linker.is_some() && cmd != "build" && cmd != "install" { io::println("The --linker option can only be used with the build or install commands."); return true; } if flags.link_args.is_some() && cmd != "build" && cmd != "install" { io::println("The --link-args option can only be used with the build or install commands."); return true; } if !cfgs.is_empty() && cmd != "build" && cmd != "install" { io::println("The --cfg option can only be used with the build or install commands."); return true; } if user_supplied_opt_level && cmd != "build" && cmd != "install" { io::println("The -O and --opt-level options can only be used with the build \ or install commands."); return true; } if flags.save_temps && cmd != "build" && cmd != "install" { io::println("The --save-temps option can only be used with the build \ or install commands."); return true; } if flags.target.is_some() && cmd != "build" && cmd != "install" { io::println("The --target option can only be used with the build \ or install commands."); return true; } if flags.target_cpu.is_some() && cmd != "build" && cmd != "install" { io::println("The --target-cpu option can only be used with the build \ or install commands."); return true; } if flags.experimental_features.is_some() && cmd != "build" && cmd != "install" { io::println("The -Z option can only be used with the build or install commands."); return true; } match flags.compile_upto { Link if cmd != "build" => { complain("--no-link"); true } Trans if cmd != "build" => { complain("--no-trans"); true } Assemble if cmd != "build" => { complain("-S"); true } Pretty if cmd != "build" => { complain("--pretty"); true } Analysis if cmd != "build" => { complain("--parse-only"); true } LLVMCompileBitcode if cmd != "build" => { 
complain("--emit-llvm"); true } LLVMAssemble if cmd != "build" => { complain("--emit-llvm"); true } _ => false } }
use extra::workcache; use rustc::driver::session::{OptLevel, No};
random_line_split
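This second context.rs record splits across flags_forbidden_for_cmd, which rejects build-time flags for any other subcommand. The rule reduces to one predicate; below is a pared-down model in which the flag names and the build/install allow-list come from the record, and everything else is simplified for illustration (in the original, the compile_upto flags are stricter and accept only build).

/// Returns the first user-supplied flag that the command may not carry.
/// In the record, only `build` and `install` accept these flags.
fn forbidden_flag(cmd: &str, flags: &[(&'static str, bool)]) -> Option<&'static str> {
    if cmd == "build" || cmd == "install" {
        return None;
    }
    flags.iter().find(|f| f.1).map(|f| f.0)
}

fn main() {
    let flags = [("--linker", false), ("--save-temps", true), ("--target", false)];
    assert_eq!(forbidden_flag("test", &flags), Some("--save-temps"));
    assert_eq!(forbidden_flag("build", &flags), None);
    if let Some(flag) = forbidden_flag("test", &flags) {
        eprintln!("The {} option can only be used with the build or install commands.", flag);
    }
}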
ComplexLine.py
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # # This file is part of the E-Cell System # # Copyright (C) 1996-2016 Keio University # Copyright (C) 2008-2016 RIKEN # Copyright (C) 2005-2009 The Molecular Sciences Institute # #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # # # E-Cell System is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # E-Cell System is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public # License along with E-Cell System -- see the file COPYING. # If not, write to the Free Software Foundation, Inc., # 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # #END_HEADER try: import gnomecanvas except: import gnome.canvas as gnomecanvas from ecell.ui.model_editor.Constants import * from ecell.ui.model_editor.Utils import * from ecell.ui.model_editor.ResizeableText import * class ComplexLine: def __init__( self, anObject, aCanvas ): self.theCanvas = aCanvas self.parentObject = anObject self.graphUtils = self.parentObject.getGraphUtils() self.shapeMap = {} self.lastmousex = 0 self.lastmousey = 0 self.buttonpressed = False self.firstdrag=False def show ( self ): self.theRoot = self.parentObject.theCanvas.getRoot() self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList() self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).reCalculate() self.__sortByZOrder( self.shapeDescriptorList ) self.isSelected = False for aKey in self.shapeDescriptorList.keys(): aDescriptor = self.shapeDescriptorList[aKey] if aDescriptor[SD_TYPE] == CV_TEXT: self.createText( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_LINE: self.createLine( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_BPATH: self.createBpath( aDescriptor ) self.isSelected = False def repaint ( self ): self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).reCalculate() self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList() self.__sortByZOrder( self.shapeDescriptorList ) for aKey in self.shapeDescriptorList.keys(): aDescriptor = self.shapeDescriptorList[aKey] if aDescriptor[SD_TYPE] == CV_TEXT: self.redrawText( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_LINE: self.redrawLine( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_BPATH: self.redrawBpath( aDescriptor ) def reName( self ): self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList() self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).renameLabel( self.parentObject.getProperty( CO_NAME ) ) aDescriptor = self.shapeDescriptorList["textbox"] self.renameText( aDescriptor ) def delete( self ): for aShapeName in self.shapeMap.keys(): self.shapeMap[ aShapeName ].destroy() def selected( self ): self.isSelected = True def unselected( self ): self.isSelected = False def outlineColorChanged( self ): self.fillColorChanged() def fillColorChanged( self ): # find shapes with outline color anRGB = copyValue( self.parentObject.getProperty( OB_FILL_COLOR ) ) if self.isSelected: for i in range(0,3): anRGB[i] = 32768 + anRGB[i] for aKey in self.shapeDescriptorList.keys(): 
aDescriptor = self.shapeDescriptorList[aKey] if aDescriptor[ SD_COLOR ] == SD_FILL: aColor = self.graphUtils.getGdkColorByRGB( anRGB ) if aDescriptor[SD_TYPE] in CV_LINE: self.changeLineColor( aDescriptor[ SD_NAME ] , aColor ) elif aDescriptor[SD_TYPE] in CV_BPATH: self.changeLineColorB( aDescriptor[ SD_NAME ] , aColor ) def createBpath(self, aDescriptor): aSpecific= aDescriptor[SD_SPECIFIC] # get pathdef pathdef= aSpecific[BPATH_PATHDEF] pd = gnomecanvas.path_def_new(pathdef) aGdkColor = self.getGdkColor( aDescriptor ) #cheCk: 1starg > the Bpath, 2ndarg > Bpath width(def 3), 3rdarg > Color of Bpath(def black) bpath = self.theRoot.add(gnomecanvas.CanvasBpath, width_units=3, outline_color_gdk = aGdkColor) bpath.set_bpath(pd) self.addHandlers( bpath, aDescriptor[ SD_NAME ] ) self.shapeMap[ aDescriptor[ SD_NAME ] ] = bpath #cheCk: createLine is in charge of the Simple Line, displaying it width, colour ..blabla.. #regardless of whether it is the arrowheads or the middle stuffs (MS), it creates all #but, if the MS is a bpath (eg. curvedLineSD) it will overwrite the middle line, I THINK OLI def createLine( self, aDescriptor ): lineSpec = aDescriptor[SD_SPECIFIC] ( X1, X2, Y1, Y2 ) = [lineSpec[0], lineSpec[2], lineSpec[1], lineSpec[3] ]
aLine = self.theRoot.add( gnomecanvas.CanvasLine,points=[X1,Y1,X2,Y2], width_units=lineSpec[ 6 ], fill_color_gdk = aGdkColor, first_arrowhead = firstArrow, last_arrowhead = secondArrow,arrow_shape_a=5, arrow_shape_b=5, arrow_shape_c=5 ) self.addHandlers( aLine, aDescriptor[ SD_NAME ] ) self.shapeMap[ aDescriptor[ SD_NAME ] ] = aLine def changeLineColor ( self, shapeName, aColor ): aShape = self.shapeMap[ shapeName ] aShape.set_property('fill_color_gdk', aColor ) def changeLineColorB ( self, shapeName, aColor ): aShape = self.shapeMap[ shapeName ] aShape.set_property('outline_color_gdk', aColor ) def createText( self, aDescriptor ): textSpec = aDescriptor[SD_SPECIFIC] (X1, Y1) = ( textSpec[TEXT_ABSX], textSpec[TEXT_ABSY] ) aGdkColor = self.getGdkColor( aDescriptor ) aText = ResizeableText( self.theRoot, self.theCanvas, X1, Y1, aGdkColor, textSpec[TEXT_TEXT], gtk.ANCHOR_NW ) self.addHandlers( aText, aDescriptor[ SD_NAME ] ) self.shapeMap[ aDescriptor[ SD_NAME ] ] = aText def redrawLine( self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] aSpecific = aDescriptor[ SD_SPECIFIC ] x1 = aSpecific[0] y1 = aSpecific[1] x2 = aSpecific[2] y2 = aSpecific[3] hasFirstArrow = aSpecific[4] hasLastArrow = aSpecific[5] aShape.set_property( 'points', (x1, y1, x2, y2) ) aShape.set_property('first_arrowhead', hasFirstArrow ) aShape.set_property('last_arrowhead', hasLastArrow ) def redrawBpath( self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] pathdef = aDescriptor[ SD_SPECIFIC ][BPATH_PATHDEF] pd=gnomecanvas.path_def_new(pathdef) aShape.set_bpath(pd) def redrawText( self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] aSpecific = aDescriptor[ SD_SPECIFIC ] x = aSpecific[TEXT_ABSX] y = aSpecific[TEXT_ABSY] aShape.set_property( 'x', x ) aShape.set_property( 'y', y ) def renameText (self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] aSpecific = aDescriptor[ SD_SPECIFIC ] label = aSpecific[ TEXT_TEXT ] aShape.set_property( 'text', label ) def getGdkColor( self, aDescriptor ): aColorType = aDescriptor[ SD_COLOR ] if aColorType == SD_FILL: queryProp = OB_FILL_COLOR elif aColorType == CV_TEXT: queryProp = OB_TEXT_COLOR anRGBColor = self.parentObject.getProperty( queryProp ) return self.graphUtils.getGdkColorByRGB( anRGBColor ) def __sortByZOrder ( self, desclist ): keys = desclist.keys() fn = lambda x, y: ( x[SD_Z] < y[SD_Z] ) - ( y[SD_Z] < x[SD_Z] ) keys.sort(fn) def leftClick( self, shapeName, x, y, shift_pressed = False ): # usually select self.parentObject.doSelect( shift_pressed ) if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_ARROWHEAD: self.changeCursor( shapeName, x, y, True ) def rightClick ( self, shapeName, x, y, anEvent, shift ): # usually show menu if not self.parentObject.isSelected: self.parentObject.doSelect( shift ) self.parentObject.showMenu( anEvent) def getFirstDrag(self): return self.firstdrag def setFirstDrag(self,aValue): self.firstdrag=aValue def mouseDrag( self, shapeName, deltax, deltay, origx, origy ): # decide whether resize or move or draw arrow if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_MOVINGLINE: ''' if shapeName == SHAPE_TYPE_MULTIBCURVE_LINE: self.parentObject.getArrowType(SHAPE_TYPE_MULTIBCURVE_LINE) #Accessing BPATH_DEF now, the coords like above bpathDefcheCk = self.parentObject.theSD.theDescriptorList[SHAPE_TYPE_MULTIBCURVE_LINE][SD_SPECIFIC][BPATH_PATHDEF] self.parentObject.thePropertyMap[CO_CONTROL_POINTS] = bpathDefcheCk bpathDefcheCk[1][1] = deltax bpathDefcheCk[1][2] = deltay 
bpathDefcheCk[1][3] = deltax bpathDefcheCk[1][4] = deltay bpathDefcheCk[2][1] = deltax bpathDefcheCk[2][2] = deltay bpathDefcheCk[2][3] = deltax bpathDefcheCk[2][4] = deltay #bpathDefcheCk[2][1,2,3,4] = [deltax,deltay,deltax,deltay] ''' elif self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_ARROWHEAD: if not self.firstdrag: self.firstdrag=True self.parentObject.arrowheadDragged( shapeName,deltax, deltay, origx, origy) def checkConnection( self ): self.parentObject.checkConnection() def doubleClick( self, shapeName ): self.parentObject.popupEditor() def getShapeDescriptor( self, shapeName ): return self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptor( shapeName ) def addHandlers( self, canvasObject, aName ): canvasObject.connect('event', self.rect_event, aName ) def releaseButton( self, shapeName, x, y ): self.changeCursor( shapeName, x, y, False ) self.parentObject.mouseReleased( shapeName,x, y) def mouseEntered( self, shapeName, x, y ): self.changeCursor( shapeName, x, y ) def changeCursor( self, shapeName, x, y, buttonpressed = False): aFunction = self.getShapeDescriptor(shapeName)[SD_FUNCTION] aCursorType = self.parentObject.getCursorType( aFunction, x, y , buttonpressed) self.theCanvas.setCursor( aCursorType ) def rect_event( self, *args ): event = args[1] item = args[0] shapeName = args[2] if event.type == gtk.gdk.BUTTON_PRESS: if event.state&gtk.gdk.SHIFT_MASK == gtk.gdk.SHIFT_MASK: shift_press = True else: shift_press = False if event.button == 1: self.lastmousex = event.x self.lastmousey = event.y self.buttonpressed = True self.leftClick( shapeName, event.x, event.y, shift_press ) elif event.button == 3: self.rightClick(shapeName, event.x, event.y, event, shift_press ) elif event.type == gtk.gdk.BUTTON_RELEASE: if event.button == 1: self.buttonpressed = False self.releaseButton(shapeName, event.x, event.y ) elif event.type == gtk.gdk.MOTION_NOTIFY: self.buttonpressed=(event.state&gtk.gdk.BUTTON1_MASK)>0 if not self.buttonpressed: return oldx = self.lastmousex oldy = self.lastmousey deltax = event.x - oldx deltay = event.y - oldy self.lastmousex = event.x self.lastmousey = event.y self.mouseDrag( shapeName, deltax, deltay, oldx, oldy ) elif event.type == gtk.gdk._2BUTTON_PRESS: if event.button == 1: self.doubleClick( shapeName ) elif event.type == gtk.gdk.ENTER_NOTIFY: self.mouseEntered( shapeName, event.x, event.y )
aGdkColor = self.getGdkColor( aDescriptor ) firstArrow = lineSpec[4] secondArrow = lineSpec[5]
random_line_split
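One detail of the ComplexLine record above worth calling out is fillColorChanged, which signals selection by adding 32768 (half the 16-bit GDK channel range) to every RGB component. A dependency-free sketch of that highlight step follows; the clamp at 0xFFFF is an assumption on my part, since the record leaves any overflow for GDK to handle.

/// 16-bit-per-channel color, as GDK represented colors historically.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Rgb16 {
    r: u32,
    g: u32,
    b: u32,
}

/// Selection highlight: shift every channel up by half the 16-bit range,
/// clamping at the ceiling (the record relies on GDK for this part).
fn highlight(c: Rgb16) -> Rgb16 {
    let lift = |v: u32| (v + 32768).min(0xFFFF);
    Rgb16 { r: lift(c.r), g: lift(c.g), b: lift(c.b) }
}

fn main() {
    let base = Rgb16 { r: 20000, g: 40000, b: 60000 };
    assert_eq!(highlight(base), Rgb16 { r: 52768, g: 65535, b: 65535 });
    println!("{:?}", highlight(base));
}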
ComplexLine.py
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # # This file is part of the E-Cell System # # Copyright (C) 1996-2016 Keio University # Copyright (C) 2008-2016 RIKEN # Copyright (C) 2005-2009 The Molecular Sciences Institute # #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # # # E-Cell System is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # E-Cell System is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public # License along with E-Cell System -- see the file COPYING. # If not, write to the Free Software Foundation, Inc., # 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # #END_HEADER try: import gnomecanvas except: import gnome.canvas as gnomecanvas from ecell.ui.model_editor.Constants import * from ecell.ui.model_editor.Utils import * from ecell.ui.model_editor.ResizeableText import * class ComplexLine: def __init__( self, anObject, aCanvas ): self.theCanvas = aCanvas self.parentObject = anObject self.graphUtils = self.parentObject.getGraphUtils() self.shapeMap = {} self.lastmousex = 0 self.lastmousey = 0 self.buttonpressed = False self.firstdrag=False def show ( self ): self.theRoot = self.parentObject.theCanvas.getRoot() self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList() self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).reCalculate() self.__sortByZOrder( self.shapeDescriptorList ) self.isSelected = False for aKey in self.shapeDescriptorList.keys(): aDescriptor = self.shapeDescriptorList[aKey] if aDescriptor[SD_TYPE] == CV_TEXT: self.createText( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_LINE: self.createLine( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_BPATH: self.createBpath( aDescriptor ) self.isSelected = False def repaint ( self ): self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).reCalculate() self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList() self.__sortByZOrder( self.shapeDescriptorList ) for aKey in self.shapeDescriptorList.keys(): aDescriptor = self.shapeDescriptorList[aKey] if aDescriptor[SD_TYPE] == CV_TEXT:
elif aDescriptor[SD_TYPE] == CV_LINE: self.redrawLine( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_BPATH: self.redrawBpath( aDescriptor ) def reName( self ): self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList() self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).renameLabel( self.parentObject.getProperty( CO_NAME ) ) aDescriptor = self.shapeDescriptorList["textbox"] self.renameText( aDescriptor ) def delete( self ): for aShapeName in self.shapeMap.keys(): self.shapeMap[ aShapeName ].destroy() def selected( self ): self.isSelected = True def unselected( self ): self.isSelected = False def outlineColorChanged( self ): self.fillColorChanged() def fillColorChanged( self ): # find shapes with outline color anRGB = copyValue( self.parentObject.getProperty( OB_FILL_COLOR ) ) if self.isSelected: for i in range(0,3): anRGB[i] = 32768 + anRGB[i] for aKey in self.shapeDescriptorList.keys(): aDescriptor = self.shapeDescriptorList[aKey] if aDescriptor[ SD_COLOR ] == SD_FILL: aColor = self.graphUtils.getGdkColorByRGB( anRGB ) if aDescriptor[SD_TYPE] in CV_LINE: self.changeLineColor( aDescriptor[ SD_NAME ] , aColor ) elif aDescriptor[SD_TYPE] in CV_BPATH: self.changeLineColorB( aDescriptor[ SD_NAME ] , aColor ) def createBpath(self, aDescriptor): aSpecific= aDescriptor[SD_SPECIFIC] # get pathdef pathdef= aSpecific[BPATH_PATHDEF] pd = gnomecanvas.path_def_new(pathdef) aGdkColor = self.getGdkColor( aDescriptor ) #cheCk: 1starg > the Bpath, 2ndarg > Bpath width(def 3), 3rdarg > Color of Bpath(def black) bpath = self.theRoot.add(gnomecanvas.CanvasBpath, width_units=3, outline_color_gdk = aGdkColor) bpath.set_bpath(pd) self.addHandlers( bpath, aDescriptor[ SD_NAME ] ) self.shapeMap[ aDescriptor[ SD_NAME ] ] = bpath #cheCk: createLine is in charge of the Simple Line, displaying it width, colour ..blabla.. #regardless of whether it is the arrowheads or the middle stuffs (MS), it creates all #but, if the MS is a bpath (eg. 
curvedLineSD) it will overwrite the middle line, I THINK OLI def createLine( self, aDescriptor ): lineSpec = aDescriptor[SD_SPECIFIC] ( X1, X2, Y1, Y2 ) = [lineSpec[0], lineSpec[2], lineSpec[1], lineSpec[3] ] aGdkColor = self.getGdkColor( aDescriptor ) firstArrow = lineSpec[4] secondArrow = lineSpec[5] aLine = self.theRoot.add( gnomecanvas.CanvasLine,points=[X1,Y1,X2,Y2], width_units=lineSpec[ 6 ], fill_color_gdk = aGdkColor, first_arrowhead = firstArrow, last_arrowhead = secondArrow,arrow_shape_a=5, arrow_shape_b=5, arrow_shape_c=5 ) self.addHandlers( aLine, aDescriptor[ SD_NAME ] ) self.shapeMap[ aDescriptor[ SD_NAME ] ] = aLine def changeLineColor ( self, shapeName, aColor ): aShape = self.shapeMap[ shapeName ] aShape.set_property('fill_color_gdk', aColor ) def changeLineColorB ( self, shapeName, aColor ): aShape = self.shapeMap[ shapeName ] aShape.set_property('outline_color_gdk', aColor ) def createText( self, aDescriptor ): textSpec = aDescriptor[SD_SPECIFIC] (X1, Y1) = ( textSpec[TEXT_ABSX], textSpec[TEXT_ABSY] ) aGdkColor = self.getGdkColor( aDescriptor ) aText = ResizeableText( self.theRoot, self.theCanvas, X1, Y1, aGdkColor, textSpec[TEXT_TEXT], gtk.ANCHOR_NW ) self.addHandlers( aText, aDescriptor[ SD_NAME ] ) self.shapeMap[ aDescriptor[ SD_NAME ] ] = aText def redrawLine( self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] aSpecific = aDescriptor[ SD_SPECIFIC ] x1 = aSpecific[0] y1 = aSpecific[1] x2 = aSpecific[2] y2 = aSpecific[3] hasFirstArrow = aSpecific[4] hasLastArrow = aSpecific[5] aShape.set_property( 'points', (x1, y1, x2, y2) ) aShape.set_property('first_arrowhead', hasFirstArrow ) aShape.set_property('last_arrowhead', hasLastArrow ) def redrawBpath( self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] pathdef = aDescriptor[ SD_SPECIFIC ][BPATH_PATHDEF] pd=gnomecanvas.path_def_new(pathdef) aShape.set_bpath(pd) def redrawText( self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] aSpecific = aDescriptor[ SD_SPECIFIC ] x = aSpecific[TEXT_ABSX] y = aSpecific[TEXT_ABSY] aShape.set_property( 'x', x ) aShape.set_property( 'y', y ) def renameText (self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] aSpecific = aDescriptor[ SD_SPECIFIC ] label = aSpecific[ TEXT_TEXT ] aShape.set_property( 'text', label ) def getGdkColor( self, aDescriptor ): aColorType = aDescriptor[ SD_COLOR ] if aColorType == SD_FILL: queryProp = OB_FILL_COLOR elif aColorType == CV_TEXT: queryProp = OB_TEXT_COLOR anRGBColor = self.parentObject.getProperty( queryProp ) return self.graphUtils.getGdkColorByRGB( anRGBColor ) def __sortByZOrder ( self, desclist ): keys = desclist.keys() fn = lambda x, y: ( x[SD_Z] < y[SD_Z] ) - ( y[SD_Z] < x[SD_Z] ) keys.sort(fn) def leftClick( self, shapeName, x, y, shift_pressed = False ): # usually select self.parentObject.doSelect( shift_pressed ) if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_ARROWHEAD: self.changeCursor( shapeName, x, y, True ) def rightClick ( self, shapeName, x, y, anEvent, shift ): # usually show menu if not self.parentObject.isSelected: self.parentObject.doSelect( shift ) self.parentObject.showMenu( anEvent) def getFirstDrag(self): return self.firstdrag def setFirstDrag(self,aValue): self.firstdrag=aValue def mouseDrag( self, shapeName, deltax, deltay, origx, origy ): # decide whether resize or move or draw arrow if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_MOVINGLINE: ''' if shapeName == SHAPE_TYPE_MULTIBCURVE_LINE: 
self.parentObject.getArrowType(SHAPE_TYPE_MULTIBCURVE_LINE) #Accessing BPATH_DEF now, the coords like above bpathDefcheCk = self.parentObject.theSD.theDescriptorList[SHAPE_TYPE_MULTIBCURVE_LINE][SD_SPECIFIC][BPATH_PATHDEF] self.parentObject.thePropertyMap[CO_CONTROL_POINTS] = bpathDefcheCk bpathDefcheCk[1][1] = deltax bpathDefcheCk[1][2] = deltay bpathDefcheCk[1][3] = deltax bpathDefcheCk[1][4] = deltay bpathDefcheCk[2][1] = deltax bpathDefcheCk[2][2] = deltay bpathDefcheCk[2][3] = deltax bpathDefcheCk[2][4] = deltay #bpathDefcheCk[2][1,2,3,4] = [deltax,deltay,deltax,deltay] ''' elif self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_ARROWHEAD: if not self.firstdrag: self.firstdrag=True self.parentObject.arrowheadDragged( shapeName,deltax, deltay, origx, origy) def checkConnection( self ): self.parentObject.checkConnection() def doubleClick( self, shapeName ): self.parentObject.popupEditor() def getShapeDescriptor( self, shapeName ): return self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptor( shapeName ) def addHandlers( self, canvasObject, aName ): canvasObject.connect('event', self.rect_event, aName ) def releaseButton( self, shapeName, x, y ): self.changeCursor( shapeName, x, y, False ) self.parentObject.mouseReleased( shapeName,x, y) def mouseEntered( self, shapeName, x, y ): self.changeCursor( shapeName, x, y ) def changeCursor( self, shapeName, x, y, buttonpressed = False): aFunction = self.getShapeDescriptor(shapeName)[SD_FUNCTION] aCursorType = self.parentObject.getCursorType( aFunction, x, y , buttonpressed) self.theCanvas.setCursor( aCursorType ) def rect_event( self, *args ): event = args[1] item = args[0] shapeName = args[2] if event.type == gtk.gdk.BUTTON_PRESS: if event.state&gtk.gdk.SHIFT_MASK == gtk.gdk.SHIFT_MASK: shift_press = True else: shift_press = False if event.button == 1: self.lastmousex = event.x self.lastmousey = event.y self.buttonpressed = True self.leftClick( shapeName, event.x, event.y, shift_press ) elif event.button == 3: self.rightClick(shapeName, event.x, event.y, event, shift_press ) elif event.type == gtk.gdk.BUTTON_RELEASE: if event.button == 1: self.buttonpressed = False self.releaseButton(shapeName, event.x, event.y ) elif event.type == gtk.gdk.MOTION_NOTIFY: self.buttonpressed=(event.state&gtk.gdk.BUTTON1_MASK)>0 if not self.buttonpressed: return oldx = self.lastmousex oldy = self.lastmousey deltax = event.x - oldx deltay = event.y - oldy self.lastmousex = event.x self.lastmousey = event.y self.mouseDrag( shapeName, deltax, deltay, oldx, oldy ) elif event.type == gtk.gdk._2BUTTON_PRESS: if event.button == 1: self.doubleClick( shapeName ) elif event.type == gtk.gdk.ENTER_NOTIFY: self.mouseEntered( shapeName, event.x, event.y )
self.redrawText( aDescriptor )
conditional_block
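The suffix of this record includes __sortByZOrder, whose comparator is the classic (x < y) - (y < x) idiom: a cmp() replacement that yields -1, 0, or 1 without Python 2's built-in. (Note that the comparator is applied to the dictionary keys rather than the descriptors, so the SD_Z indexing there looks like a latent bug in the original.) The idiom itself, transcribed to Rust with a usage example:

/// The (x < y) - (y < x) idiom: a three-way comparison yielding -1, 0, or 1.
fn three_way(x: i32, y: i32) -> i32 {
    (x < y) as i32 - (y < x) as i32
}

fn main() {
    assert_eq!(three_way(1, 2), -1);
    assert_eq!(three_way(2, 2), 0);
    assert_eq!(three_way(3, 2), 1);

    // Sorting shapes by z-order with the idiom...
    let mut shapes = vec![("arrowhead", 3), ("textbox", 1), ("line", 2)];
    shapes.sort_by(|a, b| three_way(a.1, b.1).cmp(&0));
    assert_eq!(shapes[0].0, "textbox");

    // ...though in Rust a key extractor says the same thing more directly.
    shapes.sort_by_key(|s| s.1);
    println!("{:?}", shapes);
}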
ComplexLine.py
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # # This file is part of the E-Cell System # # Copyright (C) 1996-2016 Keio University # Copyright (C) 2008-2016 RIKEN # Copyright (C) 2005-2009 The Molecular Sciences Institute # #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # # # E-Cell System is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # E-Cell System is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public # License along with E-Cell System -- see the file COPYING. # If not, write to the Free Software Foundation, Inc., # 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # #END_HEADER try: import gnomecanvas except: import gnome.canvas as gnomecanvas from ecell.ui.model_editor.Constants import * from ecell.ui.model_editor.Utils import * from ecell.ui.model_editor.ResizeableText import * class ComplexLine: def __init__( self, anObject, aCanvas ): self.theCanvas = aCanvas self.parentObject = anObject self.graphUtils = self.parentObject.getGraphUtils() self.shapeMap = {} self.lastmousex = 0 self.lastmousey = 0 self.buttonpressed = False self.firstdrag=False def show ( self ): self.theRoot = self.parentObject.theCanvas.getRoot() self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList() self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).reCalculate() self.__sortByZOrder( self.shapeDescriptorList ) self.isSelected = False for aKey in self.shapeDescriptorList.keys(): aDescriptor = self.shapeDescriptorList[aKey] if aDescriptor[SD_TYPE] == CV_TEXT: self.createText( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_LINE: self.createLine( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_BPATH: self.createBpath( aDescriptor ) self.isSelected = False def repaint ( self ): self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).reCalculate() self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList() self.__sortByZOrder( self.shapeDescriptorList ) for aKey in self.shapeDescriptorList.keys(): aDescriptor = self.shapeDescriptorList[aKey] if aDescriptor[SD_TYPE] == CV_TEXT: self.redrawText( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_LINE: self.redrawLine( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_BPATH: self.redrawBpath( aDescriptor ) def reName( self ): self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList() self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).renameLabel( self.parentObject.getProperty( CO_NAME ) ) aDescriptor = self.shapeDescriptorList["textbox"] self.renameText( aDescriptor ) def delete( self ): for aShapeName in self.shapeMap.keys(): self.shapeMap[ aShapeName ].destroy() def selected( self ): self.isSelected = True def unselected( self ): self.isSelected = False def outlineColorChanged( self ): self.fillColorChanged() def fillColorChanged( self ): # find shapes with outline color anRGB = copyValue( self.parentObject.getProperty( OB_FILL_COLOR ) ) if self.isSelected: for i in range(0,3): anRGB[i] = 32768 + anRGB[i] for aKey in self.shapeDescriptorList.keys(): 
aDescriptor = self.shapeDescriptorList[aKey] if aDescriptor[ SD_COLOR ] == SD_FILL: aColor = self.graphUtils.getGdkColorByRGB( anRGB ) if aDescriptor[SD_TYPE] in CV_LINE: self.changeLineColor( aDescriptor[ SD_NAME ] , aColor ) elif aDescriptor[SD_TYPE] in CV_BPATH: self.changeLineColorB( aDescriptor[ SD_NAME ] , aColor ) def
(self, aDescriptor): aSpecific= aDescriptor[SD_SPECIFIC] # get pathdef pathdef= aSpecific[BPATH_PATHDEF] pd = gnomecanvas.path_def_new(pathdef) aGdkColor = self.getGdkColor( aDescriptor ) #cheCk: 1starg > the Bpath, 2ndarg > Bpath width(def 3), 3rdarg > Color of Bpath(def black) bpath = self.theRoot.add(gnomecanvas.CanvasBpath, width_units=3, outline_color_gdk = aGdkColor) bpath.set_bpath(pd) self.addHandlers( bpath, aDescriptor[ SD_NAME ] ) self.shapeMap[ aDescriptor[ SD_NAME ] ] = bpath #cheCk: createLine is in charge of the Simple Line, displaying it width, colour ..blabla.. #regardless of whether it is the arrowheads or the middle stuffs (MS), it creates all #but, if the MS is a bpath (eg. curvedLineSD) it will overwrite the middle line, I THINK OLI def createLine( self, aDescriptor ): lineSpec = aDescriptor[SD_SPECIFIC] ( X1, X2, Y1, Y2 ) = [lineSpec[0], lineSpec[2], lineSpec[1], lineSpec[3] ] aGdkColor = self.getGdkColor( aDescriptor ) firstArrow = lineSpec[4] secondArrow = lineSpec[5] aLine = self.theRoot.add( gnomecanvas.CanvasLine,points=[X1,Y1,X2,Y2], width_units=lineSpec[ 6 ], fill_color_gdk = aGdkColor, first_arrowhead = firstArrow, last_arrowhead = secondArrow,arrow_shape_a=5, arrow_shape_b=5, arrow_shape_c=5 ) self.addHandlers( aLine, aDescriptor[ SD_NAME ] ) self.shapeMap[ aDescriptor[ SD_NAME ] ] = aLine def changeLineColor ( self, shapeName, aColor ): aShape = self.shapeMap[ shapeName ] aShape.set_property('fill_color_gdk', aColor ) def changeLineColorB ( self, shapeName, aColor ): aShape = self.shapeMap[ shapeName ] aShape.set_property('outline_color_gdk', aColor ) def createText( self, aDescriptor ): textSpec = aDescriptor[SD_SPECIFIC] (X1, Y1) = ( textSpec[TEXT_ABSX], textSpec[TEXT_ABSY] ) aGdkColor = self.getGdkColor( aDescriptor ) aText = ResizeableText( self.theRoot, self.theCanvas, X1, Y1, aGdkColor, textSpec[TEXT_TEXT], gtk.ANCHOR_NW ) self.addHandlers( aText, aDescriptor[ SD_NAME ] ) self.shapeMap[ aDescriptor[ SD_NAME ] ] = aText def redrawLine( self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] aSpecific = aDescriptor[ SD_SPECIFIC ] x1 = aSpecific[0] y1 = aSpecific[1] x2 = aSpecific[2] y2 = aSpecific[3] hasFirstArrow = aSpecific[4] hasLastArrow = aSpecific[5] aShape.set_property( 'points', (x1, y1, x2, y2) ) aShape.set_property('first_arrowhead', hasFirstArrow ) aShape.set_property('last_arrowhead', hasLastArrow ) def redrawBpath( self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] pathdef = aDescriptor[ SD_SPECIFIC ][BPATH_PATHDEF] pd=gnomecanvas.path_def_new(pathdef) aShape.set_bpath(pd) def redrawText( self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] aSpecific = aDescriptor[ SD_SPECIFIC ] x = aSpecific[TEXT_ABSX] y = aSpecific[TEXT_ABSY] aShape.set_property( 'x', x ) aShape.set_property( 'y', y ) def renameText (self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] aSpecific = aDescriptor[ SD_SPECIFIC ] label = aSpecific[ TEXT_TEXT ] aShape.set_property( 'text', label ) def getGdkColor( self, aDescriptor ): aColorType = aDescriptor[ SD_COLOR ] if aColorType == SD_FILL: queryProp = OB_FILL_COLOR elif aColorType == CV_TEXT: queryProp = OB_TEXT_COLOR anRGBColor = self.parentObject.getProperty( queryProp ) return self.graphUtils.getGdkColorByRGB( anRGBColor ) def __sortByZOrder ( self, desclist ): keys = desclist.keys() fn = lambda x, y: ( x[SD_Z] < y[SD_Z] ) - ( y[SD_Z] < x[SD_Z] ) keys.sort(fn) def leftClick( self, shapeName, x, y, shift_pressed = False ): # usually select 
self.parentObject.doSelect( shift_pressed ) if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_ARROWHEAD: self.changeCursor( shapeName, x, y, True ) def rightClick ( self, shapeName, x, y, anEvent, shift ): # usually show menu if not self.parentObject.isSelected: self.parentObject.doSelect( shift ) self.parentObject.showMenu( anEvent) def getFirstDrag(self): return self.firstdrag def setFirstDrag(self,aValue): self.firstdrag=aValue def mouseDrag( self, shapeName, deltax, deltay, origx, origy ): # decide whether resize or move or draw arrow if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_MOVINGLINE: ''' if shapeName == SHAPE_TYPE_MULTIBCURVE_LINE: self.parentObject.getArrowType(SHAPE_TYPE_MULTIBCURVE_LINE) #Accessing BPATH_DEF now, the coords like above bpathDefcheCk = self.parentObject.theSD.theDescriptorList[SHAPE_TYPE_MULTIBCURVE_LINE][SD_SPECIFIC][BPATH_PATHDEF] self.parentObject.thePropertyMap[CO_CONTROL_POINTS] = bpathDefcheCk bpathDefcheCk[1][1] = deltax bpathDefcheCk[1][2] = deltay bpathDefcheCk[1][3] = deltax bpathDefcheCk[1][4] = deltay bpathDefcheCk[2][1] = deltax bpathDefcheCk[2][2] = deltay bpathDefcheCk[2][3] = deltax bpathDefcheCk[2][4] = deltay #bpathDefcheCk[2][1,2,3,4] = [deltax,deltay,deltax,deltay] ''' elif self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_ARROWHEAD: if not self.firstdrag: self.firstdrag=True self.parentObject.arrowheadDragged( shapeName,deltax, deltay, origx, origy) def checkConnection( self ): self.parentObject.checkConnection() def doubleClick( self, shapeName ): self.parentObject.popupEditor() def getShapeDescriptor( self, shapeName ): return self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptor( shapeName ) def addHandlers( self, canvasObject, aName ): canvasObject.connect('event', self.rect_event, aName ) def releaseButton( self, shapeName, x, y ): self.changeCursor( shapeName, x, y, False ) self.parentObject.mouseReleased( shapeName,x, y) def mouseEntered( self, shapeName, x, y ): self.changeCursor( shapeName, x, y ) def changeCursor( self, shapeName, x, y, buttonpressed = False): aFunction = self.getShapeDescriptor(shapeName)[SD_FUNCTION] aCursorType = self.parentObject.getCursorType( aFunction, x, y , buttonpressed) self.theCanvas.setCursor( aCursorType ) def rect_event( self, *args ): event = args[1] item = args[0] shapeName = args[2] if event.type == gtk.gdk.BUTTON_PRESS: if event.state&gtk.gdk.SHIFT_MASK == gtk.gdk.SHIFT_MASK: shift_press = True else: shift_press = False if event.button == 1: self.lastmousex = event.x self.lastmousey = event.y self.buttonpressed = True self.leftClick( shapeName, event.x, event.y, shift_press ) elif event.button == 3: self.rightClick(shapeName, event.x, event.y, event, shift_press ) elif event.type == gtk.gdk.BUTTON_RELEASE: if event.button == 1: self.buttonpressed = False self.releaseButton(shapeName, event.x, event.y ) elif event.type == gtk.gdk.MOTION_NOTIFY: self.buttonpressed=(event.state&gtk.gdk.BUTTON1_MASK)>0 if not self.buttonpressed: return oldx = self.lastmousex oldy = self.lastmousey deltax = event.x - oldx deltay = event.y - oldy self.lastmousex = event.x self.lastmousey = event.y self.mouseDrag( shapeName, deltax, deltay, oldx, oldy ) elif event.type == gtk.gdk._2BUTTON_PRESS: if event.button == 1: self.doubleClick( shapeName ) elif event.type == gtk.gdk.ENTER_NOTIFY: self.mouseEntered( shapeName, event.x, event.y )
createBpath
identifier_name
ComplexLine.py
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # # This file is part of the E-Cell System # # Copyright (C) 1996-2016 Keio University # Copyright (C) 2008-2016 RIKEN # Copyright (C) 2005-2009 The Molecular Sciences Institute # #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # # # E-Cell System is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # E-Cell System is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public # License along with E-Cell System -- see the file COPYING. # If not, write to the Free Software Foundation, Inc., # 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # #END_HEADER try: import gnomecanvas except: import gnome.canvas as gnomecanvas from ecell.ui.model_editor.Constants import * from ecell.ui.model_editor.Utils import * from ecell.ui.model_editor.ResizeableText import * class ComplexLine: def __init__( self, anObject, aCanvas ): self.theCanvas = aCanvas self.parentObject = anObject self.graphUtils = self.parentObject.getGraphUtils() self.shapeMap = {} self.lastmousex = 0 self.lastmousey = 0 self.buttonpressed = False self.firstdrag=False def show ( self ): self.theRoot = self.parentObject.theCanvas.getRoot() self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList() self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).reCalculate() self.__sortByZOrder( self.shapeDescriptorList ) self.isSelected = False for aKey in self.shapeDescriptorList.keys(): aDescriptor = self.shapeDescriptorList[aKey] if aDescriptor[SD_TYPE] == CV_TEXT: self.createText( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_LINE: self.createLine( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_BPATH: self.createBpath( aDescriptor ) self.isSelected = False def repaint ( self ): self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).reCalculate() self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList() self.__sortByZOrder( self.shapeDescriptorList ) for aKey in self.shapeDescriptorList.keys(): aDescriptor = self.shapeDescriptorList[aKey] if aDescriptor[SD_TYPE] == CV_TEXT: self.redrawText( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_LINE: self.redrawLine( aDescriptor ) elif aDescriptor[SD_TYPE] == CV_BPATH: self.redrawBpath( aDescriptor ) def reName( self ): self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList() self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).renameLabel( self.parentObject.getProperty( CO_NAME ) ) aDescriptor = self.shapeDescriptorList["textbox"] self.renameText( aDescriptor ) def delete( self ):
def selected( self ): self.isSelected = True def unselected( self ): self.isSelected = False def outlineColorChanged( self ): self.fillColorChanged() def fillColorChanged( self ): # find shapes with outline color anRGB = copyValue( self.parentObject.getProperty( OB_FILL_COLOR ) ) if self.isSelected: for i in range(0,3): anRGB[i] = 32768 + anRGB[i] for aKey in self.shapeDescriptorList.keys(): aDescriptor = self.shapeDescriptorList[aKey] if aDescriptor[ SD_COLOR ] == SD_FILL: aColor = self.graphUtils.getGdkColorByRGB( anRGB ) if aDescriptor[SD_TYPE] in CV_LINE: self.changeLineColor( aDescriptor[ SD_NAME ] , aColor ) elif aDescriptor[SD_TYPE] in CV_BPATH: self.changeLineColorB( aDescriptor[ SD_NAME ] , aColor ) def createBpath(self, aDescriptor): aSpecific= aDescriptor[SD_SPECIFIC] # get pathdef pathdef= aSpecific[BPATH_PATHDEF] pd = gnomecanvas.path_def_new(pathdef) aGdkColor = self.getGdkColor( aDescriptor ) #cheCk: 1starg > the Bpath, 2ndarg > Bpath width(def 3), 3rdarg > Color of Bpath(def black) bpath = self.theRoot.add(gnomecanvas.CanvasBpath, width_units=3, outline_color_gdk = aGdkColor) bpath.set_bpath(pd) self.addHandlers( bpath, aDescriptor[ SD_NAME ] ) self.shapeMap[ aDescriptor[ SD_NAME ] ] = bpath #cheCk: createLine is in charge of the Simple Line, displaying it width, colour ..blabla.. #regardless of whether it is the arrowheads or the middle stuffs (MS), it creates all #but, if the MS is a bpath (eg. curvedLineSD) it will overwrite the middle line, I THINK OLI def createLine( self, aDescriptor ): lineSpec = aDescriptor[SD_SPECIFIC] ( X1, X2, Y1, Y2 ) = [lineSpec[0], lineSpec[2], lineSpec[1], lineSpec[3] ] aGdkColor = self.getGdkColor( aDescriptor ) firstArrow = lineSpec[4] secondArrow = lineSpec[5] aLine = self.theRoot.add( gnomecanvas.CanvasLine,points=[X1,Y1,X2,Y2], width_units=lineSpec[ 6 ], fill_color_gdk = aGdkColor, first_arrowhead = firstArrow, last_arrowhead = secondArrow,arrow_shape_a=5, arrow_shape_b=5, arrow_shape_c=5 ) self.addHandlers( aLine, aDescriptor[ SD_NAME ] ) self.shapeMap[ aDescriptor[ SD_NAME ] ] = aLine def changeLineColor ( self, shapeName, aColor ): aShape = self.shapeMap[ shapeName ] aShape.set_property('fill_color_gdk', aColor ) def changeLineColorB ( self, shapeName, aColor ): aShape = self.shapeMap[ shapeName ] aShape.set_property('outline_color_gdk', aColor ) def createText( self, aDescriptor ): textSpec = aDescriptor[SD_SPECIFIC] (X1, Y1) = ( textSpec[TEXT_ABSX], textSpec[TEXT_ABSY] ) aGdkColor = self.getGdkColor( aDescriptor ) aText = ResizeableText( self.theRoot, self.theCanvas, X1, Y1, aGdkColor, textSpec[TEXT_TEXT], gtk.ANCHOR_NW ) self.addHandlers( aText, aDescriptor[ SD_NAME ] ) self.shapeMap[ aDescriptor[ SD_NAME ] ] = aText def redrawLine( self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] aSpecific = aDescriptor[ SD_SPECIFIC ] x1 = aSpecific[0] y1 = aSpecific[1] x2 = aSpecific[2] y2 = aSpecific[3] hasFirstArrow = aSpecific[4] hasLastArrow = aSpecific[5] aShape.set_property( 'points', (x1, y1, x2, y2) ) aShape.set_property('first_arrowhead', hasFirstArrow ) aShape.set_property('last_arrowhead', hasLastArrow ) def redrawBpath( self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] pathdef = aDescriptor[ SD_SPECIFIC ][BPATH_PATHDEF] pd=gnomecanvas.path_def_new(pathdef) aShape.set_bpath(pd) def redrawText( self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] aSpecific = aDescriptor[ SD_SPECIFIC ] x = aSpecific[TEXT_ABSX] y = aSpecific[TEXT_ABSY] aShape.set_property( 'x', x ) 
aShape.set_property( 'y', y ) def renameText (self, aDescriptor ): aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ] aSpecific = aDescriptor[ SD_SPECIFIC ] label = aSpecific[ TEXT_TEXT ] aShape.set_property( 'text', label ) def getGdkColor( self, aDescriptor ): aColorType = aDescriptor[ SD_COLOR ] if aColorType == SD_FILL: queryProp = OB_FILL_COLOR elif aColorType == CV_TEXT: queryProp = OB_TEXT_COLOR anRGBColor = self.parentObject.getProperty( queryProp ) return self.graphUtils.getGdkColorByRGB( anRGBColor ) def __sortByZOrder ( self, desclist ): keys = desclist.keys() fn = lambda x, y: ( x[SD_Z] < y[SD_Z] ) - ( y[SD_Z] < x[SD_Z] ) keys.sort(fn) def leftClick( self, shapeName, x, y, shift_pressed = False ): # usually select self.parentObject.doSelect( shift_pressed ) if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_ARROWHEAD: self.changeCursor( shapeName, x, y, True ) def rightClick ( self, shapeName, x, y, anEvent, shift ): # usually show menu if not self.parentObject.isSelected: self.parentObject.doSelect( shift ) self.parentObject.showMenu( anEvent) def getFirstDrag(self): return self.firstdrag def setFirstDrag(self,aValue): self.firstdrag=aValue def mouseDrag( self, shapeName, deltax, deltay, origx, origy ): # decide whether resize or move or draw arrow if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_MOVINGLINE: ''' if shapeName == SHAPE_TYPE_MULTIBCURVE_LINE: self.parentObject.getArrowType(SHAPE_TYPE_MULTIBCURVE_LINE) #Accessing BPATH_DEF now, the coords like above bpathDefcheCk = self.parentObject.theSD.theDescriptorList[SHAPE_TYPE_MULTIBCURVE_LINE][SD_SPECIFIC][BPATH_PATHDEF] self.parentObject.thePropertyMap[CO_CONTROL_POINTS] = bpathDefcheCk bpathDefcheCk[1][1] = deltax bpathDefcheCk[1][2] = deltay bpathDefcheCk[1][3] = deltax bpathDefcheCk[1][4] = deltay bpathDefcheCk[2][1] = deltax bpathDefcheCk[2][2] = deltay bpathDefcheCk[2][3] = deltax bpathDefcheCk[2][4] = deltay #bpathDefcheCk[2][1,2,3,4] = [deltax,deltay,deltax,deltay] ''' elif self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_ARROWHEAD: if not self.firstdrag: self.firstdrag=True self.parentObject.arrowheadDragged( shapeName,deltax, deltay, origx, origy) def checkConnection( self ): self.parentObject.checkConnection() def doubleClick( self, shapeName ): self.parentObject.popupEditor() def getShapeDescriptor( self, shapeName ): return self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptor( shapeName ) def addHandlers( self, canvasObject, aName ): canvasObject.connect('event', self.rect_event, aName ) def releaseButton( self, shapeName, x, y ): self.changeCursor( shapeName, x, y, False ) self.parentObject.mouseReleased( shapeName,x, y) def mouseEntered( self, shapeName, x, y ): self.changeCursor( shapeName, x, y ) def changeCursor( self, shapeName, x, y, buttonpressed = False): aFunction = self.getShapeDescriptor(shapeName)[SD_FUNCTION] aCursorType = self.parentObject.getCursorType( aFunction, x, y , buttonpressed) self.theCanvas.setCursor( aCursorType ) def rect_event( self, *args ): event = args[1] item = args[0] shapeName = args[2] if event.type == gtk.gdk.BUTTON_PRESS: if event.state&gtk.gdk.SHIFT_MASK == gtk.gdk.SHIFT_MASK: shift_press = True else: shift_press = False if event.button == 1: self.lastmousex = event.x self.lastmousey = event.y self.buttonpressed = True self.leftClick( shapeName, event.x, event.y, shift_press ) elif event.button == 3: self.rightClick(shapeName, event.x, event.y, event, shift_press ) elif event.type == gtk.gdk.BUTTON_RELEASE: if event.button == 1: 
self.buttonpressed = False self.releaseButton(shapeName, event.x, event.y ) elif event.type == gtk.gdk.MOTION_NOTIFY: self.buttonpressed=(event.state&gtk.gdk.BUTTON1_MASK)>0 if not self.buttonpressed: return oldx = self.lastmousex oldy = self.lastmousey deltax = event.x - oldx deltay = event.y - oldy self.lastmousex = event.x self.lastmousey = event.y self.mouseDrag( shapeName, deltax, deltay, oldx, oldy ) elif event.type == gtk.gdk._2BUTTON_PRESS: if event.button == 1: self.doubleClick( shapeName ) elif event.type == gtk.gdk.ENTER_NOTIFY: self.mouseEntered( shapeName, event.x, event.y )
for aShapeName in self.shapeMap.keys(): self.shapeMap[ aShapeName ].destroy()
identifier_body
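This fourth ComplexLine.py record completes the picture of rect_event, which dispatches raw GDK events on event type and button number while tracking drag state between motion callbacks. The control flow is easier to audit as an exhaustive match; the sketch below re-expresses the dispatch shape with invented event and widget types. It is not a real GTK binding, only an illustration of the pattern.

enum Event {
    ButtonPress { button: u8, x: f64, y: f64, shift: bool },
    ButtonRelease { button: u8, x: f64, y: f64 },
    Motion { x: f64, y: f64, button1_held: bool },
    DoubleClick { button: u8 },
    Enter { x: f64, y: f64 },
}

struct LineWidget {
    last: (f64, f64),
    pressed: bool,
}

impl LineWidget {
    fn dispatch(&mut self, ev: Event) {
        match ev {
            Event::ButtonPress { button: 1, x, y, shift } => {
                self.last = (x, y); // remember where the drag started
                self.pressed = true;
                println!("left click at ({x}, {y}), shift={shift}");
            }
            Event::ButtonPress { button: 3, x, y, .. } => println!("menu at ({x}, {y})"),
            Event::ButtonRelease { button: 1, x, y } => {
                self.pressed = false;
                println!("released at ({x}, {y})");
            }
            Event::Motion { x, y, button1_held } if button1_held => {
                let (ox, oy) = self.last;
                self.last = (x, y);
                println!("drag delta ({}, {})", x - ox, y - oy);
            }
            Event::DoubleClick { button: 1 } => println!("open editor"),
            Event::Enter { .. } => println!("set cursor"),
            _ => {} // motion without button 1, other buttons: ignored
        }
    }
}

fn main() {
    let mut w = LineWidget { last: (0.0, 0.0), pressed: false };
    w.dispatch(Event::ButtonPress { button: 1, x: 10.0, y: 5.0, shift: false });
    w.dispatch(Event::Motion { x: 14.0, y: 9.0, button1_held: true });
    w.dispatch(Event::ButtonRelease { button: 1, x: 14.0, y: 9.0 });
}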
acceptor.rs
//! Future for mediating the processing of commands received from the //! CtlGateway in the Supervisor. use super::handler::CtlHandler; use crate::{ctl_gateway::server::MgrReceiver, manager::{action::ActionSender, ManagerState}}; use futures::{channel::oneshot, future::FutureExt, stream::{Stream, StreamExt}, task::{Context, Poll}}; use std::{pin::Pin, sync::Arc}; pub struct CtlAcceptor { /// Communication channel from the control gateway server. User /// interactions are received there and then sent here into the /// `CtlAcceptor` future for further processing. mgr_receiver: MgrReceiver, /// Reference to the Supervisor's main state. This is passed into /// handlers that need to access, e.g., what services are running, /// etc. state: Arc<ManagerState>, /// Signaling channel for the intention to shut down. A message /// received on this channel will cause the `CtlAcceptor` future /// stream to terminate. shutdown_trigger: oneshot::Receiver<()>, /// Communication channel back into the main Supervisor loop. This /// is passed into any generated command handlers as a way to /// send actions into the Supervisor. action_sender: ActionSender, } impl CtlAcceptor { pub fn new(state: Arc<ManagerState>, mgr_receiver: MgrReceiver, shutdown_trigger: oneshot::Receiver<()>, action_sender: ActionSender) -> Self { CtlAcceptor { mgr_receiver, state, shutdown_trigger, action_sender } } } impl Stream for CtlAcceptor { type Item = CtlHandler; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> { match self.shutdown_trigger.poll_unpin(cx) { Poll::Ready(Ok(())) => { info!("Signal received; stopping CtlAcceptor"); Poll::Ready(None) } Poll::Ready(Err(e)) =>
Poll::Pending => { match futures::ready!(self.mgr_receiver.poll_next_unpin(cx)) { Some(cmd) => { let task = CtlHandler::new(cmd, self.state.clone(), self.action_sender.clone()); Poll::Ready(Some(task)) } None => Poll::Ready(None), } } } } }
{ error!("Error polling CtlAcceptor shutdown trigger: {}", e); Poll::Ready(None) }
conditional_block
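The acceptor.rs records revolve around one pattern: a Stream whose poll_next consults a oneshot shutdown trigger before forwarding items from an inner channel, so that either a fired trigger or an error on it terminates the stream. Here is a self-contained sketch against today's futures crate; the channel names and String items are placeholders, and the real type additionally wraps each command in a CtlHandler.

use futures::{channel::{mpsc, oneshot},
              future::FutureExt,
              stream::{Stream, StreamExt}};
use std::{pin::Pin,
          task::{Context, Poll}};

/// A command stream that ends as soon as the shutdown trigger fires.
struct Acceptor {
    commands: mpsc::Receiver<String>,
    shutdown: oneshot::Receiver<()>,
}

impl Stream for Acceptor {
    type Item = String;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<String>> {
        let this = self.get_mut(); // fine: both fields are Unpin
        match this.shutdown.poll_unpin(cx) {
            // Fired or dropped: either way, stop accepting commands.
            Poll::Ready(_) => Poll::Ready(None),
            Poll::Pending => this.commands.poll_next_unpin(cx),
        }
    }
}

fn main() {
    let (mut cmd_tx, cmd_rx) = mpsc::channel(4);
    let (stop_tx, stop_rx) = oneshot::channel();
    let mut acceptor = Acceptor { commands: cmd_rx, shutdown: stop_rx };

    futures::executor::block_on(async {
        cmd_tx.try_send("status".to_string()).unwrap();
        assert_eq!(acceptor.next().await, Some("status".to_string()));
        stop_tx.send(()).unwrap();
        assert_eq!(acceptor.next().await, None); // trigger fired: stream is done
    });
}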
acceptor.rs
//! Future for mediating the processing of commands received from the //! CtlGateway in the Supervisor. use super::handler::CtlHandler; use crate::{ctl_gateway::server::MgrReceiver, manager::{action::ActionSender, ManagerState}}; use futures::{channel::oneshot, future::FutureExt, stream::{Stream, StreamExt}, task::{Context, Poll}}; use std::{pin::Pin, sync::Arc}; pub struct CtlAcceptor { /// Communication channel from the control gateway server. User /// interactions are received there and then sent here into the /// `CtlAcceptor` future for further processing. mgr_receiver: MgrReceiver, /// Reference to the Supervisor's main state. This is passed into /// handlers that need to access, e.g., what services are running, /// etc. state: Arc<ManagerState>, /// Signaling channel for the intention to shut down. A message /// received on this channel will cause the `CtlAcceptor` future /// stream to terminate. shutdown_trigger: oneshot::Receiver<()>, /// Communication channel back into the main Supervisor loop. This /// is passed into any generated command handlers as a way to /// send actions into the Supervisor. action_sender: ActionSender, } impl CtlAcceptor { pub fn new(state: Arc<ManagerState>, mgr_receiver: MgrReceiver, shutdown_trigger: oneshot::Receiver<()>, action_sender: ActionSender) -> Self { CtlAcceptor { mgr_receiver, state, shutdown_trigger, action_sender } } } impl Stream for CtlAcceptor { type Item = CtlHandler; fn
(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        match self.shutdown_trigger.poll_unpin(cx) {
            Poll::Ready(Ok(())) => {
                info!("Signal received; stopping CtlAcceptor");
                Poll::Ready(None)
            }
            Poll::Ready(Err(e)) => {
                error!("Error polling CtlAcceptor shutdown trigger: {}", e);
                Poll::Ready(None)
            }
            Poll::Pending => {
                match futures::ready!(self.mgr_receiver.poll_next_unpin(cx)) {
                    Some(cmd) => {
                        let task = CtlHandler::new(cmd, self.state.clone(), self.action_sender.clone());
                        Poll::Ready(Some(task))
                    }
                    None => Poll::Ready(None),
                }
            }
        }
    }
}
poll_next
identifier_name
acceptor.rs
//! Future for mediating the processing of commands received from the
//! CtlGateway in the Supervisor.

use super::handler::CtlHandler;
use crate::{ctl_gateway::server::MgrReceiver,
            manager::{action::ActionSender,
                      ManagerState}};
use futures::{channel::oneshot,
              future::FutureExt,
              stream::{Stream,
                       StreamExt},
              task::{Context,
                     Poll}};
use std::{pin::Pin,
          sync::Arc};

pub struct CtlAcceptor {
    /// Communication channel from the control gateway server. User
    /// interactions are received there and then sent here into the
    /// `CtlAcceptor` future for further processing.
    mgr_receiver: MgrReceiver,
    /// Reference to the Supervisor's main state. This is passed into
    /// handlers that need to access, e.g., what services are running,
    /// etc.
    state: Arc<ManagerState>,
    /// Signaling channel for the intention to shut down. A message
    /// received on this channel will cause the `CtlAcceptor` future
    /// stream to terminate.
    shutdown_trigger: oneshot::Receiver<()>,
    /// Communication channel back into the main Supervisor loop. This
    /// is passed into any generated command handlers as a way to
    /// send actions into the Supervisor.
    action_sender: ActionSender,
}

impl CtlAcceptor {
    pub fn new(state: Arc<ManagerState>,
               shutdown_trigger: oneshot::Receiver<()>,
               action_sender: ActionSender)
               -> Self {
        CtlAcceptor { mgr_receiver,
                      state,
                      shutdown_trigger,
                      action_sender }
    }
}

impl Stream for CtlAcceptor {
    type Item = CtlHandler;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        match self.shutdown_trigger.poll_unpin(cx) {
            Poll::Ready(Ok(())) => {
                info!("Signal received; stopping CtlAcceptor");
                Poll::Ready(None)
            }
            Poll::Ready(Err(e)) => {
                error!("Error polling CtlAcceptor shutdown trigger: {}", e);
                Poll::Ready(None)
            }
            Poll::Pending => {
                match futures::ready!(self.mgr_receiver.poll_next_unpin(cx)) {
                    Some(cmd) => {
                        let task = CtlHandler::new(cmd, self.state.clone(), self.action_sender.clone());
                        Poll::Ready(Some(task))
                    }
                    None => Poll::Ready(None),
                }
            }
        }
    }
}
mgr_receiver: MgrReceiver,
random_line_split
makegrid.py
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np

fig = plt.figure(figsize=(9, 3))

map = Basemap(width=12000000,height=8000000,
            resolution='l',projection='stere',
            lat_ts=50,lat_0=50,lon_0=-107.)
ax.set_title('The regular grid')
map.scatter(x, y, marker='o')
map.drawcoastlines()

ax = fig.add_subplot(122)
ax.set_title('Projection changed')

map = Basemap(width=12000000,height=9000000,projection='aeqd',
            lat_0=50.,lon_0=-105.)

x, y = map(lons, lats)

map.scatter(x, y, marker='o')
map.drawcoastlines()

plt.show()
lons, lats, x, y = map.makegrid(30, 30, returnxy=True)

ax = fig.add_subplot(121)
random_line_split
tar.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Handles tarring up documentation directories."""

import subprocess

from docuploader import shell


def
(directory: str, destination: str) -> subprocess.CompletedProcess:
    """Compress the given directory into the tarfile at destination."""
    # Note: we don't use the stdlib's "tarfile" module for performance reasons.
    # While it can handle creating tarfiles, its not as efficient on large
    # numbers of files like the tar command.
    return shell.run(
        [
            "tar",
            "--create",
            f"--directory={directory}",
            f"--file={destination}",
            # Treat a colon in the filename as part of the filename,
            # not an indication of a remote file. This is required in order to
            # handle canonical filenames on Windows.
            "--force-local",
            "--gzip",
            "--verbose",
            ".",
        ],
        hide_output=False,
    )


def decompress(archive: str, destination: str) -> subprocess.CompletedProcess:
    """Decompress the given tarfile to the destination."""
    # Note: we don't use the stdlib's "tarfile" module for performance reasons.
    # While it can handle creating tarfiles, its not as efficient on large
    # numbers of files like the tar command.
    return shell.run(
        [
            "tar",
            "--extract",
            f"--directory={destination}",
            f"--file={archive}",
            "--gzip",
            "--verbose",
        ],
        hide_output=True,
    )
compress
identifier_name
tar.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Handles tarring up documentation directories."""

import subprocess

from docuploader import shell


def compress(directory: str, destination: str) -> subprocess.CompletedProcess:
    """Compress the given directory into the tarfile at destination."""
    # Note: we don't use the stdlib's "tarfile" module for performance reasons.
    # While it can handle creating tarfiles, its not as efficient on large
    # numbers of files like the tar command.
    return shell.run(
        [
            "tar",
            "--create",
            f"--directory={directory}",
            f"--file={destination}",
            # Treat a colon in the filename as part of the filename,
            # not an indication of a remote file. This is required in order to
            # handle canonical filenames on Windows.
            "--force-local",
            "--gzip",
            "--verbose",
            ".",
        ],
        hide_output=False,
    )


def decompress(archive: str, destination: str) -> subprocess.CompletedProcess:
"""Decompress the given tarfile to the destination.""" # Note: we don't use the stdlib's "tarfile" module for performance reasons. # While it can handle creating tarfiles, its not as efficient on large # numbers of files like the tar command. return shell.run( [ "tar", "--extract", f"--directory={destination}", f"--file={archive}", "--gzip", "--verbose", ], hide_output=True, )
identifier_body
tar.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Handles tarring up documentation directories."""

import subprocess

from docuploader import shell


def compress(directory: str, destination: str) -> subprocess.CompletedProcess:
    """Compress the given directory into the tarfile at destination."""
    # Note: we don't use the stdlib's "tarfile" module for performance reasons.
    # While it can handle creating tarfiles, its not as efficient on large
    # numbers of files like the tar command.
    return shell.run(
        [
            "tar",
            "--create",
            f"--directory={directory}",
            f"--file={destination}",
            # Treat a colon in the filename as part of the filename,
            # not an indication of a remote file. This is required in order to
            # handle canonical filenames on Windows.
            "--force-local",
            "--gzip",
            "--verbose",
            ".",
        ],
        hide_output=False,
    )
    # Note: we don't use the stdlib's "tarfile" module for performance reasons.
    # While it can handle creating tarfiles, its not as efficient on large
    # numbers of files like the tar command.
    return shell.run(
        [
            "tar",
            "--extract",
            f"--directory={destination}",
            f"--file={archive}",
            "--gzip",
            "--verbose",
        ],
        hide_output=True,
    )
def decompress(archive: str, destination: str) -> subprocess.CompletedProcess:
    """Decompress the given tarfile to the destination."""
random_line_split
clean_data_for_academic_analysis.py
#!/usr/bin/python

# This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

### The purpose of this script is to create a version of the database
### that helps a U Mass Amherst researcher look through the OpenHatch
### data and perform text classification and other text analysis.

### To protect our users' privacy, we:
### * set the password column to the empty string
### * set the email column to the empty string
### * delete (from the database) any PortfolioEntry that is_deleted
### * delete (from the database) any Citation that is_deleted
### * delete all WebResponse objects

import mysite.profile.models
import django.contrib.auth.models

### set the email and password columns to the empty string
for user in django.contrib.auth.models.User.objects.all():
    user.email = ''
    user.password = ''
    user.save()

### delete PortfolioEntry instances that is_deleted
for pfe in mysite.profile.models.PortfolioEntry.objects.all():
    if pfe.is_deleted:
        pfe.delete()

### delete Citation instances that is_deleted
for citation in mysite.profile.models.Citation.objects.all():
    if citation.is_deleted:
### delete all WebResponse objects
for wr in mysite.customs.models.WebResponse.objects.all():
    wr.delete()
citation.delete()
conditional_block
clean_data_for_academic_analysis.py
#!/usr/bin/python

# This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

### The purpose of this script is to create a version of the database
### that helps a U Mass Amherst researcher look through the OpenHatch
### data and perform text classification and other text analysis.
### * delete (from the database) any PortfolioEntry that is_deleted
### * delete (from the database) any Citation that is_deleted
### * delete all WebResponse objects

import mysite.profile.models
import django.contrib.auth.models

### set the email and password columns to the empty string
for user in django.contrib.auth.models.User.objects.all():
    user.email = ''
    user.password = ''
    user.save()

### delete PortfolioEntry instances that is_deleted
for pfe in mysite.profile.models.PortfolioEntry.objects.all():
    if pfe.is_deleted:
        pfe.delete()

### delete Citation instances that is_deleted
for citation in mysite.profile.models.Citation.objects.all():
    if citation.is_deleted:
        citation.delete()

### delete all WebResponse objects
for wr in mysite.customs.models.WebResponse.objects.all():
    wr.delete()
### To protect our users' privacy, we:
### * set the password column to the empty string
### * set the email column to the empty string
random_line_split
node.js
/*jshint unused:false */

function NodeController( $scope ){

    this.initialize= function () {
        $scope.calculateImagePosition();
    };

    $scope.calculateImagePosition = function(){
        var depth = $scope.node.depth;
        else {
            width++;
        }

        //console.log( "depth:" + depth + " width:" + width );

        $scope.cx = width * 40;
        $scope.cy = 30 + depth * 40;
        $scope.r = 14;
        $scope.x = $scope.cx - 15;
        $scope.y = $scope.cy + 2;
        $scope.lineColor = '#FF0000';
        //$scope.test = 1;
        console.log( $scope.cx );

        $scope.treeWidth[depth] = width;
    };

    this.initialize();
}
        var width = $scope.treeWidth[depth];
        if( width === undefined ) {
            width = 1;
        }
random_line_split
node.js
/*jshint unused:false */

function NodeController( $scope ){

    this.initialize= function () {
        $scope.calculateImagePosition();
    };

    $scope.calculateImagePosition = function(){
        var depth = $scope.node.depth;
        var width = $scope.treeWidth[depth];
        if( width === undefined ) {
            width = 1;
        }
        else
//console.log( "depth:" + depth + " width:" + width ); $scope.cx = width * 40; $scope.cy = 30 + depth * 40; $scope.r = 14; $scope.x = $scope.cx - 15; $scope.y = $scope.cy + 2; $scope.lineColor = '#FF0000'; //$scope.test = 1; console.log( $scope.cx ); $scope.treeWidth[depth] = width; }; this.initialize(); }
{
            width++;
        }
conditional_block
node.js
/*jshint unused:false */

function NodeController( $scope )
{

    this.initialize= function () {
        $scope.calculateImagePosition();
    };

    $scope.calculateImagePosition = function(){
        var depth = $scope.node.depth;
        var width = $scope.treeWidth[depth];
        if( width === undefined ) {
            width = 1;
        }
        else {
            width++;
        }

        //console.log( "depth:" + depth + " width:" + width );

        $scope.cx = width * 40;
        $scope.cy = 30 + depth * 40;
        $scope.r = 14;
        $scope.x = $scope.cx - 15;
        $scope.y = $scope.cy + 2;
        $scope.lineColor = '#FF0000';
        //$scope.test = 1;
        console.log( $scope.cx );

        $scope.treeWidth[depth] = width;
    };

    this.initialize();
}
identifier_body
node.js
/*jshint unused:false */

function
( $scope ){

    this.initialize= function () {
        $scope.calculateImagePosition();
    };

    $scope.calculateImagePosition = function(){
        var depth = $scope.node.depth;
        var width = $scope.treeWidth[depth];
        if( width === undefined ) {
            width = 1;
        }
        else {
            width++;
        }

        //console.log( "depth:" + depth + " width:" + width );

        $scope.cx = width * 40;
        $scope.cy = 30 + depth * 40;
        $scope.r = 14;
        $scope.x = $scope.cx - 15;
        $scope.y = $scope.cy + 2;
        $scope.lineColor = '#FF0000';
        //$scope.test = 1;
        console.log( $scope.cx );

        $scope.treeWidth[depth] = width;
    };

    this.initialize();
}
NodeController
identifier_name
workspace-stacks.controller.ts
/*
 * Copyright (c) 2015-2017 Codenvy, S.A.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Codenvy, S.A. - initial API and implementation
 */
'use strict';

import {CheWorkspace} from '../../../../components/api/che-workspace.factory';
import {ComposeEnvironmentManager} from '../../../../components/api/environment/compose-environment-manager';
import {CheEnvironmentRegistry} from '../../../../components/api/environment/che-environment-registry.factory';
import {CheStack} from '../../../../components/api/che-stack.factory';

/**
 * @ngdoc controller
 * @name workspaces.workspace.stacks.controller:WorkspaceStacksController
 * @description This class is handling the controller for stacks selection
 * @author Oleksii Kurinnyi
 */

const DEFAULT_WORKSPACE_RAM: number = 2 * Math.pow(1024, 3);

export class WorkspaceStacksController {
  $scope: ng.IScope;
  cheStack: CheStack;
  cheWorkspace: CheWorkspace;
  composeEnvironmentManager: ComposeEnvironmentManager;

  recipeUrl: string;
  recipeScript: string;
  recipeFormat: string;

  stack: any = null;
  isCustomStack: boolean = false;
  selectSourceOption: string;

  tabName: string;
  environmentName: string;
  workspaceName: string;
  workspaceImportedRecipe: {
    type: string,
    content: string,
    location: string
  };

  workspaceStackOnChange: Function;

  /**
   * Default constructor that is using resource
   * @ngInject for Dependency injection
   */
  constructor($scope: ng.IScope, cheWorkspace: CheWorkspace, cheEnvironmentRegistry: CheEnvironmentRegistry, cheStack: CheStack) {
    this.cheWorkspace = cheWorkspace;
    this.cheStack = cheStack;
    this.composeEnvironmentManager = cheEnvironmentRegistry.getEnvironmentManager('compose');

    $scope.$watch(() => {
      return this.recipeScript;
    }, () => {
      if (this.isCustomStack) {
        this.cheStackLibrarySelecter(null);
      }
    });
    $scope.$watch(() => {
      return this.recipeUrl;
    }, () => {
      if (this.isCustomStack) {
        this.cheStackLibrarySelecter(null);
      }
    });
    $scope.$watch(() => {
      return this.recipeFormat;
    }, () => {
      if (this.isCustomStack) {
        this.cheStackLibrarySelecter(null);
      }
    });

    $scope.$watch(() => {
      return this.workspaceImportedRecipe;
    }, () => {
      if (!this.workspaceImportedRecipe) {
        return;
      }
      this.initStackSelecter();
    }, true);
  }

  /**
   * Initialize stack selector widget.
   */
  initStackSelecter(): void {
    let type = this.workspaceImportedRecipe.type;
    if (this.workspaceImportedRecipe.location && type !== 'dockerimage') {
      this.recipeFormat = type;
      this.recipeUrl = this.workspaceImportedRecipe.location;
      delete this.recipeScript;
    } else {
      if (type === 'dockerimage') {
        type = 'dockerfile';
        this.recipeScript = 'FROM ' + this.workspaceImportedRecipe.location;
      } else {
        this.recipeScript = this.workspaceImportedRecipe.content;
      }
      this.recipeFormat = type;
      delete this.recipeUrl;
    }
  }

  /**
   * Callback when stack has been set.
   *
   * @param stack {object} the selected stack
   */
  cheStackLibrarySelecter(stack: any): void {
    if (stack) {
      this.isCustomStack = false;
      this.recipeUrl = null;
      this.recipeScript = null;
    } else {
      this.isCustomStack = true;
    }
    this.stack = stack;

    let config = this.buildWorkspaceConfig();
    this.workspaceStackOnChange({config: config, stackId: this.stack ? this.stack.id : ''});
  }

  /**
   * Builds workspace config.
   *
   * @returns {config}
   */
  buildWorkspaceConfig(): any
  /**
   * Detects machine source from pointed stack.
   *
   * @param stack {object} to retrieve described source
   * @returns {source} machine source config
   */
  getSourceFromStack(stack: any): any {
    let source: any = {};
    source.type = 'dockerfile';

    switch (stack.source.type.toLowerCase()) {
      case 'image':
        source.content = 'FROM ' + stack.source.origin;
        break;
      case 'dockerfile':
        source.content = stack.source.origin;
        break;
      default:
        throw 'Not implemented';
    }

    return source;
  }
}
{
    let stackWorkspaceConfig;
    if (this.stack) {
      stackWorkspaceConfig = this.stack.workspaceConfig;
    } else if (!this.stack) {
      let stackTemplate = this.cheStack.getStackTemplate(),
          defEnvName = stackTemplate.workspaceConfig.defaultEnv,
          defEnvironment = stackTemplate.workspaceConfig.environments[defEnvName],
          machines = this.composeEnvironmentManager.getMachines(defEnvironment),
          environment = this.composeEnvironmentManager.getEnvironment(defEnvironment, machines);
      stackWorkspaceConfig = {
        defaultEnv: this.environmentName,
        environments: {
          [this.environmentName]: environment
        }
      };
    }

    return this.cheWorkspace.formWorkspaceConfig(stackWorkspaceConfig, this.workspaceName, null, DEFAULT_WORKSPACE_RAM);
  }
identifier_body
workspace-stacks.controller.ts
/*
 * Copyright (c) 2015-2017 Codenvy, S.A.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Codenvy, S.A. - initial API and implementation
 */
'use strict';

import {CheWorkspace} from '../../../../components/api/che-workspace.factory';
import {ComposeEnvironmentManager} from '../../../../components/api/environment/compose-environment-manager';
import {CheEnvironmentRegistry} from '../../../../components/api/environment/che-environment-registry.factory';
import {CheStack} from '../../../../components/api/che-stack.factory';

/**
 * @ngdoc controller
 * @name workspaces.workspace.stacks.controller:WorkspaceStacksController
 * @description This class is handling the controller for stacks selection
 * @author Oleksii Kurinnyi
 */

const DEFAULT_WORKSPACE_RAM: number = 2 * Math.pow(1024, 3);

export class WorkspaceStacksController {
  $scope: ng.IScope;
  cheStack: CheStack;
  cheWorkspace: CheWorkspace;
  composeEnvironmentManager: ComposeEnvironmentManager;

  recipeUrl: string;
  recipeScript: string;
  recipeFormat: string;

  stack: any = null;
  isCustomStack: boolean = false;
  selectSourceOption: string;

  tabName: string;
  environmentName: string;
  workspaceName: string;
  workspaceImportedRecipe: {
    type: string,
    content: string,
    location: string
  };

  workspaceStackOnChange: Function;

  /**
   * Default constructor that is using resource
   * @ngInject for Dependency injection
   */
  constructor($scope: ng.IScope, cheWorkspace: CheWorkspace, cheEnvironmentRegistry: CheEnvironmentRegistry, cheStack: CheStack) {
    this.cheWorkspace = cheWorkspace;
    this.cheStack = cheStack;
    this.composeEnvironmentManager = cheEnvironmentRegistry.getEnvironmentManager('compose');

    $scope.$watch(() => {
      return this.recipeScript;
    }, () => {
      if (this.isCustomStack) {
        this.cheStackLibrarySelecter(null);
      }
    });
    $scope.$watch(() => {
      return this.recipeUrl;
    }, () => {
      if (this.isCustomStack) {
        this.cheStackLibrarySelecter(null);
      }
    });
    $scope.$watch(() => {
      return this.recipeFormat;
    }, () => {
      if (this.isCustomStack)
    });

    $scope.$watch(() => {
      return this.workspaceImportedRecipe;
    }, () => {
      if (!this.workspaceImportedRecipe) {
        return;
      }
      this.initStackSelecter();
    }, true);
  }

  /**
   * Initialize stack selector widget.
   */
  initStackSelecter(): void {
    let type = this.workspaceImportedRecipe.type;
    if (this.workspaceImportedRecipe.location && type !== 'dockerimage') {
      this.recipeFormat = type;
      this.recipeUrl = this.workspaceImportedRecipe.location;
      delete this.recipeScript;
    } else {
      if (type === 'dockerimage') {
        type = 'dockerfile';
        this.recipeScript = 'FROM ' + this.workspaceImportedRecipe.location;
      } else {
        this.recipeScript = this.workspaceImportedRecipe.content;
      }
      this.recipeFormat = type;
      delete this.recipeUrl;
    }
  }

  /**
   * Callback when stack has been set.
   *
   * @param stack {object} the selected stack
   */
  cheStackLibrarySelecter(stack: any): void {
    if (stack) {
      this.isCustomStack = false;
      this.recipeUrl = null;
      this.recipeScript = null;
    } else {
      this.isCustomStack = true;
    }
    this.stack = stack;

    let config = this.buildWorkspaceConfig();
    this.workspaceStackOnChange({config: config, stackId: this.stack ? this.stack.id : ''});
  }

  /**
   * Builds workspace config.
   *
   * @returns {config}
   */
  buildWorkspaceConfig(): any {
    let stackWorkspaceConfig;
    if (this.stack) {
      stackWorkspaceConfig = this.stack.workspaceConfig;
    } else if (!this.stack) {
      let stackTemplate = this.cheStack.getStackTemplate(),
          defEnvName = stackTemplate.workspaceConfig.defaultEnv,
          defEnvironment = stackTemplate.workspaceConfig.environments[defEnvName],
          machines = this.composeEnvironmentManager.getMachines(defEnvironment),
          environment = this.composeEnvironmentManager.getEnvironment(defEnvironment, machines);
      stackWorkspaceConfig = {
        defaultEnv: this.environmentName,
        environments: {
          [this.environmentName]: environment
        }
      };
    }

    return this.cheWorkspace.formWorkspaceConfig(stackWorkspaceConfig, this.workspaceName, null, DEFAULT_WORKSPACE_RAM);
  }

  /**
   * Detects machine source from pointed stack.
   *
   * @param stack {object} to retrieve described source
   * @returns {source} machine source config
   */
  getSourceFromStack(stack: any): any {
    let source: any = {};
    source.type = 'dockerfile';

    switch (stack.source.type.toLowerCase()) {
      case 'image':
        source.content = 'FROM ' + stack.source.origin;
        break;
      case 'dockerfile':
        source.content = stack.source.origin;
        break;
      default:
        throw 'Not implemented';
    }

    return source;
  }
}
{
        this.cheStackLibrarySelecter(null);
      }
conditional_block
workspace-stacks.controller.ts
/*
 * Copyright (c) 2015-2017 Codenvy, S.A.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Codenvy, S.A. - initial API and implementation
 */
'use strict';

import {CheWorkspace} from '../../../../components/api/che-workspace.factory';
import {ComposeEnvironmentManager} from '../../../../components/api/environment/compose-environment-manager';
import {CheEnvironmentRegistry} from '../../../../components/api/environment/che-environment-registry.factory';
import {CheStack} from '../../../../components/api/che-stack.factory';

/**
 * @ngdoc controller
 * @name workspaces.workspace.stacks.controller:WorkspaceStacksController
 * @description This class is handling the controller for stacks selection
 * @author Oleksii Kurinnyi
 */

const DEFAULT_WORKSPACE_RAM: number = 2 * Math.pow(1024, 3);

export class WorkspaceStacksController {
  $scope: ng.IScope;
  cheStack: CheStack;
  cheWorkspace: CheWorkspace;
  composeEnvironmentManager: ComposeEnvironmentManager;

  recipeUrl: string;
  recipeScript: string;
  recipeFormat: string;

  stack: any = null;
  isCustomStack: boolean = false;
  selectSourceOption: string;

  tabName: string;
  environmentName: string;
  workspaceName: string;
  workspaceImportedRecipe: {
    type: string,
    content: string,
    location: string
  };

  workspaceStackOnChange: Function;

  /**
   * Default constructor that is using resource
   * @ngInject for Dependency injection
   */
($scope: ng.IScope, cheWorkspace: CheWorkspace, cheEnvironmentRegistry: CheEnvironmentRegistry, cheStack: CheStack) {
    this.cheWorkspace = cheWorkspace;
    this.cheStack = cheStack;
    this.composeEnvironmentManager = cheEnvironmentRegistry.getEnvironmentManager('compose');

    $scope.$watch(() => {
      return this.recipeScript;
    }, () => {
      if (this.isCustomStack) {
        this.cheStackLibrarySelecter(null);
      }
    });
    $scope.$watch(() => {
      return this.recipeUrl;
    }, () => {
      if (this.isCustomStack) {
        this.cheStackLibrarySelecter(null);
      }
    });
    $scope.$watch(() => {
      return this.recipeFormat;
    }, () => {
      if (this.isCustomStack) {
        this.cheStackLibrarySelecter(null);
      }
    });

    $scope.$watch(() => {
      return this.workspaceImportedRecipe;
    }, () => {
      if (!this.workspaceImportedRecipe) {
        return;
      }
      this.initStackSelecter();
    }, true);
  }

  /**
   * Initialize stack selector widget.
   */
  initStackSelecter(): void {
    let type = this.workspaceImportedRecipe.type;
    if (this.workspaceImportedRecipe.location && type !== 'dockerimage') {
      this.recipeFormat = type;
      this.recipeUrl = this.workspaceImportedRecipe.location;
      delete this.recipeScript;
    } else {
      if (type === 'dockerimage') {
        type = 'dockerfile';
        this.recipeScript = 'FROM ' + this.workspaceImportedRecipe.location;
      } else {
        this.recipeScript = this.workspaceImportedRecipe.content;
      }
      this.recipeFormat = type;
      delete this.recipeUrl;
    }
  }

  /**
   * Callback when stack has been set.
   *
   * @param stack {object} the selected stack
   */
  cheStackLibrarySelecter(stack: any): void {
    if (stack) {
      this.isCustomStack = false;
      this.recipeUrl = null;
      this.recipeScript = null;
    } else {
      this.isCustomStack = true;
    }
    this.stack = stack;

    let config = this.buildWorkspaceConfig();
    this.workspaceStackOnChange({config: config, stackId: this.stack ? this.stack.id : ''});
  }

  /**
   * Builds workspace config.
   *
   * @returns {config}
   */
  buildWorkspaceConfig(): any {
    let stackWorkspaceConfig;
    if (this.stack) {
      stackWorkspaceConfig = this.stack.workspaceConfig;
    } else if (!this.stack) {
      let stackTemplate = this.cheStack.getStackTemplate(),
          defEnvName = stackTemplate.workspaceConfig.defaultEnv,
          defEnvironment = stackTemplate.workspaceConfig.environments[defEnvName],
          machines = this.composeEnvironmentManager.getMachines(defEnvironment),
          environment = this.composeEnvironmentManager.getEnvironment(defEnvironment, machines);
      stackWorkspaceConfig = {
        defaultEnv: this.environmentName,
        environments: {
          [this.environmentName]: environment
        }
      };
    }

    return this.cheWorkspace.formWorkspaceConfig(stackWorkspaceConfig, this.workspaceName, null, DEFAULT_WORKSPACE_RAM);
  }

  /**
   * Detects machine source from pointed stack.
   *
   * @param stack {object} to retrieve described source
   * @returns {source} machine source config
   */
  getSourceFromStack(stack: any): any {
    let source: any = {};
    source.type = 'dockerfile';

    switch (stack.source.type.toLowerCase()) {
      case 'image':
        source.content = 'FROM ' + stack.source.origin;
        break;
      case 'dockerfile':
        source.content = stack.source.origin;
        break;
      default:
        throw 'Not implemented';
    }

    return source;
  }
}
constructor
identifier_name
workspace-stacks.controller.ts
/*
 * Copyright (c) 2015-2017 Codenvy, S.A.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *   Codenvy, S.A. - initial API and implementation
 */
'use strict';

import {CheWorkspace} from '../../../../components/api/che-workspace.factory';
import {ComposeEnvironmentManager} from '../../../../components/api/environment/compose-environment-manager';
import {CheEnvironmentRegistry} from '../../../../components/api/environment/che-environment-registry.factory';
import {CheStack} from '../../../../components/api/che-stack.factory';

/**
 * @ngdoc controller
 * @name workspaces.workspace.stacks.controller:WorkspaceStacksController
 * @description This class is handling the controller for stacks selection
 * @author Oleksii Kurinnyi
 */
  cheStack: CheStack;
  cheWorkspace: CheWorkspace;
  composeEnvironmentManager: ComposeEnvironmentManager;

  recipeUrl: string;
  recipeScript: string;
  recipeFormat: string;

  stack: any = null;
  isCustomStack: boolean = false;
  selectSourceOption: string;

  tabName: string;
  environmentName: string;
  workspaceName: string;
  workspaceImportedRecipe: {
    type: string,
    content: string,
    location: string
  };

  workspaceStackOnChange: Function;

  /**
   * Default constructor that is using resource
   * @ngInject for Dependency injection
   */
  constructor($scope: ng.IScope, cheWorkspace: CheWorkspace, cheEnvironmentRegistry: CheEnvironmentRegistry, cheStack: CheStack) {
    this.cheWorkspace = cheWorkspace;
    this.cheStack = cheStack;
    this.composeEnvironmentManager = cheEnvironmentRegistry.getEnvironmentManager('compose');

    $scope.$watch(() => {
      return this.recipeScript;
    }, () => {
      if (this.isCustomStack) {
        this.cheStackLibrarySelecter(null);
      }
    });
    $scope.$watch(() => {
      return this.recipeUrl;
    }, () => {
      if (this.isCustomStack) {
        this.cheStackLibrarySelecter(null);
      }
    });
    $scope.$watch(() => {
      return this.recipeFormat;
    }, () => {
      if (this.isCustomStack) {
        this.cheStackLibrarySelecter(null);
      }
    });

    $scope.$watch(() => {
      return this.workspaceImportedRecipe;
    }, () => {
      if (!this.workspaceImportedRecipe) {
        return;
      }
      this.initStackSelecter();
    }, true);
  }

  /**
   * Initialize stack selector widget.
   */
  initStackSelecter(): void {
    let type = this.workspaceImportedRecipe.type;
    if (this.workspaceImportedRecipe.location && type !== 'dockerimage') {
      this.recipeFormat = type;
      this.recipeUrl = this.workspaceImportedRecipe.location;
      delete this.recipeScript;
    } else {
      if (type === 'dockerimage') {
        type = 'dockerfile';
        this.recipeScript = 'FROM ' + this.workspaceImportedRecipe.location;
      } else {
        this.recipeScript = this.workspaceImportedRecipe.content;
      }
      this.recipeFormat = type;
      delete this.recipeUrl;
    }
  }

  /**
   * Callback when stack has been set.
   *
   * @param stack {object} the selected stack
   */
  cheStackLibrarySelecter(stack: any): void {
    if (stack) {
      this.isCustomStack = false;
      this.recipeUrl = null;
      this.recipeScript = null;
    } else {
      this.isCustomStack = true;
    }
    this.stack = stack;

    let config = this.buildWorkspaceConfig();
    this.workspaceStackOnChange({config: config, stackId: this.stack ? this.stack.id : ''});
  }

  /**
   * Builds workspace config.
   *
   * @returns {config}
   */
  buildWorkspaceConfig(): any {
    let stackWorkspaceConfig;
    if (this.stack) {
      stackWorkspaceConfig = this.stack.workspaceConfig;
    } else if (!this.stack) {
      let stackTemplate = this.cheStack.getStackTemplate(),
          defEnvName = stackTemplate.workspaceConfig.defaultEnv,
          defEnvironment = stackTemplate.workspaceConfig.environments[defEnvName],
          machines = this.composeEnvironmentManager.getMachines(defEnvironment),
          environment = this.composeEnvironmentManager.getEnvironment(defEnvironment, machines);
      stackWorkspaceConfig = {
        defaultEnv: this.environmentName,
        environments: {
          [this.environmentName]: environment
        }
      };
    }

    return this.cheWorkspace.formWorkspaceConfig(stackWorkspaceConfig, this.workspaceName, null, DEFAULT_WORKSPACE_RAM);
  }

  /**
   * Detects machine source from pointed stack.
   *
   * @param stack {object} to retrieve described source
   * @returns {source} machine source config
   */
  getSourceFromStack(stack: any): any {
    let source: any = {};
    source.type = 'dockerfile';

    switch (stack.source.type.toLowerCase()) {
      case 'image':
        source.content = 'FROM ' + stack.source.origin;
        break;
      case 'dockerfile':
        source.content = stack.source.origin;
        break;
      default:
        throw 'Not implemented';
    }

    return source;
  }
}
const DEFAULT_WORKSPACE_RAM: number = 2 * Math.pow(1024, 3);

export class WorkspaceStacksController {
  $scope: ng.IScope;
random_line_split
nir_opt_algebraic.py
#! /usr/bin/env python
#
# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
#    Jason Ekstrand ([email protected])

import nir_algebraic

# Convenience variables
a = 'a'
b = 'b'
c = 'c'
d = 'd'

# Written in the form (<search>, <replace>) where <search> is an expression
# and <replace> is either an expression or a value.  An expression is
# defined as a tuple of the form (<op>, <src0>, <src1>, <src2>, <src3>)
# where each source is either an expression or a value.  A value can be
# either a numeric constant or a string representing a variable name.
#
# Variable names are specified as "[#]name[@type]" where "#" inicates that
# the given variable will only match constants and the type indicates that
# the given variable will only match values from ALU instructions with the
# given output type.
#
# For constants, you have to be careful to make sure that it is the right
# type because python is unaware of the source and destination types of the
# opcodes.

optimizations = [
   (('fneg', ('fneg', a)), a),
   (('ineg', ('ineg', a)), a),
   (('fabs', ('fabs', a)), ('fabs', a)),
   (('fabs', ('fneg', a)), ('fabs', a)),
   (('iabs', ('iabs', a)), ('iabs', a)),
   (('iabs', ('ineg', a)), ('iabs', a)),
   (('fadd', a, 0.0), a),
   (('iadd', a, 0), a),
   (('fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('fmul', a, 0.0), 0.0),
   (('imul', a, 0), 0),
   (('fmul', a, 1.0), a),
   (('imul', a, 1), a),
   (('fmul', a, -1.0), ('fneg', a)),
   (('imul', a, -1), ('ineg', a)),
   (('ffma', 0.0, a, b), b),
   (('ffma', a, 0.0, b), b),
   (('ffma', a, b, 0.0), ('fmul', a, b)),
   (('ffma', a, 1.0, b), ('fadd', a, b)),
   (('ffma', 1.0, a, b), ('fadd', a, b)),
   (('flrp', a, b, 0.0), a),
   (('flrp', a, b, 1.0), b),
   (('flrp', a, a, b), a),
   (('flrp', 0.0, a, b), ('fmul', a, b)),
   (('flrp', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp'),
   (('fadd', ('fmul', a, ('fadd', 1.0, ('fneg', c))), ('fmul', b, c)), ('flrp', a, b, c), '!options->lower_flrp'),
   (('fadd', a, ('fmul', c, ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp'),
   (('ffma', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma'),
   (('fadd', ('fmul', a, b), c), ('ffma', a, b, c), '!options->lower_ffma'),

   # Comparison simplifications
   (('inot', ('flt', a, b)), ('fge', a, b)),
   (('inot', ('fge', a, b)), ('flt', a, b)),
   (('inot', ('ilt', a, b)), ('ige', a, b)),
   (('inot', ('ige', a, b)), ('ilt', a, b)),
   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),
   (('bcsel', ('flt', a, b), a, b), ('fmin', a, b)),
   (('bcsel', ('flt', a, b), b, a), ('fmax', a, b)),
   (('bcsel', ('inot', 'a@bool'), b, c), ('bcsel', a, c, b)),
   (('bcsel', a, ('bcsel', a, b, c), d), ('bcsel', a, b, d)),
   (('fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'),
   (('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat'),
   (('fsat', ('fsat', a)), ('fsat', a)),
   (('fmin', ('fmax', ('fmin', ('fmax', a, 0.0), 1.0), 0.0), 1.0), ('fmin', ('fmax', a, 0.0), 1.0)),
   (('ior', ('flt', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))),
   (('ior', ('fge', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))),
   (('slt', a, b), ('b2f', ('flt', a, b)), 'options->lower_scmp'),
   (('sge', a, b), ('b2f', ('fge', a, b)), 'options->lower_scmp'),
   (('seq', a, b), ('b2f', ('feq', a, b)), 'options->lower_scmp'),
   (('sne', a, b), ('b2f', ('fne', a, b)), 'options->lower_scmp'),

   # Emulating booleans
   (('fmul', ('b2f', a), ('b2f', b)), ('b2f', ('iand', a, b))),
   (('fsat', ('fadd', ('b2f', a), ('b2f', b))), ('b2f', ('ior', a, b))),
   (('iand', 'a@bool', 1.0), ('b2f', a)),
   (('flt', ('fneg', ('b2f', a)), 0), a), # Generated by TGSI KILL_IF.
   (('flt', ('fsub', 0.0, ('b2f', a)), 0), a), # Generated by TGSI KILL_IF.

   # Comparison with the same args.  Note that these are not done for
   # the float versions because NaN always returns false on float
   # inequalities.
   (('ilt', a, a), False),
   (('ige', a, a), True),
   (('ieq', a, a), True),
   (('ine', a, a), False),
   (('ult', a, a), False),
   (('uge', a, a), True),

   # Logical and bit operations
   (('fand', a, 0.0), 0.0),
   (('iand', a, a), a),
   (('iand', a, 0), 0),
   (('ior', a, a), a),
   (('ior', a, 0), a),
   (('fxor', a, a), 0.0),
   (('ixor', a, a), 0),
   (('inot', ('inot', a)), a),

   # DeMorgan's Laws
   (('iand', ('inot', a), ('inot', b)), ('inot', ('ior', a, b))),
   (('ior', ('inot', a), ('inot', b)), ('inot', ('iand', a, b))),

   # Shift optimizations
   (('ishl', 0, a), 0),
   (('ishl', a, 0), a),
   (('ishr', 0, a), 0),
   (('ishr', a, 0), a),
   (('ushr', 0, a), 0),
   (('ushr', a, 0), a),

   # Exponential/logarithmic identities
   (('fexp2', ('flog2', a)), a), # 2^lg2(a) = a
   (('fexp', ('flog', a)), a), # e^ln(a) = a
   (('flog2', ('fexp2', a)), a), # lg2(2^a) = a
   (('flog', ('fexp', a)), a), # ln(e^a) = a
   (('fpow', a, b), ('fexp2', ('fmul', ('flog2', a), b)), 'options->lower_fpow'), # a^b = 2^(lg2(a)*b)
   (('fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b
   (('fexp', ('fmul', ('flog', a), b)), ('fpow', a, b), '!options->lower_fpow'), # e^(ln(a)*b) = a^b
   (('fpow', a, 1.0), a),
   (('fpow', a, 2.0), ('fmul', a, a)),
   (('fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('fpow', 2.0, a), ('fexp2', a)),
   (('fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))),
   (('fsqrt', ('fexp', a)), ('fexp', ('fmul', 0.5, a))),
   (('frcp', ('fexp2', a)), ('fexp2', ('fneg', a))),
   (('frcp', ('fexp', a)), ('fexp', ('fneg', a))),
   (('frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))),
   (('frsq', ('fexp', a)), ('fexp', ('fmul', -0.5, a))),
   (('flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))),
   (('flog', ('fsqrt', a)), ('fmul', 0.5, ('flog', a))),
   (('flog2', ('frcp', a)), ('fneg', ('flog2', a))),
   (('flog', ('frcp', a)), ('fneg', ('flog', a))),
   (('flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))),
   (('flog', ('frsq', a)), ('fmul', -0.5, ('flog', a))),
   (('flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))),
   (('flog', ('fpow', a, b)), ('fmul', b, ('flog', a))),
   (('fadd', ('flog2', a), ('flog2', b)), ('flog2', ('fmul', a, b))),
   (('fadd', ('flog', a), ('flog', b)), ('flog', ('fmul', a, b))),
   (('fadd', ('flog2', a), ('fneg', ('flog2', b))), ('flog2', ('fdiv', a, b))),
   (('fadd', ('flog', a), ('fneg', ('flog', b))), ('flog', ('fdiv', a, b))),
   (('fmul', ('fexp2', a), ('fexp2', b)), ('fexp2', ('fadd', a, b))),
   (('fmul', ('fexp', a), ('fexp', b)), ('fexp', ('fadd', a, b))),

   # Division and reciprocal
   (('fdiv', 1.0, a), ('frcp', a)),
   (('frcp', ('frcp', a)), a),
   (('frcp', ('fsqrt', a)), ('frsq', a)),
   (('fsqrt', a), ('frcp', ('frsq', a)), 'options->lower_fsqrt'),
   (('frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'),

   # Boolean simplifications
   (('ine', 'a@bool', 0), 'a'),
   (('ieq', 'a@bool', 0), ('inot', 'a')),
   (('bcsel', a, True, False), ('ine', a, 0)),
   (('bcsel', a, False, True), ('ieq', a, 0)),
   (('bcsel', True, b, c), b),
   (('bcsel', False, b, c), c),
   # The result of this should be hit by constant propagation and, in the
   # next round of opt_algebraic, get picked up by one of the above two.
   (('bcsel', '#a', b, c), ('bcsel', ('ine', 'a', 0), b, c)),
   (('bcsel', a, b, b), b),
   (('fcsel', a, b, b), b),

   # Conversions
   (('f2i', ('ftrunc', a)), ('f2i', a)),
   (('f2u', ('ftrunc', a)), ('f2u', a)),

   # Subtracts
   (('fsub', a, ('fsub', 0.0, b)), ('fadd', a, b)),
   (('isub', a, ('isub', 0, b)), ('iadd', a, b)),
   (('fsub', a, b), ('fadd', a, ('fneg', b)), 'options->lower_sub'),
   (('isub', a, b), ('iadd', a, ('ineg', b)), 'options->lower_sub'),
   (('fneg', a), ('fsub', 0.0, a), 'options->lower_negate'),
   (('ineg', a), ('isub', 0, a), 'options->lower_negate'),
   (('fadd', a, ('fsub', 0.0, b)), ('fsub', a, b)),
   (('iadd', a, ('isub', 0, b)), ('isub', a, b)),
   (('fabs', ('fsub', 0.0, a)), ('fabs', a)),
   (('iabs', ('isub', 0, a)), ('iabs', a)),
]

# Add optimizations to handle the case where the result of a ternary is
# compared to a constant.  This way we can take things like
#
# (a ? 0 : 1) > 0
#
# and turn it into
#
# a ? (0 > 0) : (1 > 0)
#
# which constant folding will eat for lunch.  The resulting ternary will
# further get cleaned up by the boolean reductions above and we will be
# left with just the original variable "a".
for op in ['flt', 'fge', 'feq', 'fne', 'ilt', 'ige', 'ieq', 'ine', 'ult', 'uge']:
# This section contains "late" optimizations that should be run after the
# regular optimizations have finished.  Optimizations should go here if
# they help code generation but do not necessarily produce code that is
# more easily optimizable.
late_optimizations = [
   (('flt', ('fadd', a, b), 0.0), ('flt', a, ('fneg', b))),
   (('fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))),
   (('feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))),
   (('fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))),
]

print nir_algebraic.AlgebraicPass("nir_opt_algebraic", optimizations).render()
print nir_algebraic.AlgebraicPass("nir_opt_algebraic_late", late_optimizations).render()
   optimizations += [
      ((op, ('bcsel', 'a', '#b', '#c'), '#d'), ('bcsel', 'a', (op, 'b', 'd'), (op, 'c', 'd'))),
      ((op, '#d', ('bcsel', a, '#b', '#c')), ('bcsel', 'a', (op, 'd', 'b'), (op, 'd', 'c'))),
   ]
conditional_block
nir_opt_algebraic.py
#! /usr/bin/env python
#
# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
#    Jason Ekstrand ([email protected])
b = 'b'
c = 'c'
d = 'd'

# Written in the form (<search>, <replace>) where <search> is an expression
# and <replace> is either an expression or a value.  An expression is
# defined as a tuple of the form (<op>, <src0>, <src1>, <src2>, <src3>)
# where each source is either an expression or a value.  A value can be
# either a numeric constant or a string representing a variable name.
#
# Variable names are specified as "[#]name[@type]" where "#" inicates that
# the given variable will only match constants and the type indicates that
# the given variable will only match values from ALU instructions with the
# given output type.
#
# For constants, you have to be careful to make sure that it is the right
# type because python is unaware of the source and destination types of the
# opcodes.

optimizations = [
   (('fneg', ('fneg', a)), a),
   (('ineg', ('ineg', a)), a),
   (('fabs', ('fabs', a)), ('fabs', a)),
   (('fabs', ('fneg', a)), ('fabs', a)),
   (('iabs', ('iabs', a)), ('iabs', a)),
   (('iabs', ('ineg', a)), ('iabs', a)),
   (('fadd', a, 0.0), a),
   (('iadd', a, 0), a),
   (('fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('fmul', a, 0.0), 0.0),
   (('imul', a, 0), 0),
   (('fmul', a, 1.0), a),
   (('imul', a, 1), a),
   (('fmul', a, -1.0), ('fneg', a)),
   (('imul', a, -1), ('ineg', a)),
   (('ffma', 0.0, a, b), b),
   (('ffma', a, 0.0, b), b),
   (('ffma', a, b, 0.0), ('fmul', a, b)),
   (('ffma', a, 1.0, b), ('fadd', a, b)),
   (('ffma', 1.0, a, b), ('fadd', a, b)),
   (('flrp', a, b, 0.0), a),
   (('flrp', a, b, 1.0), b),
   (('flrp', a, a, b), a),
   (('flrp', 0.0, a, b), ('fmul', a, b)),
   (('flrp', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp'),
   (('fadd', ('fmul', a, ('fadd', 1.0, ('fneg', c))), ('fmul', b, c)), ('flrp', a, b, c), '!options->lower_flrp'),
   (('fadd', a, ('fmul', c, ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp'),
   (('ffma', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma'),
   (('fadd', ('fmul', a, b), c), ('ffma', a, b, c), '!options->lower_ffma'),

   # Comparison simplifications
   (('inot', ('flt', a, b)), ('fge', a, b)),
   (('inot', ('fge', a, b)), ('flt', a, b)),
   (('inot', ('ilt', a, b)), ('ige', a, b)),
   (('inot', ('ige', a, b)), ('ilt', a, b)),
   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),
   (('bcsel', ('flt', a, b), a, b), ('fmin', a, b)),
   (('bcsel', ('flt', a, b), b, a), ('fmax', a, b)),
   (('bcsel', ('inot', 'a@bool'), b, c), ('bcsel', a, c, b)),
   (('bcsel', a, ('bcsel', a, b, c), d), ('bcsel', a, b, d)),
   (('fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'),
   (('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat'),
   (('fsat', ('fsat', a)), ('fsat', a)),
   (('fmin', ('fmax', ('fmin', ('fmax', a, 0.0), 1.0), 0.0), 1.0), ('fmin', ('fmax', a, 0.0), 1.0)),
   (('ior', ('flt', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))),
   (('ior', ('fge', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))),
   (('slt', a, b), ('b2f', ('flt', a, b)), 'options->lower_scmp'),
   (('sge', a, b), ('b2f', ('fge', a, b)), 'options->lower_scmp'),
   (('seq', a, b), ('b2f', ('feq', a, b)), 'options->lower_scmp'),
   (('sne', a, b), ('b2f', ('fne', a, b)), 'options->lower_scmp'),

   # Emulating booleans
   (('fmul', ('b2f', a), ('b2f', b)), ('b2f', ('iand', a, b))),
   (('fsat', ('fadd', ('b2f', a), ('b2f', b))), ('b2f', ('ior', a, b))),
   (('iand', 'a@bool', 1.0), ('b2f', a)),
   (('flt', ('fneg', ('b2f', a)), 0), a), # Generated by TGSI KILL_IF.
   (('flt', ('fsub', 0.0, ('b2f', a)), 0), a), # Generated by TGSI KILL_IF.

   # Comparison with the same args.  Note that these are not done for
   # the float versions because NaN always returns false on float
   # inequalities.
   (('ilt', a, a), False),
   (('ige', a, a), True),
   (('ieq', a, a), True),
   (('ine', a, a), False),
   (('ult', a, a), False),
   (('uge', a, a), True),

   # Logical and bit operations
   (('fand', a, 0.0), 0.0),
   (('iand', a, a), a),
   (('iand', a, 0), 0),
   (('ior', a, a), a),
   (('ior', a, 0), a),
   (('fxor', a, a), 0.0),
   (('ixor', a, a), 0),
   (('inot', ('inot', a)), a),

   # DeMorgan's Laws
   (('iand', ('inot', a), ('inot', b)), ('inot', ('ior', a, b))),
   (('ior', ('inot', a), ('inot', b)), ('inot', ('iand', a, b))),

   # Shift optimizations
   (('ishl', 0, a), 0),
   (('ishl', a, 0), a),
   (('ishr', 0, a), 0),
   (('ishr', a, 0), a),
   (('ushr', 0, a), 0),
   (('ushr', a, 0), a),

   # Exponential/logarithmic identities
   (('fexp2', ('flog2', a)), a), # 2^lg2(a) = a
   (('fexp', ('flog', a)), a), # e^ln(a) = a
   (('flog2', ('fexp2', a)), a), # lg2(2^a) = a
   (('flog', ('fexp', a)), a), # ln(e^a) = a
   (('fpow', a, b), ('fexp2', ('fmul', ('flog2', a), b)), 'options->lower_fpow'), # a^b = 2^(lg2(a)*b)
   (('fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b
   (('fexp', ('fmul', ('flog', a), b)), ('fpow', a, b), '!options->lower_fpow'), # e^(ln(a)*b) = a^b
   (('fpow', a, 1.0), a),
   (('fpow', a, 2.0), ('fmul', a, a)),
   (('fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('fpow', 2.0, a), ('fexp2', a)),
   (('fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))),
   (('fsqrt', ('fexp', a)), ('fexp', ('fmul', 0.5, a))),
   (('frcp', ('fexp2', a)), ('fexp2', ('fneg', a))),
   (('frcp', ('fexp', a)), ('fexp', ('fneg', a))),
   (('frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))),
   (('frsq', ('fexp', a)), ('fexp', ('fmul', -0.5, a))),
   (('flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))),
   (('flog', ('fsqrt', a)), ('fmul', 0.5, ('flog', a))),
   (('flog2', ('frcp', a)), ('fneg', ('flog2', a))),
   (('flog', ('frcp', a)), ('fneg', ('flog', a))),
   (('flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))),
   (('flog', ('frsq', a)), ('fmul', -0.5, ('flog', a))),
   (('flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))),
   (('flog', ('fpow', a, b)), ('fmul', b, ('flog', a))),
   (('fadd', ('flog2', a), ('flog2', b)), ('flog2', ('fmul', a, b))),
   (('fadd', ('flog', a), ('flog', b)), ('flog', ('fmul', a, b))),
   (('fadd', ('flog2', a), ('fneg', ('flog2', b))), ('flog2', ('fdiv', a, b))),
   (('fadd', ('flog', a), ('fneg', ('flog', b))), ('flog', ('fdiv', a, b))),
   (('fmul', ('fexp2', a), ('fexp2', b)), ('fexp2', ('fadd', a, b))),
   (('fmul', ('fexp', a), ('fexp', b)), ('fexp', ('fadd', a, b))),

   # Division and reciprocal
   (('fdiv', 1.0, a), ('frcp', a)),
   (('frcp', ('frcp', a)), a),
   (('frcp', ('fsqrt', a)), ('frsq', a)),
   (('fsqrt', a), ('frcp', ('frsq', a)), 'options->lower_fsqrt'),
   (('frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'),

   # Boolean simplifications
   (('ine', 'a@bool', 0), 'a'),
   (('ieq', 'a@bool', 0), ('inot', 'a')),
   (('bcsel', a, True, False), ('ine', a, 0)),
   (('bcsel', a, False, True), ('ieq', a, 0)),
   (('bcsel', True, b, c), b),
   (('bcsel', False, b, c), c),
   # The result of this should be hit by constant propagation and, in the
   # next round of opt_algebraic, get picked up by one of the above two.
   (('bcsel', '#a', b, c), ('bcsel', ('ine', 'a', 0), b, c)),
   (('bcsel', a, b, b), b),
   (('fcsel', a, b, b), b),

   # Conversions
   (('f2i', ('ftrunc', a)), ('f2i', a)),
   (('f2u', ('ftrunc', a)), ('f2u', a)),

   # Subtracts
   (('fsub', a, ('fsub', 0.0, b)), ('fadd', a, b)),
   (('isub', a, ('isub', 0, b)), ('iadd', a, b)),
   (('fsub', a, b), ('fadd', a, ('fneg', b)), 'options->lower_sub'),
   (('isub', a, b), ('iadd', a, ('ineg', b)), 'options->lower_sub'),
   (('fneg', a), ('fsub', 0.0, a), 'options->lower_negate'),
   (('ineg', a), ('isub', 0, a), 'options->lower_negate'),
   (('fadd', a, ('fsub', 0.0, b)), ('fsub', a, b)),
   (('iadd', a, ('isub', 0, b)), ('isub', a, b)),
   (('fabs', ('fsub', 0.0, a)), ('fabs', a)),
   (('iabs', ('isub', 0, a)), ('iabs', a)),
]

# Add optimizations to handle the case where the result of a ternary is
# compared to a constant.  This way we can take things like
#
# (a ? 0 : 1) > 0
#
# and turn it into
#
# a ? (0 > 0) : (1 > 0)
#
# which constant folding will eat for lunch.  The resulting ternary will
# further get cleaned up by the boolean reductions above and we will be
# left with just the original variable "a".
for op in ['flt', 'fge', 'feq', 'fne', 'ilt', 'ige', 'ieq', 'ine', 'ult', 'uge']:
   optimizations += [
      ((op, ('bcsel', 'a', '#b', '#c'), '#d'), ('bcsel', 'a', (op, 'b', 'd'), (op, 'c', 'd'))),
      ((op, '#d', ('bcsel', a, '#b', '#c')), ('bcsel', 'a', (op, 'd', 'b'), (op, 'd', 'c'))),
   ]

# This section contains "late" optimizations that should be run after the
# regular optimizations have finished.  Optimizations should go here if
# they help code generation but do not necessarily produce code that is
# more easily optimizable.
late_optimizations = [
   (('flt', ('fadd', a, b), 0.0), ('flt', a, ('fneg', b))),
   (('fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))),
   (('feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))),
   (('fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))),
]

print nir_algebraic.AlgebraicPass("nir_opt_algebraic", optimizations).render()
print nir_algebraic.AlgebraicPass("nir_opt_algebraic_late", late_optimizations).render()
import nir_algebraic

# Convenience variables
a = 'a'
random_line_split
class_passed_message.js
var class_passed_message =
[
    [ "direction_t", "class_passed_message.html#a11c83e74aa007c495b32ec3ed4953a50", [
      [ "INCOMING", "class_passed_message.html#a11c83e74aa007c495b32ec3ed4953a50a43c42d4afa45cd04736e0d59167260a4", null ],
      [ "OUTGOING", "class_passed_message.html#a11c83e74aa007c495b32ec3ed4953a50a862e80d4bad52c451a413eef983c16ae", null ]
    ] ],
    [ "gates_t", "class_passed_message.html#a7738b6f08855f784d1012de87fbfd9e6", [
      [ "UPPER_DATA", "class_passed_message.html#a7738b6f08855f784d1012de87fbfd9e6adf76d3ca7bb9a62bed70965639d59859", null ],
      [ "UPPER_CONTROL", "class_passed_message.html#a7738b6f08855f784d1012de87fbfd9e6aea991e99dac6c91c9e3e89f902f1075d", null ],
[ "LOWER_DATA", "class_passed_message.html#a7738b6f08855f784d1012de87fbfd9e6a97265ac51f333c88508670c5d3f5ded9", null ], [ "LOWER_CONTROL", "class_passed_message.html#a7738b6f08855f784d1012de87fbfd9e6afb379d2a15495f1ef2f290dc9ac97299", null ] ] ], [ "direction", "class_passed_message.html#af55219a6ed1e656af091cb7583467f5b", null ], [ "fromModule", "class_passed_message.html#a6c340595cb29a4e8a4c55ea0503dffad", null ], [ "gateType", "class_passed_message.html#a41f11b3139f3552cf2de3bb648c1ff55", null ], [ "kind", "class_passed_message.html#ab4e2bf6d2317196af7e9c98ed2c406a6", null ], [ "name", "class_passed_message.html#a8a4eb44ad1e43205d1881fec0c00a6d7", null ] ];
random_line_split
ParserFactory.ts
//
// LESERKRITIKK v2 (aka Reader Critics)
// Copyright (C) 2017 DB Medialab/Aller Media AS, Oslo, Norway
// https://github.com/dbmedialab/reader-critics/
//
// This program is free software: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along with
// this program. If not, see <http://www.gnu.org/licenses/>.
//

import ArticleURL from 'base/ArticleURL';
import Parser from 'base/Parser';

export interface ParserFactory {
	newInstance(rawArticle : string, articleURL : ArticleURL) : Parser;
}

export default ParserFactory;

export const createFactory = (constructorFn : Function) : ParserFactory => ({
	newInstance: (...arghs) : Parser => new (
		Function.prototype.bind.call(constructorFn, null, ...arghs)
	)(),
});
random_line_split
database_backup.py
#!/usr/bin/python
"""
where {hostname} and {port} are as they are below
"""

import sys
sys.path.append("/next_backend")

import time
import traceback
import next.utils as utils
import subprocess
import next.constants as constants
import os

NEXT_BACKEND_GLOBAL_HOST = os.environ.get('NEXT_BACKEND_GLOBAL_HOST', 'localhost')
AWS_BUCKET_NAME = os.environ.get('AWS_BUCKET_NAME','next-database-backups')

timestamp = utils.datetimeNow()
print "[ %s ] starting backup of MongoDB to S3..." % str(timestamp)
print "[ %s ] constants.AWS_ACCESS_ID = %s" % (str(timestamp),constants.AWS_ACCESS_ID)

subprocess.call('/usr/bin/mongodump -vvvvv --host {hostname}:{port} --out /dump/mongo_dump'.format(
    hostname=constants.MONGODB_HOST,
    port=constants.MONGODB_PORT
),shell=True)

try:
    tar_file = sys.argv[1]
except:
    tar_file = 'mongo_dump_{hostname}_{timestamp}.tar.gz'.format(
        hostname=NEXT_BACKEND_GLOBAL_HOST,
        timestamp= timestamp.strftime("%Y-%m-%d_%H:%M:%S")
    )

subprocess.call('tar czf {path}/{tar_file} /dump/mongo_dump'.format(path='/dump',tar_file=tar_file),shell=True)

from boto.s3.connection import S3Connection
from boto.s3.key import Key
import boto
# boto.set_stream_logger('boto')

try:
    conn = S3Connection(constants.AWS_ACCESS_ID,constants.AWS_SECRET_ACCESS_KEY)
    b = conn.get_bucket(AWS_BUCKET_NAME)
    k = Key(b)
    k.key = tar_file
    bytes_saved = k.set_contents_from_filename( '/dump/'+tar_file )
    timestamp = utils.datetimeNow()
    print "[ %s ] done with backup of MongoDB to S3... %d bytes saved" % (str(timestamp),bytes_saved)
except:
    error = traceback.format_exc()
    timestamp = utils.datetimeNow()
    print "[ %s ] FAILED TO CONNECT TO S3... saving locally" % str(timestamp)
    print error

subprocess.call('rm {path}/{tar_file} /dump/mongo_dump'.format(path='/dump',tar_file=tar_file),shell=True)
Every 30 minutes backs up database to S3. To recover the database, (i.e. reverse the process) simply download the file from S3, un-tar it, and use the command: (./)mongorestore --host {hostname} --port {port} path/to/dump/mongodump
random_line_split
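The docstring in this record spells out the reverse path: pull the tarball down from S3, unpack it, and replay it with mongorestore. A minimal sketch of that restore procedure, assuming the same boto S3 API the script itself uses; the bucket, key, and host/port values are placeholders.

# Sketch of the restore path described in the docstring above: download
# the dump tarball from S3, unpack it, and replay it with mongorestore.
# Bucket name, tar file name, and host/port here are placeholders.
import subprocess
from boto.s3.connection import S3Connection
from boto.s3.key import Key

def restore_from_s3(access_id, secret_key, bucket_name, tar_file,
                    hostname="localhost", port=27017):
    conn = S3Connection(access_id, secret_key)
    bucket = conn.get_bucket(bucket_name)
    key = Key(bucket)
    key.key = tar_file
    key.get_contents_to_filename("/dump/" + tar_file)   # download the backup
    # tar stored paths relative to /, so extract back under /
    subprocess.call("tar xzf /dump/{} -C /".format(tar_file), shell=True)
    subprocess.call(
        "mongorestore --host {}:{} /dump/mongo_dump".format(hostname, port),
        shell=True)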
main.rs
extern crate rustc_serialize; extern crate docopt; extern crate glob; use docopt::Docopt; use std::io::Write; use std::path::PathBuf; use glob::glob; #[cfg_attr(rustfmt, rustfmt_skip)]
Kibar imager.

Helper utils to download, format, install and manage raspberry pi images
for the kibar project.

Usage:
  img install <device>
  img mount <device> <location>
  img unmount (<device> | <location>)
  img chroot <device>
  img (-h | --help | --version)

Options:
  -h --help     Show this screen.
  --version     Show version.
";

#[derive(Debug, RustcDecodable)]
struct Args {
    arg_device: String,
    arg_location: String,
    cmd_install: bool,
    cmd_mount: bool,
    cmd_unmount: bool,
    cmd_chroot: bool,
}

#[derive(Debug)]
struct Device {
    device_file: PathBuf,
    partitions: Vec<PathBuf>,
}

impl Device {
    // TODO pass errors up rather than just panicking
    fn new(device_file: String) -> Device {
        let pattern = device_file.clone() + "?[0-9]";
        Device {
            device_file: PathBuf::from(device_file),
            partitions: glob(&pattern).unwrap().map(|r| r.unwrap()).collect(),
        }
    }
}

fn main() {
    let args: Args = Docopt::new(USAGE)
        .and_then(|d| d.decode())
        .unwrap_or_else(|e| e.exit());
    println!("{:?}", args);

    if args.cmd_install {
        unimplemented!()
    } else if args.cmd_mount {
        let d = Device::new(args.arg_device);
        println!("{:?}", d);
    } else if args.cmd_unmount {
        unimplemented!();
        writeln!(&mut std::io::stderr(), "Error!").unwrap();
        ::std::process::exit(1)
    } else if args.cmd_chroot {
        unimplemented!()
    } else {
        unimplemented!()
    }
}
const USAGE: &'static str = "
random_line_split
main.rs
extern crate rustc_serialize;
extern crate docopt;
extern crate glob;

use docopt::Docopt;
use std::io::Write;
use std::path::PathBuf;
use glob::glob;

#[cfg_attr(rustfmt, rustfmt_skip)]
const USAGE: &'static str = "
Kibar imager.

Helper utils to download, format, install and manage raspberry pi images
for the kibar project.

Usage:
  img install <device>
  img mount <device> <location>
  img unmount (<device> | <location>)
  img chroot <device>
  img (-h | --help | --version)

Options:
  -h --help     Show this screen.
  --version     Show version.
";

#[derive(Debug, RustcDecodable)]
struct
{
    arg_device: String,
    arg_location: String,
    cmd_install: bool,
    cmd_mount: bool,
    cmd_unmount: bool,
    cmd_chroot: bool,
}

#[derive(Debug)]
struct Device {
    device_file: PathBuf,
    partitions: Vec<PathBuf>,
}

impl Device {
    // TODO pass errors up rather than just panicking
    fn new(device_file: String) -> Device {
        let pattern = device_file.clone() + "?[0-9]";
        Device {
            device_file: PathBuf::from(device_file),
            partitions: glob(&pattern).unwrap().map(|r| r.unwrap()).collect(),
        }
    }
}

fn main() {
    let args: Args = Docopt::new(USAGE)
        .and_then(|d| d.decode())
        .unwrap_or_else(|e| e.exit());
    println!("{:?}", args);

    if args.cmd_install {
        unimplemented!()
    } else if args.cmd_mount {
        let d = Device::new(args.arg_device);
        println!("{:?}", d);
    } else if args.cmd_unmount {
        unimplemented!();
        writeln!(&mut std::io::stderr(), "Error!").unwrap();
        ::std::process::exit(1)
    } else if args.cmd_chroot {
        unimplemented!()
    } else {
        unimplemented!()
    }
}
Args
identifier_name
main.rs
extern crate rustc_serialize;
extern crate docopt;
extern crate glob;

use docopt::Docopt;
use std::io::Write;
use std::path::PathBuf;
use glob::glob;

#[cfg_attr(rustfmt, rustfmt_skip)]
const USAGE: &'static str = "
Kibar imager.

Helper utils to download, format, install and manage raspberry pi images
for the kibar project.

Usage:
  img install <device>
  img mount <device> <location>
  img unmount (<device> | <location>)
  img chroot <device>
  img (-h | --help | --version)

Options:
  -h --help     Show this screen.
  --version     Show version.
";

#[derive(Debug, RustcDecodable)]
struct Args {
    arg_device: String,
    arg_location: String,
    cmd_install: bool,
    cmd_mount: bool,
    cmd_unmount: bool,
    cmd_chroot: bool,
}

#[derive(Debug)]
struct Device {
    device_file: PathBuf,
    partitions: Vec<PathBuf>,
}

impl Device {
    // TODO pass errors up rather than just panicking
    fn new(device_file: String) -> Device {
        let pattern = device_file.clone() + "?[0-9]";
        Device {
            device_file: PathBuf::from(device_file),
            partitions: glob(&pattern).unwrap().map(|r| r.unwrap()).collect(),
        }
    }
}

fn main()
{ let args: Args = Docopt::new(USAGE) .and_then(|d| d.decode()) .unwrap_or_else(|e| e.exit()); println!("{:?}", args); if args.cmd_install { unimplemented!() } else if args.cmd_mount { let d = Device::new(args.arg_device); println!("{:?}", d); } else if args.cmd_unmount { unimplemented!(); writeln!(&mut std::io::stderr(), "Error!").unwrap(); ::std::process::exit(1) } else if args.cmd_chroot { unimplemented!() } else { unimplemented!() } }
identifier_body
main.rs
extern crate rustc_serialize;
extern crate docopt;
extern crate glob;

use docopt::Docopt;
use std::io::Write;
use std::path::PathBuf;
use glob::glob;

#[cfg_attr(rustfmt, rustfmt_skip)]
const USAGE: &'static str = "
Kibar imager.

Helper utils to download, format, install and manage raspberry pi images
for the kibar project.

Usage:
  img install <device>
  img mount <device> <location>
  img unmount (<device> | <location>)
  img chroot <device>
  img (-h | --help | --version)

Options:
  -h --help     Show this screen.
  --version     Show version.
";

#[derive(Debug, RustcDecodable)]
struct Args {
    arg_device: String,
    arg_location: String,
    cmd_install: bool,
    cmd_mount: bool,
    cmd_unmount: bool,
    cmd_chroot: bool,
}

#[derive(Debug)]
struct Device {
    device_file: PathBuf,
    partitions: Vec<PathBuf>,
}

impl Device {
    // TODO pass errors up rather than just panicking
    fn new(device_file: String) -> Device {
        let pattern = device_file.clone() + "?[0-9]";
        Device {
            device_file: PathBuf::from(device_file),
            partitions: glob(&pattern).unwrap().map(|r| r.unwrap()).collect(),
        }
    }
}

fn main() {
    let args: Args = Docopt::new(USAGE)
        .and_then(|d| d.decode())
        .unwrap_or_else(|e| e.exit());
    println!("{:?}", args);

    if args.cmd_install
else if args.cmd_mount { let d = Device::new(args.arg_device); println!("{:?}", d); } else if args.cmd_unmount { unimplemented!(); writeln!(&mut std::io::stderr(), "Error!").unwrap(); ::std::process::exit(1) } else if args.cmd_chroot { unimplemented!() } else { unimplemented!() } }
{ unimplemented!() }
conditional_block
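Device::new in the main.rs records above derives partition paths by globbing the device path plus a "?[0-9]" suffix. Note the single-character wildcard: the pattern matches one arbitrary character followed by one digit, so it fits devices like /dev/mmcblk0 (matching /dev/mmcblk0p1) rather than /dev/sda1. A sketch of the same lookup in Python, with a hypothetical device path:

# Sketch of the partition lookup used by Device::new: glob the device
# path plus "?[0-9]", i.e. one wildcard character followed by a digit.
import glob

def find_partitions(device_file):
    # mirrors the Rust pattern device_file + "?[0-9]"
    return sorted(glob.glob(device_file + "?[0-9]"))

# Hypothetical usage: on a host with an SD card at /dev/mmcblk0 this
# would return e.g. ['/dev/mmcblk0p1', '/dev/mmcblk0p2'].
print(find_partitions("/dev/mmcblk0"))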
JavascriptSerializer.ts
import { JavascriptFileSystem } from './JavascriptFileSystem' import { v2 as webdav } from 'webdav-server' export class JavascriptSerializer extends webdav.VirtualSerializer { uid() : string { return 'JavascriptSerializer-1.0.0'; } serialize(fs : JavascriptFileSystem, callback : webdav.ReturnCallback<any>) : void { super.serialize(fs, (e, data) => { if(e) return callback(e); data.options = fs.options; callback(null, data); }) } unserialize(serializedData : any, callback : webdav.ReturnCallback<webdav.FileSystem>) : void { super.unserialize(serializedData, (e, fs) => { if(e) return callback(e); const options = serializedData.useEval !== undefined ? { useEval: serializedData.useEval, currentWorkingDirectory: serializedData.currentWorkingDirectory } : serializedData.options; const ffs = new JavascriptFileSystem(options); for(const name in fs) ffs[name] = fs[name]; ffs.setSerializer(this); callback(null, ffs); })
} }
random_line_split
JavascriptSerializer.ts
import { JavascriptFileSystem } from './JavascriptFileSystem' import { v2 as webdav } from 'webdav-server' export class JavascriptSerializer extends webdav.VirtualSerializer { uid() : string
serialize(fs : JavascriptFileSystem, callback : webdav.ReturnCallback<any>) : void { super.serialize(fs, (e, data) => { if(e) return callback(e); data.options = fs.options; callback(null, data); }) } unserialize(serializedData : any, callback : webdav.ReturnCallback<webdav.FileSystem>) : void { super.unserialize(serializedData, (e, fs) => { if(e) return callback(e); const options = serializedData.useEval !== undefined ? { useEval: serializedData.useEval, currentWorkingDirectory: serializedData.currentWorkingDirectory } : serializedData.options; const ffs = new JavascriptFileSystem(options); for(const name in fs) ffs[name] = fs[name]; ffs.setSerializer(this); callback(null, ffs); }) } }
{ return 'JavascriptSerializer-1.0.0'; }
identifier_body
JavascriptSerializer.ts
import { JavascriptFileSystem } from './JavascriptFileSystem' import { v2 as webdav } from 'webdav-server' export class JavascriptSerializer extends webdav.VirtualSerializer {
() : string { return 'JavascriptSerializer-1.0.0'; } serialize(fs : JavascriptFileSystem, callback : webdav.ReturnCallback<any>) : void { super.serialize(fs, (e, data) => { if(e) return callback(e); data.options = fs.options; callback(null, data); }) } unserialize(serializedData : any, callback : webdav.ReturnCallback<webdav.FileSystem>) : void { super.unserialize(serializedData, (e, fs) => { if(e) return callback(e); const options = serializedData.useEval !== undefined ? { useEval: serializedData.useEval, currentWorkingDirectory: serializedData.currentWorkingDirectory } : serializedData.options; const ffs = new JavascriptFileSystem(options); for(const name in fs) ffs[name] = fs[name]; ffs.setSerializer(this); callback(null, ffs); }) } }
uid
identifier_name
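The serializer records above persist a file system's options under a versioned uid and, on unserialize, prefer the nested options object while falling back to the legacy top-level useEval/currentWorkingDirectory fields. A Python sketch of that round-trip contract; all class and field names here are placeholders, not webdav-server API.

# Sketch of the serialize/unserialize round trip: serialize() captures
# the file system's options, unserialize() rebuilds the instance,
# preferring new-style nested options over legacy top-level fields.
class DemoSerializer(object):
    def uid(self):
        return "DemoSerializer-1.0.0"   # version tag for stored data

    def serialize(self, fs):
        return {"options": dict(fs.options)}

    def unserialize(self, data):
        if "useEval" in data:           # legacy layout: fields at top level
            options = {"useEval": data["useEval"],
                       "currentWorkingDirectory": data.get("currentWorkingDirectory")}
        else:
            options = data["options"]
        return DemoFileSystem(options)

class DemoFileSystem(object):
    def __init__(self, options):
        self.options = options

fs = DemoFileSystem({"useEval": False, "currentWorkingDirectory": "/"})
restored = DemoSerializer().unserialize(DemoSerializer().serialize(fs))
assert restored.options == fs.options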
microtask.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Implementation of [microtasks](https://html.spec.whatwg.org/multipage/#microtask) and //! microtask queues. It is up to implementations of event loops to store a queue and //! perform checkpoints at appropriate times, as well as enqueue microtasks as required. use crate::dom::bindings::callback::ExceptionHandling; use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::PromiseBinding::PromiseJobCallback; use crate::dom::bindings::codegen::Bindings::VoidFunctionBinding::VoidFunction; use crate::dom::bindings::root::DomRoot; use crate::dom::globalscope::GlobalScope; use crate::dom::htmlimageelement::ImageElementMicrotask; use crate::dom::htmlmediaelement::MediaElementMicrotask; use crate::dom::mutationobserver::MutationObserver; use crate::script_runtime::{notify_about_rejected_promises, JSContext}; use crate::script_thread::ScriptThread; use js::jsapi::{JobQueueIsEmpty, JobQueueMayNotBeEmpty}; use msg::constellation_msg::PipelineId; use std::cell::Cell; use std::mem; use std::rc::Rc; /// A collection of microtasks in FIFO order. #[derive(Default, JSTraceable, MallocSizeOf)] pub struct MicrotaskQueue { /// The list of enqueued microtasks that will be invoked at the next microtask checkpoint. microtask_queue: DomRefCell<Vec<Microtask>>, /// <https://html.spec.whatwg.org/multipage/#performing-a-microtask-checkpoint> performing_a_microtask_checkpoint: Cell<bool>, } #[derive(JSTraceable, MallocSizeOf)] pub enum Microtask { Promise(EnqueuedPromiseCallback), User(UserMicrotask), MediaElement(MediaElementMicrotask), ImageElement(ImageElementMicrotask), CustomElementReaction, NotifyMutationObservers, } pub trait MicrotaskRunnable { fn handler(&self) {} } /// A promise callback scheduled to run during the next microtask checkpoint (#4283). #[derive(JSTraceable, MallocSizeOf)] pub struct EnqueuedPromiseCallback { #[ignore_malloc_size_of = "Rc has unclear ownership"] pub callback: Rc<PromiseJobCallback>, pub pipeline: PipelineId, } /// A microtask that comes from a queueMicrotask() Javascript call, /// identical to EnqueuedPromiseCallback once it's on the queue #[derive(JSTraceable, MallocSizeOf)] pub struct UserMicrotask { #[ignore_malloc_size_of = "Rc has unclear ownership"] pub callback: Rc<VoidFunction>, pub pipeline: PipelineId, } impl MicrotaskQueue { /// Add a new microtask to this queue. It will be invoked as part of the next /// microtask checkpoint. #[allow(unsafe_code)] pub fn enqueue(&self, job: Microtask, cx: JSContext) { self.microtask_queue.borrow_mut().push(job); unsafe { JobQueueMayNotBeEmpty(*cx) }; } /// <https://html.spec.whatwg.org/multipage/#perform-a-microtask-checkpoint> /// Perform a microtask checkpoint, executing all queued microtasks until the queue is empty. #[allow(unsafe_code)] pub fn checkpoint<F>( &self, cx: JSContext, target_provider: F, globalscopes: Vec<DomRoot<GlobalScope>>, ) where F: Fn(PipelineId) -> Option<DomRoot<GlobalScope>>,
pub fn empty(&self) -> bool { self.microtask_queue.borrow().is_empty() } }
{ if self.performing_a_microtask_checkpoint.get() { return; } // Step 1 self.performing_a_microtask_checkpoint.set(true); debug!("Now performing a microtask checkpoint"); // Steps 2 while !self.microtask_queue.borrow().is_empty() { rooted_vec!(let mut pending_queue); mem::swap(&mut *pending_queue, &mut *self.microtask_queue.borrow_mut()); for (idx, job) in pending_queue.iter().enumerate() { if idx == pending_queue.len() - 1 && self.microtask_queue.borrow().is_empty() { unsafe { JobQueueIsEmpty(*cx) }; } match *job { Microtask::Promise(ref job) => { if let Some(target) = target_provider(job.pipeline) { let _ = job.callback.Call_(&*target, ExceptionHandling::Report); } }, Microtask::User(ref job) => { if let Some(target) = target_provider(job.pipeline) { let _ = job.callback.Call_(&*target, ExceptionHandling::Report); } }, Microtask::MediaElement(ref task) => { task.handler(); }, Microtask::ImageElement(ref task) => { task.handler(); }, Microtask::CustomElementReaction => { ScriptThread::invoke_backup_element_queue(); }, Microtask::NotifyMutationObservers => { MutationObserver::notify_mutation_observers(); }, } } } // Step 3 for global in globalscopes.into_iter() { notify_about_rejected_promises(&global); } // TODO: Step 4 - Cleanup Indexed Database transactions. // Step 5 self.performing_a_microtask_checkpoint.set(false); }
identifier_body
microtask.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Implementation of [microtasks](https://html.spec.whatwg.org/multipage/#microtask) and //! microtask queues. It is up to implementations of event loops to store a queue and //! perform checkpoints at appropriate times, as well as enqueue microtasks as required. use crate::dom::bindings::callback::ExceptionHandling; use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::PromiseBinding::PromiseJobCallback; use crate::dom::bindings::codegen::Bindings::VoidFunctionBinding::VoidFunction; use crate::dom::bindings::root::DomRoot; use crate::dom::globalscope::GlobalScope; use crate::dom::htmlimageelement::ImageElementMicrotask; use crate::dom::htmlmediaelement::MediaElementMicrotask; use crate::dom::mutationobserver::MutationObserver; use crate::script_runtime::{notify_about_rejected_promises, JSContext}; use crate::script_thread::ScriptThread; use js::jsapi::{JobQueueIsEmpty, JobQueueMayNotBeEmpty}; use msg::constellation_msg::PipelineId; use std::cell::Cell; use std::mem; use std::rc::Rc; /// A collection of microtasks in FIFO order. #[derive(Default, JSTraceable, MallocSizeOf)] pub struct MicrotaskQueue { /// The list of enqueued microtasks that will be invoked at the next microtask checkpoint. microtask_queue: DomRefCell<Vec<Microtask>>, /// <https://html.spec.whatwg.org/multipage/#performing-a-microtask-checkpoint> performing_a_microtask_checkpoint: Cell<bool>, } #[derive(JSTraceable, MallocSizeOf)] pub enum Microtask { Promise(EnqueuedPromiseCallback), User(UserMicrotask), MediaElement(MediaElementMicrotask), ImageElement(ImageElementMicrotask), CustomElementReaction, NotifyMutationObservers, } pub trait MicrotaskRunnable { fn handler(&self) {} } /// A promise callback scheduled to run during the next microtask checkpoint (#4283). #[derive(JSTraceable, MallocSizeOf)] pub struct EnqueuedPromiseCallback { #[ignore_malloc_size_of = "Rc has unclear ownership"] pub callback: Rc<PromiseJobCallback>, pub pipeline: PipelineId, } /// A microtask that comes from a queueMicrotask() Javascript call, /// identical to EnqueuedPromiseCallback once it's on the queue #[derive(JSTraceable, MallocSizeOf)] pub struct UserMicrotask { #[ignore_malloc_size_of = "Rc has unclear ownership"] pub callback: Rc<VoidFunction>, pub pipeline: PipelineId, } impl MicrotaskQueue { /// Add a new microtask to this queue. It will be invoked as part of the next /// microtask checkpoint. #[allow(unsafe_code)] pub fn
(&self, job: Microtask, cx: JSContext) { self.microtask_queue.borrow_mut().push(job); unsafe { JobQueueMayNotBeEmpty(*cx) }; } /// <https://html.spec.whatwg.org/multipage/#perform-a-microtask-checkpoint> /// Perform a microtask checkpoint, executing all queued microtasks until the queue is empty. #[allow(unsafe_code)] pub fn checkpoint<F>( &self, cx: JSContext, target_provider: F, globalscopes: Vec<DomRoot<GlobalScope>>, ) where F: Fn(PipelineId) -> Option<DomRoot<GlobalScope>>, { if self.performing_a_microtask_checkpoint.get() { return; } // Step 1 self.performing_a_microtask_checkpoint.set(true); debug!("Now performing a microtask checkpoint"); // Steps 2 while !self.microtask_queue.borrow().is_empty() { rooted_vec!(let mut pending_queue); mem::swap(&mut *pending_queue, &mut *self.microtask_queue.borrow_mut()); for (idx, job) in pending_queue.iter().enumerate() { if idx == pending_queue.len() - 1 && self.microtask_queue.borrow().is_empty() { unsafe { JobQueueIsEmpty(*cx) }; } match *job { Microtask::Promise(ref job) => { if let Some(target) = target_provider(job.pipeline) { let _ = job.callback.Call_(&*target, ExceptionHandling::Report); } }, Microtask::User(ref job) => { if let Some(target) = target_provider(job.pipeline) { let _ = job.callback.Call_(&*target, ExceptionHandling::Report); } }, Microtask::MediaElement(ref task) => { task.handler(); }, Microtask::ImageElement(ref task) => { task.handler(); }, Microtask::CustomElementReaction => { ScriptThread::invoke_backup_element_queue(); }, Microtask::NotifyMutationObservers => { MutationObserver::notify_mutation_observers(); }, } } } // Step 3 for global in globalscopes.into_iter() { notify_about_rejected_promises(&global); } // TODO: Step 4 - Cleanup Indexed Database transactions. // Step 5 self.performing_a_microtask_checkpoint.set(false); } pub fn empty(&self) -> bool { self.microtask_queue.borrow().is_empty() } }
enqueue
identifier_name
microtask.rs
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

//! Implementation of [microtasks](https://html.spec.whatwg.org/multipage/#microtask) and
//! microtask queues. It is up to implementations of event loops to store a queue and
//! perform checkpoints at appropriate times, as well as enqueue microtasks as required.

use crate::dom::bindings::callback::ExceptionHandling;
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::PromiseBinding::PromiseJobCallback;
use crate::dom::bindings::codegen::Bindings::VoidFunctionBinding::VoidFunction;
use crate::dom::bindings::root::DomRoot;
use crate::dom::globalscope::GlobalScope;
use crate::dom::htmlimageelement::ImageElementMicrotask;
use crate::dom::htmlmediaelement::MediaElementMicrotask;
use crate::dom::mutationobserver::MutationObserver;
use crate::script_runtime::{notify_about_rejected_promises, JSContext};
use crate::script_thread::ScriptThread;
use js::jsapi::{JobQueueIsEmpty, JobQueueMayNotBeEmpty};
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::mem;
use std::rc::Rc;

/// A collection of microtasks in FIFO order.
#[derive(Default, JSTraceable, MallocSizeOf)]
pub struct MicrotaskQueue {
    /// The list of enqueued microtasks that will be invoked at the next microtask checkpoint.
    microtask_queue: DomRefCell<Vec<Microtask>>,
    /// <https://html.spec.whatwg.org/multipage/#performing-a-microtask-checkpoint>
    performing_a_microtask_checkpoint: Cell<bool>,
}

#[derive(JSTraceable, MallocSizeOf)]
pub enum Microtask {
    Promise(EnqueuedPromiseCallback),
    User(UserMicrotask),
    MediaElement(MediaElementMicrotask),
    ImageElement(ImageElementMicrotask),
    CustomElementReaction,
    NotifyMutationObservers,
}

pub trait MicrotaskRunnable {
    fn handler(&self) {}
}

/// A promise callback scheduled to run during the next microtask checkpoint (#4283).
#[derive(JSTraceable, MallocSizeOf)]
pub struct EnqueuedPromiseCallback {
    #[ignore_malloc_size_of = "Rc has unclear ownership"]
    pub callback: Rc<PromiseJobCallback>,
    pub pipeline: PipelineId,
}

/// A microtask that comes from a queueMicrotask() Javascript call,
/// identical to EnqueuedPromiseCallback once it's on the queue
#[derive(JSTraceable, MallocSizeOf)]
pub struct UserMicrotask {
    #[ignore_malloc_size_of = "Rc has unclear ownership"]
    pub callback: Rc<VoidFunction>,
    pub pipeline: PipelineId,
}

impl MicrotaskQueue {
    /// Add a new microtask to this queue. It will be invoked as part of the next
    /// microtask checkpoint.
    #[allow(unsafe_code)]
    pub fn enqueue(&self, job: Microtask, cx: JSContext) {
        self.microtask_queue.borrow_mut().push(job);
        unsafe { JobQueueMayNotBeEmpty(*cx) };
    }

    /// <https://html.spec.whatwg.org/multipage/#perform-a-microtask-checkpoint>
    /// Perform a microtask checkpoint, executing all queued microtasks until the queue is empty.
#[allow(unsafe_code)] pub fn checkpoint<F>( &self, cx: JSContext, target_provider: F, globalscopes: Vec<DomRoot<GlobalScope>>, ) where F: Fn(PipelineId) -> Option<DomRoot<GlobalScope>>, { if self.performing_a_microtask_checkpoint.get() { return; } // Step 1 self.performing_a_microtask_checkpoint.set(true); debug!("Now performing a microtask checkpoint"); // Steps 2 while !self.microtask_queue.borrow().is_empty() { rooted_vec!(let mut pending_queue); mem::swap(&mut *pending_queue, &mut *self.microtask_queue.borrow_mut()); for (idx, job) in pending_queue.iter().enumerate() { if idx == pending_queue.len() - 1 && self.microtask_queue.borrow().is_empty() { unsafe { JobQueueIsEmpty(*cx) }; } match *job { Microtask::Promise(ref job) => { if let Some(target) = target_provider(job.pipeline) { let _ = job.callback.Call_(&*target, ExceptionHandling::Report); } }, Microtask::User(ref job) => { if let Some(target) = target_provider(job.pipeline) { let _ = job.callback.Call_(&*target, ExceptionHandling::Report); } }, Microtask::MediaElement(ref task) => {
}, Microtask::CustomElementReaction => { ScriptThread::invoke_backup_element_queue(); }, Microtask::NotifyMutationObservers => { MutationObserver::notify_mutation_observers(); }, } } } // Step 3 for global in globalscopes.into_iter() { notify_about_rejected_promises(&global); } // TODO: Step 4 - Cleanup Indexed Database transactions. // Step 5 self.performing_a_microtask_checkpoint.set(false); } pub fn empty(&self) -> bool { self.microtask_queue.borrow().is_empty() } }
task.handler(); }, Microtask::ImageElement(ref task) => { task.handler();
random_line_split
microtask.rs
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

//! Implementation of [microtasks](https://html.spec.whatwg.org/multipage/#microtask) and
//! microtask queues. It is up to implementations of event loops to store a queue and
//! perform checkpoints at appropriate times, as well as enqueue microtasks as required.

use crate::dom::bindings::callback::ExceptionHandling;
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::PromiseBinding::PromiseJobCallback;
use crate::dom::bindings::codegen::Bindings::VoidFunctionBinding::VoidFunction;
use crate::dom::bindings::root::DomRoot;
use crate::dom::globalscope::GlobalScope;
use crate::dom::htmlimageelement::ImageElementMicrotask;
use crate::dom::htmlmediaelement::MediaElementMicrotask;
use crate::dom::mutationobserver::MutationObserver;
use crate::script_runtime::{notify_about_rejected_promises, JSContext};
use crate::script_thread::ScriptThread;
use js::jsapi::{JobQueueIsEmpty, JobQueueMayNotBeEmpty};
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::mem;
use std::rc::Rc;

/// A collection of microtasks in FIFO order.
#[derive(Default, JSTraceable, MallocSizeOf)]
pub struct MicrotaskQueue {
    /// The list of enqueued microtasks that will be invoked at the next microtask checkpoint.
    microtask_queue: DomRefCell<Vec<Microtask>>,
    /// <https://html.spec.whatwg.org/multipage/#performing-a-microtask-checkpoint>
    performing_a_microtask_checkpoint: Cell<bool>,
}

#[derive(JSTraceable, MallocSizeOf)]
pub enum Microtask {
    Promise(EnqueuedPromiseCallback),
    User(UserMicrotask),
    MediaElement(MediaElementMicrotask),
    ImageElement(ImageElementMicrotask),
    CustomElementReaction,
    NotifyMutationObservers,
}

pub trait MicrotaskRunnable {
    fn handler(&self) {}
}

/// A promise callback scheduled to run during the next microtask checkpoint (#4283).
#[derive(JSTraceable, MallocSizeOf)]
pub struct EnqueuedPromiseCallback {
    #[ignore_malloc_size_of = "Rc has unclear ownership"]
    pub callback: Rc<PromiseJobCallback>,
    pub pipeline: PipelineId,
}

/// A microtask that comes from a queueMicrotask() Javascript call,
/// identical to EnqueuedPromiseCallback once it's on the queue
#[derive(JSTraceable, MallocSizeOf)]
pub struct UserMicrotask {
    #[ignore_malloc_size_of = "Rc has unclear ownership"]
    pub callback: Rc<VoidFunction>,
    pub pipeline: PipelineId,
}

impl MicrotaskQueue {
    /// Add a new microtask to this queue. It will be invoked as part of the next
    /// microtask checkpoint.
    #[allow(unsafe_code)]
    pub fn enqueue(&self, job: Microtask, cx: JSContext) {
        self.microtask_queue.borrow_mut().push(job);
        unsafe { JobQueueMayNotBeEmpty(*cx) };
    }

    /// <https://html.spec.whatwg.org/multipage/#perform-a-microtask-checkpoint>
    /// Perform a microtask checkpoint, executing all queued microtasks until the queue is empty.
#[allow(unsafe_code)] pub fn checkpoint<F>( &self, cx: JSContext, target_provider: F, globalscopes: Vec<DomRoot<GlobalScope>>, ) where F: Fn(PipelineId) -> Option<DomRoot<GlobalScope>>, { if self.performing_a_microtask_checkpoint.get() { return; } // Step 1 self.performing_a_microtask_checkpoint.set(true); debug!("Now performing a microtask checkpoint"); // Steps 2 while !self.microtask_queue.borrow().is_empty() { rooted_vec!(let mut pending_queue); mem::swap(&mut *pending_queue, &mut *self.microtask_queue.borrow_mut()); for (idx, job) in pending_queue.iter().enumerate() { if idx == pending_queue.len() - 1 && self.microtask_queue.borrow().is_empty() { unsafe { JobQueueIsEmpty(*cx) }; } match *job { Microtask::Promise(ref job) => { if let Some(target) = target_provider(job.pipeline) { let _ = job.callback.Call_(&*target, ExceptionHandling::Report); } }, Microtask::User(ref job) => { if let Some(target) = target_provider(job.pipeline)
}, Microtask::MediaElement(ref task) => { task.handler(); }, Microtask::ImageElement(ref task) => { task.handler(); }, Microtask::CustomElementReaction => { ScriptThread::invoke_backup_element_queue(); }, Microtask::NotifyMutationObservers => { MutationObserver::notify_mutation_observers(); }, } } } // Step 3 for global in globalscopes.into_iter() { notify_about_rejected_promises(&global); } // TODO: Step 4 - Cleanup Indexed Database transactions. // Step 5 self.performing_a_microtask_checkpoint.set(false); } pub fn empty(&self) -> bool { self.microtask_queue.borrow().is_empty() } }
{ let _ = job.callback.Call_(&*target, ExceptionHandling::Report); }
conditional_block
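The checkpoint in the microtask.rs records follows a fixed shape: refuse re-entry via a flag, then repeatedly swap the pending queue out and run each job until no job has enqueued another. A compact Python sketch of that control flow, leaving out the JS-engine bookkeeping (JobQueueIsEmpty/JobQueueMayNotBeEmpty) and the rejected-promise notification:

# Sketch of the checkpoint loop above: a re-entrancy guard, then
# drain-and-run passes until no job enqueues another job.
class MicrotaskQueue(object):
    def __init__(self):
        self._queue = []
        self._performing_checkpoint = False

    def enqueue(self, job):
        self._queue.append(job)          # job is any zero-arg callable

    def checkpoint(self):
        if self._performing_checkpoint:  # step 1: no nested checkpoints
            return
        self._performing_checkpoint = True
        try:
            while self._queue:           # step 2: drain until stable
                pending, self._queue = self._queue, []
                for job in pending:
                    job()                # a job may enqueue new jobs
        finally:
            self._performing_checkpoint = False

q = MicrotaskQueue()
q.enqueue(lambda: q.enqueue(lambda: print("second pass")))
q.checkpoint()  # runs the outer job, then the job it enqueued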
ExportFileJob.py
# Copyright (c) 2021 Ultimaker B.V. # Cura is released under the terms of the LGPLv3 or higher. import io from typing import List, Optional, Union from UM.FileHandler.FileHandler import FileHandler from UM.FileHandler.FileWriter import FileWriter from UM.FileHandler.WriteFileJob import WriteFileJob from UM.Logger import Logger from UM.MimeTypeDatabase import MimeTypeDatabase from UM.OutputDevice import OutputDeviceError from UM.Scene.SceneNode import SceneNode class ExportFileJob(WriteFileJob):
"""Job that exports the build plate to the correct file format for the Digital Factory Library project.""" def __init__(self, file_handler: FileHandler, nodes: List[SceneNode], job_name: str, extension: str) -> None: file_types = file_handler.getSupportedFileTypesWrite() if len(file_types) == 0: Logger.log("e", "There are no file types available to write with!") raise OutputDeviceError.WriteRequestFailedError("There are no file types available to write with!") mode = None file_writer = None for file_type in file_types: if file_type["extension"] == extension: file_writer = file_handler.getWriter(file_type["id"]) mode = file_type.get("mode") super().__init__(file_writer, self.createStream(mode = mode), nodes, mode) # Determine the filename. self.setFileName("{}.{}".format(job_name, extension)) def getOutput(self) -> bytes: """Get the job result as bytes as that is what we need to upload to the Digital Factory Library.""" output = self.getStream().getvalue() if isinstance(output, str): output = output.encode("utf-8") return output def getMimeType(self) -> str: """Get the mime type of the selected export file type.""" return MimeTypeDatabase.getMimeTypeForFile(self.getFileName()).name @staticmethod def createStream(mode) -> Union[io.BytesIO, io.StringIO]: """Creates the right kind of stream based on the preferred format.""" if mode == FileWriter.OutputMode.TextMode: return io.StringIO() else: return io.BytesIO()
identifier_body
ExportFileJob.py
# Copyright (c) 2021 Ultimaker B.V. # Cura is released under the terms of the LGPLv3 or higher. import io from typing import List, Optional, Union from UM.FileHandler.FileHandler import FileHandler from UM.FileHandler.FileWriter import FileWriter from UM.FileHandler.WriteFileJob import WriteFileJob from UM.Logger import Logger from UM.MimeTypeDatabase import MimeTypeDatabase from UM.OutputDevice import OutputDeviceError from UM.Scene.SceneNode import SceneNode class ExportFileJob(WriteFileJob): """Job that exports the build plate to the correct file format for the Digital Factory Library project.""" def __init__(self, file_handler: FileHandler, nodes: List[SceneNode], job_name: str, extension: str) -> None: file_types = file_handler.getSupportedFileTypesWrite() if len(file_types) == 0:
mode = None file_writer = None for file_type in file_types: if file_type["extension"] == extension: file_writer = file_handler.getWriter(file_type["id"]) mode = file_type.get("mode") super().__init__(file_writer, self.createStream(mode = mode), nodes, mode) # Determine the filename. self.setFileName("{}.{}".format(job_name, extension)) def getOutput(self) -> bytes: """Get the job result as bytes as that is what we need to upload to the Digital Factory Library.""" output = self.getStream().getvalue() if isinstance(output, str): output = output.encode("utf-8") return output def getMimeType(self) -> str: """Get the mime type of the selected export file type.""" return MimeTypeDatabase.getMimeTypeForFile(self.getFileName()).name @staticmethod def createStream(mode) -> Union[io.BytesIO, io.StringIO]: """Creates the right kind of stream based on the preferred format.""" if mode == FileWriter.OutputMode.TextMode: return io.StringIO() else: return io.BytesIO()
Logger.log("e", "There are no file types available to write with!") raise OutputDeviceError.WriteRequestFailedError("There are no file types available to write with!")
conditional_block
ExportFileJob.py
# Copyright (c) 2021 Ultimaker B.V. # Cura is released under the terms of the LGPLv3 or higher. import io from typing import List, Optional, Union from UM.FileHandler.FileHandler import FileHandler from UM.FileHandler.FileWriter import FileWriter from UM.FileHandler.WriteFileJob import WriteFileJob from UM.Logger import Logger from UM.MimeTypeDatabase import MimeTypeDatabase from UM.OutputDevice import OutputDeviceError from UM.Scene.SceneNode import SceneNode class ExportFileJob(WriteFileJob): """Job that exports the build plate to the correct file format for the Digital Factory Library project.""" def __init__(self, file_handler: FileHandler, nodes: List[SceneNode], job_name: str, extension: str) -> None: file_types = file_handler.getSupportedFileTypesWrite() if len(file_types) == 0: Logger.log("e", "There are no file types available to write with!") raise OutputDeviceError.WriteRequestFailedError("There are no file types available to write with!") mode = None file_writer = None for file_type in file_types: if file_type["extension"] == extension: file_writer = file_handler.getWriter(file_type["id"]) mode = file_type.get("mode") super().__init__(file_writer, self.createStream(mode = mode), nodes, mode) # Determine the filename. self.setFileName("{}.{}".format(job_name, extension)) def getOutput(self) -> bytes: """Get the job result as bytes as that is what we need to upload to the Digital Factory Library.""" output = self.getStream().getvalue() if isinstance(output, str): output = output.encode("utf-8") return output def
(self) -> str: """Get the mime type of the selected export file type.""" return MimeTypeDatabase.getMimeTypeForFile(self.getFileName()).name @staticmethod def createStream(mode) -> Union[io.BytesIO, io.StringIO]: """Creates the right kind of stream based on the preferred format.""" if mode == FileWriter.OutputMode.TextMode: return io.StringIO() else: return io.BytesIO()
getMimeType
identifier_name
ExportFileJob.py
# Copyright (c) 2021 Ultimaker B.V. # Cura is released under the terms of the LGPLv3 or higher. import io from typing import List, Optional, Union from UM.FileHandler.FileHandler import FileHandler from UM.FileHandler.FileWriter import FileWriter from UM.FileHandler.WriteFileJob import WriteFileJob from UM.Logger import Logger from UM.MimeTypeDatabase import MimeTypeDatabase from UM.OutputDevice import OutputDeviceError from UM.Scene.SceneNode import SceneNode
class ExportFileJob(WriteFileJob): """Job that exports the build plate to the correct file format for the Digital Factory Library project.""" def __init__(self, file_handler: FileHandler, nodes: List[SceneNode], job_name: str, extension: str) -> None: file_types = file_handler.getSupportedFileTypesWrite() if len(file_types) == 0: Logger.log("e", "There are no file types available to write with!") raise OutputDeviceError.WriteRequestFailedError("There are no file types available to write with!") mode = None file_writer = None for file_type in file_types: if file_type["extension"] == extension: file_writer = file_handler.getWriter(file_type["id"]) mode = file_type.get("mode") super().__init__(file_writer, self.createStream(mode = mode), nodes, mode) # Determine the filename. self.setFileName("{}.{}".format(job_name, extension)) def getOutput(self) -> bytes: """Get the job result as bytes as that is what we need to upload to the Digital Factory Library.""" output = self.getStream().getvalue() if isinstance(output, str): output = output.encode("utf-8") return output def getMimeType(self) -> str: """Get the mime type of the selected export file type.""" return MimeTypeDatabase.getMimeTypeForFile(self.getFileName()).name @staticmethod def createStream(mode) -> Union[io.BytesIO, io.StringIO]: """Creates the right kind of stream based on the preferred format.""" if mode == FileWriter.OutputMode.TextMode: return io.StringIO() else: return io.BytesIO()
random_line_split
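createStream and getOutput in the ExportFileJob records encode one small contract: text-mode writers get a str buffer, everything else a bytes buffer, and the upload payload is always normalized to bytes. A sketch of just that logic; the mode constants here are placeholders for FileWriter.OutputMode.

# Sketch of the stream-selection and byte-normalization logic: text
# mode gets a StringIO, binary mode a BytesIO, and the final payload
# is always returned as bytes.
import io

TEXT_MODE, BINARY_MODE = "text", "binary"   # placeholder mode constants

def create_stream(mode):
    return io.StringIO() if mode == TEXT_MODE else io.BytesIO()

def get_output(stream):
    output = stream.getvalue()
    if isinstance(output, str):        # text writers produce str
        output = output.encode("utf-8")
    return output                      # always bytes for the upload

s = create_stream(TEXT_MODE)
s.write("G1 X10 Y10\n")
assert get_output(s) == b"G1 X10 Y10\n"

s2 = create_stream(BINARY_MODE)
s2.write(b"\x00\x01")
assert get_output(s2) == b"\x00\x01"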
fn_to_numeric_cast_any.rs
use clippy_utils::diagnostics::span_lint_and_sugg; use clippy_utils::source::snippet_with_applicability; use rustc_errors::Applicability; use rustc_hir::Expr; use rustc_lint::LateContext; use rustc_middle::ty::{self, Ty}; use super::FN_TO_NUMERIC_CAST_ANY; pub(super) fn
(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) { // We allow casts from any function type to any function type. match cast_to.kind() { ty::FnDef(..) | ty::FnPtr(..) => return, _ => { /* continue to checks */ }, } match cast_from.kind() { ty::FnDef(..) | ty::FnPtr(_) => { let mut applicability = Applicability::MaybeIncorrect; let from_snippet = snippet_with_applicability(cx, cast_expr.span, "..", &mut applicability); span_lint_and_sugg( cx, FN_TO_NUMERIC_CAST_ANY, expr.span, &format!("casting function pointer `{}` to `{}`", from_snippet, cast_to), "did you mean to invoke the function?", format!("{}() as {}", from_snippet, cast_to), applicability, ); }, _ => {}, } }
check
identifier_name
fn_to_numeric_cast_any.rs
use clippy_utils::diagnostics::span_lint_and_sugg; use clippy_utils::source::snippet_with_applicability;
use rustc_errors::Applicability; use rustc_hir::Expr; use rustc_lint::LateContext; use rustc_middle::ty::{self, Ty}; use super::FN_TO_NUMERIC_CAST_ANY; pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) { // We allow casts from any function type to any function type. match cast_to.kind() { ty::FnDef(..) | ty::FnPtr(..) => return, _ => { /* continue to checks */ }, } match cast_from.kind() { ty::FnDef(..) | ty::FnPtr(_) => { let mut applicability = Applicability::MaybeIncorrect; let from_snippet = snippet_with_applicability(cx, cast_expr.span, "..", &mut applicability); span_lint_and_sugg( cx, FN_TO_NUMERIC_CAST_ANY, expr.span, &format!("casting function pointer `{}` to `{}`", from_snippet, cast_to), "did you mean to invoke the function?", format!("{}() as {}", from_snippet, cast_to), applicability, ); }, _ => {}, } }
random_line_split
fn_to_numeric_cast_any.rs
use clippy_utils::diagnostics::span_lint_and_sugg; use clippy_utils::source::snippet_with_applicability; use rustc_errors::Applicability; use rustc_hir::Expr; use rustc_lint::LateContext; use rustc_middle::ty::{self, Ty}; use super::FN_TO_NUMERIC_CAST_ANY; pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) { // We allow casts from any function type to any function type. match cast_to.kind() { ty::FnDef(..) | ty::FnPtr(..) => return, _ => { /* continue to checks */ }, } match cast_from.kind() { ty::FnDef(..) | ty::FnPtr(_) => { let mut applicability = Applicability::MaybeIncorrect; let from_snippet = snippet_with_applicability(cx, cast_expr.span, "..", &mut applicability); span_lint_and_sugg( cx, FN_TO_NUMERIC_CAST_ANY, expr.span, &format!("casting function pointer `{}` to `{}`", from_snippet, cast_to), "did you mean to invoke the function?", format!("{}() as {}", from_snippet, cast_to), applicability, ); }, _ =>
, } }
{}
conditional_block
fn_to_numeric_cast_any.rs
use clippy_utils::diagnostics::span_lint_and_sugg; use clippy_utils::source::snippet_with_applicability; use rustc_errors::Applicability; use rustc_hir::Expr; use rustc_lint::LateContext; use rustc_middle::ty::{self, Ty}; use super::FN_TO_NUMERIC_CAST_ANY; pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>)
{ // We allow casts from any function type to any function type. match cast_to.kind() { ty::FnDef(..) | ty::FnPtr(..) => return, _ => { /* continue to checks */ }, } match cast_from.kind() { ty::FnDef(..) | ty::FnPtr(_) => { let mut applicability = Applicability::MaybeIncorrect; let from_snippet = snippet_with_applicability(cx, cast_expr.span, "..", &mut applicability); span_lint_and_sugg( cx, FN_TO_NUMERIC_CAST_ANY, expr.span, &format!("casting function pointer `{}` to `{}`", from_snippet, cast_to), "did you mean to invoke the function?", format!("{}() as {}", from_snippet, cast_to), applicability, ); }, _ => {}, } }
identifier_body
JoinAttributes.py
# -*- coding: utf-8 -*- """ *************************************************************************** JoinAttributes.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ from builtins import str __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from qgis.core import QgsFeature from processing.core.GeoAlgorithm import GeoAlgorithm from processing.core.parameters import ParameterVector from processing.core.parameters import ParameterTable from processing.core.parameters import ParameterTableField from processing.core.outputs import OutputVector from processing.tools import dataobjects, vector pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0] class JoinAttributes(GeoAlgorithm): OUTPUT_LAYER = 'OUTPUT_LAYER' INPUT_LAYER = 'INPUT_LAYER' INPUT_LAYER_2 = 'INPUT_LAYER_2' TABLE_FIELD = 'TABLE_FIELD' TABLE_FIELD_2 = 'TABLE_FIELD_2' def defineCharacteristics(self):
self.tr('Input layer'))) self.addParameter(ParameterTable(self.INPUT_LAYER_2, self.tr('Input layer 2'), False)) self.addParameter(ParameterTableField(self.TABLE_FIELD, self.tr('Table field'), self.INPUT_LAYER)) self.addParameter(ParameterTableField(self.TABLE_FIELD_2, self.tr('Table field 2'), self.INPUT_LAYER_2)) self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Joined layer'))) def processAlgorithm(self, feedback): input = self.getParameterValue(self.INPUT_LAYER) input2 = self.getParameterValue(self.INPUT_LAYER_2) output = self.getOutputFromName(self.OUTPUT_LAYER) field = self.getParameterValue(self.TABLE_FIELD) field2 = self.getParameterValue(self.TABLE_FIELD_2) layer = dataobjects.getObjectFromUri(input) joinField1Index = layer.fields().lookupField(field) layer2 = dataobjects.getObjectFromUri(input2) joinField2Index = layer2.fields().lookupField(field2) outFields = vector.combineVectorFields(layer, layer2) writer = output.getVectorWriter(outFields, layer.wkbType(), layer.crs()) # Cache attributes of Layer 2 cache = {} features = vector.features(layer2) total = 100.0 / len(features) for current, feat in enumerate(features): attrs = feat.attributes() joinValue2 = str(attrs[joinField2Index]) if joinValue2 not in cache: cache[joinValue2] = attrs feedback.setProgress(int(current * total)) # Create output vector layer with additional attribute outFeat = QgsFeature() features = vector.features(layer) total = 100.0 / len(features) for current, feat in enumerate(features): outFeat.setGeometry(feat.geometry()) attrs = feat.attributes() joinValue1 = str(attrs[joinField1Index]) attrs.extend(cache.get(joinValue1, [])) outFeat.setAttributes(attrs) writer.addFeature(outFeat) feedback.setProgress(int(current * total)) del writer
self.name, self.i18n_name = self.trAlgorithm('Join attributes table') self.group, self.i18n_group = self.trAlgorithm('Vector general tools') self.addParameter(ParameterVector(self.INPUT_LAYER,
random_line_split
JoinAttributes.py
# -*- coding: utf-8 -*- """ *************************************************************************** JoinAttributes.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ from builtins import str __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from qgis.core import QgsFeature from processing.core.GeoAlgorithm import GeoAlgorithm from processing.core.parameters import ParameterVector from processing.core.parameters import ParameterTable from processing.core.parameters import ParameterTableField from processing.core.outputs import OutputVector from processing.tools import dataobjects, vector pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0] class JoinAttributes(GeoAlgorithm): OUTPUT_LAYER = 'OUTPUT_LAYER' INPUT_LAYER = 'INPUT_LAYER' INPUT_LAYER_2 = 'INPUT_LAYER_2' TABLE_FIELD = 'TABLE_FIELD' TABLE_FIELD_2 = 'TABLE_FIELD_2' def defineCharacteristics(self): self.name, self.i18n_name = self.trAlgorithm('Join attributes table') self.group, self.i18n_group = self.trAlgorithm('Vector general tools') self.addParameter(ParameterVector(self.INPUT_LAYER, self.tr('Input layer'))) self.addParameter(ParameterTable(self.INPUT_LAYER_2, self.tr('Input layer 2'), False)) self.addParameter(ParameterTableField(self.TABLE_FIELD, self.tr('Table field'), self.INPUT_LAYER)) self.addParameter(ParameterTableField(self.TABLE_FIELD_2, self.tr('Table field 2'), self.INPUT_LAYER_2)) self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Joined layer'))) def processAlgorithm(self, feedback): input = self.getParameterValue(self.INPUT_LAYER) input2 = self.getParameterValue(self.INPUT_LAYER_2) output = self.getOutputFromName(self.OUTPUT_LAYER) field = self.getParameterValue(self.TABLE_FIELD) field2 = self.getParameterValue(self.TABLE_FIELD_2) layer = dataobjects.getObjectFromUri(input) joinField1Index = layer.fields().lookupField(field) layer2 = dataobjects.getObjectFromUri(input2) joinField2Index = layer2.fields().lookupField(field2) outFields = vector.combineVectorFields(layer, layer2) writer = output.getVectorWriter(outFields, layer.wkbType(), layer.crs()) # Cache attributes of Layer 2 cache = {} features = vector.features(layer2) total = 100.0 / len(features) for current, feat in enumerate(features):
# Create output vector layer with additional attribute outFeat = QgsFeature() features = vector.features(layer) total = 100.0 / len(features) for current, feat in enumerate(features): outFeat.setGeometry(feat.geometry()) attrs = feat.attributes() joinValue1 = str(attrs[joinField1Index]) attrs.extend(cache.get(joinValue1, [])) outFeat.setAttributes(attrs) writer.addFeature(outFeat) feedback.setProgress(int(current * total)) del writer
attrs = feat.attributes() joinValue2 = str(attrs[joinField2Index]) if joinValue2 not in cache: cache[joinValue2] = attrs feedback.setProgress(int(current * total))
conditional_block
JoinAttributes.py
# -*- coding: utf-8 -*- """ *************************************************************************** JoinAttributes.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ from builtins import str __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from qgis.core import QgsFeature from processing.core.GeoAlgorithm import GeoAlgorithm from processing.core.parameters import ParameterVector from processing.core.parameters import ParameterTable from processing.core.parameters import ParameterTableField from processing.core.outputs import OutputVector from processing.tools import dataobjects, vector pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0] class JoinAttributes(GeoAlgorithm):
OUTPUT_LAYER = 'OUTPUT_LAYER' INPUT_LAYER = 'INPUT_LAYER' INPUT_LAYER_2 = 'INPUT_LAYER_2' TABLE_FIELD = 'TABLE_FIELD' TABLE_FIELD_2 = 'TABLE_FIELD_2' def defineCharacteristics(self): self.name, self.i18n_name = self.trAlgorithm('Join attributes table') self.group, self.i18n_group = self.trAlgorithm('Vector general tools') self.addParameter(ParameterVector(self.INPUT_LAYER, self.tr('Input layer'))) self.addParameter(ParameterTable(self.INPUT_LAYER_2, self.tr('Input layer 2'), False)) self.addParameter(ParameterTableField(self.TABLE_FIELD, self.tr('Table field'), self.INPUT_LAYER)) self.addParameter(ParameterTableField(self.TABLE_FIELD_2, self.tr('Table field 2'), self.INPUT_LAYER_2)) self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Joined layer'))) def processAlgorithm(self, feedback): input = self.getParameterValue(self.INPUT_LAYER) input2 = self.getParameterValue(self.INPUT_LAYER_2) output = self.getOutputFromName(self.OUTPUT_LAYER) field = self.getParameterValue(self.TABLE_FIELD) field2 = self.getParameterValue(self.TABLE_FIELD_2) layer = dataobjects.getObjectFromUri(input) joinField1Index = layer.fields().lookupField(field) layer2 = dataobjects.getObjectFromUri(input2) joinField2Index = layer2.fields().lookupField(field2) outFields = vector.combineVectorFields(layer, layer2) writer = output.getVectorWriter(outFields, layer.wkbType(), layer.crs()) # Cache attributes of Layer 2 cache = {} features = vector.features(layer2) total = 100.0 / len(features) for current, feat in enumerate(features): attrs = feat.attributes() joinValue2 = str(attrs[joinField2Index]) if joinValue2 not in cache: cache[joinValue2] = attrs feedback.setProgress(int(current * total)) # Create output vector layer with additional attribute outFeat = QgsFeature() features = vector.features(layer) total = 100.0 / len(features) for current, feat in enumerate(features): outFeat.setGeometry(feat.geometry()) attrs = feat.attributes() joinValue1 = str(attrs[joinField1Index]) attrs.extend(cache.get(joinValue1, [])) outFeat.setAttributes(attrs) writer.addFeature(outFeat) feedback.setProgress(int(current * total)) del writer
identifier_body
JoinAttributes.py
# -*- coding: utf-8 -*- """ *************************************************************************** JoinAttributes.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ from builtins import str __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from qgis.core import QgsFeature from processing.core.GeoAlgorithm import GeoAlgorithm from processing.core.parameters import ParameterVector from processing.core.parameters import ParameterTable from processing.core.parameters import ParameterTableField from processing.core.outputs import OutputVector from processing.tools import dataobjects, vector pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0] class JoinAttributes(GeoAlgorithm): OUTPUT_LAYER = 'OUTPUT_LAYER' INPUT_LAYER = 'INPUT_LAYER' INPUT_LAYER_2 = 'INPUT_LAYER_2' TABLE_FIELD = 'TABLE_FIELD' TABLE_FIELD_2 = 'TABLE_FIELD_2' def
(self): self.name, self.i18n_name = self.trAlgorithm('Join attributes table') self.group, self.i18n_group = self.trAlgorithm('Vector general tools') self.addParameter(ParameterVector(self.INPUT_LAYER, self.tr('Input layer'))) self.addParameter(ParameterTable(self.INPUT_LAYER_2, self.tr('Input layer 2'), False)) self.addParameter(ParameterTableField(self.TABLE_FIELD, self.tr('Table field'), self.INPUT_LAYER)) self.addParameter(ParameterTableField(self.TABLE_FIELD_2, self.tr('Table field 2'), self.INPUT_LAYER_2)) self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Joined layer'))) def processAlgorithm(self, feedback): input = self.getParameterValue(self.INPUT_LAYER) input2 = self.getParameterValue(self.INPUT_LAYER_2) output = self.getOutputFromName(self.OUTPUT_LAYER) field = self.getParameterValue(self.TABLE_FIELD) field2 = self.getParameterValue(self.TABLE_FIELD_2) layer = dataobjects.getObjectFromUri(input) joinField1Index = layer.fields().lookupField(field) layer2 = dataobjects.getObjectFromUri(input2) joinField2Index = layer2.fields().lookupField(field2) outFields = vector.combineVectorFields(layer, layer2) writer = output.getVectorWriter(outFields, layer.wkbType(), layer.crs()) # Cache attributes of Layer 2 cache = {} features = vector.features(layer2) total = 100.0 / len(features) for current, feat in enumerate(features): attrs = feat.attributes() joinValue2 = str(attrs[joinField2Index]) if joinValue2 not in cache: cache[joinValue2] = attrs feedback.setProgress(int(current * total)) # Create output vector layer with additional attribute outFeat = QgsFeature() features = vector.features(layer) total = 100.0 / len(features) for current, feat in enumerate(features): outFeat.setGeometry(feat.geometry()) attrs = feat.attributes() joinValue1 = str(attrs[joinField1Index]) attrs.extend(cache.get(joinValue1, [])) outFeat.setAttributes(attrs) writer.addFeature(outFeat) feedback.setProgress(int(current * total)) del writer
defineCharacteristics
identifier_name
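processAlgorithm in the JoinAttributes records is a one-sided hash join: cache layer-2 attribute rows keyed by the join field (first match wins), then extend each layer-1 row with its cached match. The same idea in plain Python, with toy dict rows standing in for QGIS features:

# Sketch of the hash-join above: build a cache of layer-2 rows keyed
# by the join field (keeping the first match), then append the cached
# row to each layer-1 row.
def join_attributes(rows1, key1, rows2, key2):
    cache = {}
    for row in rows2:
        k = str(row[key2])
        if k not in cache:             # first match wins, as in the record
            cache[k] = list(row.values())
    joined = []
    for row in rows1:
        out = list(row.values())
        out.extend(cache.get(str(row[key1]), []))
        joined.append(out)
    return joined

parcels = [{"id": 1, "zone": "A"}, {"id": 2, "zone": "B"}]
zones = [{"zone": "A", "owner": "city"}, {"zone": "B", "owner": "state"}]
print(join_attributes(parcels, "zone", zones, "zone"))
# [[1, 'A', 'A', 'city'], [2, 'B', 'B', 'state']]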
LogInPage.tsx
import * as React from 'react'; import { Provider } from 'react-redux'; // import { Office } from 'Office'; export class LogInPage extends React.Component<{}, {}> { public render(): React.ReactElement<Provider> { var style_img = { align: 'center' }; var style_button = { backgroundcolor: 'rgb(0,122,204)', // save button blue textalign: 'center', textcolor: 'rgb(255,255,255)', font: "Arial, sans-serif, 12px", align: 'center' }; var style_section = { color: 'rgb(104,33,122)', // the VS purple font: "Arial, sans-serif, 12px" }; var style_signin = { color: 'rgb(104,33,122)', // the VS purple font: "Arial, sans-serif, 12px" }; var style_text1 = { color: "rgb(30,30,30)", // black font: "Arial, sans-serif, 12px" }; var style_text2 = { color: 'rgb(157,157,157)' // dark gray // font: "Arial, sans-serif, 12px" }; var style_bottomlogo = { width:'500px', height:'120px', align: 'center' }; console.log('got to vsts'); return ( <div> <div> <img src="../Images/logo.png" alt="VSOLogo" style = {style_img}/> </div> <div> <button id="msgprops" class="ms-Button stretch"> <span class="ms-Button-label" align='center'> Create New Account </span> </button> </div> <div> <p style = {style_text1}> Already have an account? <a href="../Authenticate/authenticate.tsx" style = {style_signin}> Sign in</a> // pass in user to authenticate.tsx </p> </div>
<h1 style = {style_section}> Create work items </h1> <p style = {style_text2}> Do you have an email thread that should be turned into a work item or has your boss sent you a list of things to do? Create work items directly from your email.</p> <h2 style = {style_section}>Respond to comments</h2> <p style = {style_text2}> When you are mentioned in a comment thread, post a reply without clicking away from the email notification.</p> <h3 style = {style_section}>View details of a work item</h3> <p style = {style_text2}> If a work item number is present in the email body, details will be displayed in the adjacent task pane.</p> </div> <img src="./logo_strip.png" alt="VSOLogo" style={style_bottomlogo}/> </div> ); } }
<div>
random_line_split
fields.py
from sqlalchemy.orm import relationship, backref
from sqlalchemy.schema import Column, ForeignKey
# Since SQLAlchemy 1.0.0
# from sqlalchemy.types import MatchType
from .types import (
    LimitedString, LimitedText, LimitedUnicode, LimitedBigInteger,
    LimitedInteger, LimitedSmallInteger, LimitedFloat, LimitedNumeric,
    LimitedUnicodeText, DateTime, Boolean, Date, Interval, LargeBinary,
    PickleType, Time, Choice, Dict, ChoiceArray,
)


class ProcessableMixin(object):
    """ Mixin that allows running callables on a value that is
    being set on a field.
    """
    def __init__(self, *args, **kwargs):
        """ Pop before/after validation processors

        :before_validation: Processors that are run before session.flush()
        :after_validation: Processors that are run after session.flush()
            but before session.commit()
        """
        self.before_validation = kwargs.pop('before_validation', ())
        self.after_validation = kwargs.pop('after_validation', ())
        super(ProcessableMixin, self).__init__(*args, **kwargs)

    def apply_processors(self, instance, new_value, before=False, after=False):
        processors = []
        if before:
            processors += list(self.before_validation)
        if after:
            processors += list(self.after_validation)
        for proc in processors:
            new_value = proc(instance=instance, new_value=new_value)
        return new_value


class BaseField(Column):
    """ Base plain column that otherwise would be created as
    sqlalchemy.Column(sqlalchemy.Type())

    Attributes:
        _sqla_type_cls: SQLAlchemy type class used to instantiate
            the column type.
        _type_unchanged_kwargs: sequence of strings that represent
            arguments received by `_sqla_type_cls`, the names of which
            have not been changed. Values of field init arguments with
            these names will be extracted from field init kwargs and
            passed to Type init as is.
        _column_valid_kwargs: sequence of string names of valid kwargs
            that a Column may receive.
    """
    _sqla_type_cls = None
    _type_unchanged_kwargs = ()
    _column_valid_kwargs = (
        'name', 'type_', 'autoincrement', 'default', 'doc', 'key', 'index',
        'info', 'nullable', 'onupdate', 'primary_key', 'server_default',
        'server_onupdate', 'quote', 'unique', 'system', '_proxies')

    def __init__(self, *args, **kwargs):
        """ Responsible for:
        * Filter out type-specific kwargs and init Type using these.
        * Filter out column-specific kwargs and init column using them.
        * If `args` are provided, that means column proxy is being
          created. In this case Type does not need to be created.
        """
        type_args, type_kw, cleaned_kw = self.process_type_args(kwargs)
        col_kw = self.process_column_args(cleaned_kw)
        # Column proxy is created by declarative extension
        if args:
            col_kw['name'], col_kw['type_'] = args
        # Column init when defining a schema
        else:
            col_kw['type_'] = self._sqla_type_cls(*type_args, **type_kw)
        super(BaseField, self).__init__(**col_kw)

    def __setattr__(self, key, value):
        """ Store column name on 'self.type'

        This allows error messages in custom types' validation to be
        more explicit.
        """
        if value is not None and key == 'name':
            self.type._column_name = value
        return super(BaseField, self).__setattr__(key, value)

    def process_type_args(self, kwargs):
        """ Process arguments of a sqla Type.

        http://docs.sqlalchemy.org/en/rel_0_9/core/type_basics.html#generic-types

        Process `kwargs` to extract type-specific arguments.
        If some arguments' names should be changed, extend this method
        with a manual args processor.
Returns: * type_args: sequence of type-specific posional arguments * type_kw: dict of type-specific kwargs * cleaned_kw: input kwargs cleaned from type-specific args """ type_kw = dict() type_args = () cleaned_kw = kwargs.copy() for arg in self._type_unchanged_kwargs: if arg in cleaned_kw: type_kw[arg] = cleaned_kw.pop(arg) return type_args, type_kw, cleaned_kw def _drop_invalid_kwargs(self, kwargs): """ Drop keys from `kwargs` that are not present in `self._column_valid_kwargs`, thus are not valid kwargs to be passed to Column. """ return {k: v for k, v in kwargs.items() if k in self._column_valid_kwargs} def
(self, kwargs): """ Process/extract/rename Column arguments. http://docs.sqlalchemy.org/en/rel_0_9/core/metadata.html#column-table-metadata-api Changed: required -> nullable help_text -> doc """ col_kw = kwargs.copy() col_kw['nullable'] = not col_kw.pop('required', False) col_kw['doc'] = col_kw.pop('help_text', None) col_kw = self._drop_invalid_kwargs(col_kw) return col_kw @property def _constructor(self): return self.__class__ class BigIntegerField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedBigInteger _type_unchanged_kwargs = ('min_value', 'max_value') class BooleanField(ProcessableMixin, BaseField): _sqla_type_cls = Boolean _type_unchanged_kwargs = ('create_constraint') def process_type_args(self, kwargs): """ Changed: constraint_name -> name """ type_args, type_kw, cleaned_kw = super( BooleanField, self).process_type_args(kwargs) type_kw.update({ 'name': cleaned_kw.pop('constraint_name', None), }) return type_args, type_kw, cleaned_kw class DateField(ProcessableMixin, BaseField): _sqla_type_cls = Date _type_unchanged_kwargs = () class DateTimeField(ProcessableMixin, BaseField): _sqla_type_cls = DateTime _type_unchanged_kwargs = ('timezone',) class ChoiceField(ProcessableMixin, BaseField): _sqla_type_cls = Choice _type_unchanged_kwargs = ( 'collation', 'convert_unicode', 'unicode_error', '_warn_on_bytestring', 'choices') class FloatField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedFloat _type_unchanged_kwargs = ( 'precision', 'asdecimal', 'decimal_return_scale', 'min_value', 'max_value') class IntegerField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedInteger _type_unchanged_kwargs = ('min_value', 'max_value') class IdField(IntegerField): """ Just a subclass of IntegerField that must be used for fields that represent database-specific 'id' field. 
""" pass class IntervalField(ProcessableMixin, BaseField): _sqla_type_cls = Interval _type_unchanged_kwargs = ( 'native', 'second_precision', 'day_precision') class BinaryField(ProcessableMixin, BaseField): _sqla_type_cls = LargeBinary _type_unchanged_kwargs = ('length',) # Since SQLAlchemy 1.0.0 # class MatchField(BooleanField): # _sqla_type_cls = MatchType class DecimalField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedNumeric _type_unchanged_kwargs = ( 'precision', 'scale', 'decimal_return_scale', 'asdecimal', 'min_value', 'max_value') class PickleField(ProcessableMixin, BaseField): _sqla_type_cls = PickleType _type_unchanged_kwargs = ( 'protocol', 'pickler', 'comparator') class SmallIntegerField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedSmallInteger _type_unchanged_kwargs = ('min_value', 'max_value') class StringField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedString _type_unchanged_kwargs = ( 'collation', 'convert_unicode', 'unicode_error', '_warn_on_bytestring', 'min_length', 'max_length') def process_type_args(self, kwargs): """ Changed: max_length -> length """ type_args, type_kw, cleaned_kw = super( StringField, self).process_type_args(kwargs) type_kw.update({ 'length': type_kw.get('max_length'), }) return type_args, type_kw, cleaned_kw class TextField(StringField): _sqla_type_cls = LimitedText class TimeField(DateTimeField): _sqla_type_cls = Time class UnicodeField(StringField): _sqla_type_cls = LimitedUnicode class UnicodeTextField(StringField): _sqla_type_cls = LimitedUnicodeText class DictField(BaseField): _sqla_type_cls = Dict _type_unchanged_kwargs = () def process_type_args(self, kwargs): type_args, type_kw, cleaned_kw = super( DictField, self).process_type_args(kwargs) cleaned_kw['default'] = cleaned_kw.get('default') or {} return type_args, type_kw, cleaned_kw class ListField(BaseField): _sqla_type_cls = ChoiceArray _type_unchanged_kwargs = ( 'as_tuple', 'dimensions', 'zero_indexes', 'choices') def process_type_args(self, kwargs): """ Covert field class to its `_sqla_type_cls`. StringField & UnicodeField are replaced with corresponding Text fields because when String* fields are used, SQLA creates db column of postgresql type 'varying[]'. But when querying that column with text, requested text if submited as 'text[]'. Changed: item_type field class -> item_type field type """ type_args, type_kw, cleaned_kw = super( ListField, self).process_type_args(kwargs) if 'item_type' in cleaned_kw: item_type_field = cleaned_kw['item_type'] if item_type_field is StringField: item_type_field = TextField if item_type_field is UnicodeField: item_type_field = UnicodeTextField type_kw['item_type'] = item_type_field._sqla_type_cls cleaned_kw['default'] = cleaned_kw.get('default') or [] return type_args, type_kw, cleaned_kw class BaseSchemaItemField(BaseField): """ Base class for fields/columns that accept a schema item/constraint on column init. E.g. Column(Integer, ForeignKey('user.id')) It differs from regular columns in that an item/constraint passed to the Column on init has to be passed as a positional argument and should also receive arguments. Thus 3 objects need to be created on init: Column, Type, and SchemaItem/Constraint. Attributes: _schema_class: Class to be instantiated to create a schema item. _schema_kwarg_prefix: Prefix schema item's kwargs should have. This is used to avoid making a mess, as both column, type and schemaitem kwargs may be passed at once. 
_schema_valid_kwargs: Sequence of strings that represent names of kwargs `_schema_class` may receive. Should not include prefix. """ _schema_class = None _schema_kwarg_prefix = '' _schema_valid_kwargs = () def __init__(self, *args, **kwargs): """ Responsible for: * Filter out type-specific kwargs and init Type using these. * Filter out `_schema_class` kwargs and init `_schema_class`. * Filter out column-slecific kwargs and init column using them. * If `args` are provided, that means column proxy is being created. In this case Type does not need to be created. """ type_args, type_kw, cleaned_kw = self.process_type_args(kwargs) if not args: schema_item, cleaned_kw = self._generate_schema_item(cleaned_kw) column_kw = self.process_column_args(cleaned_kw) # Column proxy is created by declarative extension if args: column_kw['name'], column_kw['type_'], schema_item = args # Column init when defining a schema else: column_kw['type_'] = self._sqla_type_cls(*type_args, **type_kw) column_args = (schema_item,) return Column.__init__(self, *column_args, **column_kw) def _generate_schema_item(self, cleaned_kw): """ Generate SchemaItem using `_schema_class` and kwargs filtered out from `cleaned_kw`. Returns created instance and cleaned kwargs. """ schema_kwargs = {} for key in self._schema_valid_kwargs: prefixed_key = self._schema_kwarg_prefix + key if prefixed_key in cleaned_kw: schema_kwargs[key] = cleaned_kw.pop(prefixed_key) schema_item = self._schema_class(**schema_kwargs) return schema_item, cleaned_kw class ForeignKeyField(BaseSchemaItemField): """ Integer ForeignKey field. This is the place where `ondelete` rules kwargs should be passed. If you switched from the mongodb engine, copy the same `ondelete` rules you passed to mongo's `Relationship` constructor. `ondelete` kwargs may be kept in both fields with no side-effects when switching between the sqla and mongo engines. Developers are not encouraged to change the value of this field on model to add/update relationship. Use `Relationship` constructor with backreference settings instead. """ _sqla_type_cls = None _type_unchanged_kwargs = () _schema_class = ForeignKey _schema_kwarg_prefix = 'ref_' _schema_valid_kwargs = ( 'column', '_constraint', 'use_alter', 'name', 'onupdate', 'ondelete', 'deferrable', 'initially', 'link_to_name', 'match') def __init__(self, *args, **kwargs): """ Override to determine `self._sqla_type_cls`. Type is determined using 'ref_column_type' value from :kwargs:. Its value must be a *Field class of a field that is being referenced by FK field or a `_sqla_type_cls` of that *Field cls. """ if not args: field_type = kwargs.pop(self._schema_kwarg_prefix + 'column_type') if hasattr(field_type, '_sqla_type_cls'): field_type = field_type._sqla_type_cls self._sqla_type_cls = field_type super(ForeignKeyField, self).__init__(*args, **kwargs) def _get_referential_action(self, kwargs, key): """ Determine/translate generic rule name to SQLA-specific rule. Output rule name is a valid SQL Referential action name. If `ondelete` kwarg is not provided, no referential action will be created. 
Valid kwargs for `ondelete` kwarg are: CASCADE Translates to SQL as `CASCADE` RESTRICT Translates to SQL as `RESTRICT` NULLIFY Translates to SQL as `SET NULL Not supported SQL referential actions: `NO ACTION`, `SET DEFAULT` """ key = self._schema_kwarg_prefix + key action = kwargs.pop(key, None) if action is None: return action rules = { 'CASCADE': 'CASCADE', 'RESTRICT': 'RESTRICT', 'NULLIFY': 'SET NULL', } action = action.upper() if action not in rules: raise KeyError('Invalid `{}` argument value. Must be ' 'one of: {}'.format(key, ', '.join(rules.keys()))) return rules[action] def _generate_schema_item(self, cleaned_kw): """ Override default implementation to generate 'ondelete' and 'onupdate' arguments. """ pref = self._schema_kwarg_prefix cleaned_kw[pref + 'ondelete'] = self._get_referential_action( cleaned_kw, 'ondelete') cleaned_kw[pref + 'onupdate'] = self._get_referential_action( cleaned_kw, 'onupdate') return super(ForeignKeyField, self)._generate_schema_item(cleaned_kw) relationship_kwargs = { 'secondary', 'primaryjoin', 'secondaryjoin', 'foreign_keys', 'uselist', 'order_by', 'backref', 'back_populates', 'post_update', 'cascade', 'extension', 'viewonly', 'lazy', 'collection_class', 'passive_deletes', 'passive_updates', 'remote_side', 'enable_typechecks', 'join_depth', 'comparator_factory', 'single_parent', 'innerjoin', 'distinct_target_key', 'doc', 'active_history', 'cascade_backrefs', 'load_on_pending', 'strategy_class', '_local_remote_pairs', 'query_class', 'info', 'document', 'name' } def Relationship(**kwargs): """ Thin wrapper around sqlalchemy.orm.relationship. The goal of this wrapper is to allow passing both relationship and backref arguments to a single function. Backref arguments should be prefixed with 'backref_'. This function splits relationship-specific and backref-specific arguments and makes a call like: relationship(..., ..., backref=backref(...)) :lazy: setting is set to 'immediate' on the 'One' side of One2One or One2Many relationships. This is done both for relationship itself and backref so ORM 'after_update' events are fired when relationship is updated. For backref 'uselist' is assumed to be False by default. From SQLAlchemy docs: immediate - items should be loaded as the parents are loaded, using a separate SELECT statement, or identity map fetch for simple many-to-one references. """ backref_pre = 'backref_' if 'help_text' in kwargs: kwargs['doc'] = kwargs.pop('help_text', None) if (backref_pre + 'help_text') in kwargs: kwargs[backref_pre + 'doc'] = kwargs.pop( backref_pre + 'help_text', None) kwargs = {k: v for k, v in kwargs.items() if k in relationship_kwargs or k[len(backref_pre):] in relationship_kwargs} rel_kw, backref_kw = {}, {} for key, val in kwargs.items(): if key.startswith(backref_pre): key = key[len(backref_pre):] backref_kw[key] = val else: rel_kw[key] = val rel_document = rel_kw.pop('document') if 'uselist' in rel_kw and not rel_kw['uselist']: rel_kw['lazy'] = 'immediate' if backref_kw: if not backref_kw.get('uselist'): backref_kw['lazy'] = 'immediate' backref_name = backref_kw.pop('name') rel_kw['backref'] = backref(backref_name, **backref_kw) return relationship(rel_document, **rel_kw)
process_column_args
identifier_name
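The fields.py module sampled in this record routes generic field kwargs onto SQLAlchemy's Column (`required` becomes `nullable`, `help_text` becomes `doc`), while ProcessableMixin pops `before_validation`/`after_validation` callables that are run around session.flush(). A minimal usage sketch — the `Story` model, its declarative `Base`, and the `strip_spaces` processor are illustrative assumptions, not part of the sampled file:

# assumed imports: a declarative Base, plus IdField/StringField/IntegerField
# from the fields module above
def strip_spaces(instance, new_value):
    # apply_processors invokes each processor as proc(instance=..., new_value=...)
    return new_value.strip() if isinstance(new_value, str) else new_value

class Story(Base):
    __tablename__ = 'stories'
    id = IdField(primary_key=True)
    # max_length is forwarded to LimitedString as length=77;
    # required=True becomes Column(nullable=False)
    name = StringField(max_length=77, required=True,
                       before_validation=(strip_spaces,))
    rating = IntegerField(min_value=0, max_value=10)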
fields.py
from sqlalchemy.orm import relationship, backref from sqlalchemy.schema import Column, ForeignKey # Since SQLAlchemy 1.0.0 # from sqlalchemy.types import MatchType from .types import ( LimitedString, LimitedText, LimitedUnicode, LimitedBigInteger, LimitedInteger, LimitedSmallInteger, LimitedFloat, LimitedNumeric, LimitedUnicodeText, DateTime, Boolean, Date, Interval, LargeBinary, PickleType, Time, Choice, Dict, ChoiceArray, ) class ProcessableMixin(object): """ Mixin that allows running callables on a value that is being set on a field. """ def __init__(self, *args, **kwargs): """ Pop before/after validation processors :before_validation: Processors that are run before session.flush() :after_validation: Processors that are run after session.flush() but before session.commit() """ self.before_validation = kwargs.pop('before_validation', ()) self.after_validation = kwargs.pop('after_validation', ()) super(ProcessableMixin, self).__init__(*args, **kwargs) def apply_processors(self, instance, new_value, before=False, after=False): processors = [] if before: processors += list(self.before_validation) if after: processors += list(self.after_validation) for proc in processors: new_value = proc(instance=instance, new_value=new_value) return new_value class BaseField(Column): """ Base plain column that otherwise would be created as sqlalchemy.Column(sqlalchemy.Type()) Attributes: _sqla_type_cls: SQLAlchemy type class used to instantiate the column type. _type_unchanged_kwargs: sequence of strings that represent arguments received by `_sqla_type_cls`, the names of which have not been changed. Values of field init arguments with these names will be extracted from field init kwargs and passed to Type init as is. _column_valid_kwargs: sequence of string names of valid kwargs that a Column may receive. """ _sqla_type_cls = None _type_unchanged_kwargs = () _column_valid_kwargs = ( 'name', 'type_', 'autoincrement', 'default', 'doc', 'key', 'index', 'info', 'nullable', 'onupdate', 'primary_key', 'server_default', 'server_onupdate', 'quote', 'unique', 'system', '_proxies') def __init__(self, *args, **kwargs): """ Responsible for: * Filter out type-specific kwargs and init Type using these. * Filter out column-slecific kwargs and init column using them. * If `args` are provided, that means column proxy is being created. In this case Type does not need to be created. """ type_args, type_kw, cleaned_kw = self.process_type_args(kwargs) col_kw = self.process_column_args(cleaned_kw) # Column proxy is created by declarative extension if args: col_kw['name'], col_kw['type_'] = args # Column init when defining a schema else: col_kw['type_'] = self._sqla_type_cls(*type_args, **type_kw) super(BaseField, self).__init__(**col_kw) def __setattr__(self, key, value): """ Store column name on 'self.type' This allows error messages in custom types' validation be more explicit. """ if value is not None and key == 'name': self.type._column_name = value return super(BaseField, self).__setattr__(key, value) def process_type_args(self, kwargs): """ Process arguments of a sqla Type. http://docs.sqlalchemy.org/en/rel_0_9/core/type_basics.html#generic-types Process `kwargs` to extract type-specific arguments. If some arguments' names should be changed, extend this method with a manual args processor. 
Returns: * type_args: sequence of type-specific posional arguments * type_kw: dict of type-specific kwargs * cleaned_kw: input kwargs cleaned from type-specific args """ type_kw = dict() type_args = () cleaned_kw = kwargs.copy() for arg in self._type_unchanged_kwargs: if arg in cleaned_kw: type_kw[arg] = cleaned_kw.pop(arg) return type_args, type_kw, cleaned_kw def _drop_invalid_kwargs(self, kwargs): """ Drop keys from `kwargs` that are not present in `self._column_valid_kwargs`, thus are not valid kwargs to be passed to Column. """ return {k: v for k, v in kwargs.items() if k in self._column_valid_kwargs} def process_column_args(self, kwargs): """ Process/extract/rename Column arguments. http://docs.sqlalchemy.org/en/rel_0_9/core/metadata.html#column-table-metadata-api Changed: required -> nullable help_text -> doc """ col_kw = kwargs.copy() col_kw['nullable'] = not col_kw.pop('required', False) col_kw['doc'] = col_kw.pop('help_text', None) col_kw = self._drop_invalid_kwargs(col_kw) return col_kw @property def _constructor(self): return self.__class__ class BigIntegerField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedBigInteger _type_unchanged_kwargs = ('min_value', 'max_value') class BooleanField(ProcessableMixin, BaseField): _sqla_type_cls = Boolean _type_unchanged_kwargs = ('create_constraint') def process_type_args(self, kwargs): """ Changed: constraint_name -> name """ type_args, type_kw, cleaned_kw = super( BooleanField, self).process_type_args(kwargs) type_kw.update({ 'name': cleaned_kw.pop('constraint_name', None), }) return type_args, type_kw, cleaned_kw class DateField(ProcessableMixin, BaseField): _sqla_type_cls = Date _type_unchanged_kwargs = () class DateTimeField(ProcessableMixin, BaseField): _sqla_type_cls = DateTime _type_unchanged_kwargs = ('timezone',) class ChoiceField(ProcessableMixin, BaseField): _sqla_type_cls = Choice _type_unchanged_kwargs = ( 'collation', 'convert_unicode', 'unicode_error', '_warn_on_bytestring', 'choices') class FloatField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedFloat _type_unchanged_kwargs = ( 'precision', 'asdecimal', 'decimal_return_scale', 'min_value', 'max_value') class IntegerField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedInteger _type_unchanged_kwargs = ('min_value', 'max_value') class IdField(IntegerField): """ Just a subclass of IntegerField that must be used for fields that represent database-specific 'id' field. """ pass class IntervalField(ProcessableMixin, BaseField):
class BinaryField(ProcessableMixin, BaseField): _sqla_type_cls = LargeBinary _type_unchanged_kwargs = ('length',) # Since SQLAlchemy 1.0.0 # class MatchField(BooleanField): # _sqla_type_cls = MatchType class DecimalField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedNumeric _type_unchanged_kwargs = ( 'precision', 'scale', 'decimal_return_scale', 'asdecimal', 'min_value', 'max_value') class PickleField(ProcessableMixin, BaseField): _sqla_type_cls = PickleType _type_unchanged_kwargs = ( 'protocol', 'pickler', 'comparator') class SmallIntegerField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedSmallInteger _type_unchanged_kwargs = ('min_value', 'max_value') class StringField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedString _type_unchanged_kwargs = ( 'collation', 'convert_unicode', 'unicode_error', '_warn_on_bytestring', 'min_length', 'max_length') def process_type_args(self, kwargs): """ Changed: max_length -> length """ type_args, type_kw, cleaned_kw = super( StringField, self).process_type_args(kwargs) type_kw.update({ 'length': type_kw.get('max_length'), }) return type_args, type_kw, cleaned_kw class TextField(StringField): _sqla_type_cls = LimitedText class TimeField(DateTimeField): _sqla_type_cls = Time class UnicodeField(StringField): _sqla_type_cls = LimitedUnicode class UnicodeTextField(StringField): _sqla_type_cls = LimitedUnicodeText class DictField(BaseField): _sqla_type_cls = Dict _type_unchanged_kwargs = () def process_type_args(self, kwargs): type_args, type_kw, cleaned_kw = super( DictField, self).process_type_args(kwargs) cleaned_kw['default'] = cleaned_kw.get('default') or {} return type_args, type_kw, cleaned_kw class ListField(BaseField): _sqla_type_cls = ChoiceArray _type_unchanged_kwargs = ( 'as_tuple', 'dimensions', 'zero_indexes', 'choices') def process_type_args(self, kwargs): """ Covert field class to its `_sqla_type_cls`. StringField & UnicodeField are replaced with corresponding Text fields because when String* fields are used, SQLA creates db column of postgresql type 'varying[]'. But when querying that column with text, requested text if submited as 'text[]'. Changed: item_type field class -> item_type field type """ type_args, type_kw, cleaned_kw = super( ListField, self).process_type_args(kwargs) if 'item_type' in cleaned_kw: item_type_field = cleaned_kw['item_type'] if item_type_field is StringField: item_type_field = TextField if item_type_field is UnicodeField: item_type_field = UnicodeTextField type_kw['item_type'] = item_type_field._sqla_type_cls cleaned_kw['default'] = cleaned_kw.get('default') or [] return type_args, type_kw, cleaned_kw class BaseSchemaItemField(BaseField): """ Base class for fields/columns that accept a schema item/constraint on column init. E.g. Column(Integer, ForeignKey('user.id')) It differs from regular columns in that an item/constraint passed to the Column on init has to be passed as a positional argument and should also receive arguments. Thus 3 objects need to be created on init: Column, Type, and SchemaItem/Constraint. Attributes: _schema_class: Class to be instantiated to create a schema item. _schema_kwarg_prefix: Prefix schema item's kwargs should have. This is used to avoid making a mess, as both column, type and schemaitem kwargs may be passed at once. _schema_valid_kwargs: Sequence of strings that represent names of kwargs `_schema_class` may receive. Should not include prefix. 
""" _schema_class = None _schema_kwarg_prefix = '' _schema_valid_kwargs = () def __init__(self, *args, **kwargs): """ Responsible for: * Filter out type-specific kwargs and init Type using these. * Filter out `_schema_class` kwargs and init `_schema_class`. * Filter out column-slecific kwargs and init column using them. * If `args` are provided, that means column proxy is being created. In this case Type does not need to be created. """ type_args, type_kw, cleaned_kw = self.process_type_args(kwargs) if not args: schema_item, cleaned_kw = self._generate_schema_item(cleaned_kw) column_kw = self.process_column_args(cleaned_kw) # Column proxy is created by declarative extension if args: column_kw['name'], column_kw['type_'], schema_item = args # Column init when defining a schema else: column_kw['type_'] = self._sqla_type_cls(*type_args, **type_kw) column_args = (schema_item,) return Column.__init__(self, *column_args, **column_kw) def _generate_schema_item(self, cleaned_kw): """ Generate SchemaItem using `_schema_class` and kwargs filtered out from `cleaned_kw`. Returns created instance and cleaned kwargs. """ schema_kwargs = {} for key in self._schema_valid_kwargs: prefixed_key = self._schema_kwarg_prefix + key if prefixed_key in cleaned_kw: schema_kwargs[key] = cleaned_kw.pop(prefixed_key) schema_item = self._schema_class(**schema_kwargs) return schema_item, cleaned_kw class ForeignKeyField(BaseSchemaItemField): """ Integer ForeignKey field. This is the place where `ondelete` rules kwargs should be passed. If you switched from the mongodb engine, copy the same `ondelete` rules you passed to mongo's `Relationship` constructor. `ondelete` kwargs may be kept in both fields with no side-effects when switching between the sqla and mongo engines. Developers are not encouraged to change the value of this field on model to add/update relationship. Use `Relationship` constructor with backreference settings instead. """ _sqla_type_cls = None _type_unchanged_kwargs = () _schema_class = ForeignKey _schema_kwarg_prefix = 'ref_' _schema_valid_kwargs = ( 'column', '_constraint', 'use_alter', 'name', 'onupdate', 'ondelete', 'deferrable', 'initially', 'link_to_name', 'match') def __init__(self, *args, **kwargs): """ Override to determine `self._sqla_type_cls`. Type is determined using 'ref_column_type' value from :kwargs:. Its value must be a *Field class of a field that is being referenced by FK field or a `_sqla_type_cls` of that *Field cls. """ if not args: field_type = kwargs.pop(self._schema_kwarg_prefix + 'column_type') if hasattr(field_type, '_sqla_type_cls'): field_type = field_type._sqla_type_cls self._sqla_type_cls = field_type super(ForeignKeyField, self).__init__(*args, **kwargs) def _get_referential_action(self, kwargs, key): """ Determine/translate generic rule name to SQLA-specific rule. Output rule name is a valid SQL Referential action name. If `ondelete` kwarg is not provided, no referential action will be created. Valid kwargs for `ondelete` kwarg are: CASCADE Translates to SQL as `CASCADE` RESTRICT Translates to SQL as `RESTRICT` NULLIFY Translates to SQL as `SET NULL Not supported SQL referential actions: `NO ACTION`, `SET DEFAULT` """ key = self._schema_kwarg_prefix + key action = kwargs.pop(key, None) if action is None: return action rules = { 'CASCADE': 'CASCADE', 'RESTRICT': 'RESTRICT', 'NULLIFY': 'SET NULL', } action = action.upper() if action not in rules: raise KeyError('Invalid `{}` argument value. 
Must be ' 'one of: {}'.format(key, ', '.join(rules.keys()))) return rules[action] def _generate_schema_item(self, cleaned_kw): """ Override default implementation to generate 'ondelete' and 'onupdate' arguments. """ pref = self._schema_kwarg_prefix cleaned_kw[pref + 'ondelete'] = self._get_referential_action( cleaned_kw, 'ondelete') cleaned_kw[pref + 'onupdate'] = self._get_referential_action( cleaned_kw, 'onupdate') return super(ForeignKeyField, self)._generate_schema_item(cleaned_kw) relationship_kwargs = { 'secondary', 'primaryjoin', 'secondaryjoin', 'foreign_keys', 'uselist', 'order_by', 'backref', 'back_populates', 'post_update', 'cascade', 'extension', 'viewonly', 'lazy', 'collection_class', 'passive_deletes', 'passive_updates', 'remote_side', 'enable_typechecks', 'join_depth', 'comparator_factory', 'single_parent', 'innerjoin', 'distinct_target_key', 'doc', 'active_history', 'cascade_backrefs', 'load_on_pending', 'strategy_class', '_local_remote_pairs', 'query_class', 'info', 'document', 'name' } def Relationship(**kwargs): """ Thin wrapper around sqlalchemy.orm.relationship. The goal of this wrapper is to allow passing both relationship and backref arguments to a single function. Backref arguments should be prefixed with 'backref_'. This function splits relationship-specific and backref-specific arguments and makes a call like: relationship(..., ..., backref=backref(...)) :lazy: setting is set to 'immediate' on the 'One' side of One2One or One2Many relationships. This is done both for relationship itself and backref so ORM 'after_update' events are fired when relationship is updated. For backref 'uselist' is assumed to be False by default. From SQLAlchemy docs: immediate - items should be loaded as the parents are loaded, using a separate SELECT statement, or identity map fetch for simple many-to-one references. """ backref_pre = 'backref_' if 'help_text' in kwargs: kwargs['doc'] = kwargs.pop('help_text', None) if (backref_pre + 'help_text') in kwargs: kwargs[backref_pre + 'doc'] = kwargs.pop( backref_pre + 'help_text', None) kwargs = {k: v for k, v in kwargs.items() if k in relationship_kwargs or k[len(backref_pre):] in relationship_kwargs} rel_kw, backref_kw = {}, {} for key, val in kwargs.items(): if key.startswith(backref_pre): key = key[len(backref_pre):] backref_kw[key] = val else: rel_kw[key] = val rel_document = rel_kw.pop('document') if 'uselist' in rel_kw and not rel_kw['uselist']: rel_kw['lazy'] = 'immediate' if backref_kw: if not backref_kw.get('uselist'): backref_kw['lazy'] = 'immediate' backref_name = backref_kw.pop('name') rel_kw['backref'] = backref(backref_name, **backref_kw) return relationship(rel_document, **rel_kw)
_sqla_type_cls = Interval _type_unchanged_kwargs = ( 'native', 'second_precision', 'day_precision')
identifier_body
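ForeignKeyField, defined in the same file, resolves its column type from `ref_column_type` and translates the generic rule names CASCADE/RESTRICT/NULLIFY into SQL referential actions. A sketch assuming a hypothetical `users` table — the names are illustrative, only the kwargs follow the sampled API:

# 'owner_id' and 'users.id' are assumed names, not from the sampled file
owner_id = ForeignKeyField(
    ref_column='users.id',    # forwarded to ForeignKey(column='users.id')
    ref_column_type=IdField,  # _sqla_type_cls resolves to LimitedInteger
    ref_ondelete='NULLIFY',   # generic rule, translated to SQL 'SET NULL'
    required=False)           # -> Column(nullable=True)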
fields.py
from sqlalchemy.orm import relationship, backref from sqlalchemy.schema import Column, ForeignKey # Since SQLAlchemy 1.0.0 # from sqlalchemy.types import MatchType from .types import ( LimitedString, LimitedText, LimitedUnicode, LimitedBigInteger, LimitedInteger, LimitedSmallInteger, LimitedFloat, LimitedNumeric, LimitedUnicodeText, DateTime, Boolean, Date, Interval, LargeBinary, PickleType, Time, Choice, Dict, ChoiceArray, ) class ProcessableMixin(object): """ Mixin that allows running callables on a value that is being set on a field. """ def __init__(self, *args, **kwargs): """ Pop before/after validation processors :before_validation: Processors that are run before session.flush() :after_validation: Processors that are run after session.flush() but before session.commit() """ self.before_validation = kwargs.pop('before_validation', ()) self.after_validation = kwargs.pop('after_validation', ()) super(ProcessableMixin, self).__init__(*args, **kwargs) def apply_processors(self, instance, new_value, before=False, after=False): processors = [] if before: processors += list(self.before_validation) if after: processors += list(self.after_validation) for proc in processors: new_value = proc(instance=instance, new_value=new_value) return new_value class BaseField(Column): """ Base plain column that otherwise would be created as sqlalchemy.Column(sqlalchemy.Type()) Attributes: _sqla_type_cls: SQLAlchemy type class used to instantiate the column type. _type_unchanged_kwargs: sequence of strings that represent arguments received by `_sqla_type_cls`, the names of which have not been changed. Values of field init arguments with these names will be extracted from field init kwargs and passed to Type init as is. _column_valid_kwargs: sequence of string names of valid kwargs that a Column may receive. """ _sqla_type_cls = None _type_unchanged_kwargs = () _column_valid_kwargs = ( 'name', 'type_', 'autoincrement', 'default', 'doc', 'key', 'index', 'info', 'nullable', 'onupdate', 'primary_key', 'server_default', 'server_onupdate', 'quote', 'unique', 'system', '_proxies') def __init__(self, *args, **kwargs): """ Responsible for: * Filter out type-specific kwargs and init Type using these. * Filter out column-slecific kwargs and init column using them. * If `args` are provided, that means column proxy is being created. In this case Type does not need to be created. """ type_args, type_kw, cleaned_kw = self.process_type_args(kwargs) col_kw = self.process_column_args(cleaned_kw) # Column proxy is created by declarative extension if args: col_kw['name'], col_kw['type_'] = args # Column init when defining a schema else: col_kw['type_'] = self._sqla_type_cls(*type_args, **type_kw) super(BaseField, self).__init__(**col_kw) def __setattr__(self, key, value): """ Store column name on 'self.type' This allows error messages in custom types' validation be more explicit. """ if value is not None and key == 'name': self.type._column_name = value return super(BaseField, self).__setattr__(key, value) def process_type_args(self, kwargs): """ Process arguments of a sqla Type. http://docs.sqlalchemy.org/en/rel_0_9/core/type_basics.html#generic-types Process `kwargs` to extract type-specific arguments. If some arguments' names should be changed, extend this method with a manual args processor. 
Returns: * type_args: sequence of type-specific posional arguments * type_kw: dict of type-specific kwargs * cleaned_kw: input kwargs cleaned from type-specific args """ type_kw = dict() type_args = () cleaned_kw = kwargs.copy() for arg in self._type_unchanged_kwargs: if arg in cleaned_kw: type_kw[arg] = cleaned_kw.pop(arg) return type_args, type_kw, cleaned_kw def _drop_invalid_kwargs(self, kwargs): """ Drop keys from `kwargs` that are not present in `self._column_valid_kwargs`, thus are not valid kwargs to be passed to Column. """ return {k: v for k, v in kwargs.items() if k in self._column_valid_kwargs} def process_column_args(self, kwargs): """ Process/extract/rename Column arguments. http://docs.sqlalchemy.org/en/rel_0_9/core/metadata.html#column-table-metadata-api Changed: required -> nullable help_text -> doc """ col_kw = kwargs.copy() col_kw['nullable'] = not col_kw.pop('required', False) col_kw['doc'] = col_kw.pop('help_text', None) col_kw = self._drop_invalid_kwargs(col_kw) return col_kw @property def _constructor(self): return self.__class__ class BigIntegerField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedBigInteger _type_unchanged_kwargs = ('min_value', 'max_value') class BooleanField(ProcessableMixin, BaseField): _sqla_type_cls = Boolean _type_unchanged_kwargs = ('create_constraint') def process_type_args(self, kwargs): """ Changed: constraint_name -> name """ type_args, type_kw, cleaned_kw = super( BooleanField, self).process_type_args(kwargs) type_kw.update({ 'name': cleaned_kw.pop('constraint_name', None), }) return type_args, type_kw, cleaned_kw class DateField(ProcessableMixin, BaseField): _sqla_type_cls = Date _type_unchanged_kwargs = () class DateTimeField(ProcessableMixin, BaseField): _sqla_type_cls = DateTime _type_unchanged_kwargs = ('timezone',) class ChoiceField(ProcessableMixin, BaseField): _sqla_type_cls = Choice _type_unchanged_kwargs = ( 'collation', 'convert_unicode', 'unicode_error', '_warn_on_bytestring', 'choices') class FloatField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedFloat _type_unchanged_kwargs = ( 'precision', 'asdecimal', 'decimal_return_scale', 'min_value', 'max_value') class IntegerField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedInteger _type_unchanged_kwargs = ('min_value', 'max_value') class IdField(IntegerField): """ Just a subclass of IntegerField that must be used for fields that represent database-specific 'id' field. 
""" pass class IntervalField(ProcessableMixin, BaseField): _sqla_type_cls = Interval _type_unchanged_kwargs = ( 'native', 'second_precision', 'day_precision') class BinaryField(ProcessableMixin, BaseField): _sqla_type_cls = LargeBinary _type_unchanged_kwargs = ('length',) # Since SQLAlchemy 1.0.0 # class MatchField(BooleanField): # _sqla_type_cls = MatchType class DecimalField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedNumeric _type_unchanged_kwargs = ( 'precision', 'scale', 'decimal_return_scale', 'asdecimal', 'min_value', 'max_value') class PickleField(ProcessableMixin, BaseField): _sqla_type_cls = PickleType _type_unchanged_kwargs = ( 'protocol', 'pickler', 'comparator') class SmallIntegerField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedSmallInteger _type_unchanged_kwargs = ('min_value', 'max_value') class StringField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedString _type_unchanged_kwargs = ( 'collation', 'convert_unicode', 'unicode_error', '_warn_on_bytestring', 'min_length', 'max_length') def process_type_args(self, kwargs): """ Changed: max_length -> length """ type_args, type_kw, cleaned_kw = super( StringField, self).process_type_args(kwargs) type_kw.update({ 'length': type_kw.get('max_length'), }) return type_args, type_kw, cleaned_kw class TextField(StringField): _sqla_type_cls = LimitedText class TimeField(DateTimeField): _sqla_type_cls = Time class UnicodeField(StringField): _sqla_type_cls = LimitedUnicode class UnicodeTextField(StringField): _sqla_type_cls = LimitedUnicodeText class DictField(BaseField): _sqla_type_cls = Dict _type_unchanged_kwargs = () def process_type_args(self, kwargs): type_args, type_kw, cleaned_kw = super( DictField, self).process_type_args(kwargs) cleaned_kw['default'] = cleaned_kw.get('default') or {} return type_args, type_kw, cleaned_kw class ListField(BaseField): _sqla_type_cls = ChoiceArray _type_unchanged_kwargs = ( 'as_tuple', 'dimensions', 'zero_indexes', 'choices') def process_type_args(self, kwargs): """ Covert field class to its `_sqla_type_cls`. StringField & UnicodeField are replaced with corresponding Text fields because when String* fields are used, SQLA creates db column of postgresql type 'varying[]'. But when querying that column with text, requested text if submited as 'text[]'. Changed: item_type field class -> item_type field type """ type_args, type_kw, cleaned_kw = super( ListField, self).process_type_args(kwargs) if 'item_type' in cleaned_kw: item_type_field = cleaned_kw['item_type'] if item_type_field is StringField: item_type_field = TextField if item_type_field is UnicodeField: item_type_field = UnicodeTextField type_kw['item_type'] = item_type_field._sqla_type_cls cleaned_kw['default'] = cleaned_kw.get('default') or [] return type_args, type_kw, cleaned_kw class BaseSchemaItemField(BaseField): """ Base class for fields/columns that accept a schema item/constraint on column init. E.g. Column(Integer, ForeignKey('user.id')) It differs from regular columns in that an item/constraint passed to the Column on init has to be passed as a positional argument and should also receive arguments. Thus 3 objects need to be created on init: Column, Type, and SchemaItem/Constraint. Attributes: _schema_class: Class to be instantiated to create a schema item. _schema_kwarg_prefix: Prefix schema item's kwargs should have. This is used to avoid making a mess, as both column, type and schemaitem kwargs may be passed at once. 
_schema_valid_kwargs: Sequence of strings that represent names of kwargs `_schema_class` may receive. Should not include prefix. """ _schema_class = None _schema_kwarg_prefix = '' _schema_valid_kwargs = () def __init__(self, *args, **kwargs): """ Responsible for: * Filter out type-specific kwargs and init Type using these. * Filter out `_schema_class` kwargs and init `_schema_class`. * Filter out column-slecific kwargs and init column using them. * If `args` are provided, that means column proxy is being created. In this case Type does not need to be created. """ type_args, type_kw, cleaned_kw = self.process_type_args(kwargs) if not args: schema_item, cleaned_kw = self._generate_schema_item(cleaned_kw) column_kw = self.process_column_args(cleaned_kw) # Column proxy is created by declarative extension if args: column_kw['name'], column_kw['type_'], schema_item = args # Column init when defining a schema else: column_kw['type_'] = self._sqla_type_cls(*type_args, **type_kw) column_args = (schema_item,) return Column.__init__(self, *column_args, **column_kw) def _generate_schema_item(self, cleaned_kw): """ Generate SchemaItem using `_schema_class` and kwargs filtered out from `cleaned_kw`. Returns created instance and cleaned kwargs. """ schema_kwargs = {} for key in self._schema_valid_kwargs: prefixed_key = self._schema_kwarg_prefix + key if prefixed_key in cleaned_kw: schema_kwargs[key] = cleaned_kw.pop(prefixed_key) schema_item = self._schema_class(**schema_kwargs) return schema_item, cleaned_kw class ForeignKeyField(BaseSchemaItemField): """ Integer ForeignKey field. This is the place where `ondelete` rules kwargs should be passed. If you switched from the mongodb engine, copy the same `ondelete` rules you passed to mongo's `Relationship` constructor. `ondelete` kwargs may be kept in both fields with no side-effects when switching between the sqla and mongo engines. Developers are not encouraged to change the value of this field on model to add/update relationship. Use `Relationship` constructor with backreference settings instead. """ _sqla_type_cls = None _type_unchanged_kwargs = () _schema_class = ForeignKey _schema_kwarg_prefix = 'ref_' _schema_valid_kwargs = ( 'column', '_constraint', 'use_alter', 'name', 'onupdate',
Type is determined using 'ref_column_type' value from :kwargs:. Its value must be a *Field class of a field that is being referenced by FK field or a `_sqla_type_cls` of that *Field cls. """ if not args: field_type = kwargs.pop(self._schema_kwarg_prefix + 'column_type') if hasattr(field_type, '_sqla_type_cls'): field_type = field_type._sqla_type_cls self._sqla_type_cls = field_type super(ForeignKeyField, self).__init__(*args, **kwargs) def _get_referential_action(self, kwargs, key): """ Determine/translate generic rule name to SQLA-specific rule. Output rule name is a valid SQL Referential action name. If `ondelete` kwarg is not provided, no referential action will be created. Valid kwargs for `ondelete` kwarg are: CASCADE Translates to SQL as `CASCADE` RESTRICT Translates to SQL as `RESTRICT` NULLIFY Translates to SQL as `SET NULL Not supported SQL referential actions: `NO ACTION`, `SET DEFAULT` """ key = self._schema_kwarg_prefix + key action = kwargs.pop(key, None) if action is None: return action rules = { 'CASCADE': 'CASCADE', 'RESTRICT': 'RESTRICT', 'NULLIFY': 'SET NULL', } action = action.upper() if action not in rules: raise KeyError('Invalid `{}` argument value. Must be ' 'one of: {}'.format(key, ', '.join(rules.keys()))) return rules[action] def _generate_schema_item(self, cleaned_kw): """ Override default implementation to generate 'ondelete' and 'onupdate' arguments. """ pref = self._schema_kwarg_prefix cleaned_kw[pref + 'ondelete'] = self._get_referential_action( cleaned_kw, 'ondelete') cleaned_kw[pref + 'onupdate'] = self._get_referential_action( cleaned_kw, 'onupdate') return super(ForeignKeyField, self)._generate_schema_item(cleaned_kw) relationship_kwargs = { 'secondary', 'primaryjoin', 'secondaryjoin', 'foreign_keys', 'uselist', 'order_by', 'backref', 'back_populates', 'post_update', 'cascade', 'extension', 'viewonly', 'lazy', 'collection_class', 'passive_deletes', 'passive_updates', 'remote_side', 'enable_typechecks', 'join_depth', 'comparator_factory', 'single_parent', 'innerjoin', 'distinct_target_key', 'doc', 'active_history', 'cascade_backrefs', 'load_on_pending', 'strategy_class', '_local_remote_pairs', 'query_class', 'info', 'document', 'name' } def Relationship(**kwargs): """ Thin wrapper around sqlalchemy.orm.relationship. The goal of this wrapper is to allow passing both relationship and backref arguments to a single function. Backref arguments should be prefixed with 'backref_'. This function splits relationship-specific and backref-specific arguments and makes a call like: relationship(..., ..., backref=backref(...)) :lazy: setting is set to 'immediate' on the 'One' side of One2One or One2Many relationships. This is done both for relationship itself and backref so ORM 'after_update' events are fired when relationship is updated. For backref 'uselist' is assumed to be False by default. From SQLAlchemy docs: immediate - items should be loaded as the parents are loaded, using a separate SELECT statement, or identity map fetch for simple many-to-one references. 
""" backref_pre = 'backref_' if 'help_text' in kwargs: kwargs['doc'] = kwargs.pop('help_text', None) if (backref_pre + 'help_text') in kwargs: kwargs[backref_pre + 'doc'] = kwargs.pop( backref_pre + 'help_text', None) kwargs = {k: v for k, v in kwargs.items() if k in relationship_kwargs or k[len(backref_pre):] in relationship_kwargs} rel_kw, backref_kw = {}, {} for key, val in kwargs.items(): if key.startswith(backref_pre): key = key[len(backref_pre):] backref_kw[key] = val else: rel_kw[key] = val rel_document = rel_kw.pop('document') if 'uselist' in rel_kw and not rel_kw['uselist']: rel_kw['lazy'] = 'immediate' if backref_kw: if not backref_kw.get('uselist'): backref_kw['lazy'] = 'immediate' backref_name = backref_kw.pop('name') rel_kw['backref'] = backref(backref_name, **backref_kw) return relationship(rel_document, **rel_kw)
'ondelete', 'deferrable', 'initially', 'link_to_name', 'match') def __init__(self, *args, **kwargs): """ Override to determine `self._sqla_type_cls`.
random_line_split
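As its docstring notes, ListField swaps StringField/UnicodeField item types for their Text counterparts so PostgreSQL stores `text[]` rather than `varying[]`, and both ListField and DictField substitute empty mutable defaults. A small sketch with illustrative column names:

tags = ListField(item_type=StringField,        # stored as LimitedText -> text[]
                 choices=('sci-fi', 'drama'))  # choices handled by ChoiceArray
settings = DictField()                         # default becomes {} instead of None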
fields.py
from sqlalchemy.orm import relationship, backref from sqlalchemy.schema import Column, ForeignKey # Since SQLAlchemy 1.0.0 # from sqlalchemy.types import MatchType from .types import ( LimitedString, LimitedText, LimitedUnicode, LimitedBigInteger, LimitedInteger, LimitedSmallInteger, LimitedFloat, LimitedNumeric, LimitedUnicodeText, DateTime, Boolean, Date, Interval, LargeBinary, PickleType, Time, Choice, Dict, ChoiceArray, ) class ProcessableMixin(object): """ Mixin that allows running callables on a value that is being set on a field. """ def __init__(self, *args, **kwargs): """ Pop before/after validation processors :before_validation: Processors that are run before session.flush() :after_validation: Processors that are run after session.flush() but before session.commit() """ self.before_validation = kwargs.pop('before_validation', ()) self.after_validation = kwargs.pop('after_validation', ()) super(ProcessableMixin, self).__init__(*args, **kwargs) def apply_processors(self, instance, new_value, before=False, after=False): processors = [] if before: processors += list(self.before_validation) if after: processors += list(self.after_validation) for proc in processors: new_value = proc(instance=instance, new_value=new_value) return new_value class BaseField(Column): """ Base plain column that otherwise would be created as sqlalchemy.Column(sqlalchemy.Type()) Attributes: _sqla_type_cls: SQLAlchemy type class used to instantiate the column type. _type_unchanged_kwargs: sequence of strings that represent arguments received by `_sqla_type_cls`, the names of which have not been changed. Values of field init arguments with these names will be extracted from field init kwargs and passed to Type init as is. _column_valid_kwargs: sequence of string names of valid kwargs that a Column may receive. """ _sqla_type_cls = None _type_unchanged_kwargs = () _column_valid_kwargs = ( 'name', 'type_', 'autoincrement', 'default', 'doc', 'key', 'index', 'info', 'nullable', 'onupdate', 'primary_key', 'server_default', 'server_onupdate', 'quote', 'unique', 'system', '_proxies') def __init__(self, *args, **kwargs): """ Responsible for: * Filter out type-specific kwargs and init Type using these. * Filter out column-slecific kwargs and init column using them. * If `args` are provided, that means column proxy is being created. In this case Type does not need to be created. """ type_args, type_kw, cleaned_kw = self.process_type_args(kwargs) col_kw = self.process_column_args(cleaned_kw) # Column proxy is created by declarative extension if args: col_kw['name'], col_kw['type_'] = args # Column init when defining a schema else: col_kw['type_'] = self._sqla_type_cls(*type_args, **type_kw) super(BaseField, self).__init__(**col_kw) def __setattr__(self, key, value): """ Store column name on 'self.type' This allows error messages in custom types' validation be more explicit. """ if value is not None and key == 'name': self.type._column_name = value return super(BaseField, self).__setattr__(key, value) def process_type_args(self, kwargs): """ Process arguments of a sqla Type. http://docs.sqlalchemy.org/en/rel_0_9/core/type_basics.html#generic-types Process `kwargs` to extract type-specific arguments. If some arguments' names should be changed, extend this method with a manual args processor. 
Returns: * type_args: sequence of type-specific posional arguments * type_kw: dict of type-specific kwargs * cleaned_kw: input kwargs cleaned from type-specific args """ type_kw = dict() type_args = () cleaned_kw = kwargs.copy() for arg in self._type_unchanged_kwargs: if arg in cleaned_kw: type_kw[arg] = cleaned_kw.pop(arg) return type_args, type_kw, cleaned_kw def _drop_invalid_kwargs(self, kwargs): """ Drop keys from `kwargs` that are not present in `self._column_valid_kwargs`, thus are not valid kwargs to be passed to Column. """ return {k: v for k, v in kwargs.items() if k in self._column_valid_kwargs} def process_column_args(self, kwargs): """ Process/extract/rename Column arguments. http://docs.sqlalchemy.org/en/rel_0_9/core/metadata.html#column-table-metadata-api Changed: required -> nullable help_text -> doc """ col_kw = kwargs.copy() col_kw['nullable'] = not col_kw.pop('required', False) col_kw['doc'] = col_kw.pop('help_text', None) col_kw = self._drop_invalid_kwargs(col_kw) return col_kw @property def _constructor(self): return self.__class__ class BigIntegerField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedBigInteger _type_unchanged_kwargs = ('min_value', 'max_value') class BooleanField(ProcessableMixin, BaseField): _sqla_type_cls = Boolean _type_unchanged_kwargs = ('create_constraint') def process_type_args(self, kwargs): """ Changed: constraint_name -> name """ type_args, type_kw, cleaned_kw = super( BooleanField, self).process_type_args(kwargs) type_kw.update({ 'name': cleaned_kw.pop('constraint_name', None), }) return type_args, type_kw, cleaned_kw class DateField(ProcessableMixin, BaseField): _sqla_type_cls = Date _type_unchanged_kwargs = () class DateTimeField(ProcessableMixin, BaseField): _sqla_type_cls = DateTime _type_unchanged_kwargs = ('timezone',) class ChoiceField(ProcessableMixin, BaseField): _sqla_type_cls = Choice _type_unchanged_kwargs = ( 'collation', 'convert_unicode', 'unicode_error', '_warn_on_bytestring', 'choices') class FloatField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedFloat _type_unchanged_kwargs = ( 'precision', 'asdecimal', 'decimal_return_scale', 'min_value', 'max_value') class IntegerField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedInteger _type_unchanged_kwargs = ('min_value', 'max_value') class IdField(IntegerField): """ Just a subclass of IntegerField that must be used for fields that represent database-specific 'id' field. 
""" pass class IntervalField(ProcessableMixin, BaseField): _sqla_type_cls = Interval _type_unchanged_kwargs = ( 'native', 'second_precision', 'day_precision') class BinaryField(ProcessableMixin, BaseField): _sqla_type_cls = LargeBinary _type_unchanged_kwargs = ('length',) # Since SQLAlchemy 1.0.0 # class MatchField(BooleanField): # _sqla_type_cls = MatchType class DecimalField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedNumeric _type_unchanged_kwargs = ( 'precision', 'scale', 'decimal_return_scale', 'asdecimal', 'min_value', 'max_value') class PickleField(ProcessableMixin, BaseField): _sqla_type_cls = PickleType _type_unchanged_kwargs = ( 'protocol', 'pickler', 'comparator') class SmallIntegerField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedSmallInteger _type_unchanged_kwargs = ('min_value', 'max_value') class StringField(ProcessableMixin, BaseField): _sqla_type_cls = LimitedString _type_unchanged_kwargs = ( 'collation', 'convert_unicode', 'unicode_error', '_warn_on_bytestring', 'min_length', 'max_length') def process_type_args(self, kwargs): """ Changed: max_length -> length """ type_args, type_kw, cleaned_kw = super( StringField, self).process_type_args(kwargs) type_kw.update({ 'length': type_kw.get('max_length'), }) return type_args, type_kw, cleaned_kw class TextField(StringField): _sqla_type_cls = LimitedText class TimeField(DateTimeField): _sqla_type_cls = Time class UnicodeField(StringField): _sqla_type_cls = LimitedUnicode class UnicodeTextField(StringField): _sqla_type_cls = LimitedUnicodeText class DictField(BaseField): _sqla_type_cls = Dict _type_unchanged_kwargs = () def process_type_args(self, kwargs): type_args, type_kw, cleaned_kw = super( DictField, self).process_type_args(kwargs) cleaned_kw['default'] = cleaned_kw.get('default') or {} return type_args, type_kw, cleaned_kw class ListField(BaseField): _sqla_type_cls = ChoiceArray _type_unchanged_kwargs = ( 'as_tuple', 'dimensions', 'zero_indexes', 'choices') def process_type_args(self, kwargs): """ Covert field class to its `_sqla_type_cls`. StringField & UnicodeField are replaced with corresponding Text fields because when String* fields are used, SQLA creates db column of postgresql type 'varying[]'. But when querying that column with text, requested text if submited as 'text[]'. Changed: item_type field class -> item_type field type """ type_args, type_kw, cleaned_kw = super( ListField, self).process_type_args(kwargs) if 'item_type' in cleaned_kw: item_type_field = cleaned_kw['item_type'] if item_type_field is StringField: item_type_field = TextField if item_type_field is UnicodeField: item_type_field = UnicodeTextField type_kw['item_type'] = item_type_field._sqla_type_cls cleaned_kw['default'] = cleaned_kw.get('default') or [] return type_args, type_kw, cleaned_kw class BaseSchemaItemField(BaseField): """ Base class for fields/columns that accept a schema item/constraint on column init. E.g. Column(Integer, ForeignKey('user.id')) It differs from regular columns in that an item/constraint passed to the Column on init has to be passed as a positional argument and should also receive arguments. Thus 3 objects need to be created on init: Column, Type, and SchemaItem/Constraint. Attributes: _schema_class: Class to be instantiated to create a schema item. _schema_kwarg_prefix: Prefix schema item's kwargs should have. This is used to avoid making a mess, as both column, type and schemaitem kwargs may be passed at once. 
_schema_valid_kwargs: Sequence of strings that represent names of kwargs `_schema_class` may receive. Should not include prefix. """ _schema_class = None _schema_kwarg_prefix = '' _schema_valid_kwargs = () def __init__(self, *args, **kwargs): """ Responsible for: * Filter out type-specific kwargs and init Type using these. * Filter out `_schema_class` kwargs and init `_schema_class`. * Filter out column-slecific kwargs and init column using them. * If `args` are provided, that means column proxy is being created. In this case Type does not need to be created. """ type_args, type_kw, cleaned_kw = self.process_type_args(kwargs) if not args: schema_item, cleaned_kw = self._generate_schema_item(cleaned_kw) column_kw = self.process_column_args(cleaned_kw) # Column proxy is created by declarative extension if args: column_kw['name'], column_kw['type_'], schema_item = args # Column init when defining a schema else:
column_args = (schema_item,) return Column.__init__(self, *column_args, **column_kw) def _generate_schema_item(self, cleaned_kw): """ Generate SchemaItem using `_schema_class` and kwargs filtered out from `cleaned_kw`. Returns created instance and cleaned kwargs. """ schema_kwargs = {} for key in self._schema_valid_kwargs: prefixed_key = self._schema_kwarg_prefix + key if prefixed_key in cleaned_kw: schema_kwargs[key] = cleaned_kw.pop(prefixed_key) schema_item = self._schema_class(**schema_kwargs) return schema_item, cleaned_kw class ForeignKeyField(BaseSchemaItemField): """ Integer ForeignKey field. This is the place where `ondelete` rules kwargs should be passed. If you switched from the mongodb engine, copy the same `ondelete` rules you passed to mongo's `Relationship` constructor. `ondelete` kwargs may be kept in both fields with no side-effects when switching between the sqla and mongo engines. Developers are not encouraged to change the value of this field on model to add/update relationship. Use `Relationship` constructor with backreference settings instead. """ _sqla_type_cls = None _type_unchanged_kwargs = () _schema_class = ForeignKey _schema_kwarg_prefix = 'ref_' _schema_valid_kwargs = ( 'column', '_constraint', 'use_alter', 'name', 'onupdate', 'ondelete', 'deferrable', 'initially', 'link_to_name', 'match') def __init__(self, *args, **kwargs): """ Override to determine `self._sqla_type_cls`. Type is determined using 'ref_column_type' value from :kwargs:. Its value must be a *Field class of a field that is being referenced by FK field or a `_sqla_type_cls` of that *Field cls. """ if not args: field_type = kwargs.pop(self._schema_kwarg_prefix + 'column_type') if hasattr(field_type, '_sqla_type_cls'): field_type = field_type._sqla_type_cls self._sqla_type_cls = field_type super(ForeignKeyField, self).__init__(*args, **kwargs) def _get_referential_action(self, kwargs, key): """ Determine/translate generic rule name to SQLA-specific rule. Output rule name is a valid SQL Referential action name. If `ondelete` kwarg is not provided, no referential action will be created. Valid kwargs for `ondelete` kwarg are: CASCADE Translates to SQL as `CASCADE` RESTRICT Translates to SQL as `RESTRICT` NULLIFY Translates to SQL as `SET NULL Not supported SQL referential actions: `NO ACTION`, `SET DEFAULT` """ key = self._schema_kwarg_prefix + key action = kwargs.pop(key, None) if action is None: return action rules = { 'CASCADE': 'CASCADE', 'RESTRICT': 'RESTRICT', 'NULLIFY': 'SET NULL', } action = action.upper() if action not in rules: raise KeyError('Invalid `{}` argument value. Must be ' 'one of: {}'.format(key, ', '.join(rules.keys()))) return rules[action] def _generate_schema_item(self, cleaned_kw): """ Override default implementation to generate 'ondelete' and 'onupdate' arguments. 
""" pref = self._schema_kwarg_prefix cleaned_kw[pref + 'ondelete'] = self._get_referential_action( cleaned_kw, 'ondelete') cleaned_kw[pref + 'onupdate'] = self._get_referential_action( cleaned_kw, 'onupdate') return super(ForeignKeyField, self)._generate_schema_item(cleaned_kw) relationship_kwargs = { 'secondary', 'primaryjoin', 'secondaryjoin', 'foreign_keys', 'uselist', 'order_by', 'backref', 'back_populates', 'post_update', 'cascade', 'extension', 'viewonly', 'lazy', 'collection_class', 'passive_deletes', 'passive_updates', 'remote_side', 'enable_typechecks', 'join_depth', 'comparator_factory', 'single_parent', 'innerjoin', 'distinct_target_key', 'doc', 'active_history', 'cascade_backrefs', 'load_on_pending', 'strategy_class', '_local_remote_pairs', 'query_class', 'info', 'document', 'name' } def Relationship(**kwargs): """ Thin wrapper around sqlalchemy.orm.relationship. The goal of this wrapper is to allow passing both relationship and backref arguments to a single function. Backref arguments should be prefixed with 'backref_'. This function splits relationship-specific and backref-specific arguments and makes a call like: relationship(..., ..., backref=backref(...)) :lazy: setting is set to 'immediate' on the 'One' side of One2One or One2Many relationships. This is done both for relationship itself and backref so ORM 'after_update' events are fired when relationship is updated. For backref 'uselist' is assumed to be False by default. From SQLAlchemy docs: immediate - items should be loaded as the parents are loaded, using a separate SELECT statement, or identity map fetch for simple many-to-one references. """ backref_pre = 'backref_' if 'help_text' in kwargs: kwargs['doc'] = kwargs.pop('help_text', None) if (backref_pre + 'help_text') in kwargs: kwargs[backref_pre + 'doc'] = kwargs.pop( backref_pre + 'help_text', None) kwargs = {k: v for k, v in kwargs.items() if k in relationship_kwargs or k[len(backref_pre):] in relationship_kwargs} rel_kw, backref_kw = {}, {} for key, val in kwargs.items(): if key.startswith(backref_pre): key = key[len(backref_pre):] backref_kw[key] = val else: rel_kw[key] = val rel_document = rel_kw.pop('document') if 'uselist' in rel_kw and not rel_kw['uselist']: rel_kw['lazy'] = 'immediate' if backref_kw: if not backref_kw.get('uselist'): backref_kw['lazy'] = 'immediate' backref_name = backref_kw.pop('name') rel_kw['backref'] = backref(backref_name, **backref_kw) return relationship(rel_document, **rel_kw)
column_kw['type_'] = self._sqla_type_cls(*type_args, **type_kw)
conditional_block
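The record above defines the `ForeignKeyField` and `Relationship` wrappers, so a short usage sketch may help; it is editorial commentary, not part of the sample. It assumes a local `fields` module, an `IntegerField` field class, and made-up `Profile`/`Story` models — none of these names appear in the source — and it relies on the base-class plumbing not shown in this excerpt.

# Hypothetical usage of the wrappers defined in the record above.
from fields import ForeignKeyField, IntegerField, Relationship

# `ref_`-prefixed kwargs are stripped of the prefix and forwarded to
# sqlalchemy.schema.ForeignKey; 'NULLIFY' is translated to the SQL
# action `SET NULL` by _get_referential_action().
profile_id = ForeignKeyField(
    ref_column='profile.id',        # column the FK points at
    ref_column_type=IntegerField,   # *Field class; its _sqla_type_cls is used
    ref_ondelete='CASCADE',
    ref_onupdate='NULLIFY',
)

# 'Many' side of a One2Many; `backref_`-prefixed kwargs become
# backref('author', ...), and help_text is mapped onto relationship(doc=...).
stories = Relationship(
    document='Story',
    uselist=True,
    backref_name='author',
    backref_uselist=False,          # backref is the 'One' side
    help_text='Stories authored by this profile',
)

Because `backref_uselist` is falsy, the wrapper sets `lazy='immediate'` on the backref so ORM 'after_update' events fire when the relationship changes.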
bootstrap.js
/* * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ // Check for fx presence. if (typeof javafx.application.Application != "function") { print("JavaFX is not available."); exit(1); } // Extend the javafx.application.Application class overriding init, start and stop. com.sun.javafx.application.LauncherImpl.launchApplication((Java.extend(javafx.application.Application, { // Overridden javafx.application.Application.init(); init: function() { // Java FX packages and classes must be defined here because // they may not be viable until launch time due to clinit ordering. }, // Overridden javafx.application.Application.start(Stage stage); start: function(stage) { // Set up stage global. $STAGE = stage; // Load user FX scripts. for each (var script in $SCRIPTS) { load(script); } // Call the global init function if present. if ($GLOBAL.init) { init(); } // Call the global start function if present. Otherwise show the stage. if ($GLOBAL.start) { start(stage);
} }, // Overridden javafx.application.Application.stop(); stop: function() { // Call the global stop function if present. if ($GLOBAL.stop) { stop(); } } // No arguments passed to application (handled thru $ARG.) })).class, new (Java.type("java.lang.String[]"))(0));
} else { stage.show();
random_line_split
bootstrap.js
/* * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ // Check for fx presence. if (typeof javafx.application.Application != "function")
// Extend the javafx.application.Application class overriding init, start and stop. com.sun.javafx.application.LauncherImpl.launchApplication((Java.extend(javafx.application.Application, { // Overridden javafx.application.Application.init(); init: function() { // Java FX packages and classes must be defined here because // they may not be viable until launch time due to clinit ordering. }, // Overridden javafx.application.Application.start(Stage stage); start: function(stage) { // Set up stage global. $STAGE = stage; // Load user FX scripts. for each (var script in $SCRIPTS) { load(script); } // Call the global init function if present. if ($GLOBAL.init) { init(); } // Call the global start function if present. Otherwise show the stage. if ($GLOBAL.start) { start(stage); } else { stage.show(); } }, // Overridden javafx.application.Application.stop(); stop: function() { // Call the global stop function if present. if ($GLOBAL.stop) { stop(); } } // No arguments passed to application (handled thru $ARG.) })).class, new (Java.type("java.lang.String[]"))(0));
{ print("JavaFX is not available."); exit(1); }
conditional_block
sample_file_parser.py
import json
import logging
from os import listdir, path
from typing import Dict, List

from tqdm import tqdm

TELEM_DIR_PATH = "../envs/monkey_zoo/blackbox/tests/performance/telemetry_sample"
MAX_SAME_TYPE_TELEM_FILES = 10000
LOGGER = logging.getLogger(__name__)


class SampleFileParser:
    @staticmethod
    def save_telemetries_to_files(telems: List[Dict]):
        for telem in tqdm(telems, desc="Telemetries saved to files", position=3):
            SampleFileParser.save_telemetry_to_file(telem)

    @staticmethod
    def save_telemetry_to_file(telem: Dict):
        telem_filename = telem["name"] + telem["method"]

        for i in range(MAX_SAME_TYPE_TELEM_FILES):
            if not path.exists(path.join(TELEM_DIR_PATH, (str(i) + telem_filename))):
with open(path.join(TELEM_DIR_PATH, telem_filename), "w") as file: file.write(json.dumps(telem)) @staticmethod def read_telem_files() -> List[str]: telems = [] try: file_paths = [ path.join(TELEM_DIR_PATH, f) for f in listdir(TELEM_DIR_PATH) if path.isfile(path.join(TELEM_DIR_PATH, f)) ] except FileNotFoundError: raise FileNotFoundError( "Telemetries to send not found. " "Refer to readme to figure out how to generate telemetries and where to put them." ) for file_path in file_paths: with open(file_path, "r") as telem_file: telem_string = "".join(telem_file.readlines()).replace("\n", "") telems.append(telem_string) return telems @staticmethod def get_all_telemetries() -> List[Dict]: return [json.loads(t) for t in SampleFileParser.read_telem_files()]
telem_filename = str(i) + telem_filename break
conditional_block
sample_file_parser.py
import json
import logging
from os import listdir, path
from typing import Dict, List

from tqdm import tqdm

TELEM_DIR_PATH = "../envs/monkey_zoo/blackbox/tests/performance/telemetry_sample"
MAX_SAME_TYPE_TELEM_FILES = 10000
LOGGER = logging.getLogger(__name__)


class SampleFileParser:
    @staticmethod
    def save_telemetries_to_files(telems: List[Dict]):
        for telem in tqdm(telems, desc="Telemetries saved to files", position=3):
            SampleFileParser.save_telemetry_to_file(telem)

    @staticmethod
    def save_telemetry_to_file(telem: Dict):
        telem_filename = telem["name"] + telem["method"]

        for i in range(MAX_SAME_TYPE_TELEM_FILES):
            if not path.exists(path.join(TELEM_DIR_PATH, (str(i) + telem_filename))):
                telem_filename = str(i) + telem_filename
                break

        with open(path.join(TELEM_DIR_PATH, telem_filename), "w") as file:
            file.write(json.dumps(telem))

    @staticmethod
    def read_telem_files() -> List[str]:
        telems = []
        try:
            file_paths = [
                path.join(TELEM_DIR_PATH, f)
for f in listdir(TELEM_DIR_PATH) if path.isfile(path.join(TELEM_DIR_PATH, f)) ] except FileNotFoundError: raise FileNotFoundError( "Telemetries to send not found. " "Refer to readme to figure out how to generate telemetries and where to put them." ) for file_path in file_paths: with open(file_path, "r") as telem_file: telem_string = "".join(telem_file.readlines()).replace("\n", "") telems.append(telem_string) return telems @staticmethod def get_all_telemetries() -> List[Dict]: return [json.loads(t) for t in SampleFileParser.read_telem_files()]
random_line_split
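To close, a minimal round-trip sketch for the `SampleFileParser` shown in the last two records; it is commentary, not part of the sample. The telemetry dict is invented, the `sample_file_parser` import path is an assumption, and note that the class never creates `TELEM_DIR_PATH` itself.

# Hypothetical round trip: write one telemetry sample, then read it back.
import os

from sample_file_parser import SampleFileParser, TELEM_DIR_PATH

os.makedirs(TELEM_DIR_PATH, exist_ok=True)

sample = {"name": "state", "method": "POST", "data": {"done": True}}
SampleFileParser.save_telemetry_to_file(sample)   # written as '0statePOST'

# Reads every file under TELEM_DIR_PATH and JSON-decodes each one.
telems = SampleFileParser.get_all_telemetries()
assert any(t["name"] == "state" for t in telems)

The '0statePOST' name comes from the collision loop: the first index `i` whose prefixed filename does not exist yet is prepended to `name + method` before writing.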