file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
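Each row below is one fill-in-the-middle (FIM) sample: a source file split into a `prefix`, a masked `middle`, and a `suffix`, with `fim_type` naming the kind of span that was masked (`conditional_block`, `random_line_split`, `identifier_name`, or `identifier_body`). Concatenating the three text columns in order recovers the original file. A minimal sketch of that reconstruction follows; the dataset id and the `datasets` loading path are assumptions for illustration, and only the column names come from the schema above:

```python
# Hedged sketch: reassemble a source file from one FIM row of this dataset.
from datasets import load_dataset  # assumes the dump is loadable as a HF dataset

def reassemble(row):
    # prefix + middle + suffix restores the original file text
    return row["prefix"] + row["middle"] + row["suffix"]

ds = load_dataset("example/fim-code-samples", split="train")  # hypothetical id
row = ds[0]
print(row["file_name"], row["fim_type"])
print(reassemble(row))
```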
migrations.js | // File describing all migrations and their upward/downward changes
// for API Usage Information see https://github.com/percolatestudio/meteor-migrations
import {Meteor} from 'meteor/meteor';
import {ImageResources} from '../imports/modules/constants';
/* eslint-disable lodash/prefer-lodash-method */
if(Meteor.isServer) |
/* eslint-enable */
| {
Migrations.config({
log: true,
logIfLatest: false
});
Migrations.add({
version: 1,
name: 'Adds profileImage field to every user if not already there. Uses the id `Default/default_<gender-name>`.',
up: () => {
const users = Meteor.users.find().fetch(); //eslint-disable-line
_(users)
.filter((u) => _.isNil(u.profile.profileImage))
.forEach((user) => {
const gender = user.profile.gender;
Meteor.users.update({_id: user._id}, {
$set: {
'profile.profileImage': ImageResources.profile.defaultProfileImageUrl(gender)
}
});
});
},
down: () => Meteor.users.update({}, {$unset: {'profile.profileImage': ""}}, {validate: false, multi: true})
});
Migrations.add({
version: 2,
name: 'Move api details from root user object to a stressApi sub-document',
up: () => {
const users = Meteor.users.find().fetch();
_(users)
.map((user) => {
user.stressApi = {
apiKey: user.apiAuthKey,
apiAuthType: user.apiAuthType
};
delete user.apiAuthKey;
delete user.apiAuthType;
return user;
}).forEach((user) => Meteor.users.update({_id: user._id}, {
$unset: {
apiAuthKey: "",
apiAuthType: ""
},
$set: {
"stressApi.apiKey": user.stressApi.apiKey,
"stressApi.apiAuthType": user.stressApi.apiKey
}
}, {validate: false, multi: true}));
},
down() {
const users = Meteor.users.find().fetch();
_(users)
.map((user) => {
user.apiAuthKey = user.stressApi.apiKey;
user.apiAuthType = user.stressApi.apiAuthType;
delete user.stressApi;
return user;
}).forEach((user) => Meteor.users.update({_id: user._id}, {
$unset: {
stressApi: ""
},
$set: {
apiAuthKey: user.apiAuthKey,
apiAuthType: user.apiAuthType
}
}, {validate: false, multi: true}));
}
});
} | conditional_block |
migrations.js | // File describing all migrations and their upward/downward changes
// for API Usage Information see https://github.com/percolatestudio/meteor-migrations
import {Meteor} from 'meteor/meteor';
import {ImageResources} from '../imports/modules/constants';
/* eslint-disable lodash/prefer-lodash-method */
if(Meteor.isServer) {
Migrations.config({
log: true,
logIfLatest: false
});
Migrations.add({
version: 1,
name: 'Adds profileImage field to every user if not already there. Uses the id `Default/default_<gender-name>`.',
up: () => {
const users = Meteor.users.find().fetch(); //eslint-disable-line
_(users)
.filter((u) => _.isNil(u.profile.profileImage))
.forEach((user) => {
const gender = user.profile.gender;
Meteor.users.update({_id: user._id}, {
$set: {
'profile.profileImage': ImageResources.profile.defaultProfileImageUrl(gender)
}
});
});
},
down: () => Meteor.users.update({}, {$unset: {'profile.profileImage': ""}}, {validate: false, multi: true})
});
Migrations.add({
version: 2,
name: 'Move api details from root user object to a stressApi sub-document',
up: () => {
const users = Meteor.users.find().fetch();
_(users)
.map((user) => {
user.stressApi = {
apiKey: user.apiAuthKey,
apiAuthType: user.apiAuthType
};
delete user.apiAuthKey;
delete user.apiAuthType;
return user;
}).forEach((user) => Meteor.users.update({_id: user._id}, {
$unset: {
apiAuthKey: "",
apiAuthType: ""
},
$set: {
"stressApi.apiKey": user.stressApi.apiKey,
"stressApi.apiAuthType": user.stressApi.apiKey
}
}, {validate: false, multi: true})); | _(users)
.map((user) => {
user.apiAuthKey = user.stressApi.apiKey;
user.apiAuthType = user.stressApi.apiAuthType;
delete user.stressApi;
return user;
}).forEach((user) => Meteor.users.update({_id: user._id}, {
$unset: {
stressApi: ""
},
$set: {
apiAuthKey: user.apiAuthKey,
apiAuthType: user.apiAuthType
}
}, {validate: false, multi: true}));
}
});
}
/* eslint-enable */ | },
down() {
const users = Meteor.users.find().fetch();
| random_line_split |
migrations.js | // File describing all migrations and their upward/downward changes
// for API Usage Information see https://github.com/percolatestudio/meteor-migrations
import {Meteor} from 'meteor/meteor';
import {ImageResources} from '../imports/modules/constants';
/* eslint-disable lodash/prefer-lodash-method */
if(Meteor.isServer) {
Migrations.config({
log: true,
logIfLatest: false
});
Migrations.add({
version: 1,
name: 'Adds profileImage field to every user if not already there. Uses the id `Default/default_<gender-name>`.',
up: () => {
const users = Meteor.users.find().fetch(); //eslint-disable-line
_(users)
.filter((u) => _.isNil(u.profile.profileImage))
.forEach((user) => {
const gender = user.profile.gender;
Meteor.users.update({_id: user._id}, {
$set: {
'profile.profileImage': ImageResources.profile.defaultProfileImageUrl(gender)
}
});
});
},
down: () => Meteor.users.update({}, {$unset: {'profile.profileImage': ""}}, {validate: false, multi: true})
});
Migrations.add({
version: 2,
name: 'Move api details from root user object to a stressApi sub-document',
up: () => {
const users = Meteor.users.find().fetch();
_(users)
.map((user) => {
user.stressApi = {
apiKey: user.apiAuthKey,
apiAuthType: user.apiAuthType
};
delete user.apiAuthKey;
delete user.apiAuthType;
return user;
}).forEach((user) => Meteor.users.update({_id: user._id}, {
$unset: {
apiAuthKey: "",
apiAuthType: ""
},
$set: {
"stressApi.apiKey": user.stressApi.apiKey,
"stressApi.apiAuthType": user.stressApi.apiKey
}
}, {validate: false, multi: true}));
},
| () {
const users = Meteor.users.find().fetch();
_(users)
.map((user) => {
user.apiAuthKey = user.stressApi.apiKey;
user.apiAuthType = user.stressApi.apiAuthType;
delete user.stressApi;
return user;
}).forEach((user) => Meteor.users.update({_id: user._id}, {
$unset: {
stressApi: ""
},
$set: {
apiAuthKey: user.apiAuthKey,
apiAuthType: user.apiAuthType
}
}, {validate: false, multi: true}));
}
});
}
/* eslint-enable */
| down | identifier_name |
digest.py | # -*- test-case-name: twisted.web.test.test_httpauth -*-
# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of RFC2617: HTTP Digest Authentication
@see: U{http://www.faqs.org/rfcs/rfc2617.html}
"""
from zope.interface import implements
from twisted.cred import credentials
from twisted.web.iweb import ICredentialFactory
class DigestCredentialFactory(object):
"""
Wrapper for L{digest.DigestCredentialFactory} that implements the
L{ICredentialFactory} interface.
"""
implements(ICredentialFactory)
scheme = 'digest'
def __init__(self, algorithm, authenticationRealm):
"""
Create the digest credential factory that this object wraps.
"""
self.digest = credentials.DigestCredentialFactory(algorithm,
authenticationRealm)
def getChallenge(self, request):
"""
Generate the challenge for use in the WWW-Authenticate header
| response to which this challenge is being generated.
@return: The C{dict} that can be used to generate a WWW-Authenticate
header.
"""
return self.digest.getChallenge(request.getClientIP())
def decode(self, response, request):
"""
Create a L{twisted.cred.digest.DigestedCredentials} object from the
given response and request.
@see: L{ICredentialFactory.decode}
"""
return self.digest.decode(response,
request.method,
request.getClientIP()) | @param request: The L{IRequest} to which access was denied and for the | random_line_split
digest.py | # -*- test-case-name: twisted.web.test.test_httpauth -*-
# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of RFC2617: HTTP Digest Authentication
@see: U{http://www.faqs.org/rfcs/rfc2617.html}
"""
from zope.interface import implements
from twisted.cred import credentials
from twisted.web.iweb import ICredentialFactory
class | (object):
"""
Wrapper for L{digest.DigestCredentialFactory} that implements the
L{ICredentialFactory} interface.
"""
implements(ICredentialFactory)
scheme = 'digest'
def __init__(self, algorithm, authenticationRealm):
"""
Create the digest credential factory that this object wraps.
"""
self.digest = credentials.DigestCredentialFactory(algorithm,
authenticationRealm)
def getChallenge(self, request):
"""
Generate the challenge for use in the WWW-Authenticate header
@param request: The L{IRequest} to which access was denied and for the
response to which this challenge is being generated.
@return: The C{dict} that can be used to generate a WWW-Authenticate
header.
"""
return self.digest.getChallenge(request.getClientIP())
def decode(self, response, request):
"""
Create a L{twisted.cred.digest.DigestedCredentials} object from the
given response and request.
@see: L{ICredentialFactory.decode}
"""
return self.digest.decode(response,
request.method,
request.getClientIP())
| DigestCredentialFactory | identifier_name |
digest.py | # -*- test-case-name: twisted.web.test.test_httpauth -*-
# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of RFC2617: HTTP Digest Authentication
@see: U{http://www.faqs.org/rfcs/rfc2617.html}
"""
from zope.interface import implements
from twisted.cred import credentials
from twisted.web.iweb import ICredentialFactory
class DigestCredentialFactory(object):
"""
Wrapper for L{digest.DigestCredentialFactory} that implements the
L{ICredentialFactory} interface.
"""
implements(ICredentialFactory)
scheme = 'digest'
def __init__(self, algorithm, authenticationRealm):
|
def getChallenge(self, request):
"""
Generate the challenge for use in the WWW-Authenticate header
@param request: The L{IRequest} to which access was denied and for the
response to which this challenge is being generated.
@return: The C{dict} that can be used to generate a WWW-Authenticate
header.
"""
return self.digest.getChallenge(request.getClientIP())
def decode(self, response, request):
"""
Create a L{twisted.cred.digest.DigestedCredentials} object from the
given response and request.
@see: L{ICredentialFactory.decode}
"""
return self.digest.decode(response,
request.method,
request.getClientIP())
| """
Create the digest credential factory that this object wraps.
"""
self.digest = credentials.DigestCredentialFactory(algorithm,
authenticationRealm) | identifier_body |
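The digest.py rows above adapt `twisted.cred.credentials.DigestCredentialFactory` to the `ICredentialFactory` interface that twisted.web's guard machinery consumes. A hedged sketch of how such a factory is typically wired in; the realm, checker contents, and `root_resource` are illustrative, while `Portal` and `HTTPAuthSessionWrapper` are standard Twisted APIs of that era:

```python
# Hedged sketch: guarding a resource with the wrapper factory above.
from zope.interface import implements
from twisted.cred.portal import IRealm, Portal
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.web.guard import HTTPAuthSessionWrapper
from twisted.web.resource import IResource

class SimpleRealm(object):
    """Illustrative realm: every authenticated avatar gets the same resource."""
    implements(IRealm)

    def __init__(self, resource):
        self.resource = resource

    def requestAvatar(self, avatarId, mind, *interfaces):
        if IResource in interfaces:
            return IResource, self.resource, lambda: None
        raise NotImplementedError()

portal = Portal(SimpleRealm(root_resource),  # root_resource: your guarded page
                [InMemoryUsernamePasswordDatabaseDontUse(alice='secret')])
factory = DigestCredentialFactory('md5', 'example realm')
guarded = HTTPAuthSessionWrapper(portal, [factory])
```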
postgres.py | # -*- coding: utf-8 -*-
"""
mchem.postgres
~~~~~~~~~~~~~~
Functions to build and benchmark PostgreSQL database for comparison.
:copyright: Copyright 2014 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import logging
import time
import click
import numpy as np
import psycopg2
from psycopg2.extensions import AsIs
log = logging.getLogger(__name__)
# Start by creating the database and loading the chembl dump via the command line:
# createdb chembl
# psql chembl < chembl_19.pgdump.sql
@click.group()
@click.option('--db', '-d', default='mchem', envvar='MCHEM_POSTGRES_DB', help='PostgreSQL database name (default: mchem).')
@click.option('--user', '-u', default='root', envvar='MCHEM_POSTGRES_USER', help='PostgreSQL username (default: root).')
@click.option('--password', '-p', default=None, envvar='MCHEM_POSTGRES_PASSWORD', help='PostgreSQL password.')
@click.option('--verbose', '-v', is_flag=True, help='Verbose debug logging.')
@click.help_option('--help', '-h')
@click.pass_context
def cli(ctx, db, user, password, verbose):
"""PostgreSQL command line interface."""
click.echo('Connecting %s@%s' % (user, db))
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO, format='%(levelname)s: %(message)s')
ctx.obj = psycopg2.connect(database=db, user=user, password=password)
@cli.command()
@click.pass_obj
def load(conn):
"""Build PostgreSQL database."""
cur = conn.cursor()
cur.execute('create extension if not exists rdkit;')
cur.execute('create schema rdk;')
cur.execute('drop table if exists biotherapeutics, drug_mechanism, activities, assays, assay_parameters, compound_records, compound_properties, molecule_hierarchy, ligand_eff, predicted_binding_domains, molecule_synonyms, docs, formulations, molecule_atc_classification cascade;')
cur.execute('select * into rdk.mols from (select molregno,mol_from_ctab(molfile::cstring) m from compound_structures) tmp where m is not null;')
cur.execute('create index molidx on rdk.mols using gist(m);')
cur.execute('alter table rdk.mols add primary key (molregno);')
cur.execute('select molregno, m into rdk.fps from rdk.mols;')
cur.execute('alter table rdk.fps add column m2l512 bfp;')
cur.execute('alter table rdk.fps add column m2l2048 bfp;')
cur.execute('alter table rdk.fps add column m2 sfp;')
cur.execute('alter table rdk.fps add column m3 sfp;')
cur.execute('update rdk.fps set m2 = morgan_fp(m);')
cur.execute('update rdk.fps set m3 = morgan_fp(m, 3);')
cur.execute('set rdkit.morgan_fp_size=2048;')
cur.execute('update rdk.fps set m2l2048 = morganbv_fp(m);')
cur.execute('set rdkit.morgan_fp_size=512;')
cur.execute('update rdk.fps set m2l512 = morganbv_fp(m);')
cur.execute('alter table rdk.fps drop column m;')
cur.execute('create index fps_m2_idx on rdk.fps using gist(m2);')
cur.execute('create index fps_m3_idx on rdk.fps using gist(m3);')
cur.execute('create index fps_m2l2048_idx on rdk.fps using gist(m2l2048);')
cur.execute('create index fps_m2l512_idx on rdk.fps using gist(m2l512);')
cur.execute('alter table rdk.fps add primary key (molregno);')
conn.commit()
cur.close()
conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', '-f', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', '-t', default=0.8, help='Tanimoto threshold (default: 0.8).')
@click.pass_obj
def profile(conn, sample, fp, threshold):
cur = conn.cursor()
mol_ids = sample.read().strip().split('\n')
times = []
cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
for i, mol_id in enumerate(mol_ids[:100]):
|
# Save results
result = {
'median_time': np.median(times),
'mean_time': np.mean(times),
'fp': fp,
'threshold': threshold
}
log.info(result)
cur.close()
conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', default=0.8, help='Similarity search threshold (default 0.8).')
@click.pass_obj
def samplesim(conn, sample, threshold, fp):
"""Perform a similarity search on every molecule in sample and print results."""
click.echo('Fingerprint: %s, Threshold: %s' % (fp, threshold))
cur = conn.cursor()
mol_ids = sample.read().strip().split('\n')
cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
for i, mol_id in enumerate(mol_ids[:100]):
click.echo('Query: %s (%s of %s)' % (mol_id, i+1, len(mol_ids)))
cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
molregno = cur.fetchone()[0]
cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
qfp = cur.fetchone()[0]
cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
results = [r[0] for r in cur.fetchall()]
chembl_ids = []
for mrn in results:
cur.execute("select chembl_id from chembl_id_lookup where entity_id = %s and entity_type = 'COMPOUND'", (mrn,))
chembl_ids.append(cur.fetchone()[0])
click.echo(chembl_ids)
cur.close()
conn.close()
| log.debug('Query molecule %s of %s: %s' % (i+1, len(mol_ids), mol_id))
# ARGH! The CHEMBL ID vs. molregno thing is a nightmare
cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
molregno = cur.fetchone()[0]
#cur.execute("select m from rdk.mols where molregno = %s", (molregno,))
#smiles = cur.fetchone()[0]
cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
qfp = cur.fetchone()[0]
log.debug(mol_id)
start = time.time()
cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
#cur.execute("select molregno from rdk.fps where %s%%morganbv_fp(%s)", (fp, smiles,)) # using smiles
results = cur.fetchall()
end = time.time()
times.append(end - start) | conditional_block |
postgres.py | # -*- coding: utf-8 -*-
"""
mchem.postgres
~~~~~~~~~~~~~~
Functions to build and benchmark PostgreSQL database for comparison.
:copyright: Copyright 2014 by Matt Swain.
:license: MIT, see LICENSE file for more details. | import logging
import time
import click
import numpy as np
import psycopg2
from psycopg2.extensions import AsIs
log = logging.getLogger(__name__)
# Start by creating the database and loading the chembl dump via the command line:
# createdb chembl
# psql chembl < chembl_19.pgdump.sql
@click.group()
@click.option('--db', '-d', default='mchem', envvar='MCHEM_POSTGRES_DB', help='PostgreSQL database name (default: mchem).')
@click.option('--user', '-u', default='root', envvar='MCHEM_POSTGRES_USER', help='PostgreSQL username (default: root).')
@click.option('--password', '-p', default=None, envvar='MCHEM_POSTGRES_PASSWORD', help='PostgreSQL password.')
@click.option('--verbose', '-v', is_flag=True, help='Verbose debug logging.')
@click.help_option('--help', '-h')
@click.pass_context
def cli(ctx, db, user, password, verbose):
"""PostgreSQL command line interface."""
click.echo('Connecting %s@%s' % (user, db))
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO, format='%(levelname)s: %(message)s')
ctx.obj = psycopg2.connect(database=db, user=user, password=password)
@cli.command()
@click.pass_obj
def load(conn):
"""Build PostgreSQL database."""
cur = conn.cursor()
cur.execute('create extension if not exists rdkit;')
cur.execute('create schema rdk;')
cur.execute('drop table if exists biotherapeutics, drug_mechanism, activities, assays, assay_parameters, compound_records, compound_properties, molecule_hierarchy, ligand_eff, predicted_binding_domains, molecule_synonyms, docs, formulations, molecule_atc_classification cascade;')
cur.execute('select * into rdk.mols from (select molregno,mol_from_ctab(molfile::cstring) m from compound_structures) tmp where m is not null;')
cur.execute('create index molidx on rdk.mols using gist(m);')
cur.execute('alter table rdk.mols add primary key (molregno);')
cur.execute('select molregno, m into rdk.fps from rdk.mols;')
cur.execute('alter table rdk.fps add column m2l512 bfp;')
cur.execute('alter table rdk.fps add column m2l2048 bfp;')
cur.execute('alter table rdk.fps add column m2 sfp;')
cur.execute('alter table rdk.fps add column m3 sfp;')
cur.execute('update rdk.fps set m2 = morgan_fp(m);')
cur.execute('update rdk.fps set m3 = morgan_fp(m, 3);')
cur.execute('set rdkit.morgan_fp_size=2048;')
cur.execute('update rdk.fps set m2l2048 = morganbv_fp(m);')
cur.execute('set rdkit.morgan_fp_size=512;')
cur.execute('update rdk.fps set m2l512 = morganbv_fp(m);')
cur.execute('alter table rdk.fps drop column m;')
cur.execute('create index fps_m2_idx on rdk.fps using gist(m2);')
cur.execute('create index fps_m3_idx on rdk.fps using gist(m3);')
cur.execute('create index fps_m2l2048_idx on rdk.fps using gist(m2l2048);')
cur.execute('create index fps_m2l512_idx on rdk.fps using gist(m2l512);')
cur.execute('alter table rdk.fps add primary key (molregno);')
conn.commit()
cur.close()
conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', '-f', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', '-t', default=0.8, help='Tanimoto threshold (default: 0.8).')
@click.pass_obj
def profile(conn, sample, fp, threshold):
cur = conn.cursor()
mol_ids = sample.read().strip().split('\n')
times = []
cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
for i, mol_id in enumerate(mol_ids[:100]):
log.debug('Query molecule %s of %s: %s' % (i+1, len(mol_ids), mol_id))
# ARGH! The CHEMBL ID vs. molregno thing is a nightmare
cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
molregno = cur.fetchone()[0]
#cur.execute("select m from rdk.mols where molregno = %s", (molregno,))
#smiles = cur.fetchone()[0]
cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
qfp = cur.fetchone()[0]
log.debug(mol_id)
start = time.time()
cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
#cur.execute("select molregno from rdk.fps where %s%%morganbv_fp(%s)", (fp, smiles,)) # using smiles
results = cur.fetchall()
end = time.time()
times.append(end - start)
# Save results
result = {
'median_time': np.median(times),
'mean_time': np.mean(times),
'fp': fp,
'threshold': threshold
}
log.info(result)
cur.close()
conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', default=0.8, help='Similarity search threshold (default 0.8).')
@click.pass_obj
def samplesim(conn, sample, threshold, fp):
"""Perform a similarity search on every molecule in sample and print results."""
click.echo('Fingerprint: %s, Threshold: %s' % (fp, threshold))
cur = conn.cursor()
mol_ids = sample.read().strip().split('\n')
cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
for i, mol_id in enumerate(mol_ids[:100]):
click.echo('Query: %s (%s of %s)' % (mol_id, i+1, len(mol_ids)))
cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
molregno = cur.fetchone()[0]
cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
qfp = cur.fetchone()[0]
cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
results = [r[0] for r in cur.fetchall()]
chembl_ids = []
for mrn in results:
cur.execute("select chembl_id from chembl_id_lookup where entity_id = %s and entity_type = 'COMPOUND'", (mrn,))
chembl_ids.append(cur.fetchone()[0])
click.echo(chembl_ids)
cur.close()
conn.close() | """
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division | random_line_split |
postgres.py | # -*- coding: utf-8 -*-
"""
mchem.postgres
~~~~~~~~~~~~~~
Functions to build and benchmark PostgreSQL database for comparison.
:copyright: Copyright 2014 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import logging
import time
import click
import numpy as np
import psycopg2
from psycopg2.extensions import AsIs
log = logging.getLogger(__name__)
# Start by creating the database and loading the chembl dump via the command line:
# createdb chembl
# psql chembl < chembl_19.pgdump.sql
@click.group()
@click.option('--db', '-d', default='mchem', envvar='MCHEM_POSTGRES_DB', help='PostgreSQL database name (default: mchem).')
@click.option('--user', '-u', default='root', envvar='MCHEM_POSTGRES_USER', help='PostgreSQL username (default: root).')
@click.option('--password', '-p', default=None, envvar='MCHEM_POSTGRES_PASSWORD', help='PostgreSQL password.')
@click.option('--verbose', '-v', is_flag=True, help='Verbose debug logging.')
@click.help_option('--help', '-h')
@click.pass_context
def cli(ctx, db, user, password, verbose):
"""PostgreSQL command line interface."""
click.echo('Connecting %s@%s' % (user, db))
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO, format='%(levelname)s: %(message)s')
ctx.obj = psycopg2.connect(database=db, user=user, password=password)
@cli.command()
@click.pass_obj
def load(conn):
"""Build PostgreSQL database."""
cur = conn.cursor()
cur.execute('create extension if not exists rdkit;')
cur.execute('create schema rdk;')
cur.execute('drop table if exists biotherapeutics, drug_mechanism, activities, assays, assay_parameters, compound_records, compound_properties, molecule_hierarchy, ligand_eff, predicted_binding_domains, molecule_synonyms, docs, formulations, molecule_atc_classification cascade;')
cur.execute('select * into rdk.mols from (select molregno,mol_from_ctab(molfile::cstring) m from compound_structures) tmp where m is not null;')
cur.execute('create index molidx on rdk.mols using gist(m);')
cur.execute('alter table rdk.mols add primary key (molregno);')
cur.execute('select molregno, m into rdk.fps from rdk.mols;')
cur.execute('alter table rdk.fps add column m2l512 bfp;')
cur.execute('alter table rdk.fps add column m2l2048 bfp;')
cur.execute('alter table rdk.fps add column m2 sfp;')
cur.execute('alter table rdk.fps add column m3 sfp;')
cur.execute('update rdk.fps set m2 = morgan_fp(m);')
cur.execute('update rdk.fps set m3 = morgan_fp(m, 3);')
cur.execute('set rdkit.morgan_fp_size=2048;')
cur.execute('update rdk.fps set m2l2048 = morganbv_fp(m);')
cur.execute('set rdkit.morgan_fp_size=512;')
cur.execute('update rdk.fps set m2l512 = morganbv_fp(m);')
cur.execute('alter table rdk.fps drop column m;')
cur.execute('create index fps_m2_idx on rdk.fps using gist(m2);')
cur.execute('create index fps_m3_idx on rdk.fps using gist(m3);')
cur.execute('create index fps_m2l2048_idx on rdk.fps using gist(m2l2048);')
cur.execute('create index fps_m2l512_idx on rdk.fps using gist(m2l512);')
cur.execute('alter table rdk.fps add primary key (molregno);')
conn.commit()
cur.close()
conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', '-f', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', '-t', default=0.8, help='Tanimoto threshold (default: 0.8).')
@click.pass_obj
def profile(conn, sample, fp, threshold):
cur = conn.cursor()
mol_ids = sample.read().strip().split('\n')
times = []
cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
for i, mol_id in enumerate(mol_ids[:100]):
log.debug('Query molecule %s of %s: %s' % (i+1, len(mol_ids), mol_id))
# ARGH! The CHEMBL ID vs. molregno thing is a nightmare
cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
molregno = cur.fetchone()[0]
#cur.execute("select m from rdk.mols where molregno = %s", (molregno,))
#smiles = cur.fetchone()[0]
cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
qfp = cur.fetchone()[0]
log.debug(mol_id)
start = time.time()
cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
#cur.execute("select molregno from rdk.fps where %s%%morganbv_fp(%s)", (fp, smiles,)) # using smiles
results = cur.fetchall()
end = time.time()
times.append(end - start)
# Save results
result = {
'median_time': np.median(times),
'mean_time': np.mean(times),
'fp': fp,
'threshold': threshold
}
log.info(result)
cur.close()
conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', default=0.8, help='Similarity search threshold (default 0.8).')
@click.pass_obj
def samplesim(conn, sample, threshold, fp):
| """Perform a similarity search on every molecule in sample and print results."""
click.echo('Fingerprint: %s, Threshold: %s' % (fp, threshold))
cur = conn.cursor()
mol_ids = sample.read().strip().split('\n')
cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
for i, mol_id in enumerate(mol_ids[:100]):
click.echo('Query: %s (%s of %s)' % (mol_id, i+1, len(mol_ids)))
cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
molregno = cur.fetchone()[0]
cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
qfp = cur.fetchone()[0]
cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
results = [r[0] for r in cur.fetchall()]
chembl_ids = []
for mrn in results:
cur.execute("select chembl_id from chembl_id_lookup where entity_id = %s and entity_type = 'COMPOUND'", (mrn,))
chembl_ids.append(cur.fetchone()[0])
click.echo(chembl_ids)
cur.close()
conn.close() | identifier_body |
|
postgres.py | # -*- coding: utf-8 -*-
"""
mchem.postgres
~~~~~~~~~~~~~~
Functions to build and benchmark PostgreSQL database for comparison.
:copyright: Copyright 2014 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import logging
import time
import click
import numpy as np
import psycopg2
from psycopg2.extensions import AsIs
log = logging.getLogger(__name__)
# Start by creating the database and loading the chembl dump via the command line:
# createdb chembl
# psql chembl < chembl_19.pgdump.sql
@click.group()
@click.option('--db', '-d', default='mchem', envvar='MCHEM_POSTGRES_DB', help='PostgreSQL database name (default: mchem).')
@click.option('--user', '-u', default='root', envvar='MCHEM_POSTGRES_USER', help='PostgreSQL username (default: root).')
@click.option('--password', '-p', default=None, envvar='MCHEM_POSTGRES_PASSWORD', help='PostgreSQL password.')
@click.option('--verbose', '-v', is_flag=True, help='Verbose debug logging.')
@click.help_option('--help', '-h')
@click.pass_context
def cli(ctx, db, user, password, verbose):
"""PostgreSQL command line interface."""
click.echo('Connecting %s@%s' % (user, db))
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO, format='%(levelname)s: %(message)s')
ctx.obj = psycopg2.connect(database=db, user=user, password=password)
@cli.command()
@click.pass_obj
def load(conn):
"""Build PostgreSQL database."""
cur = conn.cursor()
cur.execute('create extension if not exists rdkit;')
cur.execute('create schema rdk;')
cur.execute('drop table if exists biotherapeutics, drug_mechanism, activities, assays, assay_parameters, compound_records, compound_properties, molecule_hierarchy, ligand_eff, predicted_binding_domains, molecule_synonyms, docs, formulations, molecule_atc_classification cascade;')
cur.execute('select * into rdk.mols from (select molregno,mol_from_ctab(molfile::cstring) m from compound_structures) tmp where m is not null;')
cur.execute('create index molidx on rdk.mols using gist(m);')
cur.execute('alter table rdk.mols add primary key (molregno);')
cur.execute('select molregno, m into rdk.fps from rdk.mols;')
cur.execute('alter table rdk.fps add column m2l512 bfp;')
cur.execute('alter table rdk.fps add column m2l2048 bfp;')
cur.execute('alter table rdk.fps add column m2 sfp;')
cur.execute('alter table rdk.fps add column m3 sfp;')
cur.execute('update rdk.fps set m2 = morgan_fp(m);')
cur.execute('update rdk.fps set m3 = morgan_fp(m, 3);')
cur.execute('set rdkit.morgan_fp_size=2048;')
cur.execute('update rdk.fps set m2l2048 = morganbv_fp(m);')
cur.execute('set rdkit.morgan_fp_size=512;')
cur.execute('update rdk.fps set m2l512 = morganbv_fp(m);')
cur.execute('alter table rdk.fps drop column m;')
cur.execute('create index fps_m2_idx on rdk.fps using gist(m2);')
cur.execute('create index fps_m3_idx on rdk.fps using gist(m3);')
cur.execute('create index fps_m2l2048_idx on rdk.fps using gist(m2l2048);')
cur.execute('create index fps_m2l512_idx on rdk.fps using gist(m2l512);')
cur.execute('alter table rdk.fps add primary key (molregno);')
conn.commit()
cur.close()
conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', '-f', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', '-t', default=0.8, help='Tanimoto threshold (default: 0.8).')
@click.pass_obj
def | (conn, sample, fp, threshold):
cur = conn.cursor()
mol_ids = sample.read().strip().split('\n')
times = []
cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
for i, mol_id in enumerate(mol_ids[:100]):
log.debug('Query molecule %s of %s: %s' % (i+1, len(mol_ids), mol_id))
# ARGH! The CHEMBL ID vs. molregno thing is a nightmare
cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
molregno = cur.fetchone()[0]
#cur.execute("select m from rdk.mols where molregno = %s", (molregno,))
#smiles = cur.fetchone()[0]
cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
qfp = cur.fetchone()[0]
log.debug(mol_id)
start = time.time()
cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
#cur.execute("select molregno from rdk.fps where %s%%morganbv_fp(%s)", (fp, smiles,)) # using smiles
results = cur.fetchall()
end = time.time()
times.append(end - start)
# Save results
result = {
'median_time': np.median(times),
'mean_time': np.mean(times),
'fp': fp,
'threshold': threshold
}
log.info(result)
cur.close()
conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', default=0.8, help='Similarity search threshold (default 0.8).')
@click.pass_obj
def samplesim(conn, sample, threshold, fp):
"""Perform a similarity search on every molecule in sample and print results."""
click.echo('Fingerprint: %s, Threshold: %s' % (fp, threshold))
cur = conn.cursor()
mol_ids = sample.read().strip().split('\n')
cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
for i, mol_id in enumerate(mol_ids[:100]):
click.echo('Query: %s (%s of %s)' % (mol_id, i+1, len(mol_ids)))
cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
molregno = cur.fetchone()[0]
cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
qfp = cur.fetchone()[0]
cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
results = [r[0] for r in cur.fetchall()]
chembl_ids = []
for mrn in results:
cur.execute("select chembl_id from chembl_id_lookup where entity_id = %s and entity_type = 'COMPOUND'", (mrn,))
chembl_ids.append(cur.fetchone()[0])
click.echo(chembl_ids)
cur.close()
conn.close()
| profile | identifier_name |
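One sharp edge in the postgres.py rows deserves a note: the RDKit cartridge's Tanimoto-similarity operator is a literal `%`, which must be doubled to `%%` inside psycopg2 query strings because psycopg2 reserves `%s` for parameters; that is why the queries read `where %s%%%s`. A minimal hedged sketch of the same pattern, with connection details and the molregno as illustrative values:

```python
# Hedged sketch: RDKit-cartridge similarity search through psycopg2.
import psycopg2
from psycopg2.extensions import AsIs

conn = psycopg2.connect(database='mchem', user='root')  # hypothetical credentials
cur = conn.cursor()
cur.execute("set rdkit.tanimoto_threshold=%s;", (0.8,))
cur.execute("select m2 from rdk.fps where molregno = %s", (12345,))  # hypothetical id
qfp = cur.fetchone()[0]
# `%%` escapes the literal `%` operator, so this expands to:
#   select molregno from rdk.fps where m2 % <qfp>
cur.execute("select molregno from rdk.fps where %s %% %s", (AsIs('m2'), qfp))
print(cur.fetchall())
```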
patch-buildtools_wafsamba_samba__conftests.py | $NetBSD: patch-buildtools_wafsamba_samba__conftests.py,v 1.2 2019/11/10 17:01:58 adam Exp $
|
--- buildtools/wafsamba/samba_conftests.py.orig 2019-07-09 10:08:41.000000000 +0000
+++ buildtools/wafsamba/samba_conftests.py
@@ -97,9 +97,9 @@ def CHECK_LARGEFILE(conf, define='HAVE_L
if flag[:2] == "-D":
flag_split = flag[2:].split('=')
if len(flag_split) == 1:
- conf.DEFINE(flag_split[0], '1')
+ conf.DEFINE(str(flag_split[0]), '1')
else:
- conf.DEFINE(flag_split[0], flag_split[1])
+ conf.DEFINE(str(flag_split[0]), str(flag_split[1]))
if conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1',
define, | Ensure defines are strings to avoid assertion failure, some
returned values are unicode. | random_line_split |
serializers.py | from rest_framework import serializers
from .models import User, Activity, Period
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email')
extra_kwargs = {
'url': {'view_name': 'timeperiod:user-detail'},
}
class ActivitySerializer(serializers.HyperlinkedModelSerializer):
user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = Activity
fields = ('url', 'user', 'name', 'total', 'running')
extra_kwargs = {
'url': {'view_name': 'timeperiod:activity-detail'},
'user': {'view_name': 'timeperiod:user-detail'},
}
class PeriodSerializer(serializers.HyperlinkedModelSerializer):
| class Meta:
model = Period
fields = ('url', 'activity', 'start', 'end', 'valid')
extra_kwargs = {
'url': {'view_name': 'timeperiod:period-detail'},
'activity': {'view_name': 'timeperiod:activity-detail'},
} | identifier_body |
|
serializers.py | from rest_framework import serializers
from .models import User, Activity, Period
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email')
extra_kwargs = {
'url': {'view_name': 'timeperiod:user-detail'}, | class ActivitySerializer(serializers.HyperlinkedModelSerializer):
user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = Activity
fields = ('url', 'user', 'name', 'total', 'running')
extra_kwargs = {
'url': {'view_name': 'timeperiod:activity-detail'},
'user': {'view_name': 'timeperiod:user-detail'},
}
class PeriodSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Period
fields = ('url', 'activity', 'start', 'end', 'valid')
extra_kwargs = {
'url': {'view_name': 'timeperiod:period-detail'},
'activity': {'view_name': 'timeperiod:activity-detail'},
} | }
| random_line_split |
serializers.py | from rest_framework import serializers
from .models import User, Activity, Period
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email')
extra_kwargs = {
'url': {'view_name': 'timeperiod:user-detail'},
}
class ActivitySerializer(serializers.HyperlinkedModelSerializer):
user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class | :
model = Activity
fields = ('url', 'user', 'name', 'total', 'running')
extra_kwargs = {
'url': {'view_name': 'timeperiod:activity-detail'},
'user': {'view_name': 'timeperiod:user-detail'},
}
class PeriodSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Period
fields = ('url', 'activity', 'start', 'end', 'valid')
extra_kwargs = {
'url': {'view_name': 'timeperiod:period-detail'},
'activity': {'view_name': 'timeperiod:activity-detail'},
}
| Meta | identifier_name |
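The `view_name` entries in the serializers.py rows ('timeperiod:user-detail' and friends) only resolve if the routes are registered under a 'timeperiod' URL namespace. A hedged sketch of urls.py wiring that would satisfy those names; the viewset names and the 'api/' prefix are assumptions:

```python
# Hedged sketch: router wiring implied by the hyperlinked serializers above.
from django.urls import include, path
from rest_framework import routers

from . import views  # hypothetical module with User/Activity/Period viewsets

router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'activities', views.ActivityViewSet)
router.register(r'periods', views.PeriodViewSet)

urlpatterns = [
    # The (urls, app_namespace) tuple plus namespace= yields 'timeperiod:*' names.
    path('api/', include((router.urls, 'timeperiod'), namespace='timeperiod')),
]
```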
test_dhcp_for_vpcrouter_cidr.py | '''
1.create private vpc router network with cidr
2.check dhcp ip address
@author Antony WeiJiang
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.net_operations as net_ops
import test_stub_for_dhcp_ip as test_stub_dhcp
import random
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
l2_query_resource = res_ops.L2_NETWORK
type_l2 = ["L2NoVlanNetwork","L2VlanNetwork"]
l3_name = "test_dhcp_server"
ip_range_name = "dhcp_ip_range"
ip_Version = [4,6]
networkcidr = "192.168.1.0/24"
dhcp_ip_for_private_vpc = "192.168.1.3"
dhcp_system_tags = ["flatNetwork::DhcpServer::"+dhcp_ip_for_private_vpc+"::ipUuid::null"]
def test():
test_util.test_logger("start dhcp test for l3 public network")
test_util.test_dsc("get no vlan network uuid")
private_vpcnetwork = test_stub_dhcp.VpcNetwork_IP_For_Dhcp()
private_vpcnetwork.set_l2_query_resource(l2_query_resource)
private_vpcnetwork.set_l2_type(type_l2[1])
l2_no_vlan_uuid = private_vpcnetwork.get_l2uuid()
test_util.test_logger("antony @@@debug : %s" %(l2_no_vlan_uuid))
test_util.test_logger("create l3 network")
private_vpcnetwork.set_ipVersion(ip_Version[0])
private_vpcnetwork.create_l3uuid(l3_name)
test_util.test_logger("antony @@@debug : %s" %(private_vpcnetwork.get_l3uuid()))
private_vpcnetwork.add_service_to_l3_vpcnetwork()
test_util.test_logger("add ip v4 range to l3 network")
private_vpcnetwork.add_ip_by_networkcidr(ip_range_name, networkcidr, dhcp_system_tags)
if private_vpcnetwork.check_dhcp_ipaddress().find(dhcp_ip_for_private_vpc) == -1:
test_util.test_fail("dhcp server ip create fail")
test_util.test_logger("delete l3 network")
private_vpcnetwork.del_l3uuid()
test_util.test_pass("dhcp server ip create successfully")
'''
to be defined
'''
def error_cleanup():
|
'''
to be defined
'''
def env_recover():
pass
| pass | identifier_body |
test_dhcp_for_vpcrouter_cidr.py | '''
1.create private vpc router network with cidr
2.check dhcp ip address
@author Antony WeiJiang
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.net_operations as net_ops
import test_stub_for_dhcp_ip as test_stub_dhcp
import random
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
l2_query_resource = res_ops.L2_NETWORK
type_l2 = ["L2NoVlanNetwork","L2VlanNetwork"]
l3_name = "test_dhcp_server"
ip_range_name = "dhcp_ip_range"
ip_Version = [4,6]
networkcidr = "192.168.1.0/24"
dhcp_ip_for_private_vpc = "192.168.1.3"
dhcp_system_tags = ["flatNetwork::DhcpServer::"+dhcp_ip_for_private_vpc+"::ipUuid::null"]
def test():
test_util.test_logger("start dhcp test for l3 public network")
test_util.test_dsc("get no vlan network uuid")
private_vpcnetwork = test_stub_dhcp.VpcNetwork_IP_For_Dhcp()
private_vpcnetwork.set_l2_query_resource(l2_query_resource)
private_vpcnetwork.set_l2_type(type_l2[1])
l2_no_vlan_uuid = private_vpcnetwork.get_l2uuid()
test_util.test_logger("antony @@@debug : %s" %(l2_no_vlan_uuid))
test_util.test_logger("create l3 network")
private_vpcnetwork.set_ipVersion(ip_Version[0])
private_vpcnetwork.create_l3uuid(l3_name)
test_util.test_logger("antony @@@debug : %s" %(private_vpcnetwork.get_l3uuid()))
private_vpcnetwork.add_service_to_l3_vpcnetwork()
test_util.test_logger("add ip v4 range to l3 network")
private_vpcnetwork.add_ip_by_networkcidr(ip_range_name, networkcidr, dhcp_system_tags)
if private_vpcnetwork.check_dhcp_ipaddress().find(dhcp_ip_for_private_vpc) == -1:
|
test_util.test_logger("delete l3 network")
private_vpcnetwork.del_l3uuid()
test_util.test_pass("dhcp server ip create successfully")
'''
to be defined
'''
def error_cleanup():
pass
'''
to be defined
'''
def env_recover():
pass
| test_util.test_fail("dhcp server ip create fail") | conditional_block |
test_dhcp_for_vpcrouter_cidr.py | '''
1.create private vpc router network with cidr
2.check dhcp ip address
@author Antony WeiJiang
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.net_operations as net_ops
import test_stub_for_dhcp_ip as test_stub_dhcp
import random
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
l2_query_resource = res_ops.L2_NETWORK
type_l2 = ["L2NoVlanNetwork","L2VlanNetwork"]
l3_name = "test_dhcp_server"
ip_range_name = "dhcp_ip_range"
ip_Version = [4,6]
networkcidr = "192.168.1.0/24"
dhcp_ip_for_private_vpc = "192.168.1.3"
dhcp_system_tags = ["flatNetwork::DhcpServer::"+dhcp_ip_for_private_vpc+"::ipUuid::null"] |
test_util.test_dsc("get no vlan network uuid")
private_vpcnetwork = test_stub_dhcp.VpcNetwork_IP_For_Dhcp()
private_vpcnetwork.set_l2_query_resource(l2_query_resource)
private_vpcnetwork.set_l2_type(type_l2[1])
l2_no_vlan_uuid = private_vpcnetwork.get_l2uuid()
test_util.test_logger("antony @@@debug : %s" %(l2_no_vlan_uuid))
test_util.test_logger("create l3 network")
private_vpcnetwork.set_ipVersion(ip_Version[0])
private_vpcnetwork.create_l3uuid(l3_name)
test_util.test_logger("antony @@@debug : %s" %(private_vpcnetwork.get_l3uuid()))
private_vpcnetwork.add_service_to_l3_vpcnetwork()
test_util.test_logger("add ip v4 range to l3 network")
private_vpcnetwork.add_ip_by_networkcidr(ip_range_name, networkcidr, dhcp_system_tags)
if private_vpcnetwork.check_dhcp_ipaddress().find(dhcp_ip_for_private_vpc) == -1:
test_util.test_fail("dhcp server ip create fail")
test_util.test_logger("delete l3 network")
private_vpcnetwork.del_l3uuid()
test_util.test_pass("dhcp server ip create successfully")
'''
to be defined
'''
def error_cleanup():
pass
'''
to be defined
'''
def env_recover():
pass |
def test():
test_util.test_logger("start dhcp test for l3 public network") | random_line_split |
test_dhcp_for_vpcrouter_cidr.py | '''
1.create private vpc router network with cidr
2.check dhcp ip address
@author Antony WeiJiang
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.net_operations as net_ops
import test_stub_for_dhcp_ip as test_stub_dhcp
import random
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
l2_query_resource = res_ops.L2_NETWORK
type_l2 = ["L2NoVlanNetwork","L2VlanNetwork"]
l3_name = "test_dhcp_server"
ip_range_name = "dhcp_ip_range"
ip_Version = [4,6]
networkcidr = "192.168.1.0/24"
dhcp_ip_for_private_vpc = "192.168.1.3"
dhcp_system_tags = ["flatNetwork::DhcpServer::"+dhcp_ip_for_private_vpc+"::ipUuid::null"]
def test():
test_util.test_logger("start dhcp test for l3 public network")
test_util.test_dsc("get no vlan network uuid")
private_vpcnetwork = test_stub_dhcp.VpcNetwork_IP_For_Dhcp()
private_vpcnetwork.set_l2_query_resource(l2_query_resource)
private_vpcnetwork.set_l2_type(type_l2[1])
l2_no_vlan_uuid = private_vpcnetwork.get_l2uuid()
test_util.test_logger("antony @@@debug : %s" %(l2_no_vlan_uuid))
test_util.test_logger("create l3 network")
private_vpcnetwork.set_ipVersion(ip_Version[0])
private_vpcnetwork.create_l3uuid(l3_name)
test_util.test_logger("antony @@@debug : %s" %(private_vpcnetwork.get_l3uuid()))
private_vpcnetwork.add_service_to_l3_vpcnetwork()
test_util.test_logger("add ip v4 range to l3 network")
private_vpcnetwork.add_ip_by_networkcidr(ip_range_name, networkcidr, dhcp_system_tags)
if private_vpcnetwork.check_dhcp_ipaddress().find(dhcp_ip_for_private_vpc) == -1:
test_util.test_fail("dhcp server ip create fail")
test_util.test_logger("delete l3 network")
private_vpcnetwork.del_l3uuid()
test_util.test_pass("dhcp server ip create successfully")
'''
to be defined
'''
def error_cleanup():
pass
'''
to be defined
'''
def | ():
pass
| env_recover | identifier_name |
lyrics.py | #!/usr/bin/env python3
import os
from typing import Optional
import requests
from bs4 import BeautifulSoup
import pysonic.utils as utils
base_url = "https://api.genius.com"
def get_token() -> Optional[str]:
user_home_path = utils.get_home()
try:
with open(os.path.join(user_home_path, "genius_api_key"), "r") as token_file:
token = token_file.read()
except (IOError, FileNotFoundError):
token = input("Please enter your Genius API token to use this feature (or press enter to cancel): ").strip()
if not token:
return None
with open(os.path.join(user_home_path, "genius_api_key"), "w") as token_file:
token_file.write(token)
return token
def _lyrics_from_song_api_path(song_api_path):
song_url = base_url + song_api_path
response = requests.get(song_url, headers={'Authorization': f'Bearer {get_token()}'})
json = response.json()
path = json["response"]["song"]["path"]
# Regular html scraping...
page_url = "https://genius.com" + path
page = requests.get(page_url)
html = BeautifulSoup(page.text, "html.parser")
# Remove script tags that they put in the middle of the lyrics
[h.extract() for h in html('script')]
divs = html.findAll("div")
lyrics = ''
for div in divs:
classes = div.get('class')
if classes:
for one_class in classes:
if one_class.startswith('Lyrics__Container'):
lyrics += div.get_text(separator="\n")
if lyrics:
return lyrics
return 'Got a song response, but didn\'t find any lyrics.'
def get_lyrics(song_title, artist_name):
| """ Return the lyrics for a given song and artist. """
search_url = base_url + "/search"
data = {'q': song_title + " " + artist_name}
token = get_token()
if not token:
return None, None, None
response = requests.get(search_url, params=data, headers={'Authorization': f'Bearer {token}'})
json = response.json()
# The first hit is the best?
if len(json["response"]["hits"]) > 0:
song_info = json["response"]["hits"][0]
song_api_path = song_info["result"]["api_path"]
artist = song_info["result"]["primary_artist"]["name"]
title = song_info["result"]["title"]
return artist, title, _lyrics_from_song_api_path(song_api_path).strip()
return None, None, None | identifier_body |
|
lyrics.py | #!/usr/bin/env python3
import os
from typing import Optional
import requests
from bs4 import BeautifulSoup
import pysonic.utils as utils
base_url = "https://api.genius.com"
def get_token() -> Optional[str]:
user_home_path = utils.get_home()
try:
with open(os.path.join(user_home_path, "genius_api_key"), "r") as token_file:
token = token_file.read()
except (IOError, FileNotFoundError):
token = input("Please enter your Genius API token to use this feature (or press enter to cancel): ").strip()
if not token:
return None
with open(os.path.join(user_home_path, "genius_api_key"), "w") as token_file:
token_file.write(token)
return token
def | (song_api_path):
song_url = base_url + song_api_path
response = requests.get(song_url, headers={'Authorization': f'Bearer {get_token()}'})
json = response.json()
path = json["response"]["song"]["path"]
# Regular html scraping...
page_url = "https://genius.com" + path
page = requests.get(page_url)
html = BeautifulSoup(page.text, "html.parser")
# Remove script tags that they put in the middle of the lyrics
[h.extract() for h in html('script')]
divs = html.findAll("div")
lyrics = ''
for div in divs:
classes = div.get('class')
if classes:
for one_class in classes:
if one_class.startswith('Lyrics__Container'):
lyrics += div.get_text(separator="\n")
if lyrics:
return lyrics
return 'Got a song response, but didn\'t find any lyrics.'
def get_lyrics(song_title, artist_name):
""" Return the lyrics for a given song and artist. """
search_url = base_url + "/search"
data = {'q': song_title + " " + artist_name}
token = get_token()
if not token:
return None, None, None
response = requests.get(search_url, params=data, headers={'Authorization': f'Bearer {token}'})
json = response.json()
# The first hit is the best?
if len(json["response"]["hits"]) > 0:
song_info = json["response"]["hits"][0]
song_api_path = song_info["result"]["api_path"]
artist = song_info["result"]["primary_artist"]["name"]
title = song_info["result"]["title"]
return artist, title, _lyrics_from_song_api_path(song_api_path).strip()
return None, None, None
| _lyrics_from_song_api_path | identifier_name |
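For reference, a short usage sketch of the lyrics.py helpers above; the song and artist are illustrative, and on first use `get_token` prompts for a Genius API key and caches it:

```python
# Usage sketch: get_lyrics returns (artist, title, lyrics), or
# (None, None, None) when no token is configured or nothing matched.
artist, title, lyrics = get_lyrics("Karma Police", "Radiohead")
if lyrics:
    print("%s - %s\n\n%s" % (artist, title, lyrics))
else:
    print("No lyrics found (or no Genius API token configured).")
```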
lyrics.py | #!/usr/bin/env python3
import os
from typing import Optional
import requests
from bs4 import BeautifulSoup
import pysonic.utils as utils
base_url = "https://api.genius.com"
def get_token() -> Optional[str]:
user_home_path = utils.get_home()
try:
with open(os.path.join(user_home_path, "genius_api_key"), "r") as token_file:
token = token_file.read()
except (IOError, FileNotFoundError):
token = input("Please enter your Genius API token to use this feature (or press enter to cancel): ").strip()
if not token:
|
with open(os.path.join(user_home_path, "genius_api_key"), "w") as token_file:
token_file.write(token)
return token
def _lyrics_from_song_api_path(song_api_path):
song_url = base_url + song_api_path
response = requests.get(song_url, headers={'Authorization': f'Bearer {get_token()}'})
json = response.json()
path = json["response"]["song"]["path"]
# Regular html scraping...
page_url = "https://genius.com" + path
page = requests.get(page_url)
html = BeautifulSoup(page.text, "html.parser")
# Remove script tags that they put in the middle of the lyrics
[h.extract() for h in html('script')]
divs = html.findAll("div")
lyrics = ''
for div in divs:
classes = div.get('class')
if classes:
for one_class in classes:
if one_class.startswith('Lyrics__Container'):
lyrics += div.get_text(separator="\n")
if lyrics:
return lyrics
return 'Got a song response, but didn\'t find any lyrics.'
def get_lyrics(song_title, artist_name):
""" Return the lyrics for a given song and artist. """
search_url = base_url + "/search"
data = {'q': song_title + " " + artist_name}
token = get_token()
if not token:
return None, None, None
response = requests.get(search_url, params=data, headers={'Authorization': f'Bearer {token}'})
json = response.json()
# The first hit is the best?
if len(json["response"]["hits"]) > 0:
song_info = json["response"]["hits"][0]
song_api_path = song_info["result"]["api_path"]
artist = song_info["result"]["primary_artist"]["name"]
title = song_info["result"]["title"]
return artist, title, _lyrics_from_song_api_path(song_api_path).strip()
return None, None, None
| return None | conditional_block |
lyrics.py | #!/usr/bin/env python3
import os
from typing import Optional
import requests
from bs4 import BeautifulSoup
import pysonic.utils as utils
base_url = "https://api.genius.com"
def get_token() -> Optional[str]:
user_home_path = utils.get_home()
try:
with open(os.path.join(user_home_path, "genius_api_key"), "r") as token_file:
token = token_file.read()
except (IOError, FileNotFoundError):
token = input("Please enter your Genius API token to use this feature (or press enter to cancel): ").strip()
if not token:
return None
with open(os.path.join(user_home_path, "genius_api_key"), "w") as token_file:
token_file.write(token)
return token
|
def _lyrics_from_song_api_path(song_api_path):
song_url = base_url + song_api_path
response = requests.get(song_url, headers={'Authorization': f'Bearer {get_token()}'})
json = response.json()
path = json["response"]["song"]["path"]
# Regular html scraping...
page_url = "https://genius.com" + path
page = requests.get(page_url)
html = BeautifulSoup(page.text, "html.parser")
# Remove script tags that they put in the middle of the lyrics
[h.extract() for h in html('script')]
divs = html.findAll("div")
lyrics = ''
for div in divs:
classes = div.get('class')
if classes:
for one_class in classes:
if one_class.startswith('Lyrics__Container'):
lyrics += div.get_text(separator="\n")
if lyrics:
return lyrics
return 'Got a song response, but didn\'t find any lyrics.'
def get_lyrics(song_title, artist_name):
""" Return the lyrics for a given song and artist. """
search_url = base_url + "/search"
data = {'q': song_title + " " + artist_name}
token = get_token()
if not token:
return None, None, None
response = requests.get(search_url, params=data, headers={'Authorization': f'Bearer {token}'})
json = response.json()
# The first hit is the best?
if len(json["response"]["hits"]) > 0:
song_info = json["response"]["hits"][0]
song_api_path = song_info["result"]["api_path"]
artist = song_info["result"]["primary_artist"]["name"]
title = song_info["result"]["title"]
return artist, title, _lyrics_from_song_api_path(song_api_path).strip()
return None, None, None | random_line_split |
|
__init__.py | # #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
# | # ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: [email protected]
#
#
# #END_LICENSE#############################################################
# Note that the use of "from x import *" is safe here. Modules include
# the __all__ variable.
from warnings import warn
try:
import numpy
except ImportError, e:
numpy = None
#warn("Clustering module could not be loaded. Is numpy installed?")
#warn(e)
from ncbi_taxonomy import *
from coretype.tree import *
from coretype.seqgroup import *
from phylo.phylotree import *
from evol.evoltree import *
from webplugin.webapp import *
from phyloxml import Phyloxml, PhyloxmlTree
from nexml import Nexml, NexmlTree
from evol import EvolTree
from coretype.arraytable import *
from clustering.clustertree import *
try:
from phylomedb.phylomeDB3 import *
except ImportError as e:
pass
#warn("MySQLdb module could not be loaded")
#warn(e)
try:
from treeview.svg_colors import *
from treeview.main import *
from treeview.faces import *
from treeview import faces
from treeview import layouts
except ImportError as e:
#print e
pass
#warn("Treeview module could not be loaded")
#warn(e)
try:
from version import __version__, __installid__
except ImportError:
__version__ = 'dev'
__installid__ = None | # You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
# | random_line_split |
publish.py | import gevent
from cloudly.pubsub import RedisWebSocket
from cloudly.tweets import Tweets, StreamManager, keep
from cloudly import logger
from webapp import config
log = logger.init(__name__)
pubsub = RedisWebSocket(config.pubsub_channel)
pubsub.spawn()
running = False
def | (tweets):
pubsub.publish(keep(['coordinates'], tweets), "tweets")
return len(tweets)
def run():
log.info("Starting Twitter stream manager.")
streamer = StreamManager('locate', processor, is_queuing=False)
tweets = Tweets()
streamer.run(tweets.with_coordinates(), stop)
log.info("Twitter stream manager has stopped.")
def start():
global running
if not running:
running = True
gevent.spawn(run)
def subscribe(websocket):
log.info("Subscribed a new websocket client.")
pubsub.register(websocket)
def stop():
global running
if len(pubsub.websockets) == 0:
log.info("Stopping Twitter stream manager.")
running = False
return True
return False
| processor | identifier_name |
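# Wiring sketch (hypothetical gevent-aware web layer; `client_ws` stands in
# for a connected websocket object):
#   from webapp import publish
#   publish.start()               # spawn the Twitter stream greenlet once
#   publish.subscribe(client_ws)  # fan incoming tweets out to this client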
publish.py | import gevent
from cloudly.pubsub import RedisWebSocket
from cloudly.tweets import Tweets, StreamManager, keep
from cloudly import logger
from webapp import config
log = logger.init(__name__)
pubsub = RedisWebSocket(config.pubsub_channel)
pubsub.spawn()
running = False
def processor(tweets):
pubsub.publish(keep(['coordinates'], tweets), "tweets")
return len(tweets)
def run():
log.info("Starting Twitter stream manager.")
streamer = StreamManager('locate', processor, is_queuing=False)
tweets = Tweets()
streamer.run(tweets.with_coordinates(), stop)
log.info("Twitter stream manager has stopped.")
def start():
global running
if not running:
running = True
gevent.spawn(run)
def subscribe(websocket):
log.info("Subscribed a new websocket client.")
pubsub.register(websocket)
def stop():
global running
if len(pubsub.websockets) == 0:
|
return False
| log.info("Stopping Twitter stream manager.")
running = False
return True | conditional_block |
publish.py | import gevent
from cloudly.pubsub import RedisWebSocket
from cloudly.tweets import Tweets, StreamManager, keep
from cloudly import logger
from webapp import config
log = logger.init(__name__)
pubsub = RedisWebSocket(config.pubsub_channel)
pubsub.spawn()
running = False
def processor(tweets):
pubsub.publish(keep(['coordinates'], tweets), "tweets")
return len(tweets)
def run():
log.info("Starting Twitter stream manager.")
streamer = StreamManager('locate', processor, is_queuing=False)
tweets = Tweets()
streamer.run(tweets.with_coordinates(), stop)
log.info("Twitter stream manager has stopped.")
def start():
|
def subscribe(websocket):
log.info("Subscribed a new websocket client.")
pubsub.register(websocket)
def stop():
global running
if len(pubsub.websockets) == 0:
log.info("Stopping Twitter stream manager.")
running = False
return True
return False
| global running
if not running:
running = True
gevent.spawn(run) | identifier_body |
publish.py | import gevent
from cloudly.pubsub import RedisWebSocket
from cloudly.tweets import Tweets, StreamManager, keep
from cloudly import logger
from webapp import config
log = logger.init(__name__)
pubsub = RedisWebSocket(config.pubsub_channel)
pubsub.spawn()
running = False
def processor(tweets):
pubsub.publish(keep(['coordinates'], tweets), "tweets")
return len(tweets) | tweets = Tweets()
streamer.run(tweets.with_coordinates(), stop)
log.info("Twitter stream manager has stopped.")
def start():
global running
if not running:
running = True
gevent.spawn(run)
def subscribe(websocket):
log.info("Subscribed a new websocket client.")
pubsub.register(websocket)
def stop():
global running
if len(pubsub.websockets) == 0:
log.info("Stopping Twitter stream manager.")
running = False
return True
return False |
def run():
log.info("Starting Twitter stream manager.")
streamer = StreamManager('locate', processor, is_queuing=False) | random_line_split |
Map.py | #!/usr/bin/env python
TestFileName = "data/TestMap.png"
import wx
from wx.lib.floatcanvas import NavCanvas, FloatCanvas
#import sys
#sys.path.append("..")
#from floatcanvas import NavCanvas, FloatCanvas
class DrawFrame(wx.Frame):
"""
A frame used for the FloatCanvas Demo
"""
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
self.CreateStatusBar()
# Add the Canvas
NC = NavCanvas.NavCanvas(self,-1,
size = (500,500),
ProjectionFun = None,
Debug = 0,
BackgroundColor = "White",
)
self.Canvas = NC.Canvas
self.LoadMap(TestFileName)
self.Canvas.Bind(FloatCanvas.EVT_MOTION, self.OnMove )
self.Show()
self.Canvas.ZoomToBB()
def LoadMap(self, filename):
Image = wx.Image(filename)
self.Canvas.AddScaledBitmap(Image, (0,0), Height = Image.GetSize()[1], Position = "tl")
self.Canvas.AddPoint((0,0), Diameter=3)
self.Canvas.AddText("(0,0)", (0,0), Position="cl")
p = (Image.GetSize()[0],-Image.GetSize()[1])
self.Canvas.AddPoint(p, Diameter=3)
self.Canvas.AddText("(%i,%i)"%p, p, Position="cl")
self.Canvas.MinScale = 0.15
self.Canvas.MaxScale = 1.0
def Binding(self, event):
print("Writing a png file:")
self.Canvas.SaveAsImage("junk.png")
print("Writing a jpeg file:")
self.Canvas.SaveAsImage("junk.jpg",wx.BITMAP_TYPE_JPEG)
def OnMove(self, event):
"""
Updates the status bar with the world coordinates of the mouse pointer.
| F = DrawFrame(None, title="FloatCanvas Demo App", size=(700,700) )
app.MainLoop() | """
self.SetStatusText("%.2f, %.2f"%tuple(event.Coords))
app = wx.App(False) | random_line_split |
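# Note (assumption about the library): FloatCanvas world coordinates are
# y-up, which is why LoadMap above places the bitmap's far corner at
# (width, -height) when anchored with Position="tl".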
Map.py | #!/usr/bin/env python
TestFileName = "data/TestMap.png"
import wx
from wx.lib.floatcanvas import NavCanvas, FloatCanvas
#import sys
#sys.path.append("..")
#from floatcanvas import NavCanvas, FloatCanvas
class DrawFrame(wx.Frame):
"""
A frame used for the FloatCanvas Demo
"""
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
self.CreateStatusBar()
# Add the Canvas
NC = NavCanvas.NavCanvas(self,-1,
size = (500,500),
ProjectionFun = None,
Debug = 0,
BackgroundColor = "White",
)
self.Canvas = NC.Canvas
self.LoadMap(TestFileName)
self.Canvas.Bind(FloatCanvas.EVT_MOTION, self.OnMove )
self.Show()
self.Canvas.ZoomToBB()
def LoadMap(self, filename):
Image = wx.Image(filename)
self.Canvas.AddScaledBitmap(Image, (0,0), Height = Image.GetSize()[1], Position = "tl")
self.Canvas.AddPoint((0,0), Diameter=3)
self.Canvas.AddText("(0,0)", (0,0), Position="cl")
p = (Image.GetSize()[0],-Image.GetSize()[1])
self.Canvas.AddPoint(p, Diameter=3)
self.Canvas.AddText("(%i,%i)"%p, p, Position="cl")
self.Canvas.MinScale = 0.15
self.Canvas.MaxScale = 1.0
def Binding(self, event):
|
def OnMove(self, event):
"""
Updates the status bar with the world coordinates of the mouse pointer.
"""
self.SetStatusText("%.2f, %.2f"%tuple(event.Coords))
app = wx.App(False)
F = DrawFrame(None, title="FloatCanvas Demo App", size=(700,700) )
app.MainLoop()
| print("Writing a png file:")
self.Canvas.SaveAsImage("junk.png")
print("Writing a jpeg file:")
self.Canvas.SaveAsImage("junk.jpg",wx.BITMAP_TYPE_JPEG) | identifier_body |
Map.py | #!/usr/bin/env python
TestFileName = "data/TestMap.png"
import wx
from wx.lib.floatcanvas import NavCanvas, FloatCanvas
#import sys
#sys.path.append("..")
#from floatcanvas import NavCanvas, FloatCanvas
class | (wx.Frame):
"""
A frame used for the FloatCanvas Demo
"""
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
self.CreateStatusBar()
# Add the Canvas
NC = NavCanvas.NavCanvas(self,-1,
size = (500,500),
ProjectionFun = None,
Debug = 0,
BackgroundColor = "White",
)
self.Canvas = NC.Canvas
self.LoadMap(TestFileName)
self.Canvas.Bind(FloatCanvas.EVT_MOTION, self.OnMove )
self.Show()
self.Canvas.ZoomToBB()
def LoadMap(self, filename):
Image = wx.Image(filename)
self.Canvas.AddScaledBitmap(Image, (0,0), Height = Image.GetSize()[1], Position = "tl")
self.Canvas.AddPoint((0,0), Diameter=3)
self.Canvas.AddText("(0,0)", (0,0), Position="cl")
p = (Image.GetSize()[0],-Image.GetSize()[1])
self.Canvas.AddPoint(p, Diameter=3)
self.Canvas.AddText("(%i,%i)"%p, p, Position="cl")
self.Canvas.MinScale = 0.15
self.Canvas.MaxScale = 1.0
def Binding(self, event):
print("Writing a png file:")
self.Canvas.SaveAsImage("junk.png")
print("Writing a jpeg file:")
self.Canvas.SaveAsImage("junk.jpg",wx.BITMAP_TYPE_JPEG)
def OnMove(self, event):
"""
Updates the status bar with the world coordinates of the mouse pointer.
"""
self.SetStatusText("%.2f, %.2f"%tuple(event.Coords))
app = wx.App(False)
F = DrawFrame(None, title="FloatCanvas Demo App", size=(700,700) )
app.MainLoop()
| DrawFrame | identifier_name |
IssueCreate.py | #!/usr/bin/python3
#!python3
#encoding:utf-8
import sys
import os.path
import subprocess
import configparser
import argparse
import web.service.github.api.v3.AuthenticationsCreator
import web.service.github.api.v3.Client
import database.src.Database
import cui.uploader.Main
import web.log.Log
import database.src.contributions.Main
import setting.Setting
class Main:
def __init__(self):
pass
def Run(self):
self.__def_args()
self.__setting = setting.Setting.Setting(os.path.abspath(os.path.dirname(__file__)))
# Workaround so that os.path.basename() does not return an empty string
# https://docs.python.jp/3/library/os.path.html#os.path.basename
# if self.__args.path_dir_pj.endswith('/'): self.__args.path_dir_pj = self.__args.path_dir_pj[:-1]
if None is self.__args.username: self.__args.username = self.__setting.GithubUsername
self.__db = database.src.Database.Database(os.path.abspath(os.path.dirname(__file__)))
self.__db.Initialize()
if None is self.__db.Accounts['Accounts'].find_one(Username=self.__args.username):
web.log.Log.Log().Logger.warni | _args.username)
self.__ssh_configures = self.__db.Accounts['SshConfigures'].find_one(AccountId=self.__account['Id'])
self.__repo_name = os.path.basename(self.__args.path_dir_pj)
self.__repos = self.__db.Repositories[self.__args.username]['Repositories'].find_one(Name=self.__repo_name)
if None is self.__repos:
web.log.Log.Log().Logger.warning('The specified repository does not exist in the DB: {0}/{1}'.format(self.__args.username, self.__repo_name))
return
# self.__log()
issue = self.__create_issue()
print('Issue number:', issue['number'])
print(issue)
def __def_args(self):
parser = argparse.ArgumentParser(
description='GitHub Issue Creator.',
)
parser.add_argument('path_dir_pj')
parser.add_argument('-u', '--username')
# parser.add_argument('-r', '--reponame', required=True)
parser.add_argument('-i', '--issues', required=True, action='append')
parser.add_argument('-l', '--labels', action='append')
parser.add_argument('-c', '--is-close', action='store_false') # is_close
self.__args = parser.parse_args()
def __log(self):
web.log.Log.Log().Logger.info('Username: {0}'.format(self.__account['Username']))
web.log.Log.Log().Logger.info('Email: {0}'.format(self.__account['MailAddress']))
web.log.Log.Log().Logger.info('SSH HOST: {0}'.format(self.__ssh_configures['HostName']))
# web.log.Log.Log().Logger.info('Repository name: {0}'.format(self.__repos['Name']))
# web.log.Log.Log().Logger.info('Description: {0}'.format(self.__repos['Description']))
# web.log.Log.Log().Logger.info('URL: {0}'.format(self.__repos['Homepage']))
web.log.Log.Log().Logger.info('Repository name: {0}'.format(self.__repo_name))
web.log.Log.Log().Logger.info('Description: {0}'.format(self.__args.description))
web.log.Log.Log().Logger.info('URL: {0}'.format(self.__args.homepage))
def __create_issue(self):
auth_creator = web.service.github.api.v3.AuthenticationsCreator.AuthenticationsCreator(self.__db, self.__args.username)
authentications = auth_creator.Create()
client = web.service.github.api.v3.Client.Client(self.__db, authentications, self.__args)
title = self.__args.issues[0]
body = None
# Line 1 is the title, line 2 is blank, lines 3 and onward are the body.
if 1 < len(self.__args.issues): body = '\n'.join(self.__args.issues[1:])
return client.Issues.create(title, body=body)
# main = cui.uploader.Main.Main(self.__db, client, args)
# main.Run()
# creator = cui.uploader.command.repository.Creator.Creator(self.__db, client, self.__args)
# creator.Create()
if __name__ == '__main__':
main = Main()
main.Run()
| ng('The specified user {0} does not exist in the DB. Register it with UserRegister.py.'.format(self.__args.username))
return
self.__account = self.__db.Accounts['Accounts'].find_one(Username=self._ | conditional_block |
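# Example invocation (hypothetical paths and names, sketch only; issues[0]
# becomes the title and issues[1:] the body, per __create_issue above):
#   python3 IssueCreate.py ~/repos/my-project -u octocat \
#       -i "Bug: crash on start" -i "" -i "Steps to reproduce ..."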
IssueCreate.py | #!/usr/bin/python3
#!python3
#encoding:utf-8
import sys
import os.path
import subprocess
import configparser
import argparse
import web.service.github.api.v3.AuthenticationsCreator
import web.service.github.api.v3.Client
import database.src.Database
import cui.uploader.Main
import web.log.Log
import database.src.contributions.Main
import setting.Setting
class Main:
def __init__(self):
pass
def Run(self):
self.__def_args()
self.__setting = setting.Setting.Setting(os.path.abspath(os.path.dirname(__file__)))
# Workaround so that os.path.basename() does not return an empty string
# https://docs.python.jp/3/library/os.path.html#os.path.basename
# if self.__args.path_dir_pj.endswith('/'): self.__args.path_dir_pj = self.__args.path_dir_pj[:-1]
if None is self.__args.username: self.__args.username = self.__setting.GithubUsername
self.__db = database.src.Database.Database(os.path.abspath(os.path.dirname(__file__)))
self.__db.Initialize()
if None is self.__db.Accounts['Accounts'].find_one(Username=self.__args.username):
web.log.Log.Log().Logger.warning('The specified user {0} does not exist in the DB. Register it with UserRegister.py.'.format(self.__args.username))
return
self.__account = self.__db.Accounts['Accounts'].find_one(Username=self.__args.username)
self.__ssh_configures = self.__db.Accounts['SshConfigures'].find_one(AccountId=self.__account['Id'])
self.__repo_name = os.path.basename(self.__args.path_dir_pj)
self.__repos = self.__db.Repositories[self.__args.username]['Repositories'].find_one(Name=self.__repo_name)
if None is self.__repos:
web.log.Log.Log().Logger.warning('The specified repository does not exist in the DB: {0}/{1}'.format(self.__args.username, self.__repo_name))
return
# self.__log()
issue = self.__create_issue()
print('Issue number:', issue['number'])
print(issue)
def __def_args(self):
parser = argparse.ArgumentParser(
description='GitHub Issue Creator.',
)
parser.add_argument('path_dir_pj')
parser.add_argument('-u', '--username')
# parser.add_argument('-r', '--reponame', required=True)
parser.add_argument('-i', '--issues', required=True, action='append')
parser.add_argument('-l', '--labels', action='append')
parser.add_argument('-c', '--is-close', action='store_false') # is_close
self.__args = parser.parse_args()
def __log(self):
web.log.Log.Log().Logger.info('Username: {0}'.format(self.__account['Username']))
web.log.Log.Log().Logger.info('Email: {0}'.format(self.__account['MailAddress']))
web.log.Log.Log().Logger.info('SSH HOST: {0}'.format(self.__ssh_configures['HostName']))
# web.log.Log.Log().Logger.info('Repository name: {0}'.format(self.__repos['Name']))
# web.log.Log.Log().Logger.info('Description: {0}'.format(self.__repos['Description']))
# web.log.Log.Log().Logger.info('URL: {0}'.format(self.__repos['Homepage']))
web.log.Log.Log().Logger.info('Repository name: {0}'.format(self.__repo_name))
web.log.Log.Log().Logger.info('Description: {0}'.format(self.__args.description))
web.log.Log.Log().Logger.info('URL: {0}'.format(self.__args.homepage))
def __create_issue(self):
auth_creator = web.service.github.api.v3.AuthenticationsCreator.AuthenticationsCreator(self.__db, self.__args.username)
authen | th_creator.Create()
client = web.service.github.api.v3.Client.Client(self.__db, authentications, self.__args)
title = self.__args.issues[0]
body = None
# Line 1 is the title, line 2 is blank, lines 3 and onward are the body.
if 1 < len(self.__args.issues): body = '\n'.join(self.__args.issues[1:])
return client.Issues.create(title, body=body)
# main = cui.uploader.Main.Main(self.__db, client, args)
# main.Run()
# creator = cui.uploader.command.repository.Creator.Creator(self.__db, client, self.__args)
# creator.Create()
if __name__ == '__main__':
main = Main()
main.Run()
| tications = au | identifier_name |
IssueCreate.py | #!/usr/bin/python3
#!python3
#encoding:utf-8
import sys
import os.path
import subprocess
import configparser
import argparse
import web.service.github.api.v3.AuthenticationsCreator
import web.service.github.api.v3.Client
import database.src.Database
import cui.uploader.Main
import web.log.Log
import database.src.contributions.Main
import setting.Setting
class Main:
def __init__(self):
pass
def Run(self):
self.__def_args()
self.__setting = setting.Setting.Setting(os.path.abspath(os.path.dirname(__file__)))
# Workaround so that os.path.basename() does not return an empty string
# https://docs.python.jp/3/library/os.path.html#os.path.basename
# if self.__args.path_dir_pj.endswith('/'): self.__args.path_dir_pj = self.__args.path_dir_pj[:-1]
if None is self.__args.username: self.__args.username = self.__setting.GithubUsername
self.__db = database.src.Database.Database(os.path.abspath(os.path.dirname(__file__)))
self.__db.Initialize()
if None is self.__db.Accounts['Accounts'].find_one(Username=self.__args.username):
web.log.Log.Log().Logger.warning('The specified user {0} does not exist in the DB. Register it with UserRegister.py.'.format(self.__args.username))
return
self.__account = self.__db.Accounts['Accounts'].find_one(Username=self.__args.username)
self.__ssh_configures = self.__db.Accounts['SshConfigures'].find_one(AccountId=self.__account['Id'])
self.__repo_name = os.path.basename(self.__args.path_dir_pj)
self.__repos = self.__db.Repositories[self.__args.username]['Repositories'].find_one(Name=self.__repo_name)
if None is self.__repos:
web.log.Log.Log().Logger.warning('The specified repository does not exist in the DB: {0}/{1}'.format(self.__args.username, self.__repo_name))
return
# self.__log()
issue = self.__create_issue()
print('Issue number:', issue['number'])
print(issue)
def __def_args(self):
parser = argparse.ArgumentParser(
description='GitHub Issue Creator.',
)
parser.add_argument('path_dir_pj')
parser.add_argument('-u', '--username')
# parser.add_argument('-r', '--reponame', required=True)
parser.add_argument('-i', '--issues', required=True, action='append')
parser.add_argument('-l', '--labels', action='append')
parser.add_argument('-c', '--is-close', action='store_false') # is_close
self.__args = parser.parse_args()
def __log(self):
web.log.Log.Log().Logger.info('Username: {0}'.format(self.__account['Username']))
web.log.Log.Log().Logger.info('Em | authentications = auth_creator.Create()
client = web.service.github.api.v3.Client.Client(self.__db, authentications, self.__args)
title = self.__args.issues[0]
body = None
# Line 1 is the title, line 2 is blank, lines 3 and onward are the body.
if 1 < len(self.__args.issues): body = '\n'.join(self.__args.issues[1:])
return client.Issues.create(title, body=body)
# main = cui.uploader.Main.Main(self.__db, client, args)
# main.Run()
# creator = cui.uploader.command.repository.Creator.Creator(self.__db, client, self.__args)
# creator.Create()
if __name__ == '__main__':
main = Main()
main.Run()
| ail: {0}'.format(self.__account['MailAddress']))
web.log.Log.Log().Logger.info('SSH HOST: {0}'.format(self.__ssh_configures['HostName']))
# web.log.Log.Log().Logger.info('Repository name: {0}'.format(self.__repos['Name']))
# web.log.Log.Log().Logger.info('Description: {0}'.format(self.__repos['Description']))
# web.log.Log.Log().Logger.info('URL: {0}'.format(self.__repos['Homepage']))
web.log.Log.Log().Logger.info('Repository name: {0}'.format(self.__repo_name))
web.log.Log.Log().Logger.info('Description: {0}'.format(self.__args.description))
web.log.Log.Log().Logger.info('URL: {0}'.format(self.__args.homepage))
def __create_issue(self):
auth_creator = web.service.github.api.v3.AuthenticationsCreator.AuthenticationsCreator(self.__db, self.__args.username)
| identifier_body |
IssueCreate.py | #!/usr/bin/python3
#!python3
#encoding:utf-8
import sys
import os.path
import subprocess
import configparser
import argparse
import web.service.github.api.v3.AuthenticationsCreator
import web.service.github.api.v3.Client
import database.src.Database
import cui.uploader.Main
import web.log.Log
import database.src.contributions.Main
import setting.Setting
class Main:
def __init__(self):
pass
def Run(self):
self.__def_args()
self.__setting = setting.Setting.Setting(os.path.abspath(os.path.dirname(__file__)))
# Workaround so that os.path.basename() does not return an empty string
# https://docs.python.jp/3/library/os.path.html#os.path.basename
# if self.__args.path_dir_pj.endswith('/'): self.__args.path_dir_pj = self.__args.path_dir_pj[:-1]
if None is self.__args.username: self.__args.username = self.__setting.GithubUsername
self.__db = database.src.Database.Database(os.path.abspath(os.path.dirname(__file__)))
self.__db.Initialize()
if None is self.__db.Accounts['Accounts'].find_one(Username=self.__args.username):
web.log.Log.Log().Logger.warning('The specified user {0} does not exist in the DB. Register it with UserRegister.py.'.format(self.__args.username))
return
self.__account = self.__db.Accounts['Accounts'].find_one(Username=self.__args.username)
self.__ssh_configures = self.__db.Accounts['SshConfigures'].find_one(AccountId=self.__account['Id'])
self.__repo_name = os.path.basename(self.__args.path_dir_pj)
self.__repos = self.__db.Repositories[self.__args.username]['Repositories'].find_one(Name=self.__repo_name)
if None is self.__repos:
web.log.Log.Log().Logger.warning('The specified repository does not exist in the DB: {0}/{1}'.format(self.__args.username, self.__repo_name))
return
# self.__log()
issue = self.__create_issue()
print('Issue number:', issue['number'])
print(issue)
def __def_args(self):
parser = argparse.ArgumentParser(
description='GitHub Issue Creator.',
)
parser.add_argument('path_dir_pj')
parser.add_argument('-u', '--username')
# parser.add_argument('-r', '--reponame', required=True)
parser.add_argument('-i', '--issues', required=True, action='append')
parser.add_argument('-l', '--labels', action='append')
parser.add_argument('-c', '--is-close', action='store_false') # is_close
self.__args = parser.parse_args()
def __log(self):
web.log.Log.Log().Logger.info('Username: {0}'.format(self.__account['Username']))
web.log.Log.Log().Logger.info('Email: {0}'.format(self.__account['MailAddress']))
web.log.Log.Log().Logger.info('SSH HOST: {0}'.format(self.__ssh_configures['HostName']))
# web.log.Log.Log().Logger.info('Repository name: {0}'.format(self.__repos['Name']))
# web.log.Log.Log().Logger.info('Description: {0}'.format(self.__repos['Description']))
# web.log.Log.Log().Logger.info('URL: {0}'.format(self.__repos['Homepage']))
web.log.Log.Log().Logger.info('Repository name: {0}'.format(self.__repo_name))
web.log.Log.Log().Logger.info('Description: {0}'.format(self.__args.description))
web.log.Log.Log().Logger.info('URL: {0}'.format(self.__args.homepage))
def __create_issue(self):
auth_creator = web.service.github.api.v3.AuthenticationsCreator.AuthenticationsCreator(self.__db, self.__args.username)
authentications = auth_creator.Create()
client = web.service.github.api.v3.Client.Client(self.__db, authentications, self.__args)
title = self.__args.issues[0]
body = None
# Line 1 is the title, line 2 is blank, lines 3 and onward are the body.
if 1 < len(self.__args.issues): body = '\n'.join(self.__args.issues[1:])
return client.Issues.create(title, body=body)
# main = cui.uploader.Main.Main(self.__db, client, args)
# main.Run()
# creator = cui.uploader.command.repository.Creator.Creator(self.__db, client, self.__args)
# creator.Create()
if __name__ == '__main__':
main = Main() | main.Run() | random_line_split |
|
github.js | var async = require('async'),
fs = require('graceful-fs'),
path = require('path'),
colors = require('colors'),
swig = require('swig'),
spawn = require('child_process').spawn, | commitMessage = require('./util').commitMessage;
// http://git-scm.com/docs/git-clone
var rRepo = /(:|\/)([^\/]+)\/([^\/]+)\.git\/?$/;
module.exports = function(args, callback){
var baseDir = hexo.base_dir,
deployDir = path.join(baseDir, '.deploy'),
publicDir = hexo.public_dir;
if (!args.repo && !args.repository){
var help = '';
help += 'You should configure deployment settings in _config.yml first!\n\n';
help += 'Example:\n';
help += ' deploy:\n';
help += ' type: github\n';
help += ' repo: <repository url>\n';
help += ' branch: [branch]\n';
help += ' message: [message]\n\n';
help += 'For more help, you can check the docs: ' + 'http://hexo.io/docs/deployment.html'.underline;
console.log(help);
return callback();
}
var url = args.repo || args.repository;
if (!rRepo.test(url)){
hexo.log.e(url + ' is not a valid repository URL!');
return callback();
}
var branch = args.branch;
if (!branch){
var match = url.match(rRepo),
username = match[2],
repo = match[3],
rGh = new RegExp('^' + username + '\\.github\\.(?:io|com)$', 'i');
// https://help.github.com/articles/user-organization-and-project-pages
if (repo.match(rGh)){
branch = 'master';
} else {
branch = 'gh-pages';
}
}
var run = function(command, args, callback){
var cp = spawn(command, args, {cwd: deployDir});
cp.stdout.on('data', function(data){
process.stdout.write(data);
});
cp.stderr.on('data', function(data){
process.stderr.write(data);
});
cp.on('close', callback);
};
async.series([
// Set up
function(next){
fs.exists(deployDir, function(exist){
if (exist && !args.setup) return next();
hexo.log.i('Setting up GitHub deployment...');
var commands = [
['init'],
['add', '-A', '.'],
['commit', '-m', 'First commit']
];
if (branch !== 'master') commands.push(['branch', '-M', branch]);
commands.push(['remote', 'add', 'github', url]);
file.writeFile(path.join(deployDir, 'placeholder'), '', function(err){
if (err) return callback(err);
async.eachSeries(commands, function(item, next){
run('git', item, function(code){
if (code === 0) next();
});
}, function(){
if (!args.setup) next();
});
});
});
},
function(next){
hexo.log.i('Clearing .deploy folder...');
file.emptyDir(deployDir, next);
},
function(next){
hexo.log.i('Copying files from public folder...');
file.copyDir(publicDir, deployDir, next);
},
function(next){
var commands = [
['add', '-A'],
['commit', '-m', commitMessage(args)],
['push', '-u', 'github', branch, '--force']
];
async.eachSeries(commands, function(item, next){
run('git', item, function(){
next();
});
}, next);
}
], callback);
}; | util = require('../../util'),
file = util.file2, | random_line_split |
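// Example _config.yml deployment block this deployer reads (sketch; the
// message template shown is an assumption, not taken from this file):
//   deploy:
//     type: github
//     repo: [email protected]:user/user.github.io.git
//     branch: master
//     message: "Site updated: {{ now('YYYY-MM-DD HH:mm:ss') }}"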
ConditionObserver.ts | import * as $ from "jquery";
import ConditionType from "@enhavo/form/Type/ConditionType";
import ConditionObserverConfig from "@enhavo/form/Type/ConditionObserverConfig";
export default class ConditionObserver
{
private $element: JQuery;
private configs: ConditionObserverConfig[];
private $row: JQuery;
private subjects: ConditionType[] = [];
constructor(element: HTMLElement)
{
this.$element = $(element);
this.configs = this.$element.data('condition-type-observer');
let parent = this.$element.parents('[data-form-row]').get(0);
this.$row = $(parent);
for (let subject of ConditionType.subjects) {
let config = this.getConfig(subject);
if(config !== null) {
this.subjects.push(subject);
subject.register(this);
}
}
}
private getConfig(subject: ConditionType) : ConditionObserverConfig|null
{
for (let config of this.configs) {
if (subject.getId() == config.id) {
return config;
}
}
return null;
}
public wakeUp(subject: ConditionType)
{
let condition = null;
for (let subject of this.subjects) {
let config = this.getConfig(subject);
let subjectCondition = config.values.indexOf(subject.getValue()) >= 0;
if(condition === null) {
condition = subjectCondition;
} else {
if(config.operator == 'and') {
condition = condition && subjectCondition;
} else if(config.operator == 'or') {
condition = condition || subjectCondition;
}
}
}
if(condition) {
this.show();
} else {
this.hide();
}
}
private hide()
|
private show()
{
this.$row.show();
}
} | {
this.$row.hide();
} | identifier_body |
ConditionObserver.ts | import * as $ from "jquery";
import ConditionType from "@enhavo/form/Type/ConditionType";
import ConditionObserverConfig from "@enhavo/form/Type/ConditionObserverConfig";
export default class ConditionObserver
{
private $element: JQuery;
private configs: ConditionObserverConfig[];
private $row: JQuery;
private subjects: ConditionType[] = [];
constructor(element: HTMLElement)
{
this.$element = $(element);
this.configs = this.$element.data('condition-type-observer');
let parent = this.$element.parents('[data-form-row]').get(0);
this.$row = $(parent);
for (let subject of ConditionType.subjects) {
let config = this.getConfig(subject);
if(config !== null) {
this.subjects.push(subject);
subject.register(this);
}
}
}
private getConfig(subject: ConditionType) : ConditionObserverConfig|null
{
for (let config of this.configs) {
if (subject.getId() == config.id) |
}
return null;
}
public wakeUp(subject: ConditionType)
{
let condition = null;
for (let subject of this.subjects) {
let config = this.getConfig(subject);
let subjectCondition = config.values.indexOf(subject.getValue()) >= 0;
if(condition === null) {
condition = subjectCondition;
} else {
if(config.operator == 'and') {
condition = condition && subjectCondition;
} else if(config.operator == 'or') {
condition = condition || subjectCondition;
}
}
}
if(condition) {
this.show();
} else {
this.hide();
}
}
private hide()
{
this.$row.hide();
}
private show()
{
this.$row.show();
}
} | {
return config;
} | conditional_block |
ConditionObserver.ts | import * as $ from "jquery";
import ConditionType from "@enhavo/form/Type/ConditionType";
import ConditionObserverConfig from "@enhavo/form/Type/ConditionObserverConfig";
export default class ConditionObserver
{
private $element: JQuery;
private configs: ConditionObserverConfig[];
private $row: JQuery;
private subjects: ConditionType[] = [];
constructor(element: HTMLElement)
{
this.$element = $(element);
this.configs = this.$element.data('condition-type-observer');
let parent = this.$element.parents('[data-form-row]').get(0);
this.$row = $(parent);
for (let subject of ConditionType.subjects) {
let config = this.getConfig(subject);
if(config !== null) {
this.subjects.push(subject);
subject.register(this);
}
}
}
private getConfig(subject: ConditionType) : ConditionObserverConfig|null
{
for (let config of this.configs) {
if (subject.getId() == config.id) {
return config;
}
}
return null;
}
public wakeUp(subject: ConditionType)
{
let condition = null;
for (let subject of this.subjects) {
let config = this.getConfig(subject);
let subjectCondition = config.values.indexOf(subject.getValue()) >= 0;
if(condition === null) {
condition = subjectCondition;
} else {
if(config.operator == 'and') {
condition = condition && subjectCondition;
} else if(config.operator == 'or') {
condition = condition || subjectCondition;
}
}
}
if(condition) {
this.show();
} else { | {
this.$row.hide();
}
private show()
{
this.$row.show();
}
} | this.hide();
}
}
private hide() | random_line_split |
ConditionObserver.ts | import * as $ from "jquery";
import ConditionType from "@enhavo/form/Type/ConditionType";
import ConditionObserverConfig from "@enhavo/form/Type/ConditionObserverConfig";
export default class |
{
private $element: JQuery;
private configs: ConditionObserverConfig[];
private $row: JQuery;
private subjects: ConditionType[] = [];
constructor(element: HTMLElement)
{
this.$element = $(element);
this.configs = this.$element.data('condition-type-observer');
let parent = this.$element.parents('[data-form-row]').get(0);
this.$row = $(parent);
for (let subject of ConditionType.subjects) {
let config = this.getConfig(subject);
if(config !== null) {
this.subjects.push(subject);
subject.register(this);
}
}
}
private getConfig(subject: ConditionType) : ConditionObserverConfig|null
{
for (let config of this.configs) {
if (subject.getId() == config.id) {
return config;
}
}
return null;
}
public wakeUp(subject: ConditionType)
{
let condition = null;
for (let subject of this.subjects) {
let config = this.getConfig(subject);
let subjectCondition = config.values.indexOf(subject.getValue()) >= 0;
if(condition === null) {
condition = subjectCondition;
} else {
if(config.operator == 'and') {
condition = condition && subjectCondition;
} else if(config.operator == 'or') {
condition = condition || subjectCondition;
}
}
}
if(condition) {
this.show();
} else {
this.hide();
}
}
private hide()
{
this.$row.hide();
}
private show()
{
this.$row.show();
}
} | ConditionObserver | identifier_name |
srgb.rs | // Copyright 2013 The color-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. | //
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[deriving(Clone, PartialEq, Eq, Show)]
pub struct Srgb<T> { pub r: T, pub g: T, pub b: T }
impl<T> Srgb<T> {
#[inline]
pub fn new(r: T, g: T, b: T) -> Srgb<T> {
Srgb { r: r, g: g, b: b }
}
} | // You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0 | random_line_split |
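// Usage sketch: construct an 8-bit sRGB triple with the constructor above.
//   let orange = Srgb::new(255u8, 165, 0);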
srgb.rs | // Copyright 2013 The color-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[deriving(Clone, PartialEq, Eq, Show)]
pub struct Srgb<T> { pub r: T, pub g: T, pub b: T }
impl<T> Srgb<T> {
#[inline]
pub fn new(r: T, g: T, b: T) -> Srgb<T> |
}
| {
Srgb { r: r, g: g, b: b }
} | identifier_body |
srgb.rs | // Copyright 2013 The color-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[deriving(Clone, PartialEq, Eq, Show)]
pub struct | <T> { pub r: T, pub g: T, pub b: T }
impl<T> Srgb<T> {
#[inline]
pub fn new(r: T, g: T, b: T) -> Srgb<T> {
Srgb { r: r, g: g, b: b }
}
}
| Srgb | identifier_name |
scene.rs | // Robigo Luculenta -- Proof of concept spectral path tracer in Rust
// Copyright (C) 2014-2015 Ruud van Asseldonk
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use camera::Camera;
use intersection::Intersection;
use object::Object;
use ray::Ray;
/// A collection of objects.
pub struct Scene {
/// All the renderable objects in the scene.
pub objects: Vec<Object>,
/// A function that returns the camera through which the scene
/// will be seen. The function takes one parameter, the time (in
/// the range 0.0 - 1.0), which will be sampled randomly to create
/// effects like motion blur and zoom blur.
// TODO: apparently there is no such thing as an immutable closure
// any more, but I'd prefer to be able to use a pure function here,
// which might be a closure.
pub get_camera_at_time: fn (f32) -> Camera
}
impl Scene {
/// Intersects the specified ray with the scene.
pub fn | (&self, ray: &Ray) -> Option<(Intersection, &Object)> {
// Assume Nothing is found, and that Nothing is Very Far Away (tm).
let mut result = None;
let mut distance = 1.0e12f32;
// Then intersect all surfaces.
for obj in &self.objects {
match obj.surface.intersect(ray) {
None => { },
Some(isect) => {
// If there is an intersection, and if it is nearer than a
// previous one, use it.
if isect.distance < distance {
result = Some((isect, obj));
distance = isect.distance;
}
}
}
}
result
}
}
| intersect | identifier_name |
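// Note: intersect() above is a brute-force O(n) scan over every surface; a
// production tracer would typically use an acceleration structure such as a
// BVH instead.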
scene.rs | // Robigo Luculenta -- Proof of concept spectral path tracer in Rust
// Copyright (C) 2014-2015 Ruud van Asseldonk
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use camera::Camera;
use intersection::Intersection;
use object::Object;
use ray::Ray;
/// A collection of objects.
pub struct Scene {
/// All the renderable objects in the scene.
pub objects: Vec<Object>,
| // TODO: apparently there is no such thing as an immutable closure
// any more, but I'd prefer to be able to use a pure function here,
// which might be a closure.
pub get_camera_at_time: fn (f32) -> Camera
}
impl Scene {
/// Intersects the specified ray with the scene.
pub fn intersect(&self, ray: &Ray) -> Option<(Intersection, &Object)> {
// Assume Nothing is found, and that Nothing is Very Far Away (tm).
let mut result = None;
let mut distance = 1.0e12f32;
// Then intersect all surfaces.
for obj in &self.objects {
match obj.surface.intersect(ray) {
None => { },
Some(isect) => {
// If there is an intersection, and if it is nearer than a
// previous one, use it.
if isect.distance < distance {
result = Some((isect, obj));
distance = isect.distance;
}
}
}
}
result
}
} | /// A function that returns the camera through which the scene
/// will be seen. The function takes one parameter, the time (in
/// the range 0.0 - 1.0), which will be sampled randomly to create
/// effects like motion blur and zoom blur. | random_line_split |
scene.rs | // Robigo Luculenta -- Proof of concept spectral path tracer in Rust
// Copyright (C) 2014-2015 Ruud van Asseldonk
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use camera::Camera;
use intersection::Intersection;
use object::Object;
use ray::Ray;
/// A collection of objects.
pub struct Scene {
/// All the renderable objects in the scene.
pub objects: Vec<Object>,
/// A function that returns the camera through which the scene
/// will be seen. The function takes one parameter, the time (in
/// the range 0.0 - 1.0), which will be sampled randomly to create
/// effects like motion blur and zoom blur.
// TODO: apparently there is no such thing as an immutable closure
// any more, but I'd prefer to be able to use a pure function here,
// which might be a closure.
pub get_camera_at_time: fn (f32) -> Camera
}
impl Scene {
/// Intersects the specified ray with the scene.
pub fn intersect(&self, ray: &Ray) -> Option<(Intersection, &Object)> {
// Assume Nothing is found, and that Nothing is Very Far Away (tm).
let mut result = None;
let mut distance = 1.0e12f32;
// Then intersect all surfaces.
for obj in &self.objects {
match obj.surface.intersect(ray) {
None => { },
Some(isect) => {
// If there is an intersection, and if it is nearer than a
// previous one, use it.
if isect.distance < distance |
}
}
}
result
}
}
| {
result = Some((isect, obj));
distance = isect.distance;
} | conditional_block |
app.js | 'use strict';
angular.module('pizzaApp', [])
.factory('shopCartService', function(){
var factoryInstance = {};
var pizzaListCart = [];
factoryInstance.addPizzaToCart = function(pizza) {
pizzaListCart.push({name: pizza.name, price: pizza.price});
};
factoryInstance.getCart = function(){
return pizzaListCart;
};
return factoryInstance;
})
.controller('shopListController', function($scope, $http, shopCartService) {
$scope.buy = function ( pizza ) {
shopCartService.addPizzaToCart(pizza);
};
$http({method: 'GET', url: '/pizzas.json'}).
success(function(data) {
$scope.pizzaList = data;
} | .controller('shopCartController', function($scope, shopCartService){
$scope.shopCartList = shopCartService.getCart();
$scope.totalPrice = function () {
var total = 0;
angular.forEach($scope.shopCartList, function (pizza){
total += pizza.price;
});
return total;
};
$scope.removeFromCart = function (idx){
$scope.shopCartList.splice(idx,1);
};
}); | );
$scope.orderValue = 'name';
}) | random_line_split |
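// Example pizzas.json payload the GET in shopListController expects (sketch
// inferred from the fields used above):
//   [{"name": "Margherita", "price": 6.5},
//    {"name": "Diavola", "price": 8.0}]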
SoapEventDecoder.ts | /*
* Copyright (C) 2017 ZeXtras S.r.l.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2 of
* the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License.
* If not, see <http://www.gnu.org/licenses/>.
*/
import {LogEngine} from "../../../../../lib/log/LogEngine";
import {Logger} from "../../../../../lib/log/Logger";
import {IChatEvent} from "../../../../events/IChatEvent";
import {ISoapEventObject} from "../SoapEventParser";
export abstract class SoapEventDecoder<T extends IChatEvent> {
public Log: Logger;
private mEventCode: number;
| (eventCode: number) {
this.mEventCode = eventCode;
this.Log = LogEngine.getLogger(LogEngine.CHAT);
}
public getEventCode(): number {
return this.mEventCode;
}
public abstract decodeEvent(eventObj: ISoapEventObject, originEvent?: IChatEvent): T;
}
| constructor | identifier_name |
SoapEventDecoder.ts | /*
* Copyright (C) 2017 ZeXtras S.r.l.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2 of
* the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License.
* If not, see <http://www.gnu.org/licenses/>.
*/
import {LogEngine} from "../../../../../lib/log/LogEngine";
import {Logger} from "../../../../../lib/log/Logger";
import {IChatEvent} from "../../../../events/IChatEvent";
import {ISoapEventObject} from "../SoapEventParser";
export abstract class SoapEventDecoder<T extends IChatEvent> {
public Log: Logger;
private mEventCode: number;
constructor(eventCode: number) {
this.mEventCode = eventCode;
this.Log = LogEngine.getLogger(LogEngine.CHAT);
}
public getEventCode(): number |
public abstract decodeEvent(eventObj: ISoapEventObject, originEvent?: IChatEvent): T;
}
| {
return this.mEventCode;
} | identifier_body |
SoapEventDecoder.ts | /*
* Copyright (C) 2017 ZeXtras S.r.l.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2 of
* the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License.
* If not, see <http://www.gnu.org/licenses/>.
*/
import {LogEngine} from "../../../../../lib/log/LogEngine";
import {Logger} from "../../../../../lib/log/Logger";
import {IChatEvent} from "../../../../events/IChatEvent";
import {ISoapEventObject} from "../SoapEventParser";
export abstract class SoapEventDecoder<T extends IChatEvent> {
public Log: Logger;
private mEventCode: number;
| }
public getEventCode(): number {
return this.mEventCode;
}
public abstract decodeEvent(eventObj: ISoapEventObject, originEvent?: IChatEvent): T;
} | constructor(eventCode: number) {
this.mEventCode = eventCode;
this.Log = LogEngine.getLogger(LogEngine.CHAT); | random_line_split |
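// Sketch of a concrete decoder (MessageEvent and the event code are
// hypothetical, for illustration only):
//   class MessageEventDecoder extends SoapEventDecoder<MessageEvent> {
//     constructor() { super(42); }
//     public decodeEvent(eventObj: ISoapEventObject): MessageEvent {
//       return new MessageEvent(eventObj);
//     }
//   }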
annotateable.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use toml::Value;
use libimagstore::store::Entry;
use libimagstore::store::FileLockEntry;
use libimagstore::store::Store;
use libimagstore::storeid::IntoStoreId;
use libimagstore::storeid::StoreIdIterator;
use libimagentrylink::internal::InternalLinker;
use libimagentryutil::isa::Is;
use libimagentryutil::isa::IsKindHeaderPathProvider;
use toml_query::read::TomlValueReadExt;
use toml_query::insert::TomlValueInsertExt;
use error::Result;
use error::AnnotationErrorKind as AEK;
use error::AnnotationError as AE;
use error::ResultExt;
use iter::*;
pub trait Annotateable {
fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>>;
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>>;
fn annotations<'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>>;
fn is_annotation(&self) -> Result<bool>;
}
provide_kindflag_path!(IsAnnotation, "annotation.is_annotation");
impl Annotateable for Entry {
/// Annotate an entry, returns the new entry which is used to annotate
fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>> {
use module_path::ModuleEntryPath;
store.retrieve(ModuleEntryPath::new(ann_name).into_storeid()?)
.map_err(From::from)
.and_then(|mut anno| {
{
let _ = anno.set_isflag::<IsAnnotation>()?;
let _ = anno
.get_header_mut()
.insert("annotation.name", Value::String(String::from(ann_name)))?;
}
Ok(anno)
})
.and_then(|mut anno| {
anno.add_internal_link(self)
.chain_err(|| AEK::LinkingError)
.map(|_| anno)
})
}
/// Checks the current entry for all annotations and removes the one where the name is
/// `ann_name`, which is then returned
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>> {
for annotation in self.annotations(store)? {
let mut anno = annotation?;
let name = match anno.get_header().read("annotation.name")? {
None => continue,
Some(val) => match *val {
Value::String(ref name) => name.clone(),
_ => return Err(AE::from_kind(AEK::HeaderTypeError)),
},
};
if name == ann_name {
let _ = self.remove_internal_link(&mut anno)?;
return Ok(Some(anno));
}
}
Ok(None)
}
/// Get all annotations of an entry
fn | <'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>> {
self.get_internal_links()
.map_err(From::from)
.map(|iter| StoreIdIterator::new(Box::new(iter.map(|e| e.get_store_id().clone()))))
.map(|i| AnnotationIter::new(i, store))
}
fn is_annotation(&self) -> Result<bool> {
self.is::<IsAnnotation>().map_err(From::from)
}
}
| annotations | identifier_name |
annotateable.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use toml::Value;
use libimagstore::store::Entry;
use libimagstore::store::FileLockEntry;
use libimagstore::store::Store;
use libimagstore::storeid::IntoStoreId;
use libimagstore::storeid::StoreIdIterator;
use libimagentrylink::internal::InternalLinker;
use libimagentryutil::isa::Is;
use libimagentryutil::isa::IsKindHeaderPathProvider;
use toml_query::read::TomlValueReadExt;
use toml_query::insert::TomlValueInsertExt;
use error::Result;
use error::AnnotationErrorKind as AEK;
use error::AnnotationError as AE;
use error::ResultExt;
use iter::*;
pub trait Annotateable {
fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>>;
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>>;
fn annotations<'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>>;
fn is_annotation(&self) -> Result<bool>;
}
provide_kindflag_path!(IsAnnotation, "annotation.is_annotation");
impl Annotateable for Entry {
/// Annotate an entry, returns the new entry which is used to annotate
fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>> {
use module_path::ModuleEntryPath;
store.retrieve(ModuleEntryPath::new(ann_name).into_storeid()?)
.map_err(From::from)
.and_then(|mut anno| {
{
let _ = anno.set_isflag::<IsAnnotation>()?;
let _ = anno
.get_header_mut()
.insert("annotation.name", Value::String(String::from(ann_name)))?;
}
Ok(anno)
})
.and_then(|mut anno| {
anno.add_internal_link(self)
.chain_err(|| AEK::LinkingError)
.map(|_| anno)
})
}
/// Checks the current entry for all annotations and removes the one where the name is
/// `ann_name`, which is then returned
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>> {
for annotation in self.annotations(store)? {
let mut anno = annotation?;
let name = match anno.get_header().read("annotation.name")? {
None => continue,
Some(val) => match *val {
Value::String(ref name) => name.clone(),
_ => return Err(AE::from_kind(AEK::HeaderTypeError)),
},
};
if name == ann_name {
let _ = self.remove_internal_link(&mut anno)?;
return Ok(Some(anno));
}
}
Ok(None)
}
/// Get all annotations of an entry
fn annotations<'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>> |
fn is_annotation(&self) -> Result<bool> {
self.is::<IsAnnotation>().map_err(From::from)
}
}
| {
self.get_internal_links()
.map_err(From::from)
.map(|iter| StoreIdIterator::new(Box::new(iter.map(|e| e.get_store_id().clone()))))
.map(|i| AnnotationIter::new(i, store))
} | identifier_body |
annotateable.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use toml::Value;
use libimagstore::store::Entry;
use libimagstore::store::FileLockEntry;
use libimagstore::store::Store;
use libimagstore::storeid::IntoStoreId;
use libimagstore::storeid::StoreIdIterator;
use libimagentrylink::internal::InternalLinker;
use libimagentryutil::isa::Is;
use libimagentryutil::isa::IsKindHeaderPathProvider;
use toml_query::read::TomlValueReadExt;
use toml_query::insert::TomlValueInsertExt;
use error::Result;
use error::AnnotationErrorKind as AEK;
use error::AnnotationError as AE;
use error::ResultExt;
use iter::*;
pub trait Annotateable {
fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>>;
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>>;
fn annotations<'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>>;
fn is_annotation(&self) -> Result<bool>;
}
provide_kindflag_path!(IsAnnotation, "annotation.is_annotation");
impl Annotateable for Entry {
/// Annotate an entry, returns the new entry which is used to annotate | {
let _ = anno.set_isflag::<IsAnnotation>()?;
let _ = anno
.get_header_mut()
.insert("annotation.name", Value::String(String::from(ann_name)))?;
}
Ok(anno)
})
.and_then(|mut anno| {
anno.add_internal_link(self)
.chain_err(|| AEK::LinkingError)
.map(|_| anno)
})
}
/// Checks the current entry for all annotations and removes the one where the name is
/// `ann_name`, which is then returned
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>> {
for annotation in self.annotations(store)? {
let mut anno = annotation?;
let name = match anno.get_header().read("annotation.name")? {
None => continue,
Some(val) => match *val {
Value::String(ref name) => name.clone(),
_ => return Err(AE::from_kind(AEK::HeaderTypeError)),
},
};
if name == ann_name {
let _ = self.remove_internal_link(&mut anno)?;
return Ok(Some(anno));
}
}
Ok(None)
}
/// Get all annotations of an entry
fn annotations<'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>> {
self.get_internal_links()
.map_err(From::from)
.map(|iter| StoreIdIterator::new(Box::new(iter.map(|e| e.get_store_id().clone()))))
.map(|i| AnnotationIter::new(i, store))
}
fn is_annotation(&self) -> Result<bool> {
self.is::<IsAnnotation>().map_err(From::from)
}
} | fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>> {
use module_path::ModuleEntryPath;
store.retrieve(ModuleEntryPath::new(ann_name).into_storeid()?)
.map_err(From::from)
.and_then(|mut anno| { | random_line_split |
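// Usage sketch (assumes an open store and a mutable entry `entry`):
//   let note = entry.annotate(&store, "todo")?;
//   let removed = entry.denotate(&store, "todo")?; // -> Some(note)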
textdecoder.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::TextDecoderBinding;
use dom::bindings::codegen::Bindings::TextDecoderBinding::TextDecoderMethods;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::str::USVString;
use dom::bindings::trace::JSTraceable;
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use encoding::Encoding;
use encoding::types::{EncodingRef, DecoderTrap};
use encoding::label::encoding_from_whatwg_label;
use js::jsapi::{JSContext, JSObject};
use js::jsapi::JS_GetObjectAsArrayBufferView;
use std::borrow::ToOwned;
use std::ptr;
use std::slice;
#[dom_struct]
pub struct TextDecoder {
reflector_: Reflector,
encoding: EncodingRef,
fatal: bool,
}
impl TextDecoder {
fn new_inherited(encoding: EncodingRef, fatal: bool) -> TextDecoder {
TextDecoder {
reflector_: Reflector::new(),
encoding: encoding,
fatal: fatal,
}
}
fn make_range_error() -> Fallible<Root<TextDecoder>> {
Err(Error::Range("The given encoding is not supported.".to_owned()))
}
pub fn new(global: GlobalRef, encoding: EncodingRef, fatal: bool) -> Root<TextDecoder> {
reflect_dom_object(box TextDecoder::new_inherited(encoding, fatal),
global,
TextDecoderBinding::Wrap)
}
/// https://encoding.spec.whatwg.org/#dom-textdecoder
pub fn Constructor(global: GlobalRef,
label: DOMString,
options: &TextDecoderBinding::TextDecoderOptions)
-> Fallible<Root<TextDecoder>> {
let encoding = match encoding_from_whatwg_label(&label) {
None => return TextDecoder::make_range_error(),
Some(enc) => enc
};
// The rust-encoding crate has WHATWG compatibility, so we are
// guaranteed to have a whatwg_name because we successfully got
// the encoding from encoding_from_whatwg_label.
// Use match + panic! instead of unwrap for better error message
match encoding.whatwg_name() {
None => panic!("Label {} fits valid encoding without valid name", label),
Some("replacement") => return TextDecoder::make_range_error(),
_ => ()
};
Ok(TextDecoder::new(global, encoding, options.fatal))
}
}
impl<'a> TextDecoderMethods for &'a TextDecoder {
fn Encoding(self) -> DOMString {
self.encoding.whatwg_name().unwrap().to_owned()
}
fn Fatal(self) -> bool {
self.fatal
}
#[allow(unsafe_code)]
fn Decode(self, _cx: *mut JSContext, input: Option<*mut JSObject>)
-> Fallible<USVString> {
let input = match input {
Some(input) => input,
None => return Ok(USVString("".to_owned())),
};
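// Extract the raw buffer behind the ArrayBufferView; JS_GetObjectAsArrayBufferView
// returns null when the object is not actually a view, which we surface as a TypeError.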
let mut length = 0;
let mut data = ptr::null_mut();
if unsafe { JS_GetObjectAsArrayBufferView(input, &mut length, &mut data).is_null() } {
return Err(Error::Type("Argument to TextDecoder.decode is not an ArrayBufferView".to_owned()));
}
let buffer = unsafe {
slice::from_raw_parts(data as *const _, length as usize)
};
let trap = if self.fatal {
DecoderTrap::Strict
} else {
DecoderTrap::Replace
};
match self.encoding.decode(buffer, trap) {
Ok(s) => Ok(USVString(s)),
Err(_) => Err(Error::Type("Decoding failed".to_owned())),
}
}
}

save_results.py
# argv[1] - file path to main folder (like $HOME/dsge-models)
# argv[2] - name of model (e.g. 'dsf' or 'nk' or 'ca')
from scipy.io import loadmat
from sys import argv
from json import load
TT = 30 # how many periods of results to send
model = argv[2]
fpath = argv[1] + '/' + model + '_mfiles/'
json = ''
#### 1 - load model results
# load results from mat file and convert to numpy lists
#mat = loadmat(fpath + model + '_results.mat')
#endo_names = mat['M_']['endo_names'].tolist()[0][0]
#endo_simul = mat['oo_']['endo_simul'].tolist()[0][0]
# make string of JSON-looking data out of numpy lists
#for name, simul in zip(endo_names, endo_simul):
# json += '"' + name.strip() + '":'
# json += '[' + ','.join(['%2f' % jj for jj in simul[0:TT]]) + '],'
#### 2 - load extra plot vars
# load results from mat file and convert to numpy lists (new format though)
mat = loadmat(fpath + 'plot_vars.mat')
plot_names = mat['plot_vars'].dtype.names
plot_simul = mat['plot_vars'][0][0]
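# same JSON-ish serialisation as the commented block above, truncated to TT periods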
for name, simul in zip(plot_names, plot_simul):
print('name: ' + name)
json += '"' + name.strip() + '":'
json += '[' + ','.join(['%2f' % jj for jj in simul[0:TT]]) + '],'
# write JSON-looking string to file
f = open(model + '_mfiles/' + model + '_results.json', 'w')
f.write('{' + json[:-1] + '}')
f.close()
# pull JSON data into python dict
json_data = open(fpath + model + '_results.json')
data = load(json_data)
json_data.close()
# pull JSON of short+long var names into python dict
json_names = open(fpath + 'json/var_list.json')
names = load(json_names)
json_names.close()
# make string of public directory
pub_fpath = fpath[:fpath[:-1].rfind('/')] + '/public/'
# create csv file to write to
f = open(pub_fpath + model + '_results.csv','w')
for key in data.keys():
    #f.write(str(key) + ', ' + str(data[key])[1:-1] + '\n')
    f.write(str(names[key]) + ', ' + str(data[key])[1:-1] + '\n')
f.close()

vsw-602_mp_queue_stats.py
from ryu.base.app_manager import RyuApp
from ryu.controller.ofp_event import EventOFPSwitchFeatures
from ryu.controller.ofp_event import EventOFPQueueStatsReply
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.ofproto.ofproto_v1_2 import OFPG_ANY
from ryu.ofproto.ofproto_v1_3 import OFP_VERSION
from ryu.lib.mac import haddr_to_bin
class App(RyuApp):
OFP_VERSIONS = [OFP_VERSION]
def __init__(self, *args, **kwargs):
super(App, self).__init__(*args, **kwargs)
@set_ev_cls(EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
[self.install_sample(datapath, n) for n in [0]]
def create_meter_mod(self, datapath, command, flags_, meter_id, bands):
ofproto = datapath.ofproto
ofp_parser = datapath.ofproto_parser
meter_mod = ofp_parser.OFPMeterMod(datapath, command, flags_,
meter_id, bands)
return meter_mod
def install_sample(self, datapath, table_id):
parser = datapath.ofproto_parser
ofproto = datapath.ofproto
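# flags=0; OFPP_ANY/OFPQ_ALL request statistics for every queue on every port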
req = parser.OFPQueueStatsRequest(datapath, 0, ofproto.OFPP_ANY,
ofproto.OFPQ_ALL)
datapath.send_msg(req)
@set_ev_cls(EventOFPQueueStatsReply, MAIN_DISPATCHER)
def queue_stats_reply_handler(self, ev):
    queues = []
    for stat in ev.msg.body:
        queues.append('port_no=%d queue_id=%d '
                      'tx_bytes=%d tx_packets=%d tx_errors=%d '
                      'duration_sec=%d duration_nsec=%d' %
                      (stat.port_no, stat.queue_id,
                       stat.tx_bytes, stat.tx_packets, stat.tx_errors,
                       stat.duration_sec, stat.duration_nsec))
    self.logger.info('QueueStats: %s', queues)

cap-clause-move.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ptr;
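// A box moved into a closure should keep its original heap address.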
pub fn main() {
let x = ~3;
let y = ptr::to_unsafe_ptr(&(*x)) as uint;
let snd_move: ~fn() -> uint = || ptr::to_unsafe_ptr(&(*x)) as uint;
assert_eq!(snd_move(), y);
let x = ~4;
let y = ptr::to_unsafe_ptr(&(*x)) as uint;
let lam_move: ~fn() -> uint = || ptr::to_unsafe_ptr(&(*x)) as uint;
assert_eq!(lam_move(), y);
}
|
context_discovery.ts
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import '../util/ng_dev_mode';
import {assertDomNode} from '../util/assert';
import {EMPTY_ARRAY} from './empty';
import {LContext, MONKEY_PATCH_KEY_NAME} from './interfaces/context';
import {TNode, TNodeFlags} from './interfaces/node';
import {RElement, RNode} from './interfaces/renderer';
import {CONTEXT, HEADER_OFFSET, HOST, LView, TVIEW} from './interfaces/view';
import {getComponentViewByIndex, getNativeByTNodeOrNull, readPatchedData, unwrapRNode} from './util/view_utils';
/** Returns the matching `LContext` data for a given DOM node, directive or component instance.
*
* This function will examine the provided DOM element, component, or directive instance\'s
* monkey-patched property to derive the `LContext` data. Once called then the monkey-patched
* value will be that of the newly created `LContext`.
*
* If the monkey-patched value is the `LView` instance then the context value for that
* target will be created and the monkey-patch reference will be updated. Therefore when this
* function is called it may mutate the provided element\'s, component\'s or any of the associated
* directive\'s monkey-patch values.
*
* If the monkey-patch value is not detected then the code will walk up the DOM until an element
* is found which contains a monkey-patch reference. When that occurs then the provided element
* will be updated with a new context (which is then returned). If the monkey-patch value is not
* detected for a component/directive instance then it will throw an error (all components and
* directives should be automatically monkey-patched by ivy).
*
* @param target Component, Directive or DOM Node.
*/
export function getLContext(target: any): LContext|null {
let mpValue = readPatchedData(target);
if (mpValue) {
// only when it's an array is it considered an LView instance
// ... otherwise it's an already constructed LContext instance
if (Array.isArray(mpValue)) {
const lView: LView = mpValue !;
let nodeIndex: number;
let component: any = undefined;
let directives: any[]|null|undefined = undefined;
if (isComponentInstance(target)) {
nodeIndex = findViaComponent(lView, target);
if (nodeIndex == -1) {
throw new Error('The provided component was not found in the application');
}
component = target;
} else if (isDirectiveInstance(target)) {
nodeIndex = findViaDirective(lView, target);
if (nodeIndex == -1) {
throw new Error('The provided directive was not found in the application');
}
directives = getDirectivesAtNodeIndex(nodeIndex, lView, false);
} else {
nodeIndex = findViaNativeElement(lView, target as RElement);
if (nodeIndex == -1) {
return null;
}
}
// the goal is not to fill the entire context full of data because the lookups
// are expensive. Instead, only the target data (the element, component, container, ICU
// expression or directive details) are filled into the context. If called multiple times
// with different target values then the missing target data will be filled in.
const native = unwrapRNode(lView[nodeIndex]);
const existingCtx = readPatchedData(native);
const context: LContext = (existingCtx && !Array.isArray(existingCtx)) ?
existingCtx :
createLContext(lView, nodeIndex, native);
// only when the component has been discovered then update the monkey-patch
if (component && context.component === undefined) {
context.component = component;
attachPatchData(context.component, context);
}
// only when the directives have been discovered then update the monkey-patch
if (directives && context.directives === undefined) {
context.directives = directives;
for (let i = 0; i < directives.length; i++) {
attachPatchData(directives[i], context);
}
}
attachPatchData(context.native, context);
mpValue = context;
}
} else {
const rElement = target as RElement;
ngDevMode && assertDomNode(rElement);
// if the context is not found then we need to traverse upwards up the DOM
// to find the nearest element that has already been monkey patched with data
let parent = rElement as any;
while (parent = parent.parentNode) {
const parentContext = readPatchedData(parent);
if (parentContext) {
let lView: LView|null;
if (Array.isArray(parentContext)) {
lView = parentContext as LView;
} else {
lView = parentContext.lView;
}
// the edge of the app was also reached here through another means
// (maybe because the DOM was changed manually).
if (!lView) {
return null;
}
const index = findViaNativeElement(lView, rElement);
if (index >= 0) {
const native = unwrapRNode(lView[index]);
const context = createLContext(lView, index, native);
attachPatchData(native, context);
mpValue = context;
break;
}
}
}
}
return (mpValue as LContext) || null;
}
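// Illustrative usage sketch (not part of the module): `hostElement` below stands
// for any DOM node that Ivy rendered and monkey-patched; `getLContext` and
// `discoverLocalRefs` are the real exports defined in this file.
//
//   const ctx = getLContext(hostElement);
//   if (ctx !== null) {
//     const localRefs = discoverLocalRefs(ctx.lView, ctx.nodeIndex);
//   }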
/**
* Creates an empty instance of a `LContext` context
*/
function createLContext(lView: LView, nodeIndex: number, native: RNode): LContext {
return {
lView,
nodeIndex,
native,
component: undefined,
directives: undefined,
localRefs: undefined,
};
}
/**
* Takes a component instance and returns the view for that component.
*
* @param componentInstance
* @returns The component's view
*/
export function getComponentViewByInstance(componentInstance: {}): LView {
let lView = readPatchedData(componentInstance);
let view: LView;
if (Array.isArray(lView)) {
const nodeIndex = findViaComponent(lView, componentInstance);
view = getComponentViewByIndex(nodeIndex, lView);
const context = createLContext(lView, nodeIndex, view[HOST] as RElement);
context.component = componentInstance;
attachPatchData(componentInstance, context);
attachPatchData(context.native, context);
} else {
const context = lView as any as LContext;
view = getComponentViewByIndex(context.nodeIndex, context.lView);
}
return view;
}
/**
* Assigns the given data to the given target (which could be a component,
* directive or DOM node instance) using monkey-patching.
*/
export function attachPatchData(target: any, data: LView | LContext) {
target[MONKEY_PATCH_KEY_NAME] = data;
}
export function isComponentInstance(instance: any): boolean {
return instance && instance.constructor && instance.constructor.ɵcmp;
}
export function isDirectiveInstance(instance: any): boolean {
return instance && instance.constructor && instance.constructor.ɵdir;
}
/**
* Locates the element within the given LView and returns the matching index
*/
function findViaNativeElement(lView: LView, target: RElement): number {
let tNode = lView[TVIEW].firstChild;
while (tNode) {
const native = getNativeByTNodeOrNull(tNode, lView) !;
if (native === target) {
return tNode.index;
}
tNode = traverseNextElement(tNode);
}
return -1;
}
/**
* Locates the next tNode (child, sibling or parent).
*/
function traverseNextElement(tNode: TNode): TNode|null {
if (tNode.child) {
return tNode.child;
} else if (tNode.next) {
return tNode.next;
} else {
// Let's take the following template: <div><span>text</span></div><component/>
// After checking the text node, we need to find the next parent that has a "next" TNode,
// in this case the parent `div`, so that we can find the component.
while (tNode.parent && !tNode.parent.next) {
tNode = tNode.parent;
}
return tNode.parent && tNode.parent.next;
}
}
/**
* Locates the component within the given LView and returns the matching index
*/
function findViaComponent(lView: LView, componentInstance: {}): number {
const componentIndices = lView[TVIEW].components;
if (componentIndices) {
for (let i = 0; i < componentIndices.length; i++) {
const elementComponentIndex = componentIndices[i];
const componentView = getComponentViewByIndex(elementComponentIndex, lView);
if (componentView[CONTEXT] === componentInstance) {
return elementComponentIndex;
}
}
} else {
const rootComponentView = getComponentViewByIndex(HEADER_OFFSET, lView);
const rootComponent = rootComponentView[CONTEXT];
if (rootComponent === componentInstance) {
// we are dealing with the root element here therefore we know that the
// element is the very first element after the HEADER data in the lView
return HEADER_OFFSET;
}
}
return -1;
}
/**
* Locates the directive within the given LView and returns the matching index
*/
function findViaDirective(lView: LView, directiveInstance: {}): number {
// if a directive is monkey patched then it will (by default)
// have a reference to the LView of the current view. The
// element bound to the directive being search lives somewhere
// in the view data. We loop through the nodes and check their
// list of directives for the instance.
let tNode = lView[TVIEW].firstChild;
while (tNode) {
const directiveIndexStart = tNode.directiveStart;
const directiveIndexEnd = tNode.directiveEnd;
for (let i = directiveIndexStart; i < directiveIndexEnd; i++) {
if (lView[i] === directiveInstance) {
return tNode.index;
}
}
tNode = traverseNextElement(tNode);
}
return -1;
}
/**
* Returns a list of directives extracted from the given view based on the
* provided list of directive index values.
*
* @param nodeIndex The node index
* @param lView The target view data
* @param includeComponents Whether or not to include components in returned directives
*/
export function getDirectivesAtNodeIndex(
nodeIndex: number, lView: LView, includeComponents: boolean): any[]|null {
const tNode = lView[TVIEW].data[nodeIndex] as TNode;
let directiveStartIndex = tNode.directiveStart;
if (directiveStartIndex == 0) return EMPTY_ARRAY;
const directiveEndIndex = tNode.directiveEnd;
if (!includeComponents && tNode.flags & TNodeFlags.isComponentHost) directiveStartIndex++;
return lView.slice(directiveStartIndex, directiveEndIndex);
}
export function getComponentAtNodeIndex(nodeIndex: number, lView: LView): {}|null {
const tNode = lView[TVIEW].data[nodeIndex] as TNode;
let directiveStartIndex = tNode.directiveStart;
return tNode.flags & TNodeFlags.isComponentHost ? lView[directiveStartIndex] : null;
}
/**
* Returns a map of local references (local reference name => element or directive instance) that
* exist on a given element.
*/
export function discoverLocalRefs(lView: LView, nodeIndex: number): {[key: string]: any}|null {
const tNode = lView[TVIEW].data[nodeIndex] as TNode;
if (tNode && tNode.localNames) {
const result: {[key: string]: any} = {};
let localIndex = tNode.index + 1;
for (let i = 0; i < tNode.localNames.length; i += 2) {
result[tNode.localNames[i]] = lView[localIndex];
localIndex++;
}
return result;
}
return null;
}

*/
function findViaComponent(lView: LView, componentInstance: {}): number {
const componentIndices = lView[TVIEW].components;
if (componentIndices) {
for (let i = 0; i < componentIndices.length; i++) {
const elementComponentIndex = componentIndices[i];
const componentView = getComponentViewByIndex(elementComponentIndex, lView);
if (componentView[CONTEXT] === componentInstance) {
return elementComponentIndex;
}
}
} else {
const rootComponentView = getComponentViewByIndex(HEADER_OFFSET, lView);
const rootComponent = rootComponentView[CONTEXT];
if (rootComponent === componentInstance) {
// we are dealing with the root element here therefore we know that the
// element is the very first element after the HEADER data in the lView
return HEADER_OFFSET;
}
}
return -1;
}
/**
* Locates the directive within the given LView and returns the matching index
*/
function findViaDirective(lView: LView, directiveInstance: {}): number {
// if a directive is monkey patched then it will (by default)
// have a reference to the LView of the current view. The
  // element bound to the directive being searched lives somewhere
// in the view data. We loop through the nodes and check their
// list of directives for the instance.
let tNode = lView[TVIEW].firstChild;
while (tNode) {
const directiveIndexStart = tNode.directiveStart;
const directiveIndexEnd = tNode.directiveEnd;
for (let i = directiveIndexStart; i < directiveIndexEnd; i++) {
if (lView[i] === directiveInstance) {
return tNode.index;
}
}
tNode = traverseNextElement(tNode);
}
return -1;
}
/**
* Returns a list of directives extracted from the given view based on the
* provided list of directive index values.
*
* @param nodeIndex The node index
* @param lView The target view data
* @param includeComponents Whether or not to include components in returned directives
*/
export function getDirectivesAtNodeIndex(
nodeIndex: number, lView: LView, includeComponents: boolean): any[]|null {
const tNode = lView[TVIEW].data[nodeIndex] as TNode;
let directiveStartIndex = tNode.directiveStart;
if (directiveStartIndex == 0) return EMPTY_ARRAY;
const directiveEndIndex = tNode.directiveEnd;
if (!includeComponents && tNode.flags & TNodeFlags.isComponentHost) directiveStartIndex++;
return lView.slice(directiveStartIndex, directiveEndIndex);
}
export function getComponentAtNodeIndex(nodeIndex: number, lView: LView): {}|null {
const tNode = lView[TVIEW].data[nodeIndex] as TNode;
let directiveStartIndex = tNode.directiveStart;
return tNode.flags & TNodeFlags.isComponentHost ? lView[directiveStartIndex] : null;
}
/**
* Returns a map of local references (local reference name => element or directive instance) that
* exist on a given element.
*/
export function discoverLocalRefs(lView: LView, nodeIndex: number): {[key: string]: any}|null {
const tNode = lView[TVIEW].data[nodeIndex] as TNode;
if (tNode && tNode.localNames) {
const result: {[key: string]: any} = {};
let localIndex = tNode.index + 1;
for (let i = 0; i < tNode.localNames.length; i += 2) {
result[tNode.localNames[i]] = lView[localIndex];
localIndex++;
}
return result;
}
return null;
}
| if (tNode.child) {
return tNode.child;
} else if (tNode.next) {
return tNode.next;
} else {
// Let's take the following template: <div><span>text</span></div><component/>
// After checking the text node, we need to find the next parent that has a "next" TNode,
// in this case the parent `div`, so that we can find the component.
while (tNode.parent && !tNode.parent.next) {
tNode = tNode.parent;
}
return tNode.parent && tNode.parent.next;
}
}
| identifier_body |
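An aside on the middle recovered above: traverseNextElement walks the TNode tree depth-first, taking the child first, then the next sibling, then climbing parents until one of them has a `next` sibling. A minimal Python sketch of the same ordering (the Node class and its child/next/parent fields are hypothetical stand-ins for Angular's TNode, not part of the source above):

class Node(object):
    """Hypothetical stand-in for Angular's TNode."""
    def __init__(self, parent=None):
        self.parent = parent  # enclosing node, or None at the root
        self.child = None     # first child
        self.next = None      # next sibling

def traverse_next(node):
    # Same order as traverseNextElement: child, else sibling, else the
    # nearest ancestor that still has a next sibling (None when done).
    if node.child:
        return node.child
    if node.next:
        return node.next
    while node.parent and not node.parent.next:
        node = node.parent
    return node.parent.next if node.parent else None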
buffer.py | class Buffer(object):
"""
A Buffer is a simple FIFO buffer. You write() stuff to it, and you
    read() it back. You can also peek() or drain() data.
"""
def __init__(self, data=''):
"""
Initialize a buffer with 'data'.
"""
self.buffer = bytes(data)
def read(self, n=-1):
"""
Read and return 'n' bytes from the buffer.
If 'n' is negative, read and return the whole buffer.
If 'n' is larger than the size of the buffer, read and return
the whole buffer.
"""
if (n < 0) or (n > len(self.buffer)):
the_whole_buffer = self.buffer
self.buffer = bytes('')
return the_whole_buffer
data = self.buffer[:n] | def write(self, data):
"""
Append 'data' to the buffer.
"""
self.buffer = self.buffer + data
def peek(self, n=-1):
"""
Return 'n' bytes from the buffer, without draining them.
If 'n' is negative, return the whole buffer.
If 'n' is larger than the size of the buffer, return the whole
buffer.
"""
if (n < 0) or (n > len(self.buffer)):
return self.buffer
return self.buffer[:n]
def drain(self, n=-1):
"""
Drain 'n' bytes from the buffer.
If 'n' is negative, drain the whole buffer.
If 'n' is larger than the size of the buffer, drain the whole
buffer.
"""
if (n < 0) or (n > len(self.buffer)):
self.buffer = bytes('')
return
self.buffer = self.buffer[n:]
return
def __len__(self):
"""Returns length of buffer. Used in len()."""
return len(self.buffer)
def __nonzero__(self):
"""
Returns True if the buffer is non-empty.
Used in truth-value testing.
"""
return True if len(self.buffer) else False | self.buffer = self.buffer[n:]
return data
| random_line_split |
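The buffer.py rows here all carry the same FIFO contract described in the class docstring; the file recurs in the next rows with different FIM splits. A short usage sketch of that contract (Python 2 semantics, where bytes and str coincide; the import path is an assumption, since the rows only show the file itself):

from buffer import Buffer  # assumed module name

buf = Buffer('abc')
buf.write('def')
assert buf.peek(2) == 'ab'    # peek returns data without consuming it
assert buf.read(4) == 'abcd'  # read consumes what it returns
buf.drain(1)                  # silently drop one byte ('e')
assert buf.read() == 'f'      # omitted/negative n takes everything left
assert len(buf) == 0          # __len__ reports the remaining byte count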
buffer.py | class Buffer(object):
"""
A Buffer is a simple FIFO buffer. You write() stuff to it, and you
    read() it back. You can also peek() or drain() data.
"""
def __init__(self, data=''):
"""
Initialize a buffer with 'data'.
"""
self.buffer = bytes(data)
def read(self, n=-1):
"""
Read and return 'n' bytes from the buffer.
If 'n' is negative, read and return the whole buffer.
If 'n' is larger than the size of the buffer, read and return
the whole buffer.
"""
if (n < 0) or (n > len(self.buffer)):
the_whole_buffer = self.buffer
self.buffer = bytes('')
return the_whole_buffer
data = self.buffer[:n]
self.buffer = self.buffer[n:]
return data
def write(self, data):
"""
Append 'data' to the buffer.
"""
self.buffer = self.buffer + data
def peek(self, n=-1):
"""
Return 'n' bytes from the buffer, without draining them.
If 'n' is negative, return the whole buffer.
If 'n' is larger than the size of the buffer, return the whole
buffer.
"""
if (n < 0) or (n > len(self.buffer)):
|
return self.buffer[:n]
def drain(self, n=-1):
"""
Drain 'n' bytes from the buffer.
If 'n' is negative, drain the whole buffer.
If 'n' is larger than the size of the buffer, drain the whole
buffer.
"""
if (n < 0) or (n > len(self.buffer)):
self.buffer = bytes('')
return
self.buffer = self.buffer[n:]
return
def __len__(self):
"""Returns length of buffer. Used in len()."""
return len(self.buffer)
def __nonzero__(self):
"""
Returns True if the buffer is non-empty.
Used in truth-value testing.
"""
return True if len(self.buffer) else False
| return self.buffer | conditional_block |
buffer.py | class Buffer(object):
"""
A Buffer is a simple FIFO buffer. You write() stuff to it, and you
    read() it back. You can also peek() or drain() data.
"""
def __init__(self, data=''):
"""
Initialize a buffer with 'data'.
"""
self.buffer = bytes(data)
def read(self, n=-1):
"""
Read and return 'n' bytes from the buffer.
If 'n' is negative, read and return the whole buffer.
If 'n' is larger than the size of the buffer, read and return
the whole buffer.
"""
if (n < 0) or (n > len(self.buffer)):
the_whole_buffer = self.buffer
self.buffer = bytes('')
return the_whole_buffer
data = self.buffer[:n]
self.buffer = self.buffer[n:]
return data
def write(self, data):
"""
Append 'data' to the buffer.
"""
self.buffer = self.buffer + data
def peek(self, n=-1):
"""
Return 'n' bytes from the buffer, without draining them.
If 'n' is negative, return the whole buffer.
If 'n' is larger than the size of the buffer, return the whole
buffer.
"""
if (n < 0) or (n > len(self.buffer)):
return self.buffer
return self.buffer[:n]
def drain(self, n=-1):
"""
Drain 'n' bytes from the buffer.
If 'n' is negative, drain the whole buffer.
If 'n' is larger than the size of the buffer, drain the whole
buffer.
"""
if (n < 0) or (n > len(self.buffer)):
self.buffer = bytes('')
return
self.buffer = self.buffer[n:]
return
def __len__(self):
|
def __nonzero__(self):
"""
Returns True if the buffer is non-empty.
Used in truth-value testing.
"""
return True if len(self.buffer) else False
| """Returns length of buffer. Used in len()."""
return len(self.buffer) | identifier_body |
buffer.py | class Buffer(object):
"""
A Buffer is a simple FIFO buffer. You write() stuff to it, and you
    read() it back. You can also peek() or drain() data.
"""
def __init__(self, data=''):
"""
Initialize a buffer with 'data'.
"""
self.buffer = bytes(data)
def read(self, n=-1):
"""
Read and return 'n' bytes from the buffer.
If 'n' is negative, read and return the whole buffer.
If 'n' is larger than the size of the buffer, read and return
the whole buffer.
"""
if (n < 0) or (n > len(self.buffer)):
the_whole_buffer = self.buffer
self.buffer = bytes('')
return the_whole_buffer
data = self.buffer[:n]
self.buffer = self.buffer[n:]
return data
def write(self, data):
"""
Append 'data' to the buffer.
"""
self.buffer = self.buffer + data
def | (self, n=-1):
"""
Return 'n' bytes from the buffer, without draining them.
If 'n' is negative, return the whole buffer.
If 'n' is larger than the size of the buffer, return the whole
buffer.
"""
if (n < 0) or (n > len(self.buffer)):
return self.buffer
return self.buffer[:n]
def drain(self, n=-1):
"""
Drain 'n' bytes from the buffer.
If 'n' is negative, drain the whole buffer.
If 'n' is larger than the size of the buffer, drain the whole
buffer.
"""
if (n < 0) or (n > len(self.buffer)):
self.buffer = bytes('')
return
self.buffer = self.buffer[n:]
return
def __len__(self):
"""Returns length of buffer. Used in len()."""
return len(self.buffer)
def __nonzero__(self):
"""
Returns True if the buffer is non-empty.
Used in truth-value testing.
"""
return True if len(self.buffer) else False
| peek | identifier_name |
test_dumpgenerator.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2014 WikiTeam developers
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import json
try:
from hashlib import md5
except ImportError: # Python 2.4 compatibility
from md5 import new as md5
import os
import requests
import shutil
import time
import unittest
import urllib
import urllib2
from dumpgenerator import delay, domain2prefix, getImageNames, getPageTitles, getUserAgent, getWikiEngine, mwGetAPIAndIndex
class TestDumpgenerator(unittest.TestCase):
# Documentation
# http://revista.python.org.ar/1/html/unittest.html
# https://docs.python.org/2/library/unittest.html
# Ideas:
# - Check one wiki per wikifarm at least (page titles & images, with/out API)
def test_delay(self):
# This test checks several delays
print '\n', '#'*73, '\n', 'test_delay', '\n', '#'*73
for i in [0, 1, 2, 3]:
print 'Testing delay:', i
config = {'delay': i}
t1 = time.time()
delay(config=config)
t2 = time.time() - t1
print 'Elapsed time in seconds (approx.):', t2
self.assertTrue(t2 + 0.01 > i and t2 < i + 1)
def test_getImages(self):
        # This test downloads the image list using the API and index.php
# Compare both lists in length and file by file
# Check the presence of some special files, like odd chars filenames
        # The tested wikis are from different wikifarms, plus some standalone ones
print '\n', '#'*73, '\n', 'test_getImages', '\n', '#'*73
tests = [
# Alone wikis
#['http://wiki.annotation.jp/index.php', 'http://wiki.annotation.jp/api.php', u'かずさアノテーション - ソーシャル・ゲノム・アノテーション.jpg'],
['http://archiveteam.org/index.php', 'http://archiveteam.org/api.php', u'Archive-is 2013-07-02 17-05-40.png'],
['http://skilledtests.com/wiki/index.php', 'http://skilledtests.com/wiki/api.php', u'Benham\'s disc (animated).gif'],
# Editthis wikifarm
# It has a page view limit
# Gamepedia wikifarm
['http://dawngate.gamepedia.com/index.php', 'http://dawngate.gamepedia.com/api.php', u'Spell Vanquish.png'],
# Neoseeker wikifarm
['http://digimon.neoseeker.com/w/index.php', 'http://digimon.neoseeker.com/w/api.php', u'Ogremon card.png'],
# Orain wikifarm
#['http://mc.orain.org/w/index.php', 'http://mc.orain.org/w/api.php', u'Mojang logo.svg'],
# Referata wikifarm
['http://wikipapers.referata.com/w/index.php', 'http://wikipapers.referata.com/w/api.php', u'Avbot logo.png'],
# ShoutWiki wikifarm
['http://commandos.shoutwiki.com/w/index.php', 'http://commandos.shoutwiki.com/w/api.php', u'Night of the Wolves loading.png'],
# Wiki-site wikifarm
['http://minlingo.wiki-site.com/index.php', 'http://minlingo.wiki-site.com/api.php', u'一 (書方灋ᅗᅩ).png'],
# Wikkii wikifarm
# It seems offline
]
session = requests.Session()
session.headers = {'User-Agent': getUserAgent()}
for index, api, filetocheck in tests:
# Testing with API
print '\nTesting', api
config_api = {'api': api, 'delay': 0}
req = urllib2.Request(url=api, data=urllib.urlencode({'action': 'query', 'meta': 'siteinfo', 'siprop': 'statistics', 'format': 'json'}), headers={'User-Agent': getUserAgent()})
f = urllib2.urlopen(req)
imagecount = int(json.loads(f.read())['query']['statistics']['images'])
f.close()
print 'Trying to parse', filetocheck, 'with API'
result_api = getImageNames(config=config_api, session=session)
self.assertEqual(len(result_api), imagecount)
self.assertTrue(filetocheck in [filename for filename, url, uploader in result_api])
# Testing with index
print '\nTesting', index
config_index = {'index': index, 'delay': 0}
req = urllib2.Request(url=api, data=urllib.urlencode({'action': 'query', 'meta': 'siteinfo', 'siprop': 'statistics', 'format': 'json'}), headers={'User-Agent': getUserAgent()})
f = urllib2.urlopen(req)
imagecount = int(json.loads(f.read())['query']['statistics']['images'])
f.close()
print 'Trying to parse', filetocheck, 'with index'
result_index = getImageNames(config=config_index, session=session)
#print 111, set([filename for filename, url, uploader in result_api]) - set([filename for filename, url, uploader in result_index])
self.assertEqual(len(result_index), imagecount)
self.assertTrue(filetocheck in [filename for filename, url, uploader in result_index])
# Compare every image in both lists, with/without API
c = 0
for filename_api, url_api, uploader_api in result_api:
self.assertEqual(filename_api, result_index[c][0], u'{0} and {1} are different'.format(filename_api, result_index[c][0]))
self.assertEqual(url_api, result_index[c][1], u'{0} and {1} are different'.format(url_api, result_index[c][1]))
self.assertEqual(uploader_api, result_index[c][2], u'{0} and {1} are different'.format(uploader_api, result_index[c][2]))
c += 1
def test_getPageTitles(self):
        # This test downloads the title list using the API and index.php
# Compare both lists in length and title by title
# Check the presence of some special titles, like odd chars
        # The tested wikis are from different wikifarms, plus some standalone ones
print '\n', '#'*73, '\n', 'test_getPageTitles', '\n', '#'*73
tests = [
# Alone wikis
['http://archiveteam.org/index.php', 'http://archiveteam.org/api.php', u'April Fools\' Day'],
['http://skilledtests.com/wiki/index.php', 'http://skilledtests.com/wiki/api.php', u'Conway\'s Game of Life'],
# Test old allpages API behaviour
['http://wiki.damirsystems.com/index.php', 'http://wiki.damirsystems.com/api.php', 'SQL Server Tips'],
# Test BOM encoding
['http://www.libreidea.org/w/index.php', 'http://www.libreidea.org/w/api.php', 'Main Page'],
]
session = requests.Session()
session.headers = {'User-Agent': getUserAgent()}
for index, api, pagetocheck in tests:
# Testing with API
print '\nTesting', api
print 'Trying to parse', pagetocheck, 'with API'
config_api = {'api': api, 'index': '', 'delay': 0, 'namespaces': ['all'], 'exnamespaces': [], 'date': datetime.datetime.now().strftime('%Y%m%d'), 'path': '.'}
getPageTitles(config=config_api, session=session)
titles_api = './%s-%s-titles.txt' % (domain2prefix(config=config_api), config_api['date'])
result_api = open(titles_api, 'r').read().splitlines()
os.remove(titles_api)
self.assertTrue(pagetocheck in result_api)
# Testing with index
print 'Testing', index
print 'Trying to parse', pagetocheck, 'with index'
config_index = {'index': index, 'api': '', 'delay': 0, 'namespaces': ['all'], 'exnamespaces': [], 'date': datetime.datetime.now().strftime('%Y%m%d'), 'path': '.'}
getPageTitles(config=config_index, session=session)
titles_index = './%s-%s-titles.txt' % (domain2prefix(config=config_index), config_index['date'])
result_index = open(titles_index, 'r').read().splitlines()
os.remove(titles_index)
self.assertTrue(pagetocheck in result_index)
self.assertEqual(len(result_api), len(result_index))
# Compare every page in both lists, with/without API
c = 0
for pagename_api in result_api:
self.assertEqual(pagename_api.decode('utf8'), result_index[c].decode('utf8'), u'{0} and {1} are different'.format(pagename_api.decode('utf8'), result_index[c].decode('utf8')))
c += 1
def test_getWikiEngine(self):
print '\n', '#'*73, '\n', 'test_getWikiEngine', '\n', '#'*73
tests = [
['https://www.dokuwiki.org', 'DokuWiki'],
['http://wiki.openwrt.org', 'DokuWiki'],
['http://skilledtests.com/wiki/', 'MediaWiki'],
#['http://moinmo.in', 'MoinMoin'],
['https://wiki.debian.org', 'MoinMoin'],
['http://twiki.org/cgi-bin/view/', 'TWiki'],
['http://nuclearinfo.net/Nuclearpower/CurrentReactors', 'TWiki'],
['http://www.pmwiki.org/', 'PmWiki'],
['http://www.apfelwiki.de/', 'PmWiki'],
['http://wiki.wlug.org.nz/', 'PhpWiki'],
# ['http://wiki.greenmuseum.org/', 'PhpWiki'],
['http://www.cmswiki.com/tiki-index.php', 'TikiWiki'],
['http://www.wasteflake.com/', 'TikiWiki'],
['http://foswiki.org/', 'FosWiki'],
['http://www.w3c.br/Home/WebHome', 'FosWiki'],
['http://mojomojo.org/', 'MojoMojo'],
['http://wiki.catalystframework.org/wiki/', 'MojoMojo'],
['https://www.ictu.nl/archief/wiki.noiv.nl/xwiki/bin/view/Main', 'XWiki'],
#['https://web.archive.org/web/20080517021020id_/http://berlin.xwiki.com/xwiki/bin/view/Main/WebHome', 'XWiki'],
['http://www.xwiki.org/xwiki/bin/view/Main/WebHome', 'XWiki'],
['https://confluence.atlassian.com/', 'Confluence'],
#['https://wiki.hybris.com/dashboard.action', 'Confluence'],
['https://confluence.sakaiproject.org/', 'Confluence'],
#['http://demo.bananadance.org/', 'Banana Dance'],
['http://wagn.org/', 'Wagn'],
['http://wiki.ace-mod.net/', 'Wagn'],
['https://success.mindtouch.com/', 'MindTouch'],
['https://jspwiki.apache.org/', 'JSPWiki'],
['http://www.ihear.com/FreeCLAS/', 'JSPWiki'],
['http://www.wikkawiki.org/HomePage', 'WikkaWiki'],
['http://puppylinux.org/wikka/', 'WikkaWiki'],
['http://cs.netsville.com/wiki/wikka.php', 'WikkaWiki'],
#['http://web.archive.org/web/20060717202033id_/http://www.comawiki.org/CoMa.php?CoMa=startseite', 'CoMaWiki'],
['http://bootbook.de/CoMa.php', 'CoMaWiki'],
#['http://wikini.net/wakka.php', 'WikiNi'],
['http://wiki.raydium.org/wiki/', 'WikiNi'],
['http://wiki.cs.cityu.edu.hk/CitiWiki/SourceCode', 'CitiWiki'],
['http://wackowiki.sourceforge.net/test/', 'WackoWiki'],
['http://www.sw4me.com/wiki/', 'WackoWiki'],
['http://lslwiki.net/lslwiki/wakka.php', 'WakkaWiki'],
['http://kw.pm.org/wiki/index.cgi', 'Kwiki'],
['http://wiki.wubi.org/index.cgi', 'Kwiki'],
#['http://perl.bristolbath.org/index.cgi', 'Kwiki'],
['http://www.anwiki.com/', 'Anwiki'],
['http://www.anw.fr/', 'Anwiki'],
['http://www.aneuch.org/', 'Aneuch'],
['http://doc.myunixhost.com/', 'Aneuch'],
['http://www.bitweaver.org/wiki/index.php', 'bitweaver'],
['http://wiki.e-shell.org/Home', 'Zwiki'],
['http://leo.zwiki.org/', 'Zwiki'],
['http://accessibility4all.wikispaces.com/', 'Wikispaces'],
['http://darksouls.wikidot.com/', 'Wikidot'],
['http://www.wikifoundrycentral.com/', 'Wetpaint'],
['http://wiki.openid.net/', 'PBworks'],
]
for wiki, engine in tests:
print 'Testing', wiki
guess_engine = getWikiEngine(wiki)
print 'Got: %s, expected: %s' % (guess_engine, engine)
self.assertEqual(guess_engine, engine)
def test_mwGetAPIAndIndex(self):
print '\n', '#'*73, '\n', 'test_mwGetAPIAndIndex', '\n', '#'*73
| this directory
#shutil.copy2('../dumpgenerator.py', './dumpgenerator.py')
unittest.main()
| tests = [
# Alone wikis
['http://archiveteam.org', 'http://archiveteam.org/api.php', 'http://archiveteam.org/index.php'],
['http://skilledtests.com/wiki/', 'http://skilledtests.com/wiki/api.php', 'http://skilledtests.com/wiki/index.php'],
# Editthis wikifarm
# It has a page view limit
# Gamepedia wikifarm
['http://dawngate.gamepedia.com', 'http://dawngate.gamepedia.com/api.php', 'http://dawngate.gamepedia.com/index.php'],
# Neoseeker wikifarm
#['http://digimon.neoseeker.com', 'http://digimon.neoseeker.com/w/api.php', 'http://digimon.neoseeker.com/w/index.php'],
# Orain wikifarm
#['http://mc.orain.org', 'http://mc.orain.org/w/api.php', 'http://mc.orain.org/w/index.php'],
# Referata wikifarm
# ['http://wikipapers.referata.com', 'http://wikipapers.referata.com/w/api.php', 'http://wikipapers.referata.com/w/index.php'],
# ShoutWiki wikifarm
['http://commandos.shoutwiki.com', 'http://commandos.shoutwiki.com/w/api.php', 'http://commandos.shoutwiki.com/w/index.php'],
# Wiki-site wikifarm
#['http://minlingo.wiki-site.com', 'http://minlingo.wiki-site.com/api.php', 'http://minlingo.wiki-site.com/index.php'],
# Wikkii wikifarm
# It seems offline
]
for wiki, api, index in tests:
print 'Testing', wiki
api2, index2 = mwGetAPIAndIndex(wiki)
self.assertEqual(api, api2)
self.assertEqual(index, index2)
if __name__ == '__main__':
#copying dumpgenerator.py to | identifier_body |
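The image-count assertions in the row above are derived from MediaWiki's siteinfo statistics. The same query can be issued standalone; a minimal sketch in the test's own Python 2 / urllib2 style (the endpoint URL is just one of the wikis listed in the test data):

import json
import urllib
import urllib2

api = 'http://archiveteam.org/api.php'
params = {'action': 'query', 'meta': 'siteinfo',
          'siprop': 'statistics', 'format': 'json'}
req = urllib2.Request(url=api, data=urllib.urlencode(params),
                      headers={'User-Agent': 'siteinfo-example'})
f = urllib2.urlopen(req)
stats = json.loads(f.read())['query']['statistics']
f.close()
print 'images:', stats['images']
print 'pages:', stats['pages']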
test_dumpgenerator.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2014 WikiTeam developers
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import json
try:
from hashlib import md5
except ImportError: # Python 2.4 compatibility
from md5 import new as md5
import os
import requests
import shutil
import time
import unittest
import urllib
import urllib2
from dumpgenerator import delay, domain2prefix, getImageNames, getPageTitles, getUserAgent, getWikiEngine, mwGetAPIAndIndex
class | (unittest.TestCase):
# Documentation
# http://revista.python.org.ar/1/html/unittest.html
# https://docs.python.org/2/library/unittest.html
# Ideas:
# - Check one wiki per wikifarm at least (page titles & images, with/out API)
def test_delay(self):
# This test checks several delays
print '\n', '#'*73, '\n', 'test_delay', '\n', '#'*73
for i in [0, 1, 2, 3]:
print 'Testing delay:', i
config = {'delay': i}
t1 = time.time()
delay(config=config)
t2 = time.time() - t1
print 'Elapsed time in seconds (approx.):', t2
self.assertTrue(t2 + 0.01 > i and t2 < i + 1)
def test_getImages(self):
        # This test downloads the image list using the API and index.php
# Compare both lists in length and file by file
# Check the presence of some special files, like odd chars filenames
        # The tested wikis are from different wikifarms, plus some standalone ones
print '\n', '#'*73, '\n', 'test_getImages', '\n', '#'*73
tests = [
# Alone wikis
#['http://wiki.annotation.jp/index.php', 'http://wiki.annotation.jp/api.php', u'かずさアノテーション - ソーシャル・ゲノム・アノテーション.jpg'],
['http://archiveteam.org/index.php', 'http://archiveteam.org/api.php', u'Archive-is 2013-07-02 17-05-40.png'],
['http://skilledtests.com/wiki/index.php', 'http://skilledtests.com/wiki/api.php', u'Benham\'s disc (animated).gif'],
# Editthis wikifarm
# It has a page view limit
# Gamepedia wikifarm
['http://dawngate.gamepedia.com/index.php', 'http://dawngate.gamepedia.com/api.php', u'Spell Vanquish.png'],
# Neoseeker wikifarm
['http://digimon.neoseeker.com/w/index.php', 'http://digimon.neoseeker.com/w/api.php', u'Ogremon card.png'],
# Orain wikifarm
#['http://mc.orain.org/w/index.php', 'http://mc.orain.org/w/api.php', u'Mojang logo.svg'],
# Referata wikifarm
['http://wikipapers.referata.com/w/index.php', 'http://wikipapers.referata.com/w/api.php', u'Avbot logo.png'],
# ShoutWiki wikifarm
['http://commandos.shoutwiki.com/w/index.php', 'http://commandos.shoutwiki.com/w/api.php', u'Night of the Wolves loading.png'],
# Wiki-site wikifarm
['http://minlingo.wiki-site.com/index.php', 'http://minlingo.wiki-site.com/api.php', u'一 (書方灋ᅗᅩ).png'],
# Wikkii wikifarm
# It seems offline
]
session = requests.Session()
session.headers = {'User-Agent': getUserAgent()}
for index, api, filetocheck in tests:
# Testing with API
print '\nTesting', api
config_api = {'api': api, 'delay': 0}
req = urllib2.Request(url=api, data=urllib.urlencode({'action': 'query', 'meta': 'siteinfo', 'siprop': 'statistics', 'format': 'json'}), headers={'User-Agent': getUserAgent()})
f = urllib2.urlopen(req)
imagecount = int(json.loads(f.read())['query']['statistics']['images'])
f.close()
print 'Trying to parse', filetocheck, 'with API'
result_api = getImageNames(config=config_api, session=session)
self.assertEqual(len(result_api), imagecount)
self.assertTrue(filetocheck in [filename for filename, url, uploader in result_api])
# Testing with index
print '\nTesting', index
config_index = {'index': index, 'delay': 0}
req = urllib2.Request(url=api, data=urllib.urlencode({'action': 'query', 'meta': 'siteinfo', 'siprop': 'statistics', 'format': 'json'}), headers={'User-Agent': getUserAgent()})
f = urllib2.urlopen(req)
imagecount = int(json.loads(f.read())['query']['statistics']['images'])
f.close()
print 'Trying to parse', filetocheck, 'with index'
result_index = getImageNames(config=config_index, session=session)
#print 111, set([filename for filename, url, uploader in result_api]) - set([filename for filename, url, uploader in result_index])
self.assertEqual(len(result_index), imagecount)
self.assertTrue(filetocheck in [filename for filename, url, uploader in result_index])
# Compare every image in both lists, with/without API
c = 0
for filename_api, url_api, uploader_api in result_api:
self.assertEqual(filename_api, result_index[c][0], u'{0} and {1} are different'.format(filename_api, result_index[c][0]))
self.assertEqual(url_api, result_index[c][1], u'{0} and {1} are different'.format(url_api, result_index[c][1]))
self.assertEqual(uploader_api, result_index[c][2], u'{0} and {1} are different'.format(uploader_api, result_index[c][2]))
c += 1
def test_getPageTitles(self):
        # This test downloads the title list using the API and index.php
# Compare both lists in length and title by title
# Check the presence of some special titles, like odd chars
        # The tested wikis are from different wikifarms, plus some standalone ones
print '\n', '#'*73, '\n', 'test_getPageTitles', '\n', '#'*73
tests = [
# Alone wikis
['http://archiveteam.org/index.php', 'http://archiveteam.org/api.php', u'April Fools\' Day'],
['http://skilledtests.com/wiki/index.php', 'http://skilledtests.com/wiki/api.php', u'Conway\'s Game of Life'],
# Test old allpages API behaviour
['http://wiki.damirsystems.com/index.php', 'http://wiki.damirsystems.com/api.php', 'SQL Server Tips'],
# Test BOM encoding
['http://www.libreidea.org/w/index.php', 'http://www.libreidea.org/w/api.php', 'Main Page'],
]
session = requests.Session()
session.headers = {'User-Agent': getUserAgent()}
for index, api, pagetocheck in tests:
# Testing with API
print '\nTesting', api
print 'Trying to parse', pagetocheck, 'with API'
config_api = {'api': api, 'index': '', 'delay': 0, 'namespaces': ['all'], 'exnamespaces': [], 'date': datetime.datetime.now().strftime('%Y%m%d'), 'path': '.'}
getPageTitles(config=config_api, session=session)
titles_api = './%s-%s-titles.txt' % (domain2prefix(config=config_api), config_api['date'])
result_api = open(titles_api, 'r').read().splitlines()
os.remove(titles_api)
self.assertTrue(pagetocheck in result_api)
# Testing with index
print 'Testing', index
print 'Trying to parse', pagetocheck, 'with index'
config_index = {'index': index, 'api': '', 'delay': 0, 'namespaces': ['all'], 'exnamespaces': [], 'date': datetime.datetime.now().strftime('%Y%m%d'), 'path': '.'}
getPageTitles(config=config_index, session=session)
titles_index = './%s-%s-titles.txt' % (domain2prefix(config=config_index), config_index['date'])
result_index = open(titles_index, 'r').read().splitlines()
os.remove(titles_index)
self.assertTrue(pagetocheck in result_index)
self.assertEqual(len(result_api), len(result_index))
# Compare every page in both lists, with/without API
c = 0
for pagename_api in result_api:
self.assertEqual(pagename_api.decode('utf8'), result_index[c].decode('utf8'), u'{0} and {1} are different'.format(pagename_api.decode('utf8'), result_index[c].decode('utf8')))
c += 1
def test_getWikiEngine(self):
print '\n', '#'*73, '\n', 'test_getWikiEngine', '\n', '#'*73
tests = [
['https://www.dokuwiki.org', 'DokuWiki'],
['http://wiki.openwrt.org', 'DokuWiki'],
['http://skilledtests.com/wiki/', 'MediaWiki'],
#['http://moinmo.in', 'MoinMoin'],
['https://wiki.debian.org', 'MoinMoin'],
['http://twiki.org/cgi-bin/view/', 'TWiki'],
['http://nuclearinfo.net/Nuclearpower/CurrentReactors', 'TWiki'],
['http://www.pmwiki.org/', 'PmWiki'],
['http://www.apfelwiki.de/', 'PmWiki'],
['http://wiki.wlug.org.nz/', 'PhpWiki'],
# ['http://wiki.greenmuseum.org/', 'PhpWiki'],
['http://www.cmswiki.com/tiki-index.php', 'TikiWiki'],
['http://www.wasteflake.com/', 'TikiWiki'],
['http://foswiki.org/', 'FosWiki'],
['http://www.w3c.br/Home/WebHome', 'FosWiki'],
['http://mojomojo.org/', 'MojoMojo'],
['http://wiki.catalystframework.org/wiki/', 'MojoMojo'],
['https://www.ictu.nl/archief/wiki.noiv.nl/xwiki/bin/view/Main', 'XWiki'],
#['https://web.archive.org/web/20080517021020id_/http://berlin.xwiki.com/xwiki/bin/view/Main/WebHome', 'XWiki'],
['http://www.xwiki.org/xwiki/bin/view/Main/WebHome', 'XWiki'],
['https://confluence.atlassian.com/', 'Confluence'],
#['https://wiki.hybris.com/dashboard.action', 'Confluence'],
['https://confluence.sakaiproject.org/', 'Confluence'],
#['http://demo.bananadance.org/', 'Banana Dance'],
['http://wagn.org/', 'Wagn'],
['http://wiki.ace-mod.net/', 'Wagn'],
['https://success.mindtouch.com/', 'MindTouch'],
['https://jspwiki.apache.org/', 'JSPWiki'],
['http://www.ihear.com/FreeCLAS/', 'JSPWiki'],
['http://www.wikkawiki.org/HomePage', 'WikkaWiki'],
['http://puppylinux.org/wikka/', 'WikkaWiki'],
['http://cs.netsville.com/wiki/wikka.php', 'WikkaWiki'],
#['http://web.archive.org/web/20060717202033id_/http://www.comawiki.org/CoMa.php?CoMa=startseite', 'CoMaWiki'],
['http://bootbook.de/CoMa.php', 'CoMaWiki'],
#['http://wikini.net/wakka.php', 'WikiNi'],
['http://wiki.raydium.org/wiki/', 'WikiNi'],
['http://wiki.cs.cityu.edu.hk/CitiWiki/SourceCode', 'CitiWiki'],
['http://wackowiki.sourceforge.net/test/', 'WackoWiki'],
['http://www.sw4me.com/wiki/', 'WackoWiki'],
['http://lslwiki.net/lslwiki/wakka.php', 'WakkaWiki'],
['http://kw.pm.org/wiki/index.cgi', 'Kwiki'],
['http://wiki.wubi.org/index.cgi', 'Kwiki'],
#['http://perl.bristolbath.org/index.cgi', 'Kwiki'],
['http://www.anwiki.com/', 'Anwiki'],
['http://www.anw.fr/', 'Anwiki'],
['http://www.aneuch.org/', 'Aneuch'],
['http://doc.myunixhost.com/', 'Aneuch'],
['http://www.bitweaver.org/wiki/index.php', 'bitweaver'],
['http://wiki.e-shell.org/Home', 'Zwiki'],
['http://leo.zwiki.org/', 'Zwiki'],
['http://accessibility4all.wikispaces.com/', 'Wikispaces'],
['http://darksouls.wikidot.com/', 'Wikidot'],
['http://www.wikifoundrycentral.com/', 'Wetpaint'],
['http://wiki.openid.net/', 'PBworks'],
]
for wiki, engine in tests:
print 'Testing', wiki
guess_engine = getWikiEngine(wiki)
print 'Got: %s, expected: %s' % (guess_engine, engine)
self.assertEqual(guess_engine, engine)
def test_mwGetAPIAndIndex(self):
print '\n', '#'*73, '\n', 'test_mwGetAPIAndIndex', '\n', '#'*73
tests = [
# Alone wikis
['http://archiveteam.org', 'http://archiveteam.org/api.php', 'http://archiveteam.org/index.php'],
['http://skilledtests.com/wiki/', 'http://skilledtests.com/wiki/api.php', 'http://skilledtests.com/wiki/index.php'],
# Editthis wikifarm
# It has a page view limit
# Gamepedia wikifarm
['http://dawngate.gamepedia.com', 'http://dawngate.gamepedia.com/api.php', 'http://dawngate.gamepedia.com/index.php'],
# Neoseeker wikifarm
#['http://digimon.neoseeker.com', 'http://digimon.neoseeker.com/w/api.php', 'http://digimon.neoseeker.com/w/index.php'],
# Orain wikifarm
#['http://mc.orain.org', 'http://mc.orain.org/w/api.php', 'http://mc.orain.org/w/index.php'],
# Referata wikifarm
# ['http://wikipapers.referata.com', 'http://wikipapers.referata.com/w/api.php', 'http://wikipapers.referata.com/w/index.php'],
# ShoutWiki wikifarm
['http://commandos.shoutwiki.com', 'http://commandos.shoutwiki.com/w/api.php', 'http://commandos.shoutwiki.com/w/index.php'],
# Wiki-site wikifarm
#['http://minlingo.wiki-site.com', 'http://minlingo.wiki-site.com/api.php', 'http://minlingo.wiki-site.com/index.php'],
# Wikkii wikifarm
# It seems offline
]
for wiki, api, index in tests:
print 'Testing', wiki
api2, index2 = mwGetAPIAndIndex(wiki)
self.assertEqual(api, api2)
self.assertEqual(index, index2)
if __name__ == '__main__':
#copying dumpgenerator.py to this directory
#shutil.copy2('../dumpgenerator.py', './dumpgenerator.py')
unittest.main()
| TestDumpgenerator | identifier_name |
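Outside the unittest harness, the two discovery helpers exercised by these rows can be called directly. A small usage sketch with the same signatures the tests rely on (the URL comes from the test fixtures; the commented values are what the tests assert, not a guarantee for arbitrary wikis):

from dumpgenerator import getWikiEngine, mwGetAPIAndIndex

wiki = 'http://skilledtests.com/wiki/'
engine = getWikiEngine(wiki)  # expected here: 'MediaWiki'
print 'Engine:', engine
if engine == 'MediaWiki':
    api, index = mwGetAPIAndIndex(wiki)
    print 'api.php:  ', api      # http://skilledtests.com/wiki/api.php
    print 'index.php:', index    # http://skilledtests.com/wiki/index.php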
test_dumpgenerator.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2014 WikiTeam developers
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import json
try:
from hashlib import md5
except ImportError: # Python 2.4 compatibility
from md5 import new as md5
import os
import requests
import shutil
import time
import unittest
import urllib
import urllib2
from dumpgenerator import delay, domain2prefix, getImageNames, getPageTitles, getUserAgent, getWikiEngine, mwGetAPIAndIndex
class TestDumpgenerator(unittest.TestCase):
# Documentation
# http://revista.python.org.ar/1/html/unittest.html
# https://docs.python.org/2/library/unittest.html
# Ideas:
# - Check one wiki per wikifarm at least (page titles & images, with/out API)
def test_delay(self):
# This test checks several delays
print '\n', '#'*73, '\n', 'test_delay', '\n', '#'*73
for i in [0, 1, 2, 3]:
print 'Testing delay:', i
config = {'delay': i}
t1 = time.time()
delay(config=config)
t2 = time.time() - t1
print 'Elapsed time in seconds (approx.):', t2
self.assertTrue(t2 + 0.01 > i and t2 < i + 1)
def test_getImages(self):
        # This test downloads the image list using the API and index.php
# Compare both lists in length and file by file
# Check the presence of some special files, like odd chars filenames
        # The tested wikis are from different wikifarms, plus some standalone ones
print '\n', '#'*73, '\n', 'test_getImages', '\n', '#'*73
tests = [
# Alone wikis
#['http://wiki.annotation.jp/index.php', 'http://wiki.annotation.jp/api.php', u'かずさアノテーション - ソーシャル・ゲノム・アノテーション.jpg'],
['http://archiveteam.org/index.php', 'http://archiveteam.org/api.php', u'Archive-is 2013-07-02 17-05-40.png'],
['http://skilledtests.com/wiki/index.php', 'http://skilledtests.com/wiki/api.php', u'Benham\'s disc (animated).gif'],
# Editthis wikifarm
# It has a page view limit
# Gamepedia wikifarm
['http://dawngate.gamepedia.com/index.php', 'http://dawngate.gamepedia.com/api.php', u'Spell Vanquish.png'],
# Neoseeker wikifarm
['http://digimon.neoseeker.com/w/index.php', 'http://digimon.neoseeker.com/w/api.php', u'Ogremon card.png'],
# Orain wikifarm
#['http://mc.orain.org/w/index.php', 'http://mc.orain.org/w/api.php', u'Mojang logo.svg'],
# Referata wikifarm
['http://wikipapers.referata.com/w/index.php', 'http://wikipapers.referata.com/w/api.php', u'Avbot logo.png'],
# ShoutWiki wikifarm
['http://commandos.shoutwiki.com/w/index.php', 'http://commandos.shoutwiki.com/w/api.php', u'Night of the Wolves loading.png'],
# Wiki-site wikifarm
['http://minlingo.wiki-site.com/index.php', 'http://minlingo.wiki-site.com/api.php', u'一 (書方灋ᅗᅩ).png'],
# Wikkii wikifarm
# It seems offline
]
session = requests.Session()
session.headers = {'User-Agent': getUserAgent()}
for index, api, filetocheck in tests:
# Testing with API
print '\nTesting', api
config_api = {'api': api, 'delay': 0}
req = urllib2.Request(url=api, data=urllib.urlencode({'action': 'query', 'meta': 'siteinfo', 'siprop': 'statistics', 'format': 'json'}), headers={'User-Agent': getUserAgent()})
f = urllib2.urlopen(req)
imagecount = int(json.loads(f.read())['query']['statistics']['images'])
f.close()
print 'Trying to parse', filetocheck, 'with API'
result_api = getImageNames(config=config_api, session=session)
self.assertEqual(len(result_api), imagecount)
self.assertTrue(filetocheck in [filename for filename, url, uploader in result_api])
# Testing with index
print '\nTesting', index
config_index = {'index': index, 'delay': 0}
req = urllib2.Request(url=api, data=urllib.urlencode({'action': 'query', 'meta': 'siteinfo', 'siprop': 'statistics', 'format': 'json'}), headers={'User-Agent': getUserAgent()})
f = urllib2.urlopen(req)
imagecount = int(json.loads(f.read())['query']['statistics']['images'])
f.close()
print 'Trying to parse', filetocheck, 'with index'
result_index = getImageNames(config=config_index, session=session)
#print 111, set([filename for filename, url, uploader in result_api]) - set([filename for filename, url, uploader in result_index])
self.assertEqual(len(result_index), imagecount)
self.assertTrue(filetocheck in [filename for filename, url, uploader in result_index])
# Compare every image in both lists, with/without API
c = 0
for filename_api, url_api, uploader_api in result_api:
                self.assertEqual(filename_api, result_index[c][0], u'{0} and {1} a | ads the title list using the API and index.php
# Compare both lists in length and title by title
# Check the presence of some special titles, like odd chars
        # The tested wikis are from different wikifarms, plus some standalone ones
print '\n', '#'*73, '\n', 'test_getPageTitles', '\n', '#'*73
tests = [
# Alone wikis
['http://archiveteam.org/index.php', 'http://archiveteam.org/api.php', u'April Fools\' Day'],
['http://skilledtests.com/wiki/index.php', 'http://skilledtests.com/wiki/api.php', u'Conway\'s Game of Life'],
# Test old allpages API behaviour
['http://wiki.damirsystems.com/index.php', 'http://wiki.damirsystems.com/api.php', 'SQL Server Tips'],
# Test BOM encoding
['http://www.libreidea.org/w/index.php', 'http://www.libreidea.org/w/api.php', 'Main Page'],
]
session = requests.Session()
session.headers = {'User-Agent': getUserAgent()}
for index, api, pagetocheck in tests:
# Testing with API
print '\nTesting', api
print 'Trying to parse', pagetocheck, 'with API'
config_api = {'api': api, 'index': '', 'delay': 0, 'namespaces': ['all'], 'exnamespaces': [], 'date': datetime.datetime.now().strftime('%Y%m%d'), 'path': '.'}
getPageTitles(config=config_api, session=session)
titles_api = './%s-%s-titles.txt' % (domain2prefix(config=config_api), config_api['date'])
result_api = open(titles_api, 'r').read().splitlines()
os.remove(titles_api)
self.assertTrue(pagetocheck in result_api)
# Testing with index
print 'Testing', index
print 'Trying to parse', pagetocheck, 'with index'
config_index = {'index': index, 'api': '', 'delay': 0, 'namespaces': ['all'], 'exnamespaces': [], 'date': datetime.datetime.now().strftime('%Y%m%d'), 'path': '.'}
getPageTitles(config=config_index, session=session)
titles_index = './%s-%s-titles.txt' % (domain2prefix(config=config_index), config_index['date'])
result_index = open(titles_index, 'r').read().splitlines()
os.remove(titles_index)
self.assertTrue(pagetocheck in result_index)
self.assertEqual(len(result_api), len(result_index))
# Compare every page in both lists, with/without API
c = 0
for pagename_api in result_api:
self.assertEqual(pagename_api.decode('utf8'), result_index[c].decode('utf8'), u'{0} and {1} are different'.format(pagename_api.decode('utf8'), result_index[c].decode('utf8')))
c += 1
def test_getWikiEngine(self):
print '\n', '#'*73, '\n', 'test_getWikiEngine', '\n', '#'*73
tests = [
['https://www.dokuwiki.org', 'DokuWiki'],
['http://wiki.openwrt.org', 'DokuWiki'],
['http://skilledtests.com/wiki/', 'MediaWiki'],
#['http://moinmo.in', 'MoinMoin'],
['https://wiki.debian.org', 'MoinMoin'],
['http://twiki.org/cgi-bin/view/', 'TWiki'],
['http://nuclearinfo.net/Nuclearpower/CurrentReactors', 'TWiki'],
['http://www.pmwiki.org/', 'PmWiki'],
['http://www.apfelwiki.de/', 'PmWiki'],
['http://wiki.wlug.org.nz/', 'PhpWiki'],
# ['http://wiki.greenmuseum.org/', 'PhpWiki'],
['http://www.cmswiki.com/tiki-index.php', 'TikiWiki'],
['http://www.wasteflake.com/', 'TikiWiki'],
['http://foswiki.org/', 'FosWiki'],
['http://www.w3c.br/Home/WebHome', 'FosWiki'],
['http://mojomojo.org/', 'MojoMojo'],
['http://wiki.catalystframework.org/wiki/', 'MojoMojo'],
['https://www.ictu.nl/archief/wiki.noiv.nl/xwiki/bin/view/Main', 'XWiki'],
#['https://web.archive.org/web/20080517021020id_/http://berlin.xwiki.com/xwiki/bin/view/Main/WebHome', 'XWiki'],
['http://www.xwiki.org/xwiki/bin/view/Main/WebHome', 'XWiki'],
['https://confluence.atlassian.com/', 'Confluence'],
#['https://wiki.hybris.com/dashboard.action', 'Confluence'],
['https://confluence.sakaiproject.org/', 'Confluence'],
#['http://demo.bananadance.org/', 'Banana Dance'],
['http://wagn.org/', 'Wagn'],
['http://wiki.ace-mod.net/', 'Wagn'],
['https://success.mindtouch.com/', 'MindTouch'],
['https://jspwiki.apache.org/', 'JSPWiki'],
['http://www.ihear.com/FreeCLAS/', 'JSPWiki'],
['http://www.wikkawiki.org/HomePage', 'WikkaWiki'],
['http://puppylinux.org/wikka/', 'WikkaWiki'],
['http://cs.netsville.com/wiki/wikka.php', 'WikkaWiki'],
#['http://web.archive.org/web/20060717202033id_/http://www.comawiki.org/CoMa.php?CoMa=startseite', 'CoMaWiki'],
['http://bootbook.de/CoMa.php', 'CoMaWiki'],
#['http://wikini.net/wakka.php', 'WikiNi'],
['http://wiki.raydium.org/wiki/', 'WikiNi'],
['http://wiki.cs.cityu.edu.hk/CitiWiki/SourceCode', 'CitiWiki'],
['http://wackowiki.sourceforge.net/test/', 'WackoWiki'],
['http://www.sw4me.com/wiki/', 'WackoWiki'],
['http://lslwiki.net/lslwiki/wakka.php', 'WakkaWiki'],
['http://kw.pm.org/wiki/index.cgi', 'Kwiki'],
['http://wiki.wubi.org/index.cgi', 'Kwiki'],
#['http://perl.bristolbath.org/index.cgi', 'Kwiki'],
['http://www.anwiki.com/', 'Anwiki'],
['http://www.anw.fr/', 'Anwiki'],
['http://www.aneuch.org/', 'Aneuch'],
['http://doc.myunixhost.com/', 'Aneuch'],
['http://www.bitweaver.org/wiki/index.php', 'bitweaver'],
['http://wiki.e-shell.org/Home', 'Zwiki'],
['http://leo.zwiki.org/', 'Zwiki'],
['http://accessibility4all.wikispaces.com/', 'Wikispaces'],
['http://darksouls.wikidot.com/', 'Wikidot'],
['http://www.wikifoundrycentral.com/', 'Wetpaint'],
['http://wiki.openid.net/', 'PBworks'],
]
for wiki, engine in tests:
print 'Testing', wiki
guess_engine = getWikiEngine(wiki)
print 'Got: %s, expected: %s' % (guess_engine, engine)
self.assertEqual(guess_engine, engine)
def test_mwGetAPIAndIndex(self):
print '\n', '#'*73, '\n', 'test_mwGetAPIAndIndex', '\n', '#'*73
tests = [
# Alone wikis
['http://archiveteam.org', 'http://archiveteam.org/api.php', 'http://archiveteam.org/index.php'],
['http://skilledtests.com/wiki/', 'http://skilledtests.com/wiki/api.php', 'http://skilledtests.com/wiki/index.php'],
# Editthis wikifarm
# It has a page view limit
# Gamepedia wikifarm
['http://dawngate.gamepedia.com', 'http://dawngate.gamepedia.com/api.php', 'http://dawngate.gamepedia.com/index.php'],
# Neoseeker wikifarm
#['http://digimon.neoseeker.com', 'http://digimon.neoseeker.com/w/api.php', 'http://digimon.neoseeker.com/w/index.php'],
# Orain wikifarm
#['http://mc.orain.org', 'http://mc.orain.org/w/api.php', 'http://mc.orain.org/w/index.php'],
# Referata wikifarm
# ['http://wikipapers.referata.com', 'http://wikipapers.referata.com/w/api.php', 'http://wikipapers.referata.com/w/index.php'],
# ShoutWiki wikifarm
['http://commandos.shoutwiki.com', 'http://commandos.shoutwiki.com/w/api.php', 'http://commandos.shoutwiki.com/w/index.php'],
# Wiki-site wikifarm
#['http://minlingo.wiki-site.com', 'http://minlingo.wiki-site.com/api.php', 'http://minlingo.wiki-site.com/index.php'],
# Wikkii wikifarm
# It seems offline
]
for wiki, api, index in tests:
print 'Testing', wiki
api2, index2 = mwGetAPIAndIndex(wiki)
self.assertEqual(api, api2)
self.assertEqual(index, index2)
if __name__ == '__main__':
#copying dumpgenerator.py to this directory
#shutil.copy2('../dumpgenerator.py', './dumpgenerator.py')
unittest.main()
| re different'.format(filename_api, result_index[c][0]))
self.assertEqual(url_api, result_index[c][1], u'{0} and {1} are different'.format(url_api, result_index[c][1]))
self.assertEqual(uploader_api, result_index[c][2], u'{0} and {1} are different'.format(uploader_api, result_index[c][2]))
c += 1
def test_getPageTitles(self):
# This test downlo | conditional_block |
test_dumpgenerator.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2014 WikiTeam developers
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import json
try:
from hashlib import md5
except ImportError: # Python 2.4 compatibility
from md5 import new as md5
import os
import requests
import shutil
import time
import unittest
import urllib
import urllib2
from dumpgenerator import delay, domain2prefix, getImageNames, getPageTitles, getUserAgent, getWikiEngine, mwGetAPIAndIndex
class TestDumpgenerator(unittest.TestCase):
# Documentation
# http://revista.python.org.ar/1/html/unittest.html
# https://docs.python.org/2/library/unittest.html
# Ideas:
# - Check one wiki per wikifarm at least (page titles & images, with/out API)
def test_delay(self):
# This test checks several delays
print '\n', '#'*73, '\n', 'test_delay', '\n', '#'*73
for i in [0, 1, 2, 3]:
print 'Testing delay:', i
config = {'delay': i}
t1 = time.time()
delay(config=config)
t2 = time.time() - t1
print 'Elapsed time in seconds (approx.):', t2
self.assertTrue(t2 + 0.01 > i and t2 < i + 1)
def test_getImages(self):
        # This test downloads the image list using the API and index.php
# Compare both lists in length and file by file
# Check the presence of some special files, like odd chars filenames
        # The tested wikis are from different wikifarms, plus some standalone ones
print '\n', '#'*73, '\n', 'test_getImages', '\n', '#'*73
tests = [
# Alone wikis
#['http://wiki.annotation.jp/index.php', 'http://wiki.annotation.jp/api.php', u'かずさアノテーション - ソーシャル・ゲノム・アノテーション.jpg'],
['http://archiveteam.org/index.php', 'http://archiveteam.org/api.php', u'Archive-is 2013-07-02 17-05-40.png'],
['http://skilledtests.com/wiki/index.php', 'http://skilledtests.com/wiki/api.php', u'Benham\'s disc (animated).gif'],
# Editthis wikifarm
# It has a page view limit
# Gamepedia wikifarm
['http://dawngate.gamepedia.com/index.php', 'http://dawngate.gamepedia.com/api.php', u'Spell Vanquish.png'],
# Neoseeker wikifarm
['http://digimon.neoseeker.com/w/index.php', 'http://digimon.neoseeker.com/w/api.php', u'Ogremon card.png'],
# Orain wikifarm
#['http://mc.orain.org/w/index.php', 'http://mc.orain.org/w/api.php', u'Mojang logo.svg'],
# Referata wikifarm
['http://wikipapers.referata.com/w/index.php', 'http://wikipapers.referata.com/w/api.php', u'Avbot logo.png'],
# ShoutWiki wikifarm
['http://commandos.shoutwiki.com/w/index.php', 'http://commandos.shoutwiki.com/w/api.php', u'Night of the Wolves loading.png'],
# Wiki-site wikifarm
['http://minlingo.wiki-site.com/index.php', 'http://minlingo.wiki-site.com/api.php', u'一 (書方灋ᅗᅩ).png'],
# Wikkii wikifarm
# It seems offline
]
session = requests.Session()
session.headers = {'User-Agent': getUserAgent()}
for index, api, filetocheck in tests:
# Testing with API
print '\nTesting', api
config_api = {'api': api, 'delay': 0}
req = urllib2.Request(url=api, data=urllib.urlencode({'action': 'query', 'meta': 'siteinfo', 'siprop': 'statistics', 'format': 'json'}), headers={'User-Agent': getUserAgent()})
f = urllib2.urlopen(req)
imagecount = int(json.loads(f.read())['query']['statistics']['images'])
f.close()
print 'Trying to parse', filetocheck, 'with API'
result_api = getImageNames(config=config_api, session=session)
self.assertEqual(len(result_api), imagecount)
self.assertTrue(filetocheck in [filename for filename, url, uploader in result_api])
# Testing with index
print '\nTesting', index
config_index = {'index': index, 'delay': 0}
req = urllib2.Request(url=api, data=urllib.urlencode({'action': 'query', 'meta': 'siteinfo', 'siprop': 'statistics', 'format': 'json'}), headers={'User-Agent': getUserAgent()})
f = urllib2.urlopen(req)
imagecount = int(json.loads(f.read())['query']['statistics']['images'])
f.close()
print 'Trying to parse', filetocheck, 'with index'
result_index = getImageNames(config=config_index, session=session)
#print 111, set([filename for filename, url, uploader in result_api]) - set([filename for filename, url, uploader in result_index])
self.assertEqual(len(result_index), imagecount)
self.assertTrue(filetocheck in [filename for filename, url, uploader in result_index])
# Compare every image in both lists, with/without API
c = 0
for filename_api, url_api, uploader_api in result_api:
self.assertEqual(filename_api, result_index[c][0], u'{0} and {1} are different'.format(filename_api, result_index[c][0]))
self.assertEqual(url_api, result_index[c][1], u'{0} and {1} are different'.format(url_api, result_index[c][1]))
self.assertEqual(uploader_api, result_index[c][2], u'{0} and {1} are different'.format(uploader_api, result_index[c][2]))
c += 1
def test_getPageTitles(self):
        # This test downloads the title list using the API and index.php
# Compare both lists in length and title by title
# Check the presence of some special titles, like odd chars
        # The tested wikis are from different wikifarms, plus some standalone ones
print '\n', '#'*73, '\n', 'test_getPageTitles', '\n', '#'*73
tests = [
# Alone wikis
['http://archiveteam.org/index.php', 'http://archiveteam.org/api.php', u'April Fools\' Day'],
['http://skilledtests.com/wiki/index.php', 'http://skilledtests.com/wiki/api.php', u'Conway\'s Game of Life'],
# Test old allpages API behaviour
['http://wiki.damirsystems.com/index.php', 'http://wiki.damirsystems.com/api.php', 'SQL Server Tips'],
# Test BOM encoding
['http://www.libreidea.org/w/index.php', 'http://www.libreidea.org/w/api.php', 'Main Page'],
]
session = requests.Session()
session.headers = {'User-Agent': getUserAgent()}
for index, api, pagetocheck in tests:
# Testing with API
print '\nTesting', api
print 'Trying to parse', pagetocheck, 'with API'
config_api = {'api': api, 'index': '', 'delay': 0, 'namespaces': ['all'], 'exnamespaces': [], 'date': datetime.datetime.now().strftime('%Y%m%d'), 'path': '.'}
getPageTitles(config=config_api, session=session)
titles_api = './%s-%s-titles.txt' % (domain2prefix(config=config_api), config_api['date'])
result_api = open(titles_api, 'r').read().splitlines()
os.remove(titles_api)
self.assertTrue(pagetocheck in result_api)
# Testing with index | getPageTitles(config=config_index, session=session)
titles_index = './%s-%s-titles.txt' % (domain2prefix(config=config_index), config_index['date'])
result_index = open(titles_index, 'r').read().splitlines()
os.remove(titles_index)
self.assertTrue(pagetocheck in result_index)
self.assertEqual(len(result_api), len(result_index))
# Compare every page in both lists, with/without API
c = 0
for pagename_api in result_api:
self.assertEqual(pagename_api.decode('utf8'), result_index[c].decode('utf8'), u'{0} and {1} are different'.format(pagename_api.decode('utf8'), result_index[c].decode('utf8')))
c += 1
def test_getWikiEngine(self):
print '\n', '#'*73, '\n', 'test_getWikiEngine', '\n', '#'*73
tests = [
['https://www.dokuwiki.org', 'DokuWiki'],
['http://wiki.openwrt.org', 'DokuWiki'],
['http://skilledtests.com/wiki/', 'MediaWiki'],
#['http://moinmo.in', 'MoinMoin'],
['https://wiki.debian.org', 'MoinMoin'],
['http://twiki.org/cgi-bin/view/', 'TWiki'],
['http://nuclearinfo.net/Nuclearpower/CurrentReactors', 'TWiki'],
['http://www.pmwiki.org/', 'PmWiki'],
['http://www.apfelwiki.de/', 'PmWiki'],
['http://wiki.wlug.org.nz/', 'PhpWiki'],
# ['http://wiki.greenmuseum.org/', 'PhpWiki'],
['http://www.cmswiki.com/tiki-index.php', 'TikiWiki'],
['http://www.wasteflake.com/', 'TikiWiki'],
['http://foswiki.org/', 'FosWiki'],
['http://www.w3c.br/Home/WebHome', 'FosWiki'],
['http://mojomojo.org/', 'MojoMojo'],
['http://wiki.catalystframework.org/wiki/', 'MojoMojo'],
['https://www.ictu.nl/archief/wiki.noiv.nl/xwiki/bin/view/Main', 'XWiki'],
#['https://web.archive.org/web/20080517021020id_/http://berlin.xwiki.com/xwiki/bin/view/Main/WebHome', 'XWiki'],
['http://www.xwiki.org/xwiki/bin/view/Main/WebHome', 'XWiki'],
['https://confluence.atlassian.com/', 'Confluence'],
#['https://wiki.hybris.com/dashboard.action', 'Confluence'],
['https://confluence.sakaiproject.org/', 'Confluence'],
#['http://demo.bananadance.org/', 'Banana Dance'],
['http://wagn.org/', 'Wagn'],
['http://wiki.ace-mod.net/', 'Wagn'],
['https://success.mindtouch.com/', 'MindTouch'],
['https://jspwiki.apache.org/', 'JSPWiki'],
['http://www.ihear.com/FreeCLAS/', 'JSPWiki'],
['http://www.wikkawiki.org/HomePage', 'WikkaWiki'],
['http://puppylinux.org/wikka/', 'WikkaWiki'],
['http://cs.netsville.com/wiki/wikka.php', 'WikkaWiki'],
#['http://web.archive.org/web/20060717202033id_/http://www.comawiki.org/CoMa.php?CoMa=startseite', 'CoMaWiki'],
['http://bootbook.de/CoMa.php', 'CoMaWiki'],
#['http://wikini.net/wakka.php', 'WikiNi'],
['http://wiki.raydium.org/wiki/', 'WikiNi'],
['http://wiki.cs.cityu.edu.hk/CitiWiki/SourceCode', 'CitiWiki'],
['http://wackowiki.sourceforge.net/test/', 'WackoWiki'],
['http://www.sw4me.com/wiki/', 'WackoWiki'],
['http://lslwiki.net/lslwiki/wakka.php', 'WakkaWiki'],
['http://kw.pm.org/wiki/index.cgi', 'Kwiki'],
['http://wiki.wubi.org/index.cgi', 'Kwiki'],
#['http://perl.bristolbath.org/index.cgi', 'Kwiki'],
['http://www.anwiki.com/', 'Anwiki'],
['http://www.anw.fr/', 'Anwiki'],
['http://www.aneuch.org/', 'Aneuch'],
['http://doc.myunixhost.com/', 'Aneuch'],
['http://www.bitweaver.org/wiki/index.php', 'bitweaver'],
['http://wiki.e-shell.org/Home', 'Zwiki'],
['http://leo.zwiki.org/', 'Zwiki'],
['http://accessibility4all.wikispaces.com/', 'Wikispaces'],
['http://darksouls.wikidot.com/', 'Wikidot'],
['http://www.wikifoundrycentral.com/', 'Wetpaint'],
['http://wiki.openid.net/', 'PBworks'],
]
for wiki, engine in tests:
print 'Testing', wiki
guess_engine = getWikiEngine(wiki)
print 'Got: %s, expected: %s' % (guess_engine, engine)
self.assertEqual(guess_engine, engine)
def test_mwGetAPIAndIndex(self):
print '\n', '#'*73, '\n', 'test_mwGetAPIAndIndex', '\n', '#'*73
tests = [
# Alone wikis
['http://archiveteam.org', 'http://archiveteam.org/api.php', 'http://archiveteam.org/index.php'],
['http://skilledtests.com/wiki/', 'http://skilledtests.com/wiki/api.php', 'http://skilledtests.com/wiki/index.php'],
# Editthis wikifarm
# It has a page view limit
# Gamepedia wikifarm
['http://dawngate.gamepedia.com', 'http://dawngate.gamepedia.com/api.php', 'http://dawngate.gamepedia.com/index.php'],
# Neoseeker wikifarm
#['http://digimon.neoseeker.com', 'http://digimon.neoseeker.com/w/api.php', 'http://digimon.neoseeker.com/w/index.php'],
# Orain wikifarm
#['http://mc.orain.org', 'http://mc.orain.org/w/api.php', 'http://mc.orain.org/w/index.php'],
# Referata wikifarm
# ['http://wikipapers.referata.com', 'http://wikipapers.referata.com/w/api.php', 'http://wikipapers.referata.com/w/index.php'],
# ShoutWiki wikifarm
['http://commandos.shoutwiki.com', 'http://commandos.shoutwiki.com/w/api.php', 'http://commandos.shoutwiki.com/w/index.php'],
# Wiki-site wikifarm
#['http://minlingo.wiki-site.com', 'http://minlingo.wiki-site.com/api.php', 'http://minlingo.wiki-site.com/index.php'],
# Wikkii wikifarm
# It seems offline
]
for wiki, api, index in tests:
print 'Testing', wiki
api2, index2 = mwGetAPIAndIndex(wiki)
self.assertEqual(api, api2)
self.assertEqual(index, index2)
if __name__ == '__main__':
#copying dumpgenerator.py to this directory
#shutil.copy2('../dumpgenerator.py', './dumpgenerator.py')
unittest.main() | print 'Testing', index
print 'Trying to parse', pagetocheck, 'with index'
config_index = {'index': index, 'api': '', 'delay': 0, 'namespaces': ['all'], 'exnamespaces': [], 'date': datetime.datetime.now().strftime('%Y%m%d'), 'path': '.'} | random_line_split |
account.rs | // droplet_limit number The total number of droplets the user may have
// email string The email the user has registered for Digital
// Ocean with
// uuid string The universal identifier for this user
// email_verified boolean If true, the user has verified their account
// via email. False otherwise.
use std::fmt;
use std::borrow::Cow;
use response::NamedResponse;
use response::NotArray;
#[derive(Deserialize, Debug)]
pub struct Account {
/// droplet_limit is a "number" in json, which could be a float, even though that's not a
/// reasonable value for a droplet limit, neither is a negative number | pub uuid: String,
pub email_verified: bool,
}
impl NotArray for Account {}
impl fmt::Display for Account {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
"Email: {}\n\
Droplet Limit: {:.0}\n\
UUID: {}\n\
E-Mail Verified: {}",
self.email,
self.droplet_limit,
self.uuid,
self.email_verified)
}
}
impl NamedResponse for Account {
fn name<'a>() -> Cow<'a, str> { "account".into() }
}
// TODO: Implement response headers:
// content-type: application/json; charset=utf-8
// status: 200 OK
// ratelimit-limit: 1200
// ratelimit-remaining: 1137
// ratelimit-reset: 1415984218 | pub droplet_limit: f64,
pub email: String, | random_line_split |
account.rs | // droplet_limit number The total number of droplets the user may have
// email string The email the user has registered for Digital
// Ocean with
// uuid string The universal identifier for this user
// email_verified boolean If true, the user has verified their account
// via email. False otherwise.
use std::fmt;
use std::borrow::Cow;
use response::NamedResponse;
use response::NotArray;
#[derive(Deserialize, Debug)]
pub struct Account {
/// droplet_limit is a "number" in json, which could be a float, even though that's not a
/// reasonable value for a droplet limit, neither is a negative number
pub droplet_limit: f64,
pub email: String,
pub uuid: String,
pub email_verified: bool,
}
impl NotArray for Account {}
impl fmt::Display for Account {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
"Email: {}\n\
Droplet Limit: {:.0}\n\
UUID: {}\n\
E-Mail Verified: {}",
self.email,
self.droplet_limit,
self.uuid,
self.email_verified)
}
}
impl NamedResponse for Account {
fn name<'a>() -> Cow<'a, str> |
}
// TODO: Implement response headers:
// content-type: application/json; charset=utf-8
// status: 200 OK
// ratelimit-limit: 1200
// ratelimit-remaining: 1137
// ratelimit-reset: 1415984218
| { "account".into() } | identifier_body |
account.rs | // droplet_limit number The total number of droplets the user may have
// email string The email the user has registered for Digital
// Ocean with
// uuid string The universal identifier for this user
// email_verified boolean If true, the user has verified their account
// via email. False otherwise.
use std::fmt;
use std::borrow::Cow;
use response::NamedResponse;
use response::NotArray;
#[derive(Deserialize, Debug)]
pub struct Account {
/// droplet_limit is a "number" in json, which could be a float, even though that's not a
/// reasonable value for a droplet limit, neither is a negative number
pub droplet_limit: f64,
pub email: String,
pub uuid: String,
pub email_verified: bool,
}
impl NotArray for Account {}
impl fmt::Display for Account {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
"Email: {}\n\
Droplet Limit: {:.0}\n\
UUID: {}\n\
E-Mail Verified: {}",
self.email,
self.droplet_limit,
self.uuid,
self.email_verified)
}
}
impl NamedResponse for Account {
fn | <'a>() -> Cow<'a, str> { "account".into() }
}
// TODO: Implement response headers:
// content-type: application/json; charset=utf-8
// status: 200 OK
// ratelimit-limit: 1200
// ratelimit-remaining: 1137
// ratelimit-reset: 1415984218
| name | identifier_name |
projecttags.py | from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
import os
register = template.Library()
@register.filter(name='basename')
@stringfilter
def basename(value):
return os.path.basename(value)
@register.filter(name='replace_macros')
@stringfilter
def replace_macros(value, user_dict):
return value.replace("#FIRSTNAME#", user_dict['first_name'].strip()) \
.replace("#LASTNAME#", user_dict['last_name'].strip())
@register.filter(name='state_label_css')
def state_label_css(subm):
green_label = "badge label label-success"
red_label = "badge label label-important"
grey_label = "badge label label-info"
# We expect a submission as input
if subm.is_closed() and subm.grading:
if subm.grading.means_passed:
return green_label
else:
return red_label
if subm.state in [subm.SUBMITTED_TESTED,
subm.SUBMITTED,
subm.TEST_FULL_PENDING,
subm.GRADED,
subm.TEST_FULL_FAILED]:
return green_label
if subm.state == subm.TEST_VALIDITY_FAILED:
return red_label
return grey_label
@register.assignment_tag
def setting(name):
return getattr(settings, name, "")
@register.inclusion_tag('inclusion_tags/details_table.html')
def details_table(submission):
return {'submission': submission}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline_timeout(assignment):
return {'assignment': assignment, 'show_timeout': True}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline(assignment):
return {'assignment': assignment, 'show_timeout': False}
@register.inclusion_tag('inclusion_tags/grading.html')
def grading(submission):
| return {'submission': submission} | identifier_body |
|
projecttags.py | from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
import os
register = template.Library()
@register.filter(name='basename')
@stringfilter
def basename(value):
return os.path.basename(value)
@register.filter(name='replace_macros')
@stringfilter
def replace_macros(value, user_dict):
return value.replace("#FIRSTNAME#", user_dict['first_name'].strip()) \
.replace("#LASTNAME#", user_dict['last_name'].strip())
@register.filter(name='state_label_css')
def state_label_css(subm):
green_label = "badge label label-success"
red_label = "badge label label-important"
grey_label = "badge label label-info"
# We expect a submission as input
if subm.is_closed() and subm.grading:
if subm.grading.means_passed:
return green_label
else:
return red_label
if subm.state in [subm.SUBMITTED_TESTED,
subm.SUBMITTED,
subm.TEST_FULL_PENDING,
subm.GRADED,
subm.TEST_FULL_FAILED]:
return green_label
if subm.state == subm.TEST_VALIDITY_FAILED:
return red_label
return grey_label
@register.assignment_tag
def setting(name):
return getattr(settings, name, "")
@register.inclusion_tag('inclusion_tags/details_table.html')
def details_table(submission):
return {'submission': submission}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline_timeout(assignment):
return {'assignment': assignment, 'show_timeout': True}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline(assignment):
return {'assignment': assignment, 'show_timeout': False}
@register.inclusion_tag('inclusion_tags/grading.html') | return {'submission': submission} | def grading(submission): | random_line_split |
projecttags.py | from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
import os
register = template.Library()
@register.filter(name='basename')
@stringfilter
def basename(value):
return os.path.basename(value)
@register.filter(name='replace_macros')
@stringfilter
def replace_macros(value, user_dict):
return value.replace("#FIRSTNAME#", user_dict['first_name'].strip()) \
.replace("#LASTNAME#", user_dict['last_name'].strip())
@register.filter(name='state_label_css')
def state_label_css(subm):
green_label = "badge label label-success"
red_label = "badge label label-important"
grey_label = "badge label label-info"
# We expect a submission as input
if subm.is_closed() and subm.grading:
if subm.grading.means_passed:
return green_label
else:
return red_label
if subm.state in [subm.SUBMITTED_TESTED,
subm.SUBMITTED,
subm.TEST_FULL_PENDING,
subm.GRADED,
subm.TEST_FULL_FAILED]:
return green_label
if subm.state == subm.TEST_VALIDITY_FAILED:
return red_label
return grey_label
@register.assignment_tag
def setting(name):
return getattr(settings, name, "")
@register.inclusion_tag('inclusion_tags/details_table.html')
def details_table(submission):
return {'submission': submission}
@register.inclusion_tag('inclusion_tags/deadline.html')
def | (assignment):
return {'assignment': assignment, 'show_timeout': True}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline(assignment):
return {'assignment': assignment, 'show_timeout': False}
@register.inclusion_tag('inclusion_tags/grading.html')
def grading(submission):
return {'submission': submission}
| deadline_timeout | identifier_name |
projecttags.py | from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
import os
register = template.Library()
@register.filter(name='basename')
@stringfilter
def basename(value):
return os.path.basename(value)
@register.filter(name='replace_macros')
@stringfilter
def replace_macros(value, user_dict):
return value.replace("#FIRSTNAME#", user_dict['first_name'].strip()) \
.replace("#LASTNAME#", user_dict['last_name'].strip())
@register.filter(name='state_label_css')
def state_label_css(subm):
green_label = "badge label label-success"
red_label = "badge label label-important"
grey_label = "badge label label-info"
# We expect a submission as input
if subm.is_closed() and subm.grading:
|
if subm.state in [subm.SUBMITTED_TESTED,
subm.SUBMITTED,
subm.TEST_FULL_PENDING,
subm.GRADED,
subm.TEST_FULL_FAILED]:
return green_label
if subm.state == subm.TEST_VALIDITY_FAILED:
return red_label
return grey_label
@register.assignment_tag
def setting(name):
return getattr(settings, name, "")
@register.inclusion_tag('inclusion_tags/details_table.html')
def details_table(submission):
return {'submission': submission}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline_timeout(assignment):
return {'assignment': assignment, 'show_timeout': True}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline(assignment):
return {'assignment': assignment, 'show_timeout': False}
@register.inclusion_tag('inclusion_tags/grading.html')
def grading(submission):
return {'submission': submission}
| if subm.grading.means_passed:
return green_label
else:
return red_label | conditional_block |
state_script.rs | use crate::state::*;
use std;
use std::process::Command;
struct StateScript {
script_path: String,
shared_state: SharedState,
state_observer: StateObserver,
}
impl StateScript {
fn new(script_path: &str, shared_state: SharedState) -> StateScript {
let state_observer = shared_state.lock().add_observer();
StateScript {
script_path: String::from(script_path),
shared_state,
state_observer,
}
}
fn run_script(&self, state: StreamState, output: &str) |
fn run(&mut self) {
let mut stream_state;
let mut output_name: String;
{
let state = self.shared_state.lock();
output_name = state.current_output().name.clone();
stream_state = state.state().stream_state;
};
self.run_script(stream_state, output_name.as_str());
loop {
match self.state_observer.recv() {
Ok(StateChange::SelectOutput { output }) => {
output_name = self.shared_state.lock().state().outputs[output]
.name
.clone();
}
Ok(StateChange::SetStreamState {
stream_state: new_stream_state,
}) => {
stream_state = new_stream_state;
}
Ok(_) => continue,
Err(_) => return,
};
self.run_script(stream_state, output_name.as_str());
}
}
}
pub fn start_state_script_controller(script_path: &str, shared_state: SharedState) {
let mut c = StateScript::new(script_path, shared_state);
std::thread::spawn(move || {
c.run();
});
}
| {
let result = Command::new(&self.script_path)
.arg(state.as_str())
.arg(output)
.status();
match result {
Ok(status) => {
if !status.success() {
println!(
"ERROR: {} {} failed with error code {}",
self.script_path,
state.as_str(),
status.code().unwrap_or(0)
);
}
}
Err(e) => println!("ERROR: Failed to run {}: {}", self.script_path, e),
}
} | identifier_body |
state_script.rs | use crate::state::*;
use std;
use std::process::Command;
struct StateScript {
script_path: String,
shared_state: SharedState,
state_observer: StateObserver,
}
impl StateScript {
fn | (script_path: &str, shared_state: SharedState) -> StateScript {
let state_observer = shared_state.lock().add_observer();
StateScript {
script_path: String::from(script_path),
shared_state,
state_observer,
}
}
fn run_script(&self, state: StreamState, output: &str) {
let result = Command::new(&self.script_path)
.arg(state.as_str())
.arg(output)
.status();
match result {
Ok(status) => {
if !status.success() {
println!(
"ERROR: {} {} failed with error code {}",
self.script_path,
state.as_str(),
status.code().unwrap_or(0)
);
}
}
Err(e) => println!("ERROR: Failed to run {}: {}", self.script_path, e),
}
}
fn run(&mut self) {
let mut stream_state;
let mut output_name: String;
{
let state = self.shared_state.lock();
output_name = state.current_output().name.clone();
stream_state = state.state().stream_state;
};
self.run_script(stream_state, output_name.as_str());
loop {
match self.state_observer.recv() {
Ok(StateChange::SelectOutput { output }) => {
output_name = self.shared_state.lock().state().outputs[output]
.name
.clone();
}
Ok(StateChange::SetStreamState {
stream_state: new_stream_state,
}) => {
stream_state = new_stream_state;
}
Ok(_) => continue,
Err(_) => return,
};
self.run_script(stream_state, output_name.as_str());
}
}
}
pub fn start_state_script_controller(script_path: &str, shared_state: SharedState) {
let mut c = StateScript::new(script_path, shared_state);
std::thread::spawn(move || {
c.run();
});
}
| new | identifier_name |
state_script.rs | use crate::state::*;
use std;
use std::process::Command;
struct StateScript {
script_path: String,
shared_state: SharedState,
state_observer: StateObserver,
}
impl StateScript {
fn new(script_path: &str, shared_state: SharedState) -> StateScript {
let state_observer = shared_state.lock().add_observer();
StateScript {
script_path: String::from(script_path),
shared_state,
state_observer,
}
}
fn run_script(&self, state: StreamState, output: &str) {
let result = Command::new(&self.script_path)
.arg(state.as_str())
.arg(output)
.status();
match result {
Ok(status) => {
if !status.success() {
println!( | state.as_str(),
status.code().unwrap_or(0)
);
}
}
Err(e) => println!("ERROR: Failed to run {}: {}", self.script_path, e),
}
}
fn run(&mut self) {
let mut stream_state;
let mut output_name: String;
{
let state = self.shared_state.lock();
output_name = state.current_output().name.clone();
stream_state = state.state().stream_state;
};
self.run_script(stream_state, output_name.as_str());
loop {
match self.state_observer.recv() {
Ok(StateChange::SelectOutput { output }) => {
output_name = self.shared_state.lock().state().outputs[output]
.name
.clone();
}
Ok(StateChange::SetStreamState {
stream_state: new_stream_state,
}) => {
stream_state = new_stream_state;
}
Ok(_) => continue,
Err(_) => return,
};
self.run_script(stream_state, output_name.as_str());
}
}
}
pub fn start_state_script_controller(script_path: &str, shared_state: SharedState) {
let mut c = StateScript::new(script_path, shared_state);
std::thread::spawn(move || {
c.run();
});
} | "ERROR: {} {} failed with error code {}",
self.script_path, | random_line_split |
main.py | from __future__ import absolute_import
import re, os, sys
from clay import app
import clay.config
from flask import make_response, request, redirect, render_template, url_for
from epubber.fimfic_epubgen import FimFictionEPubGenerator
site_epub_classes = [
FimFictionEPubGenerator
]
accesslog = clay.config.get_logger('epubber_access')
#####################################################################
# Main App Views Section
#####################################################################
@app.route('/', methods=['GET', 'POST'])
def main_view():
story = request.args.get("story") or None
if story:
data = None | epgen = epgenclass()
if epgen.handle_url(story):
epub_file, data = epgen.gen_epub()
accesslog.info('%(title)s - %(url)s' % epgen.metas)
del epgen
response = make_response(data)
response.headers["Content-Type"] = "application/epub+zip"
response.headers["Content-Disposition"] = "attachment; filename=%s" % epub_file
return response
del epgen
return ("Cannot generate epub for this URL.", 400)
return render_template("main.html")
#####################################################################
# Secondary Views Section
#####################################################################
@app.route('/health', methods=['GET'])
def health_view():
'''
Heartbeat view, because why not?
'''
return ('OK', 200)
#####################################################################
# URL Shortener Views Section
#####################################################################
@app.route('/img/<path>', methods=['GET', 'POST'])
def static_img_proxy_view(path):
'''
Make shorter URLs for image files.
'''
path = re.sub(r'[^A-Za-z0-9_.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('img', path)))
@app.route('/js/<path>', methods=['GET', 'POST'])
def static_js_proxy_view(path):
'''
Make shorter URLs for javascript files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('js', path)))
@app.route('/css/<path>', methods=['GET', 'POST'])
def static_css_proxy_view(path):
'''
Make shorter URLs for CSS files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('css', path)))
#####################################################################
# Main
#####################################################################
def main():
# Make templates copacetic with UTF8
reload(sys)
sys.setdefaultencoding('utf-8')
# App Config
app.secret_key = clay.config.get('flask.secret_key')
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 nowrap | for epgenclass in site_epub_classes: | random_line_split |
main.py | from __future__ import absolute_import
import re, os, sys
from clay import app
import clay.config
from flask import make_response, request, redirect, render_template, url_for
from epubber.fimfic_epubgen import FimFictionEPubGenerator
site_epub_classes = [
FimFictionEPubGenerator
]
accesslog = clay.config.get_logger('epubber_access')
#####################################################################
# Main App Views Section
#####################################################################
@app.route('/', methods=['GET', 'POST'])
def main_view():
story = request.args.get("story") or None
if story:
data = None
for epgenclass in site_epub_classes:
epgen = epgenclass()
if epgen.handle_url(story):
epub_file, data = epgen.gen_epub()
accesslog.info('%(title)s - %(url)s' % epgen.metas)
del epgen
response = make_response(data)
response.headers["Content-Type"] = "application/epub+zip"
response.headers["Content-Disposition"] = "attachment; filename=%s" % epub_file
return response
del epgen
return ("Cannot generate epub for this URL.", 400)
return render_template("main.html")
#####################################################################
# Secondary Views Section
#####################################################################
@app.route('/health', methods=['GET'])
def health_view():
'''
Heartbeat view, because why not?
'''
return ('OK', 200)
#####################################################################
# URL Shortener Views Section
#####################################################################
@app.route('/img/<path>', methods=['GET', 'POST'])
def | (path):
'''
Make shorter URLs for image files.
'''
path = re.sub(r'[^A-Za-z0-9_.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('img', path)))
@app.route('/js/<path>', methods=['GET', 'POST'])
def static_js_proxy_view(path):
'''
Make shorter URLs for javascript files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('js', path)))
@app.route('/css/<path>', methods=['GET', 'POST'])
def static_css_proxy_view(path):
'''
Make shorter URLs for CSS files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('css', path)))
#####################################################################
# Main
#####################################################################
def main():
# Make templates copacetic with UTF8
reload(sys)
sys.setdefaultencoding('utf-8')
# App Config
app.secret_key = clay.config.get('flask.secret_key')
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 nowrap
| static_img_proxy_view | identifier_name |
main.py | from __future__ import absolute_import
import re, os, sys
from clay import app
import clay.config
from flask import make_response, request, redirect, render_template, url_for
from epubber.fimfic_epubgen import FimFictionEPubGenerator
site_epub_classes = [
FimFictionEPubGenerator
]
accesslog = clay.config.get_logger('epubber_access')
#####################################################################
# Main App Views Section
#####################################################################
@app.route('/', methods=['GET', 'POST'])
def main_view():
story = request.args.get("story") or None
if story:
|
return render_template("main.html")
#####################################################################
# Secondary Views Section
#####################################################################
@app.route('/health', methods=['GET'])
def health_view():
'''
Heartbeat view, because why not?
'''
return ('OK', 200)
#####################################################################
# URL Shortener Views Section
#####################################################################
@app.route('/img/<path>', methods=['GET', 'POST'])
def static_img_proxy_view(path):
'''
Make shorter URLs for image files.
'''
path = re.sub(r'[^A-Za-z0-9_.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('img', path)))
@app.route('/js/<path>', methods=['GET', 'POST'])
def static_js_proxy_view(path):
'''
Make shorter URLs for javascript files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('js', path)))
@app.route('/css/<path>', methods=['GET', 'POST'])
def static_css_proxy_view(path):
'''
Make shorter URLs for CSS files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('css', path)))
#####################################################################
# Main
#####################################################################
def main():
# Make templates copacetic with UTF8
reload(sys)
sys.setdefaultencoding('utf-8')
# App Config
app.secret_key = clay.config.get('flask.secret_key')
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 nowrap
| data = None
for epgenclass in site_epub_classes:
epgen = epgenclass()
if epgen.handle_url(story):
epub_file, data = epgen.gen_epub()
accesslog.info('%(title)s - %(url)s' % epgen.metas)
del epgen
response = make_response(data)
response.headers["Content-Type"] = "application/epub+zip"
response.headers["Content-Disposition"] = "attachment; filename=%s" % epub_file
return response
del epgen
return ("Cannot generate epub for this URL.", 400) | conditional_block |
main.py | from __future__ import absolute_import
import re, os, sys
from clay import app
import clay.config
from flask import make_response, request, redirect, render_template, url_for
from epubber.fimfic_epubgen import FimFictionEPubGenerator
site_epub_classes = [
FimFictionEPubGenerator
]
accesslog = clay.config.get_logger('epubber_access')
#####################################################################
# Main App Views Section
#####################################################################
@app.route('/', methods=['GET', 'POST'])
def main_view():
story = request.args.get("story") or None
if story:
data = None
for epgenclass in site_epub_classes:
epgen = epgenclass()
if epgen.handle_url(story):
epub_file, data = epgen.gen_epub()
accesslog.info('%(title)s - %(url)s' % epgen.metas)
del epgen
response = make_response(data)
response.headers["Content-Type"] = "application/epub+zip"
response.headers["Content-Disposition"] = "attachment; filename=%s" % epub_file
return response
del epgen
return ("Cannot generate epub for this URL.", 400)
return render_template("main.html")
#####################################################################
# Secondary Views Section
#####################################################################
@app.route('/health', methods=['GET'])
def health_view():
'''
Heartbeat view, because why not?
'''
return ('OK', 200)
#####################################################################
# URL Shortener Views Section
#####################################################################
@app.route('/img/<path>', methods=['GET', 'POST'])
def static_img_proxy_view(path):
'''
Make shorter URLs for image files.
'''
path = re.sub(r'[^A-Za-z0-9_.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('img', path)))
@app.route('/js/<path>', methods=['GET', 'POST'])
def static_js_proxy_view(path):
'''
Make shorter URLs for javascript files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('js', path)))
@app.route('/css/<path>', methods=['GET', 'POST'])
def static_css_proxy_view(path):
'''
Make shorter URLs for CSS files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('css', path)))
#####################################################################
# Main
#####################################################################
def main():
# Make templates copacetic with UTF8
|
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 nowrap
| reload(sys)
sys.setdefaultencoding('utf-8')
# App Config
app.secret_key = clay.config.get('flask.secret_key') | identifier_body |
targetCenterList.py | import numpy as np
import dnfpy.core.utils as utils
from dnfpyUtils.stats.trajectory import Trajectory
class TargetCenterList(Trajectory):
| """
Input:
inputMap (constructor)
targetList (children)
Output:
the center of the followed track from the target list
"""
def __init__(self,name,inputMap,inputSize,dim=1,dt=0.1,wrap=True,**kwargs):
super().__init__(name=name,dim=dim,dt=dt,wrap=wrap,inputSize=inputSize,**kwargs)
self.inputMap = inputMap
def reset(self):
super().reset()
dim = self.getArg('dim')
self._data = [(np.nan,)*dim]
def _compute(self,targetList,inputSize):
li = []
for i in range(len(targetList)):
target = self.inputMap.getTracks()[targetList[i]]
li.append(np.array(target.getCenter())/inputSize)
self._data = li
self.trace.append(np.copy(self._data))
def getViewSpace(self):
dim = self.getArg('dim')
return (1,)*dim | identifier_body |
|
targetCenterList.py | import numpy as np
import dnfpy.core.utils as utils
from dnfpyUtils.stats.trajectory import Trajectory
class | (Trajectory):
"""
Input:
inputMap (constructor)
targetList (children)
Output:
the center of the followed track from the target list
"""
def __init__(self,name,inputMap,inputSize,dim=1,dt=0.1,wrap=True,**kwargs):
super().__init__(name=name,dim=dim,dt=dt,wrap=wrap,inputSize=inputSize,**kwargs)
self.inputMap = inputMap
def reset(self):
super().reset()
dim = self.getArg('dim')
self._data = [(np.nan,)*dim]
def _compute(self,targetList,inputSize):
li = []
for i in range(len(targetList)):
target = self.inputMap.getTracks()[targetList[i]]
li.append(np.array(target.getCenter())/inputSize)
self._data = li
self.trace.append(np.copy(self._data))
def getViewSpace(self):
dim = self.getArg('dim')
return (1,)*dim
| TargetCenterList | identifier_name |
targetCenterList.py | import numpy as np
import dnfpy.core.utils as utils
from dnfpyUtils.stats.trajectory import Trajectory
class TargetCenterList(Trajectory):
"""
Input:
inputMap (constructor)
targetList (children)
Output:
the center of the followed track from the target list
"""
def __init__(self,name,inputMap,inputSize,dim=1,dt=0.1,wrap=True,**kwargs):
super().__init__(name=name,dim=dim,dt=dt,wrap=wrap,inputSize=inputSize,**kwargs)
self.inputMap = inputMap
def reset(self):
super().reset()
dim = self.getArg('dim')
self._data = [(np.nan,)*dim]
def _compute(self,targetList,inputSize):
li = []
for i in range(len(targetList)):
target = self.inputMap.getTracks()[targetList[i]] | def getViewSpace(self):
dim = self.getArg('dim')
return (1,)*dim | li.append(np.array(target.getCenter())/inputSize)
self._data = li
self.trace.append(np.copy(self._data))
| random_line_split |
targetCenterList.py | import numpy as np
import dnfpy.core.utils as utils
from dnfpyUtils.stats.trajectory import Trajectory
class TargetCenterList(Trajectory):
"""
Input:
inputMap (constructor)
targetList (children)
Output:
the center of the followed track from the target list
"""
def __init__(self,name,inputMap,inputSize,dim=1,dt=0.1,wrap=True,**kwargs):
super().__init__(name=name,dim=dim,dt=dt,wrap=wrap,inputSize=inputSize,**kwargs)
self.inputMap = inputMap
def reset(self):
super().reset()
dim = self.getArg('dim')
self._data = [(np.nan,)*dim]
def _compute(self,targetList,inputSize):
li = []
for i in range(len(targetList)):
|
self._data = li
self.trace.append(np.copy(self._data))
def getViewSpace(self):
dim = self.getArg('dim')
return (1,)*dim
| target = self.inputMap.getTracks()[targetList[i]]
li.append(np.array(target.getCenter())/inputSize) | conditional_block |
2PopDNAnorec_1_711.js | USETEXTLINKS = 1
STARTALLOPEN = 0
WRAPTEXT = 1
PRESERVESTATE = 0 | foldersTree = gFld("<i>ARLEQUIN RESULTS (2PopDNAnorec_1_711.arp)</i>", "")
insDoc(foldersTree, gLnk("R", "Arlequin log file", "Arlequin_log.txt"))
aux1 = insFld(foldersTree, gFld("Run of 31/07/18 at 17:02:27", "2PopDNAnorec_1_711.xml#31_07_18at17_02_27"))
insDoc(aux1, gLnk("R", "Settings", "2PopDNAnorec_1_711.xml#31_07_18at17_02_27_run_information"))
aux2 = insFld(aux1, gFld("Genetic structure (samp=pop)", "2PopDNAnorec_1_711.xml#31_07_18at17_02_27_pop_gen_struct"))
insDoc(aux2, gLnk("R", "AMOVA", "2PopDNAnorec_1_711.xml#31_07_18at17_02_27_pop_amova"))
insDoc(aux2, gLnk("R", "Pairwise distances", "2PopDNAnorec_1_711.xml#31_07_18at17_02_27_pop_pairw_diff")) | HIGHLIGHT = 1
ICONPATH = 'file:////Users/eric/github/popgenDB/sims_for_structure_paper/2PopDNAnorec_0.5_1000/' //change if the gif's folder is a subfolder, for example: 'images/'
| random_line_split |
15.4.4.21-9-c-i-26.js | /// Copyright (c) 2012 Ecma International. All rights reserved.
/**
* @path ch15/15.4/15.4.4/15.4.4.21/15.4.4.21-9-c-i-26.js
* @description Array.prototype.reduce - This object is the Arguments object which implements its own property get method (number of arguments equals number of parameters)
*/
function testcase() {
var testResult = false;
var initialValue = 0;
function callbackfn(prevVal, curVal, idx, obj) |
var func = function (a, b, c) {
Array.prototype.reduce.call(arguments, callbackfn, initialValue);
};
func(0, 1, 2);
return testResult;
}
runTestCase(testcase);
| {
if (idx === 2) {
testResult = (curVal === 2);
}
} | identifier_body |
15.4.4.21-9-c-i-26.js | /// Copyright (c) 2012 Ecma International. All rights reserved.
/**
* @path ch15/15.4/15.4.4/15.4.4.21/15.4.4.21-9-c-i-26.js
* @description Array.prototype.reduce - This object is the Arguments object which implements its own property get method (number of arguments equals number of parameters)
*/
function | () {
var testResult = false;
var initialValue = 0;
function callbackfn(prevVal, curVal, idx, obj) {
if (idx === 2) {
testResult = (curVal === 2);
}
}
var func = function (a, b, c) {
Array.prototype.reduce.call(arguments, callbackfn, initialValue);
};
func(0, 1, 2);
return testResult;
}
runTestCase(testcase);
| testcase | identifier_name |
15.4.4.21-9-c-i-26.js | /// Copyright (c) 2012 Ecma International. All rights reserved.
/**
* @path ch15/15.4/15.4.4/15.4.4.21/15.4.4.21-9-c-i-26.js
* @description Array.prototype.reduce - This object is the Arguments object which implements its own property get method (number of arguments equals number of parameters)
*/
function testcase() {
var testResult = false;
var initialValue = 0;
function callbackfn(prevVal, curVal, idx, obj) {
if (idx === 2) |
}
var func = function (a, b, c) {
Array.prototype.reduce.call(arguments, callbackfn, initialValue);
};
func(0, 1, 2);
return testResult;
}
runTestCase(testcase);
| {
testResult = (curVal === 2);
} | conditional_block |
15.4.4.21-9-c-i-26.js | /// Copyright (c) 2012 Ecma International. All rights reserved.
/**
* @path ch15/15.4/15.4.4/15.4.4.21/15.4.4.21-9-c-i-26.js
* @description Array.prototype.reduce - This object is the Arguments object which implements its own property get method (number of arguments equals number of parameters)
*/
function testcase() {
var testResult = false;
var initialValue = 0;
function callbackfn(prevVal, curVal, idx, obj) {
if (idx === 2) {
testResult = (curVal === 2);
}
}
var func = function (a, b, c) { | func(0, 1, 2);
return testResult;
}
runTestCase(testcase); | Array.prototype.reduce.call(arguments, callbackfn, initialValue);
};
| random_line_split |
test-amp-vk.js | /**
* Copyright 2017 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const POST_PARAMS = {
'embedtype': 'post',
'hash': 'Yc8_Z9pnpg8aKMZbVcD-jK45eAk',
'owner-id': '1',
'post-id': '45616',
};
const POLL_PARAMS = {
'embedtype': 'poll',
'api-id': '6183531',
'poll-id': '274086843_1a2a465f60fff4699f',
};
import '../amp-vk';
import {Layout} from '../../../../src/layout';
import {Resource} from '../../../../src/service/resource';
describes.realWin('amp-vk', {
amp: {
extensions: ['amp-vk'],
},
}, env => {
let win, doc;
beforeEach(() => {
win = env.win;
doc = win.document;
});
function createAmpVkElement(dataParams, layout) {
const element = doc.createElement('amp-vk');
for (const param in dataParams) {
element.setAttribute(`data-${param}`, dataParams[param]);
}
element.setAttribute('width', 500);
element.setAttribute('height', 300);
|
return element.build().then(() => {
const resource = Resource.forElement(element);
resource.measure();
return element.layoutCallback();
}).then(() => element);
}
it('requires data-embedtype', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['embedtype'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-embedtype attribute is required for/);
});
it('removes iframe after unlayoutCallback', () => {
return createAmpVkElement(POST_PARAMS).then(vkPost => {
const iframe = vkPost.querySelector('iframe');
expect(iframe).to.not.be.null;
const obj = vkPost.implementation_;
obj.unlayoutCallback();
expect(vkPost.querySelector('iframe')).to.be.null;
expect(obj.iframe_).to.be.null;
expect(obj.unlayoutOnPause()).to.be.true;
});
});
// Post tests
it('post::requires data-hash', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['hash'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-hash attribute is required for/);
});
it('post::requires data-owner-id', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['owner-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-owner-id attribute is required for/);
});
it('post::requires data-post-id', () => {
const params = Object.assign({}, POST_PARAMS);
delete params['post-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-post-id attribute is required for/);
});
it('post::renders iframe in amp-vk', () => {
return createAmpVkElement(POST_PARAMS).then(vkPost => {
const iframe = vkPost.querySelector('iframe');
expect(iframe).to.not.be.null;
});
});
it('post::renders responsively', () => {
return createAmpVkElement(POST_PARAMS, Layout.RESPONSIVE).then(vkPost => {
const iframe = vkPost.querySelector('iframe');
expect(iframe).to.not.be.null;
expect(iframe.className).to.match(/i-amphtml-fill-content/);
});
});
it('post::sets correct src url to the vk iFrame', () => {
return createAmpVkElement(POST_PARAMS, Layout.RESPONSIVE).then(vkPost => {
const impl = vkPost.implementation_;
const iframe = vkPost.querySelector('iframe');
const referrer = encodeURIComponent(vkPost.ownerDocument.referrer);
const url = encodeURIComponent(
vkPost.ownerDocument.location.href.replace(/#.*$/, '')
);
impl.onLayoutMeasure();
const startWidth = impl.getLayoutWidth();
const correctIFrameSrc = `https://vk.com/widget_post.php?app=0&width=100%25\
&_ver=1&owner_id=1&post_id=45616&hash=Yc8_Z9pnpg8aKMZbVcD-jK45eAk&=1\
&startWidth=${startWidth}&url=${url}&referrer=${referrer}&title=AMP%20Post`;
expect(iframe).to.not.be.null;
const timeArgPosition = iframe.src.lastIndexOf('&');
const iframeSrcWithoutTime = iframe.src.substr(0, timeArgPosition);
expect(iframeSrcWithoutTime).to.equal(correctIFrameSrc);
});
});
// Poll tests
it('poll::requires data-api-id', () => {
const params = Object.assign({}, POLL_PARAMS);
delete params['api-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-api-id attribute is required for/);
});
it('poll::requires data-poll-id', () => {
const params = Object.assign({}, POLL_PARAMS);
delete params['poll-id'];
return createAmpVkElement(params).should.eventually.be.rejectedWith(
/The data-poll-id attribute is required for/);
});
it('poll::renders iframe in amp-vk', () => {
return createAmpVkElement(POLL_PARAMS).then(vkPoll => {
const iframe = vkPoll.querySelector('iframe');
expect(iframe).to.not.be.null;
});
});
it('poll::renders responsively', () => {
return createAmpVkElement(POLL_PARAMS, Layout.RESPONSIVE).then(vkPoll => {
const iframe = vkPoll.querySelector('iframe');
expect(iframe).to.not.be.null;
expect(iframe.className).to.match(/i-amphtml-fill-content/);
});
});
it('poll::sets correct src url to the vk iFrame', () => {
return createAmpVkElement(POLL_PARAMS, Layout.RESPONSIVE).then(vkPoll => {
const iframe = vkPoll.querySelector('iframe');
const referrer = encodeURIComponent(vkPoll.ownerDocument.referrer);
const url = encodeURIComponent(
vkPoll.ownerDocument.location.href.replace(/#.*$/, '')
);
const correctIFrameSrc = `https://vk.com/al_widget_poll.php?\
app=6183531&width=100%25&_ver=1&poll_id=274086843_1a2a465f60fff4699f&=1\
&url=${url}&title=AMP%20Poll&description=&referrer=${referrer}`;
expect(iframe).to.not.be.null;
const timeArgPosition = iframe.src.lastIndexOf('&');
const iframeSrcWithoutTime = iframe.src.substr(0, timeArgPosition);
expect(iframeSrcWithoutTime).to.equal(correctIFrameSrc);
});
});
it('both::resizes amp-vk element in response to postmessages', () => {
return createAmpVkElement(POLL_PARAMS).then(vkPoll => {
const impl = vkPoll.implementation_;
const iframe = vkPoll.querySelector('iframe');
const changeHeight = sandbox.spy(impl, 'changeHeight');
const fakeHeight = 555;
expect(iframe).to.not.be.null;
generatePostMessage(vkPoll, iframe, fakeHeight);
expect(changeHeight).to.be.calledOnce;
expect(changeHeight.firstCall.args[0]).to.equal(fakeHeight);
});
});
function generatePostMessage(ins, iframe, height) {
ins.implementation_.handleVkIframeMessage_({
origin: 'https://vk.com',
source: iframe.contentWindow,
data: JSON.stringify([
'resize',
[height],
]),
});
}
}); | if (layout) {
element.setAttribute('layout', layout);
}
doc.body.appendChild(element); | random_line_split |