# Dataset schema: repo_name (string, 5-100 chars) | path (string, 4-375 chars)
# | copies (categorical, 991 values) | size (string, 4-7 chars)
# | content (string, 666 chars - 1M) | license (categorical, 15 values)
# ==== repo: SergiosKar/Deep-Learning-models | path: Double_Deep_Q_Netowrk.py | copies: 1 | size: 4525 | license: mit ====
import random
import gym
import numpy as np
from collections import deque
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense
from tensorflow.contrib.keras.python.keras.optimizers import Adam
from tensorflow.contrib.keras.python.keras import backend as K
EPISODES = 5000
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.99
self.learning_rate = 0.001
# online model network --> action = argmax(model.predict(state))
self.model = self._build_model()
# target network --> target = target_model.predict(state)
self.target_model = self._build_model()
self.update_target_model()
def _huber_loss(self, target, prediction):
# pseudo-Huber loss: sqrt(1 + error^2) - 1, a smooth approximation of the Huber loss
error = prediction - target
return K.mean(K.sqrt(1+K.square(error))-1, axis=-1)
def _build_model(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_size, activation='linear'))
model.compile(loss=self._huber_loss,
optimizer=Adam(lr=self.learning_rate))
return model
def update_target_model(self):
# copy weights from model to target_model
self.target_model.set_weights(self.model.get_weights())
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
# select random action with prob=epsilon else action=maxQ
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def replay(self, batch_size):
#sample random transitions
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
# compute the target for each sampled transition
target = self.model.predict(state)
if done:
target[0][action] = reward
else:
#action from model network
a = self.model.predict(next_state)[0]
#target from target network
t = self.target_model.predict(next_state)[0]
target[0][action] = reward + self.gamma * t[np.argmax(a)] # Bellman update (double DQN)
#train model network
self.model.fit(state, target, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
if __name__ == "__main__":
env = gym.make('CartPole-v1')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
agent = DQNAgent(state_size, action_size)
# agent.load("./save/cartpole-ddqn.h5")
done = False
batch_size = 32
for e in range(EPISODES):
state = env.reset()
state = np.reshape(state, [1, state_size])
for time in range(500):
# env.render()
#e-greedy action
action = agent.act(state)
next_state, reward, done, _ = env.step(action)
reward = reward if not done else -10
next_state = np.reshape(next_state, [1, state_size])
# add to experience memory
agent.remember(state, action, reward, next_state, done)
state = next_state
if done:
# update target network weights at the end of each episode
agent.update_target_model()
print("episode: {}/{}, score: {}, e: {:.2}"
.format(e, EPISODES, time, agent.epsilon))
break
if len(agent.memory) > batch_size:
agent.replay(batch_size)
# if e % 10 == 0:
# agent.save("./save/cartpole-ddqn.h5")
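# ---- illustration (not part of the original file) ----
# The replay() method above implements the double-DQN target: the online
# model selects the next action while the target network evaluates it,
#
#     a_star = argmax_a Q_online(s', a)
#     y      = r + gamma * Q_target(s', a_star)
#
# which reduces the overestimation bias of the vanilla DQN target
# y = r + gamma * max_a Q_target(s', a).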
# ==== repo: MarcosCommunity/odoo | path: addons/note/tests/__init__.py | copies: 260 | size: 1076 | license: agpl-3.0 ====
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_note
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# ==== repo: mattesno1/CouchPotatoServer | path: libs/caper/helpers.py | copies: 81 | size: 2210 | license: gpl-3.0 ====
# Copyright 2013 Dean Gardiner <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
def is_list_type(obj, element_type):
if not type(obj) is list:
return False
if len(obj) < 1:
raise ValueError("Unable to determine list element type from empty list")
return type(obj[0]) is element_type
def clean_dict(target, remove=None):
"""Recursively remove items matching a value 'remove' from the dictionary
:type target: dict
"""
if type(target) is not dict:
raise ValueError("Target is required to be a dict")
remove_keys = []
for key in target.keys():
if type(target[key]) is not dict:
if target[key] == remove:
remove_keys.append(key)
else:
clean_dict(target[key], remove)
for key in remove_keys:
target.pop(key)
return target
def update_dict(a, b):
for key, value in b.items():
if key not in a:
a[key] = value
elif isinstance(a[key], dict) and isinstance(value, dict):
update_dict(a[key], value)
elif isinstance(a[key], list):
a[key].append(value)
else:
a[key] = [a[key], value]
def xrange_six(start, stop=None, step=None):
if stop is not None and step is not None:
if PY3:
return range(start, stop, step)
else:
return xrange(start, stop, step)
else:
if PY3:
return range(start)
else:
return xrange(start)
def delta_seconds(td):
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
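# ---- illustration (not part of the original file) ----
# A minimal usage sketch for the helpers above, inferred from their code:
#
#     clean_dict({'a': None, 'b': {'c': None, 'd': 1}}, remove=None)
#     # -> {'b': {'d': 1}} (entries equal to `remove` are pruned recursively)
#
#     d = {'x': 1}
#     update_dict(d, {'x': 2, 'y': 3})
#     # -> d == {'x': [1, 2], 'y': 3} (colliding scalar values are collected)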
# ==== repo: nmayorov/scipy | path: scipy/optimize/_linprog.py | copies: 3 | size: 23457 | license: bsd-3-clause ====
"""
A top-level linear programming interface. Currently this interface solves
linear programming problems via the Simplex and Interior-Point methods.
.. versionadded:: 0.15.0
Functions
---------
.. autosummary::
:toctree: generated/
linprog
linprog_verbose_callback
linprog_terse_callback
"""
import numpy as np
from .optimize import OptimizeResult, OptimizeWarning
from warnings import warn
from ._linprog_ip import _linprog_ip
from ._linprog_simplex import _linprog_simplex
from ._linprog_rs import _linprog_rs
from ._linprog_util import (
_parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale,
_postsolve, _check_result, _display_summary)
from copy import deepcopy
__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
__docformat__ = "restructuredtext en"
def linprog_verbose_callback(res):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces detailed output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1-D array
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
con : 1-D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the tableau ``T`` has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
"""
x = res['x']
fun = res['fun']
phase = res['phase']
status = res['status']
nit = res['nit']
message = res['message']
complete = res['complete']
saved_printoptions = np.get_printoptions()
np.set_printoptions(linewidth=500,
formatter={'float': lambda x: "{0: 12.4f}".format(x)})
if status:
print('--------- Simplex Early Exit -------\n')
print('The simplex method exited early with status {0:d}'.format(status))
print(message)
elif complete:
print('--------- Simplex Complete --------\n')
print('Iterations required: {}'.format(nit))
else:
print('--------- Iteration {0:d} ---------\n'.format(nit))
if nit > 0:
if phase == 1:
print('Current Pseudo-Objective Value:')
else:
print('Current Objective Value:')
print('f = ', fun)
print()
print('Current Solution Vector:')
print('x = ', x)
print()
np.set_printoptions(**saved_printoptions)
def linprog_terse_callback(res):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces brief output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1-D array
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
con : 1-D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``.
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the tableau ``T`` has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
"""
nit = res['nit']
x = res['x']
if nit == 0:
print("Iter: X:")
print("{0: <5d} ".format(nit), end="")
print(x)
def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='interior-point', callback=None,
options=None, x0=None):
r"""
Linear programming: minimize a linear objective function subject to linear
equality and inequality constraints.
Linear programming solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
& A_{eq} x = b_{eq},\\
& l \leq x \leq u ,
where :math:`x` is a vector of decision variables; :math:`c`,
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
Informally, that's:
minimize::
c @ x
such that::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
``bounds``.
Parameters
----------
c : 1-D array
The coefficients of the linear objective function to be minimized.
A_ub : 2-D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1-D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2-D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1-D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
method : {'interior-point', 'revised simplex', 'simplex'}, optional
The algorithm used to solve the standard form problem.
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy)
are supported.
callback : callable, optional
If a callback function is provided, it will be called at least once per
iteration of the algorithm. The callback function must accept a single
`scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
The current solution vector.
fun : float
The current value of the objective function ``c @ x``.
success : bool
``True`` when the algorithm has completed successfully.
slack : 1-D array
The (nominally positive) values of the slack,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
phase : int
The phase of the algorithm being executed.
status : int
An integer representing the status of the algorithm.
``0`` : Optimization proceeding nominally.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
nit : int
The current iteration number.
message : str
A string descriptor of the algorithm status.
options : dict, optional
A dictionary of solver options. All methods accept the following
options:
maxiter : int
Maximum number of iterations to perform.
Default: see method-specific documentation.
disp : bool
Set to ``True`` to print convergence messages.
Default: ``False``.
autoscale : bool
Set to ``True`` to automatically perform equilibration.
Consider using this option if the numerical values in the
constraints are separated by several orders of magnitude.
Default: ``False``.
presolve : bool
Set to ``False`` to disable automatic presolve.
Default: ``True``.
rr : bool
Set to ``False`` to disable automatic redundancy removal.
Default: ``True``.
For method-specific options, see
:func:`show_options('linprog') <show_options>`.
x0 : 1-D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1-D array
The values of the decision variables that minimizes the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
slack : 1-D array
The (nominally positive) values of the slack variables,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
success : bool
``True`` when the algorithm succeeds in finding an optimal
solution.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimization terminated successfully.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
nit : int
The total number of iterations performed in all phases.
message : str
A string descriptor of the exit status of the algorithm.
See Also
--------
show_options : Additional options accepted by the solvers.
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter.
:ref:`'interior-point' <optimize.linprog-interior-point>` is the default
as it is typically the fastest and most robust method.
:ref:`'revised simplex' <optimize.linprog-revised_simplex>` is more
accurate for the problems it solves.
:ref:`'simplex' <optimize.linprog-simplex>` is the legacy method and is
included for backwards compatibility and educational purposes.
Method *interior-point* uses the primal-dual path following algorithm
as outlined in [4]_. This algorithm supports sparse constraint matrices and
is typically faster than the simplex methods, especially for large, sparse
problems. Note, however, that the solution returned may be slightly less
accurate than those of the simplex methods and will not, in general,
correspond with a vertex of the polytope defined by the constraints.
.. versionadded:: 1.0.0
Method *revised simplex* uses the revised simplex method as described in
[9]_, except that a factorization [11]_ of the basis matrix, rather than
its inverse, is efficiently maintained and used to solve the linear systems
at each iteration of the algorithm.
.. versionadded:: 1.3.0
Method *simplex* uses a traditional, full-tableau implementation of
Dantzig's simplex algorithm [1]_, [2]_ (*not* the
Nelder-Mead simplex). This algorithm is included for backwards
compatibility and educational purposes.
.. versionadded:: 0.15.0
Before applying any method, a presolve procedure based on [8]_ attempts
to identify trivial infeasibilities, trivial unboundedness, and potential
problem simplifications. Specifically, it checks for:
- rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
- columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
variables;
- column singletons in ``A_eq``, representing fixed variables; and
- column singletons in ``A_ub``, representing simple bounds.
If presolve reveals that the problem is unbounded (e.g. an unconstrained
and unbounded variable has negative cost) or infeasible (e.g., a row of
zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
terminates with the appropriate status code. Note that presolve terminates
as soon as any sign of unboundedness is detected; consequently, a problem
may be reported as unbounded when in reality the problem is infeasible
(but infeasibility has not been detected yet). Therefore, if it is
important to know whether the problem is actually infeasible, solve the
problem again with option ``presolve=False``.
If neither infeasibility nor unboundedness are detected in a single pass
of the presolve, bounds are tightened where possible and fixed
variables are removed from the problem. Then, linearly dependent rows
of the ``A_eq`` matrix are removed, (unless they represent an
infeasibility) to avoid numerical difficulties in the primary solve
routine. Note that rows that are nearly linearly dependent (within a
prescribed tolerance) may also be removed, which can change the optimal
solution in rare cases. If this is a concern, eliminate redundancy from
your problem formulation and run with option ``rr=False`` or
``presolve=False``.
Several potential improvements can be made here: additional presolve
checks outlined in [8]_ should be implemented, the presolve routine should
be run multiple times (until no further simplifications can be made), and
more of the efficiency improvements from [5]_ should be implemented in the
redundancy removal routines.
After presolve, the problem is transformed to standard form by converting
the (tightened) simple bounds to upper bound constraints, introducing
non-negative slack variables for inequality constraints, and expressing
unbounded variables as the difference between two non-negative variables.
Optionally, the problem is automatically scaled via equilibration [12]_.
The selected algorithm solves the standard form problem, and a
postprocessing routine converts the result to a solution to the original
problem.
References
----------
.. [1] Dantzig, George B., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1963
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
.. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
Mathematics of Operations Research (2), 1977: pp. 103-107.
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
.. [5] Andersen, Erling D. "Finding all linearly dependent rows in
large-scale linear programming." Optimization Methods and Software
6.3 (1995): 219-227.
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
Programming based on Newton's Method." Unpublished Course Notes,
March 2004. Available 2/25/2017 at
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
.. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
programming." Mathematical Programming 71.2 (1995): 221-245.
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [10] Andersen, Erling D., et al. Implementation of interior point
methods for large scale linear programming. HEC/Universite de
Geneve, 1996.
.. [11] Bartels, Richard H. "A stabilization of the simplex method."
Journal in Numerische Mathematik 16.5 (1971): 414-434.
.. [12] Tomlin, J. A. "On scaling linear programming problems."
Mathematical Programming Study 4 (1975): 146-166.
Examples
--------
Consider the following problem:
.. math::
\min_{x_0, x_1} \ -x_0 + 4x_1 & \\
\mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
-x_0 - 2x_1 & \geq -4,\\
x_1 & \geq -3.
The problem is not presented in the form accepted by `linprog`. This is
easily remedied by converting the "greater than" inequality
constraint to a "less than" inequality constraint by
multiplying both sides by a factor of :math:`-1`. Note also that the last
constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
Finally, since there are no bounds on :math:`x_0`, we must explicitly
specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
default is for variables to be non-negative. After collecting coefficients
into arrays and tuples, the input for this problem is:
>>> c = [-1, 4]
>>> A = [[-3, 1], [1, 2]]
>>> b = [6, 4]
>>> x0_bounds = (None, None)
>>> x1_bounds = (-3, None)
>>> from scipy.optimize import linprog
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
Note that the default method for `linprog` is 'interior-point', which is
approximate by nature.
>>> print(res)
con: array([], dtype=float64)
fun: -21.99999984082494 # may vary
message: 'Optimization terminated successfully.'
nit: 6 # may vary
slack: array([3.89999997e+01, 8.46872439e-08]) # may vary
status: 0
success: True
x: array([ 9.99999989, -2.99999999]) # may vary
If you need greater accuracy, try 'revised simplex'.
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds], method='revised simplex')
>>> print(res)
con: array([], dtype=float64)
fun: -22.0 # may vary
message: 'Optimization terminated successfully.'
nit: 1 # may vary
slack: array([39., 0.]) # may vary
status: 0
success: True
x: array([10., -3.]) # may vary
"""
meth = method.lower()
if x0 is not None and meth != "revised simplex":
warning_message = "x0 is used only when method is 'revised simplex'. "
warn(warning_message, OptimizeWarning)
lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0)
lp, solver_options = _parse_linprog(lp, options)
tol = solver_options.get('tol', 1e-9)
iteration = 0
complete = False # will become True if solved in presolve
undo = []
# Keep the original arrays to calculate slack/residuals for original
# problem.
lp_o = deepcopy(lp)
# Solve trivial problem, eliminate variables, tighten bounds, etc.
c0 = 0 # we might get a constant term in the objective
if solver_options.pop('presolve', True):
rr = solver_options.pop('rr', True)
(lp, c0, x, undo, complete, status, message) = _presolve(lp, rr, tol)
C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used
postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)
if not complete:
A, b, c, c0, x0 = _get_Abc(lp, c0)
if solver_options.pop('autoscale', False):
A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
postsolve_args = postsolve_args[:-2] + (C, b_scale)
if meth == 'simplex':
x, status, message, iteration = _linprog_simplex(
c, c0=c0, A=A, b=b, callback=callback,
postsolve_args=postsolve_args, **solver_options)
elif meth == 'interior-point':
x, status, message, iteration = _linprog_ip(
c, c0=c0, A=A, b=b, callback=callback,
postsolve_args=postsolve_args, **solver_options)
elif meth == 'revised simplex':
x, status, message, iteration = _linprog_rs(
c, c0=c0, A=A, b=b, x0=x0, callback=callback,
postsolve_args=postsolve_args, **solver_options)
else:
raise ValueError('Unknown solver %s' % method)
# Eliminate artificial variables, re-introduce presolved variables, etc.
disp = solver_options.get('disp', False)
x, fun, slack, con = _postsolve(x, postsolve_args, complete)
status, message = _check_result(x, fun, status, slack, con, lp_o.bounds, tol, message)
if disp:
_display_summary(message, status, fun, iteration)
sol = {
'x': x,
'fun': fun,
'slack': slack,
'con': con,
'status': status,
'message': message,
'nit': iteration,
'success': status == 0}
return OptimizeResult(sol)
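# ---- illustration (not part of the original file) ----
# A minimal call exercising the interface defined above:
#
#     from scipy.optimize import linprog
#     # minimize x0 + x1 subject to x0 + x1 >= 1, rewritten as -x0 - x1 <= -1
#     res = linprog(c=[1, 1], A_ub=[[-1, -1]], b_ub=[-1])
#     # res.fun is approximately 1.0 with the default interior-point method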
# ==== repo: mdeemer/XlsxWriter | path: xlsxwriter/test/comparison/test_textbox02.py | copies: 8 | size: 1108 | license: bsd-2-clause ====
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'textbox02.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox('E9', 'This is some text')
worksheet.insert_textbox('H18', 'Some more text')
workbook.close()
self.assertExcelEqual()
# ==== repo: cbporch/perceptron | path: adaline.py | copies: 1 | size: 1193 | license: mit ====
import numpy as np
from perceptron import Perceptron
class Adaline(Perceptron):
"""
Implementation of an Adaptive Linear Neuron, that can be abstracted to
various input sizes or dimensions. Displays using pyplot.
"""
ETA = 1
def __init__(self, grph, eta, max_t):
Perceptron.__init__(self, grph)
self.ETA = eta
self.max_t = max_t
def update(self, y_t, x):
r = []
s_t = np.sign(np.inner(self.grph.w, x))
for i in range(self.DIM):
r.append(self.grph.w[i] + (self.ETA * (y_t - s_t) * x[i]))
return r
def fit(self):
t = 0
c = True
while c:
n = self.random_check()
if n == -1 or t == self.max_t:
c = False
else:
self.grph.w = self.update(self.grph.y[n], self.grph.training_matrix[n])
t += 1
print("t: {0}, w: {1}".format(t, self.grph.w))
if self.grph.PLOT:
# In calling g() the 0th value is 1, corresponding to w_0,
# and the last value is not used in calculation, so is set as 0.
self.grph.plot_g()
self.grph.show_plot()
return t
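# ---- illustration (not part of the original file) ----
# update() above applies the sign-based delta rule to one randomly chosen
# misclassified sample per iteration of fit():
#
#     w_i <- w_i + eta * (y_t - sign(w . x)) * x_i
#
# fit() repeats this until random_check() finds no misclassified sample
# or the iteration budget max_t is exhausted.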
# ==== repo: Jortolsa/l10n-spain | path: l10n_es_aeat_mod340/models/account_invoice.py | copies: 9 | size: 1455 | license: agpl-3.0 ====
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2012 Acysos S.L. (http://acysos.com)
# Ignacio Ibeas <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class AccountInvoice(orm.Model):
_inherit = 'account.invoice'
_columns = {
'is_ticket_summary': fields.boolean(
'Ticket Summary',
help='Check if this invoice is a ticket summary'),
'number_tickets': fields.integer('Number of tickets', digits=(12, 0)),
'first_ticket': fields.char('First ticket', size=40),
'last_ticket': fields.char('Last ticket', size=40)
}
# ==== repo: bdang2012/taiga-back-casting | path: taiga/projects/milestones/services.py | copies: 1 | size: 1452 | license: agpl-3.0 ====
# Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils import timezone
from . import models
def calculate_milestone_is_closed(milestone):
return (milestone.user_stories.all().count() > 0 and
all([task.status.is_closed for task in milestone.tasks.all()]) and
all([user_story.is_closed for user_story in milestone.user_stories.all()]))
def close_milestone(milestone):
if not milestone.closed:
milestone.closed = True
milestone.save(update_fields=["closed",])
def open_milestone(milestone):
if milestone.closed:
milestone.closed = False
milestone.save(update_fields=["closed",])
# ==== repo: FoxerLee/iOS_sitp | path: Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/mac/gyptest-xctest.py | copies: 221 | size: 1196 | license: mit ====
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xctest targets are correctly configured.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['xcode'])
# Ignore this test if Xcode 5 is not installed
import subprocess
job = subprocess.Popen(['xcodebuild', '-version'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = job.communicate()
if job.returncode != 0:
raise Exception('Error %d running xcodebuild' % job.returncode)
xcode_version, build_number = out.splitlines()
# Convert the version string from 'Xcode 5.0' to ['5','0'].
xcode_version = xcode_version.split()[-1].split('.')
if xcode_version < ['5']:
test.pass_test()
CHDIR = 'xctest'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', chdir=CHDIR, arguments=['-scheme', 'classes', 'test'])
test.built_file_must_match('tests.xctest/Contents/Resources/resource.txt',
'foo\n', chdir=CHDIR)
test.pass_test()
# ==== repo: guillermo-carrasco/bcbio-nextgen | path: bcbio/structural/__init__.py | copies: 1 | size: 5772 | license: mit ====
"""Detect structural variation in genomes using high-throughput sequencing data.
"""
import collections
import copy
import operator
import toolz as tz
from bcbio.pipeline import datadict as dd
from bcbio.structural import (battenberg, cn_mops, cnvkit, delly,
lumpy, manta, metasv, plot, validate, wham)
from bcbio.variation import vcfutils
# Stratify callers by stage -- see `run` documentation below for definitions
_CALLERS = {
"initial": {"cnvkit": cnvkit.run,
"battenberg": battenberg.run},
"standard": {"cn.mops": cn_mops.run, "manta": manta.run,
"delly": delly.run, "lumpy": lumpy.run, "wham": wham.run},
"ensemble": {"metasv": metasv.run}}
_NEEDS_BACKGROUND = set(["cn.mops"])
def _get_svcallers(data):
svs = data["config"]["algorithm"].get("svcaller")
if svs is None:
svs = []
elif isinstance(svs, basestring):
svs = [svs]
return svs
def _handle_multiple_svcallers(data, stage):
"""Retrieve configured structural variation caller, handling multiple.
"""
svs = _get_svcallers(data)
out = []
for svcaller in svs:
if svcaller in _CALLERS[stage]:
base = copy.deepcopy(data)
base["config"]["algorithm"]["svcaller_active"] = svcaller
out.append(base)
return out
def finalize_sv(samples, config):
"""Combine results from multiple sv callers into a single ordered 'sv' key.
Handles ensemble calling and plotting of results.
"""
by_bam = collections.OrderedDict()
for x in samples:
try:
by_bam[x["align_bam"]].append(x)
except KeyError:
by_bam[x["align_bam"]] = [x]
by_batch = collections.OrderedDict()
lead_batches = {}
for grouped_calls in by_bam.values():
def orig_svcaller_order(x):
return _get_svcallers(x).index(x["config"]["algorithm"]["svcaller_active"])
sorted_svcalls = sorted([x for x in grouped_calls if "sv" in x],
key=orig_svcaller_order)
final = grouped_calls[0]
if len(sorted_svcalls) > 0:
final["sv"] = reduce(operator.add, [x["sv"] for x in sorted_svcalls])
del final["config"]["algorithm"]["svcaller_active"]
batch = dd.get_batch(final) or dd.get_sample_name(final)
batches = batch if isinstance(batch, (list, tuple)) else [batch]
lead_batches[dd.get_sample_name(final)] = batches[0]
for batch in batches:
try:
by_batch[batch].append(final)
except KeyError:
by_batch[batch] = [final]
out = []
for batch, items in by_batch.items():
if any("svplots" in dd.get_tools_on(d) for d in items):
plot_items = plot.by_regions(items)
else:
plot_items = items
for data in plot_items:
if lead_batches[dd.get_sample_name(data)] == batch:
out.append([data])
return out
def validate_sv(data):
"""Validate structural variant calls for a sample.
"""
return [[validate.evaluate(data)]]
def run(samples, run_parallel, stage):
"""Run structural variation detection.
The stage indicates which level of structural variant calling to run.
- initial, run prior to other callers and variant calling
- standard, regular batch calling
- ensemble, post-calling, combine other callers
"""
to_process = collections.OrderedDict()
extras = []
background = []
for data in (xs[0] for xs in samples):
ready_data = _handle_multiple_svcallers(data, stage)
if len(ready_data) > 0:
background.append(data)
for x in ready_data:
svcaller = x["config"]["algorithm"].get("svcaller_active")
if stage == "ensemble": # no batching for ensemble methods
batch = dd.get_sample_name(x)
else:
batch = dd.get_batch(x) or dd.get_sample_name(x)
batches = batch if isinstance(batch, (list, tuple)) else [batch]
for b in batches:
try:
to_process[(svcaller, b)].append(x)
except KeyError:
to_process[(svcaller, b)] = [x]
else:
extras.append([data])
processed = run_parallel("detect_sv", ([xs, background, xs[0]["config"], stage]
for xs in to_process.values()))
finalized = (run_parallel("finalize_sv", [([xs[0] for xs in processed], processed[0][0]["config"])])
if len(processed) > 0 else [])
return extras + finalized
def detect_sv(items, all_items, config, stage):
"""Top level parallel target for examining structural variation.
"""
svcaller = config["algorithm"].get("svcaller_active")
caller_fn = _CALLERS[stage].get(svcaller)
out = []
if svcaller and caller_fn:
if (svcaller in _NEEDS_BACKGROUND and
not vcfutils.is_paired_analysis([x.get("align_bam") for x in items], items)):
names = set([tz.get_in(["rgnames", "sample"], x) for x in items])
background = [x for x in all_items if tz.get_in(["rgnames", "sample"], x) not in names]
for svdata in caller_fn(items, background):
out.append([svdata])
else:
for svdata in caller_fn(items):
out.append([svdata])
else:
for data in items:
out.append([data])
return out
# ## configuration
def parallel_multiplier(items):
"""Use more resources (up to available limits) if we have multiple SV callers.
"""
return max([1] + [len(_get_svcallers(xs[0])) for xs in items])
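# ---- illustration (not part of the original file) ----
# A hypothetical driver showing how the three stages keyed in _CALLERS are
# meant to be invoked in order (the surrounding pipeline code is assumed):
#
#     samples = run(samples, run_parallel, "initial")   # before variant calling
#     samples = run(samples, run_parallel, "standard")  # regular batch calling
#     samples = run(samples, run_parallel, "ensemble")  # combine other callers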
# ==== repo: openmv/openmv | path: scripts/examples/OpenMV/03-Drawing/text_drawing.py | copies: 3 | size: 1127 | license: mit ====
# Text Drawing
#
# This example shows off drawing text on the OpenMV Cam.
import sensor, image, time, pyb
sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()
while(True):
clock.tick()
img = sensor.snapshot()
for i in range(10):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
r = (pyb.rng() % 127) + 128
g = (pyb.rng() % 127) + 128
b = (pyb.rng() % 127) + 128
# If the first argument is a scaler then this method expects
# to see x, y, and text. Otherwise, it expects a (x,y,text) tuple.
# Character and string rotation can be done at 0, 90, 180, 270, and etc. degrees.
img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False,
char_rotation = 0, char_hmirror = False, char_vflip = False,
string_rotation = 0, string_hmirror = False, string_vflip = False)
print(clock.fps())
# ==== repo: etingof/pyasn1 | path: tests/type/test_constraint.py | copies: 2 | size: 11339 | license: bsd-2-clause ====
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2020, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from tests.base import BaseTestCase
from pyasn1.type import constraint
from pyasn1.type import error
class SingleValueConstraintTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.v1 = 1, 2
self.v2 = 3, 4
self.c1 = constraint.SingleValueConstraint(*self.v1)
self.c2 = constraint.SingleValueConstraint(*self.v2)
def testCmp(self):
assert self.c1 == self.c1, 'comparison fails'
def testHash(self):
assert hash(self.c1) != hash(self.c2), 'hash() fails'
def testGoodVal(self):
try:
self.c1(1)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(4)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
def testContains(self):
for v in self.v1:
assert v in self.c1
assert v not in self.c2
for v in self.v2:
assert v in self.c2
assert v not in self.c1
def testIter(self):
assert set(self.v1) == set(self.c1)
assert set(self.v2) == set(self.c2)
def testSub(self):
subconst = self.c1 - constraint.SingleValueConstraint(self.v1[0])
assert list(subconst) == [self.v1[1]]
def testAdd(self):
superconst = self.c1 + self.c2
assert set(superconst) == set(self.v1 + self.v2)
class ContainedSubtypeConstraintTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ContainedSubtypeConstraint(
constraint.SingleValueConstraint(12)
)
def testGoodVal(self):
try:
self.c1(12)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(4)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ValueRangeConstraintTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ValueRangeConstraint(1, 4)
def testGoodVal(self):
try:
self.c1(1)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(-5)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ValueSizeConstraintTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ValueSizeConstraint(1, 2)
def testGoodVal(self):
try:
self.c1('a')
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1('abc')
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class PermittedAlphabetConstraintTestCase(SingleValueConstraintTestCase):
def setUp(self):
self.v1 = 'A', 'B'
self.v2 = 'C', 'D'
self.c1 = constraint.PermittedAlphabetConstraint(*self.v1)
self.c2 = constraint.PermittedAlphabetConstraint(*self.v2)
def testGoodVal(self):
try:
self.c1('A')
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1('E')
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class WithComponentsConstraintTestCase(BaseTestCase):
def testGoodVal(self):
c = constraint.WithComponentsConstraint(
('A', constraint.ComponentPresentConstraint()),
('B', constraint.ComponentAbsentConstraint()))
try:
c({'A': 1})
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testGoodValWithExtraFields(self):
c = constraint.WithComponentsConstraint(
('A', constraint.ComponentPresentConstraint()),
('B', constraint.ComponentAbsentConstraint())
)
try:
c({'A': 1, 'C': 2})
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testEmptyConstraint(self):
c = constraint.WithComponentsConstraint()
try:
c({'A': 1})
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
c = constraint.WithComponentsConstraint(
('A', constraint.ComponentPresentConstraint())
)
try:
c({'B': 2})
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
def testBadValExtraFields(self):
c = constraint.WithComponentsConstraint(
('A', constraint.ComponentPresentConstraint())
)
try:
c({'B': 2, 'C': 3})
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ConstraintsIntersectionTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ConstraintsIntersection(
constraint.SingleValueConstraint(4),
constraint.ValueRangeConstraint(2, 4)
)
def testCmp1(self):
assert constraint.SingleValueConstraint(4) in self.c1, '__cmp__() fails'
def testCmp2(self):
assert constraint.SingleValueConstraint(5) not in self.c1, \
'__cmp__() fails'
def testCmp3(self):
c = constraint.ConstraintsUnion(constraint.ConstraintsIntersection(
constraint.SingleValueConstraint(4),
constraint.ValueRangeConstraint(2, 4))
)
assert self.c1 in c, '__cmp__() fails'
def testCmp4(self):
c = constraint.ConstraintsUnion(
constraint.ConstraintsIntersection(constraint.SingleValueConstraint(5))
)
assert self.c1 not in c, '__cmp__() fails'
def testGoodVal(self):
try:
self.c1(4)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(-5)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class InnerTypeConstraintTestCase(BaseTestCase):
def testConst1(self):
c = constraint.InnerTypeConstraint(
constraint.SingleValueConstraint(4)
)
try:
c(4, 32)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
try:
c(5, 32)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
def testConst2(self):
c = constraint.InnerTypeConstraint(
(0, constraint.SingleValueConstraint(4), 'PRESENT'),
(1, constraint.SingleValueConstraint(4), 'ABSENT')
)
try:
c(4, 0)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
try:
c(4, 1)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
try:
c(3, 0)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
# Constraints compositions
class ConstraintsIntersectionRangeTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ConstraintsIntersection(
constraint.ValueRangeConstraint(1, 9),
constraint.ValueRangeConstraint(2, 5)
)
def testGoodVal(self):
try:
self.c1(3)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(0)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ConstraintsUnionTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ConstraintsUnion(
constraint.SingleValueConstraint(5),
constraint.ValueRangeConstraint(1, 3)
)
def testGoodVal(self):
try:
self.c1(2)
self.c1(5)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(-5)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ConstraintsExclusionTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ConstraintsExclusion(
constraint.ValueRangeConstraint(2, 4)
)
def testGoodVal(self):
try:
self.c1(6)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(2)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
# Constraints derivations
class DirectDerivationTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.SingleValueConstraint(5)
self.c2 = constraint.ConstraintsUnion(
self.c1, constraint.ValueRangeConstraint(1, 3)
)
def testGoodVal(self):
assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
assert not self.c1.isSubTypeOf(self.c2), 'isSubTypeOf failed'
def testBadVal(self):
assert not self.c2.isSuperTypeOf(self.c1), 'isSuperTypeOf failed'
assert self.c2.isSubTypeOf(self.c1), 'isSubTypeOf failed'
class IndirectDerivationTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ConstraintsIntersection(
constraint.ValueRangeConstraint(1, 30)
)
self.c2 = constraint.ConstraintsIntersection(
self.c1, constraint.ValueRangeConstraint(1, 20)
)
self.c2 = constraint.ConstraintsIntersection(
self.c2, constraint.ValueRangeConstraint(1, 10)
)
def testGoodVal(self):
assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
assert not self.c1.isSubTypeOf(self.c2), 'isSubTypeOf failed'
def testBadVal(self):
assert not self.c2.isSuperTypeOf(self.c1), 'isSuperTypeOf failed'
assert self.c2.isSubTypeOf(self.c1), 'isSubTypeOf failed'
# TODO: how to apply size constraints to constructed types?
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
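# ---- illustration (not part of the original file) ----
# The constraint objects exercised above compose like value sets, e.g.:
#
#     from pyasn1.type import constraint
#     c = constraint.SingleValueConstraint(1, 2) + constraint.SingleValueConstraint(3)
#     assert 3 in c  # union of permitted values, cf. testAdd/testContains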
# ==== repo: maartenq/ansible | path: lib/ansible/modules/packaging/os/package.py | copies: 18 | size: 1929 | license: gpl-3.0 ====
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: package
version_added: 2.0
author:
- Ansible Inc
short_description: Generic OS package manager
description:
- Installs, upgrade and removes packages using the underlying OS package manager.
- For Windows targets, use the M(win_package) module instead.
options:
name:
description:
- "Package name, or package specifier with version, like C(name-1.0)."
- "Be aware that packages are not always named the same and this module will not 'translate' them per distro."
required: true
state:
description:
- Whether to install (C(present)), or remove (C(absent)) a package. Other states depend on the underlying package module, i.e. C(latest).
required: true
use:
description:
- The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it.
- You should only use this field if the automatic selection is not working for some reason.
required: false
default: auto
requirements:
- Whatever is required for the package plugins specific for each system.
notes:
- This module actually calls the pertinent package modules for each system (apt, yum, etc).
- For Windows targets, use the M(win_package) module instead.
'''
EXAMPLES = '''
- name: install ntpdate
package:
name: ntpdate
state: present
# This uses a variable as this changes per distribution.
- name: remove the apache package
package:
name: "{{ apache }}"
state: absent
'''
# ==== repo: blacklin/kbengine | path: kbe/src/lib/python/Lib/heapq.py | copies: 208 | size: 17997 ====
"""Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by a merging passes for these runs, which merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappushpop_max(heap, item):
"""Maxheap version of a heappush followed by a heappop."""
if heap and item < heap[0]:
item, heap[0] = heap[0], item
_siftup_max(heap, 0)
return item
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heappushpop = heappushpop
for elem in it:
_heappushpop(result, elem)
result.sort(reverse=True)
return result
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
_heapify_max(result)
_heappushpop = _heappushpop_max
for elem in it:
_heappushpop(result, elem)
result.sort()
return result
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
# If available, use C implementation
try:
from _heapq import *
except ImportError:
pass
def merge(*iterables):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
_len = len
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), itnum, next])
except _StopIteration:
pass
heapify(h)
while _len(h) > 1:
try:
while True:
v, itnum, next = s = h[0]
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
v, itnum, next = h[0]
yield v
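        # "next" is the bound __next__ method of the sole remaining
        # iterator; its __self__ attribute recovers that iterator so it
        # can be drained directly.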
yield from next.__self__
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [min(chain(head, it))]
return [min(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
    # When key is None, use simpler decoration
if key is None:
it = zip(iterable, count()) # decorate
result = _nsmallest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return [r[2] for r in result] # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [max(chain(head, it))]
return [max(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
    # When key is None, use simpler decoration
if key is None:
it = zip(iterable, count(0,-1)) # decorate
result = _nlargest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(0,-1), in2) # decorate
result = _nlargest(n, it)
return [r[2] for r in result] # undecorate
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print(sort)
import doctest
doctest.testmod()
| lgpl-3.0 |
jagguli/intellij-community | python/lib/Lib/site-packages/django/middleware/gzip.py | 321 | 1455 | import re
from django.utils.text import compress_string
from django.utils.cache import patch_vary_headers
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(object):
"""
This middleware compresses content if the browser allows gzip compression.
It sets the Vary header accordingly, so that caches will base their storage
on the Accept-Encoding header.
"""
def process_response(self, request, response):
# It's not worth compressing non-OK or really short responses.
if response.status_code != 200 or len(response.content) < 200:
return response
patch_vary_headers(response, ('Accept-Encoding',))
# Avoid gzipping if we've already got a content-encoding.
if response.has_header('Content-Encoding'):
return response
        # MSIE has issues with gzipped responses of various content types.
if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
ctype = response.get('Content-Type', '').lower()
if not ctype.startswith("text/") or "javascript" in ctype:
return response
ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
if not re_accepts_gzip.search(ae):
return response
response.content = compress_string(response.content)
response['Content-Encoding'] = 'gzip'
response['Content-Length'] = str(len(response.content))
return response
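# Usage sketch (assuming a standard Django settings module of this era):
# enable the middleware by listing it in MIDDLEWARE_CLASSES, e.g.
#
#     MIDDLEWARE_CLASSES = (
#         'django.middleware.gzip.GZipMiddleware',
#         # ... middleware that may modify response content goes after ...
#     )
#
# Response middleware runs in reverse order, so listing GZipMiddleware
# first makes it compress the final response after the rest of the stack
# has run.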
| apache-2.0 |
ghickman/django | tests/template_tests/filter_tests/test_wordwrap.py | 21 | 2026 | from django.template.defaultfilters import wordwrap
from django.test import SimpleTestCase
from django.utils.functional import lazystr
from django.utils.safestring import mark_safe
from ..utils import setup
class WordwrapTests(SimpleTestCase):
@setup({'wordwrap01':
'{% autoescape off %}{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}{% endautoescape %}'})
def test_wordwrap01(self):
output = self.engine.render_to_string('wordwrap01', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a &\nb a &\nb')
@setup({'wordwrap02': '{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}'})
def test_wordwrap02(self):
output = self.engine.render_to_string('wordwrap02', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a &\nb a &\nb')
class FunctionTests(SimpleTestCase):
def test_wrap(self):
self.assertEqual(
wordwrap('this is a long paragraph of text that really needs to be wrapped I\'m afraid', 14),
'this is a long\nparagraph of\ntext that\nreally needs\nto be wrapped\nI\'m afraid',
)
def test_indent(self):
self.assertEqual(
wordwrap('this is a short paragraph of text.\n But this line should be indented', 14),
'this is a\nshort\nparagraph of\ntext.\n But this\nline should be\nindented',
)
def test_indent2(self):
self.assertEqual(
wordwrap('this is a short paragraph of text.\n But this line should be indented', 15),
'this is a short\nparagraph of\ntext.\n But this line\nshould be\nindented',
)
def test_non_string_input(self):
self.assertEqual(wordwrap(123, 2), '123')
def test_wrap_lazy_string(self):
self.assertEqual(
wordwrap(lazystr(
'this is a long paragraph of text that really needs to be wrapped I\'m afraid'
), 14),
'this is a long\nparagraph of\ntext that\nreally needs\nto be wrapped\nI\'m afraid',
)
| bsd-3-clause |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/modules/pyaes/util.py | 124 | 2032 | # The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Why to_bufferable?
# Python 3 is very different from Python 2.x when it comes to strings of text
# and strings of bytes; in Python 3, str no longer doubles as a string of
# bytes, so arbitrary binary data must be held in a "bytes" object. This method
# ensures the object behaves as we need it to.
def to_bufferable(binary):
return binary
def _get_byte(c):
return ord(c)
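# On Python 2 the xrange lookup below succeeds and the simple definitions
# above are kept; on Python 3 it raises NameError, and bytes-aware
# replacements are installed instead.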
try:
xrange
except:
def to_bufferable(binary):
if isinstance(binary, bytes):
return binary
return bytes(ord(b) for b in binary)
def _get_byte(c):
return c
def append_PKCS7_padding(data):
pad = 16 - (len(data) % 16)
return data + to_bufferable(chr(pad) * pad)
def strip_PKCS7_padding(data):
if len(data) % 16 != 0:
raise ValueError("invalid length")
pad = _get_byte(data[-1])
if not pad or pad > 16:
return data
return data[:-pad]
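# Minimal round-trip sketch of the helpers above (an illustration, not part
# of the upstream API): padding always adds 1..16 bytes, so stripping
# recovers the original data exactly.
if __name__ == '__main__':
    _data = to_bufferable('hello')
    _padded = append_PKCS7_padding(_data)
    assert len(_padded) == 16
    assert strip_PKCS7_padding(_padded) == _data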
| gpl-2.0 |
UOMx/edx-platform | lms/djangoapps/mobile_api/video_outlines/views.py | 121 | 4888 | """
Video Outlines
We only provide the listing view for a video outline, and video outlines are
only displayed at the course level. This is because it makes it a lot easier to
optimize and reason about, and it avoids having to tackle the bigger problem of
general XBlock representation in this rather specialized formatting.
"""
from functools import partial
from django.http import Http404, HttpResponse
from mobile_api.models import MobileApiConfig
from rest_framework import generics
from rest_framework.response import Response
from opaque_keys.edx.locator import BlockUsageLocator
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import modulestore
from ..utils import mobile_view, mobile_course_access
from .serializers import BlockOutline, video_summary
@mobile_view()
class VideoSummaryList(generics.ListAPIView):
"""
**Use Case**
Get a list of all videos in the specified course. You can use the
video_url value to access the video file.
**Example Request**
GET /api/mobile/v0.5/video_outlines/courses/{organization}/{course_number}/{course_run}
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK"
response along with an array of videos in the course. The array
includes the following information for each video.
* named_path: An array that consists of the display names of the
courseware objects in the path to the video.
* path: An array that specifies the complete path to the video in
the courseware hierarchy. The array contains the following
values.
* category: The type of division in the course outline.
Possible values are "chapter", "sequential", and "vertical".
* name: The display name for the object.
            * id: The unique identifier for the video.
* section_url: The URL to the first page of the section that
contains the video in the Learning Management System.
* summary: An array of data about the video that includes the
following values.
* category: The type of component. This value will always be "video".
* duration: The length of the video, if available.
* id: The unique identifier for the video.
* language: The language code for the video.
* name: The display name of the video.
* size: The size of the video file.
* transcripts: An array of language codes and URLs to available
video transcripts. Use the URL value to access a transcript
for the video.
* video_thumbnail_url: The URL to the thumbnail image for the
video, if available.
* video_url: The URL to the video file. Use this value to access
the video.
* unit_url: The URL to the unit that contains the video in the Learning
Management System.
"""
@mobile_course_access(depth=None)
def list(self, request, course, *args, **kwargs):
video_profiles = MobileApiConfig.get_video_profiles()
video_outline = list(
BlockOutline(
course.id,
course,
{"video": partial(video_summary, video_profiles)},
request,
video_profiles,
)
)
return Response(video_outline)
@mobile_view()
class VideoTranscripts(generics.RetrieveAPIView):
"""
**Use Case**
Get a transcript for a specified video and language.
**Example request**
GET /api/mobile/v0.5/video_outlines/transcripts/{organization}/{course_number}/{course_run}/{video ID}/{language code}
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK"
response along with an .srt file that you can download.
"""
@mobile_course_access()
def get(self, request, course, *args, **kwargs):
block_id = kwargs['block_id']
lang = kwargs['lang']
usage_key = BlockUsageLocator(
course.id, block_type="video", block_id=block_id
)
try:
video_descriptor = modulestore().get_item(usage_key)
transcripts = video_descriptor.get_transcripts_info()
content, filename, mimetype = video_descriptor.get_transcript(transcripts, lang=lang)
except (NotFoundError, ValueError, KeyError):
raise Http404(u"Transcript not found for {}, lang: {}".format(block_id, lang))
response = HttpResponse(content, content_type=mimetype)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename.encode('utf-8'))
return response
| agpl-3.0 |
cosenal/osf.io | website/addons/badges/model/badges.py | 35 | 5589 | # -*- coding: utf-8 -*-
import calendar
from bson import ObjectId
from datetime import datetime
from modularodm import fields, Q
from framework.mongo import StoredObject
from framework.guid.model import GuidStoredObject
from website.settings import DOMAIN
from website.util import web_url_for, api_url_for
from website.addons.badges.util import acquire_badge_image
class Badge(GuidStoredObject):
_id = fields.StringField(primary=True)
creator = fields.ForeignField('badgesusersettings', backref='creator')
is_system_badge = fields.BooleanField(default=False)
#Open Badge protocol
name = fields.StringField()
description = fields.StringField()
image = fields.StringField()
criteria = fields.StringField()
#TODO implement tags and alignment
alignment = fields.DictionaryField(list=True)
tags = fields.StringField(list=True)
@classmethod
def get_system_badges(cls):
return cls.find(Q('is_system_badge', 'eq', True))
@classmethod
def create(cls, user_settings, badge_data, save=True):
badge = cls()
badge.creator = user_settings
badge.name = badge_data['badgeName']
badge.description = badge_data['description']
badge.criteria = badge_data['criteria']
badge._ensure_guid()
badge.image = acquire_badge_image(badge_data['imageurl'], badge._id)
if not badge.image:
raise IOError
if save:
badge.save()
return badge
@property
def description_short(self):
words = self.description.split(' ')
if len(words) < 9:
return ' '.join(words)
return '{}...'.format(' '.join(words[:9]))
#TODO Auto link urls?
@property
def criteria_list(self):
tpl = '<ul>{}</ul>'
stpl = '<li>{}</li>'
lines = self.criteria.split('\n')
        return tpl.format(' '.join([stpl.format(line) for line in lines if line]))  # Please don't kill me Steve
@property
def assertions(self):
return self.badgeassertion__assertion
@property
def awarded_count(self):
return len(self.assertions)
@property
def unique_awards_count(self):
return len({assertion.node._id for assertion in self.assertions})
@property
def deep_url(self):
return web_url_for('view_badge', bid=self._id)
@property
def url(self):
return web_url_for('view_badge', bid=self._id)
def make_system_badge(self, save=True):
self.is_system_badge = True
self.save()
def to_json(self):
return {
'id': self._id,
'name': self.name,
'description': self.description,
'image': self.image,
'criteria': self.criteria,
'alignment': self.alignment,
'tags': self.tags,
}
def to_openbadge(self):
return {
'name': self.name,
'description': self.description,
'image': self.image,
'criteria': self.criteria,
'issuer': api_url_for('get_organization_json', _absolute=True, uid=self.creator.owner._id),
'url': '{0}{1}/json/'.format(DOMAIN, self._id), # web url for and GUIDs?
'alignment': self.alignment,
'tags': self.tags,
}
#TODO verification hosted and signed
class BadgeAssertion(StoredObject):
_id = fields.StringField(default=lambda: str(ObjectId()))
#Backrefs
badge = fields.ForeignField('badge', backref='assertion')
node = fields.ForeignField('node', backref='awarded')
_awarder = fields.ForeignField('badgesusersettings')
#Custom fields
revoked = fields.BooleanField(default=False)
reason = fields.StringField()
#Required
issued_on = fields.IntegerField(required=True)
#Optional
evidence = fields.StringField()
expires = fields.StringField()
@classmethod
def create(cls, badge, node, evidence=None, save=True, awarder=None):
b = cls()
b.badge = badge
b.node = node
b.evidence = evidence
        b.issued_on = calendar.timegm(datetime.utcnow().utctimetuple())
b._awarder = awarder
if save:
b.save()
return b
@property
def issued_date(self):
return datetime.fromtimestamp(self.issued_on).strftime('%Y/%m/%d')
@property
    def verify(self):
return {
'type': 'hosted',
'url': api_url_for('get_assertion_json', _absolute=True, aid=self._id)
}
@property
def recipient(self):
return {
            'identity': self.node._id,
'type': 'osfnode', # TODO Could be an email?
'hashed': False
}
@property
def awarder(self):
if self.badge.is_system_badge and self._awarder:
return self._awarder
return self.badge.creator
def to_json(self):
return {
'uid': self._id,
'recipient': self.node._id,
'badge': self.badge._id,
'verify': self.verify,
'issued_on': self.issued_date,
'evidence': self.evidence,
'expires': self.expires
}
def to_openbadge(self):
return {
'uid': self._id,
'recipient': self.recipient,
'badge': '{}{}/json/'.format(DOMAIN, self.badge._id), # GUIDs Web url for
'verify': self.verify,
'issuedOn': self.issued_on,
'evidence': self.evidence,
'expires': self.expires
}
| apache-2.0 |
CWVanderReyden/originalMyHomeNet | Lib/encodings/mac_croatian.py | 593 | 13889 | """ Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-croatian',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u2206' # 0xB4 -> INCREMENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\uf8ff' # 0xD8 -> Apple logo
u'\xa9' # 0xD9 -> COPYRIGHT SIGN
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\xc6' # 0xDE -> LATIN CAPITAL LETTER AE
u'\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2013' # 0xE0 -> EN DASH
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u03c0' # 0xF9 -> GREEK SMALL LETTER PI
u'\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xe6' # 0xFE -> LATIN SMALL LETTER AE
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
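# Usage sketch: once the encodings package registers this module, the codec
# is reachable through the standard machinery, e.g.
#
#     u'\u0160ibenik'.encode('mac-croatian')   # -> '\xa9ibenik'
#     '\xa9ibenik'.decode('mac-croatian')      # -> u'\u0160ibenik'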
| gpl-3.0 |
idea4bsd/idea4bsd | python/helpers/pydev/third_party/pep8/autopep8.py | 34 | 125587 | #!/usr/bin/env python
#
# Copyright (C) 2010-2011 Hideo Hattori
# Copyright (C) 2011-2013 Hideo Hattori, Steven Myint
# Copyright (C) 2013-2015 Hideo Hattori, Steven Myint, Bill Wendling
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Automatically formats Python code to conform to the PEP 8 style guide.
Fixes that only need be done once can be added by adding a function of the form
"fix_<code>(source)" to this module. They should return the fixed source code.
These fixes are picked up by apply_global_fixes().
Fixes that depend on pep8 should be added as methods to FixPEP8. See the class
documentation for more information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import bisect
import codecs
import collections
import copy
import difflib
import fnmatch
import inspect
import io
import itertools
import keyword
import locale
import os
import re
import signal
import sys
import token
import tokenize
import pep8
def check_lib2to3():
try:
import lib2to3
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib2to3'))
import lib2to3
try:
unicode
except NameError:
unicode = str
__version__ = '1.1.2a0'
CR = '\r'
LF = '\n'
CRLF = '\r\n'
PYTHON_SHEBANG_REGEX = re.compile(r'^#!.*\bpython[23]?\b\s*$')
# For generating line shortening candidates.
SHORTEN_OPERATOR_GROUPS = frozenset([
frozenset([',']),
frozenset(['%']),
frozenset([',', '(', '[', '{']),
frozenset(['%', '(', '[', '{']),
frozenset([',', '(', '[', '{', '%', '+', '-', '*', '/', '//']),
frozenset(['%', '+', '-', '*', '/', '//']),
])
DEFAULT_IGNORE = 'E24'
DEFAULT_INDENT_SIZE = 4
# W602 is handled separately due to the need to avoid "with_traceback".
CODE_TO_2TO3 = {
'E231': ['ws_comma'],
'E721': ['idioms'],
'W601': ['has_key'],
'W603': ['ne'],
'W604': ['repr'],
'W690': ['apply',
'except',
'exitfunc',
'numliterals',
'operator',
'paren',
'reduce',
'renames',
'standarderror',
'sys_exc',
'throw',
'tuple_params',
'xreadlines']}
if sys.platform == 'win32': # pragma: no cover
DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
os.path.expanduser('~/.config'), 'pep8')
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
def open_with_encoding(filename, encoding=None, mode='r'):
"""Return opened file with a specific encoding."""
if not encoding:
encoding = detect_encoding(filename)
return io.open(filename, mode=mode, encoding=encoding,
newline='') # Preserve line endings
def detect_encoding(filename):
"""Return file encoding."""
try:
with open(filename, 'rb') as input_file:
check_lib2to3()
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0]
# Check for correctness of encoding
with open_with_encoding(filename, encoding) as test_file:
test_file.read()
return encoding
except (LookupError, SyntaxError, UnicodeDecodeError):
return 'latin-1'
def readlines_from_file(filename):
"""Return contents of file."""
with open_with_encoding(filename) as input_file:
return input_file.readlines()
def extended_blank_lines(logical_line,
blank_lines,
blank_before,
indent_level,
previous_logical):
"""Check for missing blank lines after class declaration."""
if previous_logical.startswith('class '):
if logical_line.startswith(('def ', 'class ', '@')):
if indent_level and not blank_lines and not blank_before:
yield (0, 'E309 expected 1 blank line after class declaration')
elif previous_logical.startswith('def '):
if blank_lines and pep8.DOCSTRING_REGEX.match(logical_line):
yield (0, 'E303 too many blank lines ({0})'.format(blank_lines))
elif pep8.DOCSTRING_REGEX.match(previous_logical):
# Missing blank line between class docstring and method declaration.
if (
indent_level and
not blank_lines and
not blank_before and
            logical_line.startswith('def ') and
'(self' in logical_line
):
yield (0, 'E301 expected 1 blank line, found 0')
pep8.register_check(extended_blank_lines)
def continued_indentation(logical_line, tokens, indent_level, indent_char,
noqa):
"""Override pep8's function to provide indentation information."""
first_row = tokens[0][2][0]
nrows = 1 + tokens[-1][2][0] - first_row
if noqa or nrows == 1:
return
# indent_next tells us whether the next block is indented. Assuming
# that it is indented by 4 spaces, then we should not allow 4-space
# indents on the final continuation line. In turn, some other
# indents are allowed to have an extra 4 spaces.
indent_next = logical_line.endswith(':')
row = depth = 0
valid_hangs = (
(DEFAULT_INDENT_SIZE,)
if indent_char != '\t' else (DEFAULT_INDENT_SIZE,
2 * DEFAULT_INDENT_SIZE)
)
# Remember how many brackets were opened on each line.
parens = [0] * nrows
# Relative indents of physical lines.
rel_indent = [0] * nrows
# For each depth, collect a list of opening rows.
open_rows = [[0]]
# For each depth, memorize the hanging indentation.
hangs = [None]
# Visual indents.
indent_chances = {}
last_indent = tokens[0][2]
indent = [last_indent[1]]
last_token_multiline = None
line = None
last_line = ''
last_line_begins_with_multiline = False
for token_type, text, start, end, line in tokens:
newline = row < start[0] - first_row
if newline:
row = start[0] - first_row
newline = (not last_token_multiline and
token_type not in (tokenize.NL, tokenize.NEWLINE))
last_line_begins_with_multiline = last_token_multiline
if newline:
# This is the beginning of a continuation line.
last_indent = start
# Record the initial indent.
rel_indent[row] = pep8.expand_indent(line) - indent_level
# Identify closing bracket.
close_bracket = (token_type == tokenize.OP and text in ']})')
# Is the indent relative to an opening bracket line?
for open_row in reversed(open_rows[depth]):
hang = rel_indent[row] - rel_indent[open_row]
hanging_indent = hang in valid_hangs
if hanging_indent:
break
if hangs[depth]:
hanging_indent = (hang == hangs[depth])
visual_indent = (not close_bracket and hang > 0 and
indent_chances.get(start[1]))
if close_bracket and indent[depth]:
# Closing bracket for visual indent.
if start[1] != indent[depth]:
yield (start, 'E124 {0}'.format(indent[depth]))
elif close_bracket and not hang:
pass
elif indent[depth] and start[1] < indent[depth]:
# Visual indent is broken.
yield (start, 'E128 {0}'.format(indent[depth]))
elif (hanging_indent or
(indent_next and
rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)):
# Hanging indent is verified.
if close_bracket:
yield (start, 'E123 {0}'.format(indent_level +
rel_indent[open_row]))
hangs[depth] = hang
elif visual_indent is True:
# Visual indent is verified.
indent[depth] = start[1]
elif visual_indent in (text, unicode):
# Ignore token lined up with matching one from a previous line.
pass
else:
one_indented = (indent_level + rel_indent[open_row] +
DEFAULT_INDENT_SIZE)
# Indent is broken.
if hang <= 0:
error = ('E122', one_indented)
elif indent[depth]:
error = ('E127', indent[depth])
elif hang > DEFAULT_INDENT_SIZE:
error = ('E126', one_indented)
else:
hangs[depth] = hang
error = ('E121', one_indented)
yield (start, '{0} {1}'.format(*error))
# Look for visual indenting.
if (
parens[row] and
token_type not in (tokenize.NL, tokenize.COMMENT) and
not indent[depth]
):
indent[depth] = start[1]
indent_chances[start[1]] = True
# Deal with implicit string concatenation.
elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
text in ('u', 'ur', 'b', 'br')):
indent_chances[start[1]] = unicode
# Special case for the "if" statement because len("if (") is equal to
# 4.
elif not indent_chances and not row and not depth and text == 'if':
indent_chances[end[1] + 1] = True
elif text == ':' and line[end[1]:].isspace():
open_rows[depth].append(row)
# Keep track of bracket depth.
if token_type == tokenize.OP:
if text in '([{':
depth += 1
indent.append(0)
hangs.append(None)
if len(open_rows) == depth:
open_rows.append([])
open_rows[depth].append(row)
parens[row] += 1
elif text in ')]}' and depth > 0:
# Parent indents should not be more than this one.
prev_indent = indent.pop() or last_indent[1]
hangs.pop()
for d in range(depth):
if indent[d] > prev_indent:
indent[d] = 0
for ind in list(indent_chances):
if ind >= prev_indent:
del indent_chances[ind]
del open_rows[depth + 1:]
depth -= 1
if depth:
indent_chances[indent[depth]] = True
for idx in range(row, -1, -1):
if parens[idx]:
parens[idx] -= 1
break
assert len(indent) == depth + 1
if (
start[1] not in indent_chances and
# This is for purposes of speeding up E121 (GitHub #90).
not last_line.rstrip().endswith(',')
):
# Allow to line up tokens.
indent_chances[start[1]] = text
last_token_multiline = (start[0] != end[0])
if last_token_multiline:
rel_indent[end[0] - first_row] = rel_indent[row]
last_line = line
if (
indent_next and
not last_line_begins_with_multiline and
pep8.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE
):
pos = (start[0], indent[0] + 4)
yield (pos, 'E125 {0}'.format(indent_level +
2 * DEFAULT_INDENT_SIZE))
del pep8._checks['logical_line'][pep8.continued_indentation]
pep8.register_check(continued_indentation)
class FixPEP8(object):
"""Fix invalid code.
Fixer methods are prefixed "fix_". The _fix_source() method looks for these
automatically.
The fixer method can take either one or two arguments (in addition to
self). The first argument is "result", which is the error information from
pep8. The second argument, "logical", is required only for logical-line
fixes.
The fixer method can return the list of modified lines or None. An empty
list would mean that no changes were made. None would mean that only the
line reported in the pep8 error was modified. Note that the modified line
numbers that are returned are indexed at 1. This typically would correspond
with the line number reported in the pep8 error information.
[fixed method list]
- e121,e122,e123,e124,e125,e126,e127,e128,e129
- e201,e202,e203
- e211
- e221,e222,e223,e224,e225
- e231
- e251
- e261,e262
- e271,e272,e273,e274
- e301,e302,e303
- e401
- e502
- e701,e702
- e711
- w291
"""
def __init__(self, filename,
options,
contents=None,
long_line_ignore_cache=None):
self.filename = filename
if contents is None:
self.source = readlines_from_file(filename)
else:
sio = io.StringIO(contents)
self.source = sio.readlines()
self.options = options
self.indent_word = _get_indentword(''.join(self.source))
self.long_line_ignore_cache = (
set() if long_line_ignore_cache is None
else long_line_ignore_cache)
# Many fixers are the same even though pep8 categorizes them
# differently.
self.fix_e115 = self.fix_e112
self.fix_e116 = self.fix_e113
self.fix_e121 = self._fix_reindent
self.fix_e122 = self._fix_reindent
self.fix_e123 = self._fix_reindent
self.fix_e124 = self._fix_reindent
self.fix_e126 = self._fix_reindent
self.fix_e127 = self._fix_reindent
self.fix_e128 = self._fix_reindent
self.fix_e129 = self._fix_reindent
self.fix_e202 = self.fix_e201
self.fix_e203 = self.fix_e201
self.fix_e211 = self.fix_e201
self.fix_e221 = self.fix_e271
self.fix_e222 = self.fix_e271
self.fix_e223 = self.fix_e271
self.fix_e226 = self.fix_e225
self.fix_e227 = self.fix_e225
self.fix_e228 = self.fix_e225
self.fix_e241 = self.fix_e271
self.fix_e242 = self.fix_e224
self.fix_e261 = self.fix_e262
self.fix_e272 = self.fix_e271
self.fix_e273 = self.fix_e271
self.fix_e274 = self.fix_e271
self.fix_e309 = self.fix_e301
self.fix_e501 = (
self.fix_long_line_logically if
options and (options.aggressive >= 2 or options.experimental) else
self.fix_long_line_physically)
self.fix_e703 = self.fix_e702
self.fix_w293 = self.fix_w291
def _fix_source(self, results):
try:
(logical_start, logical_end) = _find_logical(self.source)
logical_support = True
except (SyntaxError, tokenize.TokenError): # pragma: no cover
logical_support = False
completed_lines = set()
for result in sorted(results, key=_priority_key):
if result['line'] in completed_lines:
continue
fixed_methodname = 'fix_' + result['id'].lower()
if hasattr(self, fixed_methodname):
fix = getattr(self, fixed_methodname)
line_index = result['line'] - 1
original_line = self.source[line_index]
is_logical_fix = len(inspect.getargspec(fix).args) > 2
if is_logical_fix:
logical = None
if logical_support:
logical = _get_logical(self.source,
result,
logical_start,
logical_end)
if logical and set(range(
logical[0][0] + 1,
logical[1][0] + 1)).intersection(
completed_lines):
continue
modified_lines = fix(result, logical)
else:
modified_lines = fix(result)
if modified_lines is None:
# Force logical fixes to report what they modified.
assert not is_logical_fix
if self.source[line_index] == original_line:
modified_lines = []
if modified_lines:
completed_lines.update(modified_lines)
elif modified_lines == []: # Empty list means no fix
if self.options.verbose >= 2:
print(
'---> Not fixing {f} on line {l}'.format(
f=result['id'], l=result['line']),
file=sys.stderr)
else: # We assume one-line fix when None.
completed_lines.add(result['line'])
else:
if self.options.verbose >= 3:
print(
"---> '{0}' is not defined.".format(fixed_methodname),
file=sys.stderr)
info = result['info'].strip()
print('---> {0}:{1}:{2}:{3}'.format(self.filename,
result['line'],
result['column'],
info),
file=sys.stderr)
def fix(self):
"""Return a version of the source code with PEP 8 violations fixed."""
pep8_options = {
'ignore': self.options.ignore,
'select': self.options.select,
'max_line_length': self.options.max_line_length,
}
results = _execute_pep8(pep8_options, self.source)
if self.options.verbose:
progress = {}
for r in results:
if r['id'] not in progress:
progress[r['id']] = set()
progress[r['id']].add(r['line'])
print('---> {n} issue(s) to fix {progress}'.format(
n=len(results), progress=progress), file=sys.stderr)
if self.options.line_range:
start, end = self.options.line_range
results = [r for r in results
if start <= r['line'] <= end]
self._fix_source(filter_results(source=''.join(self.source),
results=results,
aggressive=self.options.aggressive))
if self.options.line_range:
# If number of lines has changed then change line_range.
count = sum(sline.count('\n')
for sline in self.source[start - 1:end])
self.options.line_range[1] = start + count - 1
return ''.join(self.source)
def _fix_reindent(self, result):
"""Fix a badly indented line.
This is done by adding or removing from its initial indent only.
"""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
self.source[line_index] = ' ' * num_indent_spaces + target.lstrip()
def fix_e112(self, result):
"""Fix under-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
if not target.lstrip().startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = self.indent_word + target
def fix_e113(self, result):
"""Fix over-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
indent = _get_indentation(target)
stripped = target.lstrip()
if not stripped.startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = indent[1:] + stripped
def fix_e125(self, result):
"""Fix indentation undistinguish from the next logical line."""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
spaces_to_add = num_indent_spaces - len(_get_indentation(target))
indent = len(_get_indentation(target))
modified_lines = []
while len(_get_indentation(self.source[line_index])) >= indent:
self.source[line_index] = (' ' * spaces_to_add +
self.source[line_index])
modified_lines.append(1 + line_index) # Line indexed at 1.
line_index -= 1
return modified_lines
def fix_e201(self, result):
"""Remove extraneous whitespace."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if is_probably_part_of_multiline(target):
return []
fixed = fix_whitespace(target,
offset=offset,
replacement='')
self.source[line_index] = fixed
def fix_e224(self, result):
"""Remove extraneous whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + target[offset:].replace('\t', ' ')
self.source[result['line'] - 1] = fixed
def fix_e225(self, result):
"""Fix missing whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + ' ' + target[offset:]
# Only proceed if non-whitespace characters match.
# And make sure we don't break the indentation.
if (
fixed.replace(' ', '') == target.replace(' ', '') and
_get_indentation(fixed) == _get_indentation(target)
):
self.source[result['line'] - 1] = fixed
else:
return []
def fix_e231(self, result):
"""Add missing whitespace."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column']
fixed = target[:offset] + ' ' + target[offset:]
self.source[line_index] = fixed
def fix_e251(self, result):
"""Remove whitespace around parameter '=' sign."""
line_index = result['line'] - 1
target = self.source[line_index]
        # This is necessary since pep8 sometimes reports columns that go
        # past the end of the physical line. This happens in cases like:
        # foo(bar\n=None)
c = min(result['column'] - 1,
len(target) - 1)
if target[c].strip():
fixed = target
else:
fixed = target[:c].rstrip() + target[c:].lstrip()
# There could be an escaped newline
#
# def foo(a=\
# 1)
if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
self.source[line_index] = fixed.rstrip('\n\r \t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2] # Line indexed at 1
self.source[result['line'] - 1] = fixed
def fix_e262(self, result):
"""Fix spacing after comment hash."""
target = self.source[result['line'] - 1]
offset = result['column']
code = target[:offset].rstrip(' \t#')
comment = target[offset:].lstrip(' \t#')
fixed = code + (' # ' + comment if comment.strip() else '\n')
self.source[result['line'] - 1] = fixed
def fix_e271(self, result):
"""Fix extraneous whitespace around keywords."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if is_probably_part_of_multiline(target):
return []
fixed = fix_whitespace(target,
offset=offset,
replacement=' ')
if fixed == target:
return []
else:
self.source[line_index] = fixed
def fix_e301(self, result):
"""Add missing blank line."""
cr = '\n'
self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
def fix_e302(self, result):
"""Add missing 2 blank lines."""
add_linenum = 2 - int(result['info'].split()[-1])
cr = '\n' * add_linenum
self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
def fix_e303(self, result):
"""Remove extra blank lines."""
delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2
delete_linenum = max(1, delete_linenum)
# We need to count because pep8 reports an offset line number if there
# are comments.
cnt = 0
line = result['line'] - 2
modified_lines = []
while cnt < delete_linenum and line >= 0:
if not self.source[line].strip():
self.source[line] = ''
modified_lines.append(1 + line) # Line indexed at 1
cnt += 1
line -= 1
return modified_lines
def fix_e304(self, result):
"""Remove blank line following function decorator."""
line = result['line'] - 2
if not self.source[line].strip():
self.source[line] = ''
def fix_e401(self, result):
"""Put imports on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if not target.lstrip().startswith('import'):
return []
indentation = re.split(pattern=r'\bimport\b',
string=target, maxsplit=1)[0]
fixed = (target[:offset].rstrip('\t ,') + '\n' +
indentation + 'import ' + target[offset:].lstrip('\t ,'))
self.source[line_index] = fixed
def fix_long_line_logically(self, result, logical):
"""Try to make lines fit within --max-line-length characters."""
if (
not logical or
len(logical[2]) == 1 or
self.source[result['line'] - 1].lstrip().startswith('#')
):
return self.fix_long_line_physically(result)
start_line_index = logical[0][0]
end_line_index = logical[1][0]
logical_lines = logical[2]
previous_line = get_item(self.source, start_line_index - 1, default='')
next_line = get_item(self.source, end_line_index + 1, default='')
single_line = join_logical_line(''.join(logical_lines))
try:
fixed = self.fix_long_line(
target=single_line,
previous_line=previous_line,
next_line=next_line,
original=''.join(logical_lines))
except (SyntaxError, tokenize.TokenError):
return self.fix_long_line_physically(result)
if fixed:
for line_index in range(start_line_index, end_line_index + 1):
self.source[line_index] = ''
self.source[start_line_index] = fixed
return range(start_line_index + 1, end_line_index + 1)
else:
return []
def fix_long_line_physically(self, result):
"""Try to make lines fit within --max-line-length characters."""
line_index = result['line'] - 1
target = self.source[line_index]
previous_line = get_item(self.source, line_index - 1, default='')
next_line = get_item(self.source, line_index + 1, default='')
try:
fixed = self.fix_long_line(
target=target,
previous_line=previous_line,
next_line=next_line,
original=target)
except (SyntaxError, tokenize.TokenError):
return []
if fixed:
self.source[line_index] = fixed
return [line_index + 1]
else:
return []
def fix_long_line(self, target, previous_line,
next_line, original):
cache_entry = (target, previous_line, next_line)
if cache_entry in self.long_line_ignore_cache:
return []
if target.lstrip().startswith('#'):
# Wrap commented lines.
return shorten_comment(
line=target,
max_line_length=self.options.max_line_length,
last_comment=not next_line.lstrip().startswith('#'))
fixed = get_fixed_long_line(
target=target,
previous_line=previous_line,
original=original,
indent_word=self.indent_word,
max_line_length=self.options.max_line_length,
aggressive=self.options.aggressive,
experimental=self.options.experimental,
verbose=self.options.verbose)
if fixed and not code_almost_equal(original, fixed):
return fixed
else:
self.long_line_ignore_cache.add(cache_entry)
return None
def fix_e502(self, result):
"""Remove extraneous escape of newline."""
line_index = result['line'] - 1
target = self.source[line_index]
self.source[line_index] = target.rstrip('\n\r \t\\') + '\n'
def fix_e701(self, result):
"""Put colon-separated compound statement on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
c = result['column']
fixed_source = (target[:c] + '\n' +
_get_indentation(target) + self.indent_word +
target[c:].lstrip('\n\r \t\\'))
self.source[result['line'] - 1] = fixed_source
return [result['line'], result['line'] + 1]
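# Illustrative sketch of the fix_e701 transformation (assuming pep8
# reports the colon's column and indent_word is four spaces):
#   'if x: y = 1\n'  ->  'if x:\n    y = 1\n'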
def fix_e702(self, result, logical):
"""Put semicolon-separated compound statement on separate lines."""
if not logical:
return [] # pragma: no cover
logical_lines = logical[2]
line_index = result['line'] - 1
target = self.source[line_index]
if target.rstrip().endswith('\\'):
# Normalize '1; \\\n2' into '1; 2'.
self.source[line_index] = target.rstrip('\n \r\t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2]
if target.rstrip().endswith(';'):
self.source[line_index] = target.rstrip('\n \r\t;') + '\n'
return [line_index + 1]
offset = result['column'] - 1
first = target[:offset].rstrip(';').rstrip()
second = (_get_indentation(logical_lines[0]) +
target[offset:].lstrip(';').lstrip())
# Find the inline comment, if any.
inline_comment = None
if '# ' == target[offset:].lstrip(';').lstrip()[:2]:
inline_comment = target[offset:].lstrip(';')
if inline_comment:
self.source[line_index] = first + inline_comment
else:
self.source[line_index] = first + '\n' + second
return [line_index + 1]
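# Illustrative sketch of the fix_e702 transformation (the column is a
# hypothetical pep8 result pointing at the semicolon):
#   'x = 1; y = 2\n'  ->  'x = 1\ny = 2\n'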
def fix_e711(self, result):
"""Fix comparison with None."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
if not right.startswith('None'):
return []
if center.strip() == '==':
new_center = 'is'
elif center.strip() == '!=':
new_center = 'is not'
else:
return []
self.source[line_index] = ' '.join([left, new_center, right])
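# Illustrative sketch of the fix_e711 transformation:
#   'if x == None:\n'  ->  'if x is None:\n'
#   'if x != None:\n'  ->  'if x is not None:\n'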
def fix_e712(self, result):
"""Fix comparison with boolean."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
# Handle very easy "not" special cases.
if re.match(r'^\s*if \w+ == False:$', target):
self.source[line_index] = re.sub(r'if (\w+) == False:',
r'if not \1:', target, count=1)
elif re.match(r'^\s*if \w+ != True:$', target):
self.source[line_index] = re.sub(r'if (\w+) != True:',
r'if not \1:', target, count=1)
else:
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
# Handle simple cases only.
new_right = None
if center.strip() == '==':
if re.match(r'\bTrue\b', right):
new_right = re.sub(r'\bTrue\b *', '', right, count=1)
elif center.strip() == '!=':
if re.match(r'\bFalse\b', right):
new_right = re.sub(r'\bFalse\b *', '', right, count=1)
if new_right is None:
return []
if new_right[0].isalnum():
new_right = ' ' + new_right
self.source[line_index] = left + new_right
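# Illustrative sketch of the fix_e712 transformation:
#   'if x == False:\n'  ->  'if not x:\n'  (special-cased above)
#   'if x == True:\n'   ->  'if x:\n'      (the '== True' is dropped)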
def fix_e713(self, result):
"""Fix non-membership check."""
line_index = result['line'] - 1
target = self.source[line_index]
# Handle very easy case only.
if re.match(r'^\s*if not \w+ in \w+:$', target):
self.source[line_index] = re.sub(r'if not (\w+) in (\w+):',
r'if \1 not in \2:',
target,
count=1)
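# Illustrative sketch of the fix_e713 transformation:
#   'if not x in y:\n'  ->  'if x not in y:\n'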
def fix_w291(self, result):
"""Remove trailing whitespace."""
fixed_line = self.source[result['line'] - 1].rstrip()
self.source[result['line'] - 1] = fixed_line + '\n'
def get_fixed_long_line(target, previous_line, original,
indent_word=' ', max_line_length=79,
aggressive=False, experimental=False, verbose=False):
"""Break up long line and return result.
Do this by generating multiple reformatted candidates and then
ranking the candidates to heuristically select the best option.
"""
indent = _get_indentation(target)
source = target[len(indent):]
assert source.lstrip() == source
# Check for partial multiline.
tokens = list(generate_tokens(source))
candidates = shorten_line(
tokens, source, indent,
indent_word,
max_line_length,
aggressive=aggressive,
experimental=experimental,
previous_line=previous_line)
# Also sort alphabetically as a tie breaker (for determinism).
candidates = sorted(
sorted(set(candidates).union([target, original])),
key=lambda x: line_shortening_rank(x,
indent_word,
max_line_length,
experimental))
if verbose >= 4:
print(('-' * 79 + '\n').join([''] + candidates + ['']),
file=wrap_output(sys.stderr, 'utf-8'))
if candidates:
return candidates[0]
def join_logical_line(logical_line):
"""Return single line based on logical line input."""
indentation = _get_indentation(logical_line)
return indentation + untokenize_without_newlines(
generate_tokens(logical_line.lstrip())) + '\n'
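# For example, a wrapped logical line such as '    x = (1 +\n        2)\n'
# collapses to roughly '    x = (1 + 2)\n' (modulo trailing whitespace
# left by the token walk).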
def untokenize_without_newlines(tokens):
"""Return source code based on tokens."""
text = ''
last_row = 0
last_column = -1
for t in tokens:
token_string = t[1]
(start_row, start_column) = t[2]
(end_row, end_column) = t[3]
if start_row > last_row:
last_column = 0
if (
(start_column > last_column or token_string == '\n') and
not text.endswith(' ')
):
text += ' '
if token_string != '\n':
text += token_string
last_row = end_row
last_column = end_column
return text
def _find_logical(source_lines):
# Collect the start and end positions of each logical line.
logical_start = []
logical_end = []
last_newline = True
parens = 0
for t in generate_tokens(''.join(source_lines)):
if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
tokenize.INDENT, tokenize.NL,
tokenize.ENDMARKER]:
continue
if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
last_newline = True
logical_end.append((t[3][0] - 1, t[2][1]))
continue
if last_newline and not parens:
logical_start.append((t[2][0] - 1, t[2][1]))
last_newline = False
if t[0] == tokenize.OP:
if t[1] in '([{':
parens += 1
elif t[1] in '}])':
parens -= 1
return (logical_start, logical_end)
def _get_logical(source_lines, result, logical_start, logical_end):
"""Return the logical line corresponding to the result.
Assumes input is already E702-clean.
"""
row = result['line'] - 1
col = result['column'] - 1
ls = None
le = None
for i in range(0, len(logical_start), 1):
assert logical_end
x = logical_end[i]
if x[0] > row or (x[0] == row and x[1] > col):
le = x
ls = logical_start[i]
break
if ls is None:
return None
original = source_lines[ls[0]:le[0] + 1]
return ls, le, original
def get_item(items, index, default=None):
if 0 <= index < len(items):
return items[index]
else:
return default
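# Examples:
#   get_item([10, 20, 30], 1)             ->  20
#   get_item([10, 20, 30], 5, default=0)  ->  0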
def reindent(source, indent_size):
"""Reindent all lines."""
reindenter = Reindenter(source)
return reindenter.run(indent_size)
def code_almost_equal(a, b):
"""Return True if code is similar.
Ignore whitespace when comparing specific line.
"""
split_a = split_and_strip_non_empty_lines(a)
split_b = split_and_strip_non_empty_lines(b)
if len(split_a) != len(split_b):
return False
for index in range(len(split_a)):
if ''.join(split_a[index].split()) != ''.join(split_b[index].split()):
return False
return True
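# Examples:
#   code_almost_equal('x = 1\n', 'x  =  1\n')  ->  True
#   code_almost_equal('x = 1\n', 'x = 2\n')    ->  False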
def split_and_strip_non_empty_lines(text):
"""Return lines split by newline.
Ignore empty lines.
"""
return [line.strip() for line in text.splitlines() if line.strip()]
def fix_e265(source, aggressive=False): # pylint: disable=unused-argument
"""Format block comments."""
if '#' not in source:
# Optimization.
return source
ignored_line_numbers = multiline_string_lines(
source,
include_docstrings=True) | set(commented_out_code_lines(source))
fixed_lines = []
sio = io.StringIO(source)
for (line_number, line) in enumerate(sio.readlines(), start=1):
if (
line.lstrip().startswith('#') and
line_number not in ignored_line_numbers
):
indentation = _get_indentation(line)
line = line.lstrip()
# Normalize beginning if not a shebang.
if len(line) > 1:
pos = next((index for index, c in enumerate(line)
if c != '#'))
if (
# Leave multiple spaces like '# ' alone.
(line[:pos].count('#') > 1 or line[1].isalnum()) and
# Leave stylistic outlined blocks alone.
not line.rstrip().endswith('#')
):
line = '# ' + line.lstrip('# \t')
fixed_lines.append(indentation + line)
else:
fixed_lines.append(line)
return ''.join(fixed_lines)
def refactor(source, fixer_names, ignore=None, filename=''):
"""Return refactored code using lib2to3.
Skip if ignore string is produced in the refactored code.
"""
check_lib2to3()
from lib2to3 import pgen2
try:
new_text = refactor_with_2to3(source,
fixer_names=fixer_names,
filename=filename)
except (pgen2.parse.ParseError,
SyntaxError,
UnicodeDecodeError,
UnicodeEncodeError):
return source
if ignore:
if ignore in new_text and ignore not in source:
return source
return new_text
def code_to_2to3(select, ignore):
fixes = set()
for code, fix in CODE_TO_2TO3.items():
if code_match(code, select=select, ignore=ignore):
fixes |= set(fix)
return fixes
def fix_2to3(source,
aggressive=True, select=None, ignore=None, filename=''):
"""Fix various deprecated code (via lib2to3)."""
if not aggressive:
return source
select = select or []
ignore = ignore or []
return refactor(source,
code_to_2to3(select=select,
ignore=ignore),
filename=filename)
def fix_w602(source, aggressive=True):
"""Fix deprecated form of raising exception."""
if not aggressive:
return source
return refactor(source, ['raise'],
ignore='with_traceback')
def find_newline(source):
"""Return type of newline used in source.
Input is a list of lines.
"""
assert not isinstance(source, unicode)
counter = collections.defaultdict(int)
for line in source:
if line.endswith(CRLF):
counter[CRLF] += 1
elif line.endswith(CR):
counter[CR] += 1
elif line.endswith(LF):
counter[LF] += 1
return (sorted(counter, key=counter.get, reverse=True) or [LF])[0]
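# For example, ['a\r\n', 'b\r\n', 'c\n'] yields CRLF (the most common
# ending); input with no recognized endings falls back to LF. CR, LF and
# CRLF are module-level constants defined elsewhere in this file.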
def _get_indentword(source):
"""Return indentation type."""
indent_word = ' ' # Default in case source has no indentation
try:
for t in generate_tokens(source):
if t[0] == token.INDENT:
indent_word = t[1]
break
except (SyntaxError, tokenize.TokenError):
pass
return indent_word
def _get_indentation(line):
"""Return leading whitespace."""
if line.strip():
non_whitespace_index = len(line) - len(line.lstrip())
return line[:non_whitespace_index]
else:
return ''
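# Examples:
#   _get_indentation('    x = 1\n')  ->  '    '
#   _get_indentation('\n')           ->  ''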
def get_diff_text(old, new, filename):
"""Return text of unified diff between old and new."""
newline = '\n'
diff = difflib.unified_diff(
old, new,
'original/' + filename,
'fixed/' + filename,
lineterm=newline)
text = ''
for line in diff:
text += line
# Work around missing newline (http://bugs.python.org/issue2142).
if text and not line.endswith(newline):
text += newline + r'\ No newline at end of file' + newline
return text
def _priority_key(pep8_result):
"""Key for sorting PEP8 results.
Global fixes should be done first. This is important for things like
indentation.
"""
priority = [
# Fix multiline colon-based before semicolon based.
'e701',
# Break multiline statements early.
'e702',
# Things that make lines longer.
'e225', 'e231',
# Remove extraneous whitespace before breaking lines.
'e201',
# Shorten whitespace in comment before resorting to wrapping.
'e262'
]
middle_index = 10000
lowest_priority = [
# We need to shorten lines last since the logical fixer can get in a
# loop, which causes us to exit early.
'e501'
]
key = pep8_result['id'].lower()
try:
return priority.index(key)
except ValueError:
try:
return middle_index + lowest_priority.index(key) + 1
except ValueError:
return middle_index
def shorten_line(tokens, source, indentation, indent_word, max_line_length,
aggressive=False, experimental=False, previous_line=''):
"""Separate line at OPERATOR.
Multiple candidates will be yielded.
"""
for candidate in _shorten_line(tokens=tokens,
source=source,
indentation=indentation,
indent_word=indent_word,
aggressive=aggressive,
previous_line=previous_line):
yield candidate
if aggressive:
for key_token_strings in SHORTEN_OPERATOR_GROUPS:
shortened = _shorten_line_at_tokens(
tokens=tokens,
source=source,
indentation=indentation,
indent_word=indent_word,
key_token_strings=key_token_strings,
aggressive=aggressive)
if shortened is not None and shortened != source:
yield shortened
if experimental:
for shortened in _shorten_line_at_tokens_new(
tokens=tokens,
source=source,
indentation=indentation,
max_line_length=max_line_length):
yield shortened
def _shorten_line(tokens, source, indentation, indent_word,
aggressive=False, previous_line=''):
"""Separate line at OPERATOR.
The input is expected to be free of newlines except for inside multiline
strings and at the end.
Multiple candidates will be yielded.
"""
for (token_type,
token_string,
start_offset,
end_offset) in token_offsets(tokens):
if (
token_type == tokenize.COMMENT and
not is_probably_part_of_multiline(previous_line) and
not is_probably_part_of_multiline(source) and
not source[start_offset + 1:].strip().lower().startswith(
('noqa', 'pragma:', 'pylint:'))
):
# Move inline comments to previous line.
first = source[:start_offset]
second = source[start_offset:]
yield (indentation + second.strip() + '\n' +
indentation + first.strip() + '\n')
elif token_type == token.OP and token_string != '=':
# Don't break on '=' after keyword as this violates PEP 8.
assert token_type != token.INDENT
first = source[:end_offset]
second_indent = indentation
if first.rstrip().endswith('('):
second_indent += indent_word
elif '(' in first:
second_indent += ' ' * (1 + first.find('('))
else:
second_indent += indent_word
second = (second_indent + source[end_offset:].lstrip())
if (
not second.strip() or
second.lstrip().startswith('#')
):
continue
# Do not begin a line with a comma
if second.lstrip().startswith(','):
continue
# Do not end a line with a dot
if first.rstrip().endswith('.'):
continue
if token_string in '+-*/':
fixed = first + ' \\' + '\n' + second
else:
fixed = first + '\n' + second
# Only fix if syntax is okay.
if check_syntax(normalize_multiline(fixed)
if aggressive else fixed):
yield indentation + fixed
# A convenient way to handle tokens.
Token = collections.namedtuple('Token', ['token_type', 'token_string',
'spos', 'epos', 'line'])
class ReformattedLines(object):
"""The reflowed lines of atoms.
Each part of the line is represented as an "atom." They can be moved
around when need be to get the optimal formatting.
"""
###########################################################################
# Private Classes
class _Indent(object):
"""Represent an indentation in the atom stream."""
def __init__(self, indent_amt):
self._indent_amt = indent_amt
def emit(self):
return ' ' * self._indent_amt
@property
def size(self):
return self._indent_amt
class _Space(object):
"""Represent a space in the atom stream."""
def emit(self):
return ' '
@property
def size(self):
return 1
class _LineBreak(object):
"""Represent a line break in the atom stream."""
def emit(self):
return '\n'
@property
def size(self):
return 0
def __init__(self, max_line_length):
self._max_line_length = max_line_length
self._lines = []
self._bracket_depth = 0
self._prev_item = None
self._prev_prev_item = None
def __repr__(self):
return self.emit()
###########################################################################
# Public Methods
def add(self, obj, indent_amt, break_after_open_bracket):
if isinstance(obj, Atom):
self._add_item(obj, indent_amt)
return
self._add_container(obj, indent_amt, break_after_open_bracket)
def add_comment(self, item):
num_spaces = 2
if len(self._lines) > 1:
if isinstance(self._lines[-1], self._Space):
num_spaces -= 1
if len(self._lines) > 2:
if isinstance(self._lines[-2], self._Space):
num_spaces -= 1
while num_spaces > 0:
self._lines.append(self._Space())
num_spaces -= 1
self._lines.append(item)
def add_indent(self, indent_amt):
self._lines.append(self._Indent(indent_amt))
def add_line_break(self, indent):
self._lines.append(self._LineBreak())
self.add_indent(len(indent))
def add_line_break_at(self, index, indent_amt):
self._lines.insert(index, self._LineBreak())
self._lines.insert(index + 1, self._Indent(indent_amt))
def add_space_if_needed(self, curr_text, equal=False):
if (
not self._lines or isinstance(
self._lines[-1], (self._LineBreak, self._Indent, self._Space))
):
return
prev_text = unicode(self._prev_item)
prev_prev_text = (
unicode(self._prev_prev_item) if self._prev_prev_item else '')
if (
# The previous item was a keyword or identifier and the current
# item isn't an operator that doesn't require a space.
((self._prev_item.is_keyword or self._prev_item.is_string or
self._prev_item.is_name or self._prev_item.is_number) and
(curr_text[0] not in '([{.,:}])' or
(curr_text[0] == '=' and equal))) or
# Don't place spaces around a '.', unless it's in an 'import'
# statement.
((prev_prev_text != 'from' and prev_text[-1] != '.' and
curr_text != 'import') and
# Don't place a space before a colon.
curr_text[0] != ':' and
# Don't split up ending brackets by spaces.
((prev_text[-1] in '}])' and curr_text[0] not in '.,}])') or
# Put a space after a colon or comma.
prev_text[-1] in ':,' or
# Put space around '=' if asked to.
(equal and prev_text == '=') or
# Put spaces around non-unary arithmetic operators.
((self._prev_prev_item and
(prev_text not in '+-' and
(self._prev_prev_item.is_name or
self._prev_prev_item.is_number or
self._prev_prev_item.is_string)) and
prev_text in ('+', '-', '%', '*', '/', '//', '**', 'in')))))
):
self._lines.append(self._Space())
def previous_item(self):
"""Return the previous non-whitespace item."""
return self._prev_item
def fits_on_current_line(self, item_extent):
return self.current_size() + item_extent <= self._max_line_length
def current_size(self):
"""The size of the current line minus the indentation."""
size = 0
for item in reversed(self._lines):
size += item.size
if isinstance(item, self._LineBreak):
break
return size
def line_empty(self):
return (self._lines and
isinstance(self._lines[-1],
(self._LineBreak, self._Indent)))
def emit(self):
string = ''
for item in self._lines:
if isinstance(item, self._LineBreak):
string = string.rstrip()
string += item.emit()
return string.rstrip() + '\n'
###########################################################################
# Private Methods
def _add_item(self, item, indent_amt):
"""Add an item to the line.
Reflow the line to get the best formatting after the item is
inserted. The bracket depth indicates if the item is being
inserted inside of a container or not.
"""
if self._prev_item and self._prev_item.is_string and item.is_string:
# Place consecutive string literals on separate lines.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
item_text = unicode(item)
if self._lines and self._bracket_depth:
# Adding the item into a container.
self._prevent_default_initializer_splitting(item, indent_amt)
if item_text in '.,)]}':
self._split_after_delimiter(item, indent_amt)
elif self._lines and not self.line_empty():
# Adding the item outside of a container.
if self.fits_on_current_line(len(item_text)):
self._enforce_space(item)
else:
# Line break for the new item.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
self._lines.append(item)
self._prev_item, self._prev_prev_item = item, self._prev_item
if item_text in '([{':
self._bracket_depth += 1
elif item_text in '}])':
self._bracket_depth -= 1
assert self._bracket_depth >= 0
def _add_container(self, container, indent_amt, break_after_open_bracket):
actual_indent = indent_amt + 1
if (
unicode(self._prev_item) != '=' and
not self.line_empty() and
not self.fits_on_current_line(
container.size + self._bracket_depth + 2)
):
if unicode(container)[0] == '(' and self._prev_item.is_name:
# Don't split before the opening bracket of a call.
break_after_open_bracket = True
actual_indent = indent_amt + 4
elif (
break_after_open_bracket or
unicode(self._prev_item) not in '([{'
):
# If the container doesn't fit on the current line and the
# current line isn't empty, place the container on the next
# line.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
break_after_open_bracket = False
else:
actual_indent = self.current_size() + 1
break_after_open_bracket = False
if isinstance(container, (ListComprehension, IfExpression)):
actual_indent = indent_amt
# Increase the continued indentation only if recursing on a
# container.
container.reflow(self, ' ' * actual_indent,
break_after_open_bracket=break_after_open_bracket)
def _prevent_default_initializer_splitting(self, item, indent_amt):
"""Prevent splitting between a default initializer.
When there is a default initializer, it's best to keep it all on
the same line. It's nicer and more readable, even if it goes
over the maximum allowable line length. This goes back along the
current line to determine if we have a default initializer, and,
if so, to remove extraneous whitespaces and add a line
break/indent before it if needed.
"""
if unicode(item) == '=':
# This is the assignment in the initializer. Just remove spaces for
# now.
self._delete_whitespace()
return
if (not self._prev_item or not self._prev_prev_item or
unicode(self._prev_item) != '='):
return
self._delete_whitespace()
prev_prev_index = self._lines.index(self._prev_prev_item)
if (
isinstance(self._lines[prev_prev_index - 1], self._Indent) or
self.fits_on_current_line(item.size + 1)
):
# The default initializer is already the only item on this line.
# Don't insert a newline here.
return
# Replace the space with a newline/indent combo.
if isinstance(self._lines[prev_prev_index - 1], self._Space):
del self._lines[prev_prev_index - 1]
self.add_line_break_at(self._lines.index(self._prev_prev_item),
indent_amt)
def _split_after_delimiter(self, item, indent_amt):
"""Split the line only after a delimiter."""
self._delete_whitespace()
if self.fits_on_current_line(item.size):
return
last_space = None
for item in reversed(self._lines):
if (
last_space and
(not isinstance(item, Atom) or not item.is_colon)
):
break
else:
last_space = None
if isinstance(item, self._Space):
last_space = item
if isinstance(item, (self._LineBreak, self._Indent)):
return
if not last_space:
return
self.add_line_break_at(self._lines.index(last_space), indent_amt)
def _enforce_space(self, item):
"""Enforce a space in certain situations.
There are cases where we will want a space where normally we
wouldn't put one. This just enforces the addition of a space.
"""
if isinstance(self._lines[-1],
(self._Space, self._LineBreak, self._Indent)):
return
if not self._prev_item:
return
item_text = unicode(item)
prev_text = unicode(self._prev_item)
# Prefer a space around a '.' in an import statement, and between the
# 'import' and '('.
if (
(item_text == '.' and prev_text == 'from') or
(item_text == 'import' and prev_text == '.') or
(item_text == '(' and prev_text == 'import')
):
self._lines.append(self._Space())
def _delete_whitespace(self):
"""Delete all whitespace from the end of the line."""
while isinstance(self._lines[-1], (self._Space, self._LineBreak,
self._Indent)):
del self._lines[-1]
class Atom(object):
"""The smallest unbreakable unit that can be reflowed."""
def __init__(self, atom):
self._atom = atom
def __repr__(self):
return self._atom.token_string
def __len__(self):
return self.size
def reflow(
self, reflowed_lines, continued_indent, extent,
break_after_open_bracket=False,
is_list_comp_or_if_expr=False,
next_is_dot=False
):
if self._atom.token_type == tokenize.COMMENT:
reflowed_lines.add_comment(self)
return
total_size = extent if extent else self.size
if self._atom.token_string not in ',:([{}])':
# Some atoms will need an extra 1-sized space token after them.
total_size += 1
prev_item = reflowed_lines.previous_item()
if (
not is_list_comp_or_if_expr and
not reflowed_lines.fits_on_current_line(total_size) and
not (next_is_dot and
reflowed_lines.fits_on_current_line(self.size + 1)) and
not reflowed_lines.line_empty() and
not self.is_colon and
not (prev_item and prev_item.is_name and
unicode(self) == '(')
):
# Start a new line if there is already something on the line and
# adding this atom would make it go over the max line length.
reflowed_lines.add_line_break(continued_indent)
else:
reflowed_lines.add_space_if_needed(unicode(self))
reflowed_lines.add(self, len(continued_indent),
break_after_open_bracket)
def emit(self):
return self.__repr__()
@property
def is_keyword(self):
return keyword.iskeyword(self._atom.token_string)
@property
def is_string(self):
return self._atom.token_type == tokenize.STRING
@property
def is_name(self):
return self._atom.token_type == tokenize.NAME
@property
def is_number(self):
return self._atom.token_type == tokenize.NUMBER
@property
def is_comma(self):
return self._atom.token_string == ','
@property
def is_colon(self):
return self._atom.token_string == ':'
@property
def size(self):
return len(self._atom.token_string)
class Container(object):
"""Base class for all container types."""
def __init__(self, items):
self._items = items
def __repr__(self):
string = ''
last_was_keyword = False
for item in self._items:
if item.is_comma:
string += ', '
elif item.is_colon:
string += ': '
else:
item_string = unicode(item)
if (
string and
(last_was_keyword or
(not string.endswith(tuple('([{,.:}]) ')) and
not item_string.startswith(tuple('([{,.:}])'))))
):
string += ' '
string += item_string
last_was_keyword = item.is_keyword
return string
def __iter__(self):
for element in self._items:
yield element
def __getitem__(self, idx):
return self._items[idx]
def reflow(self, reflowed_lines, continued_indent,
break_after_open_bracket=False):
last_was_container = False
for (index, item) in enumerate(self._items):
next_item = get_item(self._items, index + 1)
if isinstance(item, Atom):
is_list_comp_or_if_expr = (
isinstance(self, (ListComprehension, IfExpression)))
item.reflow(reflowed_lines, continued_indent,
self._get_extent(index),
is_list_comp_or_if_expr=is_list_comp_or_if_expr,
next_is_dot=(next_item and
unicode(next_item) == '.'))
if last_was_container and item.is_comma:
reflowed_lines.add_line_break(continued_indent)
last_was_container = False
else: # isinstance(item, Container)
reflowed_lines.add(item, len(continued_indent),
break_after_open_bracket)
last_was_container = not isinstance(item, (ListComprehension,
IfExpression))
if (
break_after_open_bracket and index == 0 and
# Prefer to keep empty containers together instead of
# separating them.
unicode(item) == self.open_bracket and
(not next_item or unicode(next_item) != self.close_bracket) and
(len(self._items) != 3 or not isinstance(next_item, Atom))
):
reflowed_lines.add_line_break(continued_indent)
break_after_open_bracket = False
else:
next_next_item = get_item(self._items, index + 2)
if (
unicode(item) not in ['.', '%', 'in'] and
next_item and not isinstance(next_item, Container) and
unicode(next_item) != ':' and
next_next_item and (not isinstance(next_next_item, Atom) or
unicode(next_item) == 'not') and
not reflowed_lines.line_empty() and
not reflowed_lines.fits_on_current_line(
self._get_extent(index + 1) + 2)
):
reflowed_lines.add_line_break(continued_indent)
def _get_extent(self, index):
"""The extent of the full element.
E.g., the length of a function call or keyword.
"""
extent = 0
prev_item = get_item(self._items, index - 1)
seen_dot = prev_item and unicode(prev_item) == '.'
while index < len(self._items):
item = get_item(self._items, index)
index += 1
if isinstance(item, (ListComprehension, IfExpression)):
break
if isinstance(item, Container):
if prev_item and prev_item.is_name:
if seen_dot:
extent += 1
else:
extent += item.size
prev_item = item
continue
elif (unicode(item) not in ['.', '=', ':', 'not'] and
not item.is_name and not item.is_string):
break
if unicode(item) == '.':
seen_dot = True
extent += item.size
prev_item = item
return extent
@property
def is_string(self):
return False
@property
def size(self):
return len(self.__repr__())
@property
def is_keyword(self):
return False
@property
def is_name(self):
return False
@property
def is_comma(self):
return False
@property
def is_colon(self):
return False
@property
def open_bracket(self):
return None
@property
def close_bracket(self):
return None
class Tuple(Container):
"""A high-level representation of a tuple."""
@property
def open_bracket(self):
return '('
@property
def close_bracket(self):
return ')'
class List(Container):
"""A high-level representation of a list."""
@property
def open_bracket(self):
return '['
@property
def close_bracket(self):
return ']'
class DictOrSet(Container):
"""A high-level representation of a dictionary or set."""
@property
def open_bracket(self):
return '{'
@property
def close_bracket(self):
return '}'
class ListComprehension(Container):
"""A high-level representation of a list comprehension."""
@property
def size(self):
length = 0
for item in self._items:
if isinstance(item, IfExpression):
break
length += item.size
return length
class IfExpression(Container):
"""A high-level representation of an if-expression."""
def _parse_container(tokens, index, for_or_if=None):
"""Parse a high-level container, such as a list, tuple, etc."""
# Store the opening bracket.
items = [Atom(Token(*tokens[index]))]
index += 1
num_tokens = len(tokens)
while index < num_tokens:
tok = Token(*tokens[index])
if tok.token_string in ',)]}':
# First check if we're at the end of a list comprehension or
# if-expression. Don't add the ending token as part of the list
# comprehension or if-expression, because they aren't part of those
# constructs.
if for_or_if == 'for':
return (ListComprehension(items), index - 1)
elif for_or_if == 'if':
return (IfExpression(items), index - 1)
# We've reached the end of a container.
items.append(Atom(tok))
# Determine which kind of container just ended.
if tok.token_string == ')':
# The end of a tuple.
return (Tuple(items), index)
elif tok.token_string == ']':
# The end of a list.
return (List(items), index)
elif tok.token_string == '}':
# The end of a dictionary or set.
return (DictOrSet(items), index)
elif tok.token_string in '([{':
# A sub-container is being defined.
(container, index) = _parse_container(tokens, index)
items.append(container)
elif tok.token_string == 'for':
(container, index) = _parse_container(tokens, index, 'for')
items.append(container)
elif tok.token_string == 'if':
(container, index) = _parse_container(tokens, index, 'if')
items.append(container)
else:
items.append(Atom(tok))
index += 1
return (None, None)
def _parse_tokens(tokens):
"""Parse the tokens.
This converts the tokens into a form where we can manipulate them
more easily.
"""
index = 0
parsed_tokens = []
num_tokens = len(tokens)
while index < num_tokens:
tok = Token(*tokens[index])
assert tok.token_type != token.INDENT
if tok.token_type == tokenize.NEWLINE:
# There's only one newline and it's at the end.
break
if tok.token_string in '([{':
(container, index) = _parse_container(tokens, index)
if not container:
return None
parsed_tokens.append(container)
else:
parsed_tokens.append(Atom(tok))
index += 1
return parsed_tokens
def _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line):
"""Reflow the lines so that it looks nice."""
if unicode(parsed_tokens[0]) == 'def':
# A function definition gets indented a bit more.
continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE
else:
continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE
break_after_open_bracket = not start_on_prefix_line
lines = ReformattedLines(max_line_length)
lines.add_indent(len(indentation.lstrip('\r\n')))
if not start_on_prefix_line:
# If splitting after the opening bracket will cause the first element
# to be aligned weirdly, don't try it.
first_token = get_item(parsed_tokens, 0)
second_token = get_item(parsed_tokens, 1)
if (
first_token and second_token and
unicode(second_token)[0] == '(' and
len(indentation) + len(first_token) + 1 == len(continued_indent)
):
return None
for item in parsed_tokens:
lines.add_space_if_needed(unicode(item), equal=True)
save_continued_indent = continued_indent
if start_on_prefix_line and isinstance(item, Container):
start_on_prefix_line = False
continued_indent = ' ' * (lines.current_size() + 1)
item.reflow(lines, continued_indent, break_after_open_bracket)
continued_indent = save_continued_indent
return lines.emit()
def _shorten_line_at_tokens_new(tokens, source, indentation,
max_line_length):
"""Shorten the line taking its length into account.
The input is expected to be free of newlines except for inside
multiline strings and at the end.
"""
# Yield the original source so we can see whether it's a better choice
# than the shortened candidate lines we generate here.
yield indentation + source
parsed_tokens = _parse_tokens(tokens)
if parsed_tokens:
# Perform two reflows. The first one starts on the same line as the
# prefix. The second starts on the line after the prefix.
fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line=True)
if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
yield fixed
fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line=False)
if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
yield fixed
def _shorten_line_at_tokens(tokens, source, indentation, indent_word,
key_token_strings, aggressive):
"""Separate line by breaking at tokens in key_token_strings.
The input is expected to be free of newlines except for inside
multiline strings and at the end.
"""
offsets = []
for (index, _t) in enumerate(token_offsets(tokens)):
(token_type,
token_string,
start_offset,
end_offset) = _t
assert token_type != token.INDENT
if token_string in key_token_strings:
# Do not break in containers with zero or one items.
unwanted_next_token = {
'(': ')',
'[': ']',
'{': '}'}.get(token_string)
if unwanted_next_token:
if (
get_item(tokens,
index + 1,
default=[None, None])[1] == unwanted_next_token or
get_item(tokens,
index + 2,
default=[None, None])[1] == unwanted_next_token
):
continue
if (
index > 2 and token_string == '(' and
tokens[index - 1][1] in ',(%['
):
# Don't split after a tuple start, or before a tuple start if
# the tuple is in a list.
continue
if end_offset < len(source) - 1:
# Don't split right before newline.
offsets.append(end_offset)
else:
# Break at adjacent strings. These were probably meant to be on
# separate lines in the first place.
previous_token = get_item(tokens, index - 1)
if (
token_type == tokenize.STRING and
previous_token and previous_token[0] == tokenize.STRING
):
offsets.append(start_offset)
current_indent = None
fixed = None
for line in split_at_offsets(source, offsets):
if fixed:
fixed += '\n' + current_indent + line
for symbol in '([{':
if line.endswith(symbol):
current_indent += indent_word
else:
# First line.
fixed = line
assert not current_indent
current_indent = indent_word
assert fixed is not None
if check_syntax(normalize_multiline(fixed)
if aggressive > 1 else fixed):
return indentation + fixed
else:
return None
def token_offsets(tokens):
"""Yield tokens and offsets."""
end_offset = 0
previous_end_row = 0
previous_end_column = 0
for t in tokens:
token_type = t[0]
token_string = t[1]
(start_row, start_column) = t[2]
(end_row, end_column) = t[3]
# Account for the whitespace between tokens.
end_offset += start_column
if previous_end_row == start_row:
end_offset -= previous_end_column
# Record the start offset of the token.
start_offset = end_offset
# Account for the length of the token itself.
end_offset += len(token_string)
yield (token_type,
token_string,
start_offset,
end_offset)
previous_end_row = end_row
previous_end_column = end_column
def normalize_multiline(line):
"""Normalize multiline-related code that will cause syntax error.
This is for purposes of checking syntax.
"""
if line.startswith('def ') and line.rstrip().endswith(':'):
return line + ' pass'
elif line.startswith('return '):
return 'def _(): ' + line
elif line.startswith('@'):
return line + 'def _(): pass'
elif line.startswith('class '):
return line + ' pass'
elif line.startswith(('if ', 'elif ', 'for ', 'while ')):
return line + ' pass'
else:
return line
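# Examples (real inputs normally end with '\n'; shown bare for brevity):
#   normalize_multiline('def foo():')  ->  'def foo(): pass'
#   normalize_multiline('return x')    ->  'def _(): return x'
#   normalize_multiline('while x:')    ->  'while x: pass'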
def fix_whitespace(line, offset, replacement):
"""Replace whitespace at offset and return fixed line."""
# Replace escaped newlines too
left = line[:offset].rstrip('\n\r \t\\')
right = line[offset:].lstrip('\n\r \t\\')
if right.startswith('#'):
return line
else:
return left + replacement + right
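# For example, fix_whitespace('a  = 1\n', offset=1, replacement=' ')
# returns 'a = 1\n'; lines whose right-hand side starts with '#' are
# left untouched.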
def _execute_pep8(pep8_options, source):
"""Execute pep8 via python method calls."""
class QuietReport(pep8.BaseReport):
"""Version of checker that does not print."""
def __init__(self, options):
super(QuietReport, self).__init__(options)
self.__full_error_results = []
def error(self, line_number, offset, text, _):
"""Collect errors."""
code = super(QuietReport, self).error(line_number, offset, text, _)
if code:
self.__full_error_results.append(
{'id': code,
'line': line_number,
'column': offset + 1,
'info': text})
def full_error_results(self):
"""Return error results in detail.
Results are in the form of a list of dictionaries. Each
dictionary contains 'id', 'line', 'column', and 'info'.
"""
return self.__full_error_results
checker = pep8.Checker('', lines=source,
reporter=QuietReport, **pep8_options)
checker.check_all()
return checker.report.full_error_results()
def _remove_leading_and_normalize(line):
return line.lstrip().rstrip(CR + LF) + '\n'
class Reindenter(object):
"""Reindents badly-indented code to uniformly use four-space indentation.
Released to the public domain, by Tim Peters, 03 October 2000.
"""
def __init__(self, input_text):
sio = io.StringIO(input_text)
source_lines = sio.readlines()
self.string_content_line_numbers = multiline_string_lines(input_text)
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it is a newline.
self.lines = []
for line_number, line in enumerate(source_lines, start=1):
# Do not modify if inside a multiline string.
if line_number in self.string_content_line_numbers:
self.lines.append(line)
else:
# Only expand leading tabs.
self.lines.append(_get_indentation(line).expandtabs() +
_remove_leading_and_normalize(line))
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
self.input_text = input_text
def run(self, indent_size=DEFAULT_INDENT_SIZE):
"""Fix indentation and return modified line numbers.
Line numbers are indexed at 1.
"""
if indent_size < 1:
return self.input_text
try:
stats = _reindent_stats(tokenize.generate_tokens(self.getline))
except (SyntaxError, tokenize.TokenError):
return self.input_text
# Remove trailing empty lines.
lines = self.lines
while lines and lines[-1] == '\n':
lines.pop()
# Sentinel.
stats.append((len(lines), 0))
# Map count of leading spaces to # we want.
have2want = {}
# Program after transformation.
after = []
# Copy over initial empty lines -- there's nothing to do until
# we see a line with *something* on it.
i = stats[0][0]
after.extend(lines[1:i])
for i in range(len(stats) - 1):
thisstmt, thislevel = stats[i]
nextstmt = stats[i + 1][0]
have = _leading_space_count(lines[thisstmt])
want = thislevel * indent_size
if want < 0:
# A comment line.
if have:
# An indented comment line. If we saw the same
# indentation before, reuse what it most recently
# mapped to.
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
for j in range(i + 1, len(stats) - 1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == _leading_space_count(lines[jline]):
want = jlevel * indent_size
break
if want < 0: # Maybe it's a hanging
# comment like this one,
# in which case we should shift it like its base
# line got shifted.
for j in range(i - 1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
want = (have + _leading_space_count(
after[jline - 1]) -
_leading_space_count(lines[jline]))
break
if want < 0:
# Still no luck -- leave it alone.
want = have
else:
want = 0
assert want >= 0
have2want[have] = want
diff = want - have
if diff == 0 or have == 0:
after.extend(lines[thisstmt:nextstmt])
else:
for line_number, line in enumerate(lines[thisstmt:nextstmt],
start=thisstmt):
if line_number in self.string_content_line_numbers:
after.append(line)
elif diff > 0:
if line == '\n':
after.append(line)
else:
after.append(' ' * diff + line)
else:
remove = min(_leading_space_count(line), -diff)
after.append(line[remove:])
return ''.join(after)
def getline(self):
"""Line-getter for tokenize."""
if self.index >= len(self.lines):
line = ''
else:
line = self.lines[self.index]
self.index += 1
return line
def _reindent_stats(tokens):
"""Return list of (lineno, indentlevel) pairs.
One for each stmt and comment line. indentlevel is -1 for comment lines, as
a signal that tokenize doesn't know what to do about them; indeed, they're
our headache!
"""
find_stmt = 1 # Next token begins a fresh stmt?
level = 0 # Current indent level.
stats = []
for t in tokens:
token_type = t[0]
sline = t[2][0]
line = t[4]
if token_type == tokenize.NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
find_stmt = 1
elif token_type == tokenize.INDENT:
find_stmt = 1
level += 1
elif token_type == tokenize.DEDENT:
find_stmt = 1
level -= 1
elif token_type == tokenize.COMMENT:
if find_stmt:
stats.append((sline, -1))
# But we're still looking for a new stmt, so leave
# find_stmt alone.
elif token_type == tokenize.NL:
pass
elif find_stmt:
# This is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER.
find_stmt = 0
if line: # Not endmarker.
stats.append((sline, level))
return stats
def _leading_space_count(line):
"""Return number of leading spaces in line."""
i = 0
while i < len(line) and line[i] == ' ':
i += 1
return i
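# Examples:
#   _leading_space_count('    x')  ->  4
#   _leading_space_count('\tx')    ->  0  (tabs are not counted)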
def refactor_with_2to3(source_text, fixer_names, filename=''):
"""Use lib2to3 to refactor the source.
Return the refactored source code.
"""
check_lib2to3()
from lib2to3.refactor import RefactoringTool
fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names]
tool = RefactoringTool(fixer_names=fixers, explicit=fixers)
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
try:
# The name parameter is necessary particularly for the "import" fixer.
return unicode(tool.refactor_string(source_text, name=filename))
except lib2to3_tokenize.TokenError:
return source_text
def check_syntax(code):
"""Return True if syntax is okay."""
try:
return compile(code, '<string>', 'exec')
except (SyntaxError, TypeError, UnicodeDecodeError):
return False
def filter_results(source, results, aggressive):
"""Filter out spurious reports from pep8.
If aggressive is True, we allow possibly unsafe fixes (E711, E712).
"""
non_docstring_string_line_numbers = multiline_string_lines(
source, include_docstrings=False)
all_string_line_numbers = multiline_string_lines(
source, include_docstrings=True)
commented_out_code_line_numbers = commented_out_code_lines(source)
has_e901 = any(result['id'].lower() == 'e901' for result in results)
for r in results:
issue_id = r['id'].lower()
if r['line'] in non_docstring_string_line_numbers:
if issue_id.startswith(('e1', 'e501', 'w191')):
continue
if r['line'] in all_string_line_numbers:
if issue_id in ['e501']:
continue
# We must offset by 1 for lines that contain the trailing contents of
# multiline strings.
if not aggressive and (r['line'] + 1) in all_string_line_numbers:
# Do not modify multiline strings in non-aggressive mode. Removing
# trailing whitespace could break doctests.
if issue_id.startswith(('w29', 'w39')):
continue
if aggressive <= 0:
if issue_id.startswith(('e711', 'w6')):
continue
if aggressive <= 1:
if issue_id.startswith(('e712', 'e713')):
continue
if r['line'] in commented_out_code_line_numbers:
if issue_id.startswith(('e26', 'e501')):
continue
# Do not touch indentation if there is a token error caused by
# incomplete multi-line statement. Otherwise, we risk screwing up the
# indentation.
if has_e901:
if issue_id.startswith(('e1', 'e7')):
continue
yield r
def multiline_string_lines(source, include_docstrings=False):
"""Return line numbers that are within multiline strings.
The line numbers are indexed at 1.
Docstrings are ignored unless include_docstrings is True.
"""
line_numbers = set()
previous_token_type = ''
try:
for t in generate_tokens(source):
token_type = t[0]
start_row = t[2][0]
end_row = t[3][0]
if token_type == tokenize.STRING and start_row != end_row:
if (
include_docstrings or
previous_token_type != tokenize.INDENT
):
# We increment by one since we want the contents of the
# string.
line_numbers |= set(range(1 + start_row, 1 + end_row))
previous_token_type = token_type
except (SyntaxError, tokenize.TokenError):
pass
return line_numbers
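# For example, for source "x = '''a\nb'''\n" this returns {2}: only the
# lines carrying the continued contents of the string are reported, not
# the line on which it opens.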
def commented_out_code_lines(source):
"""Return line numbers of comments that are likely code.
Commented-out code is bad practice, but modifying it just adds even more
clutter.
"""
line_numbers = []
try:
for t in generate_tokens(source):
token_type = t[0]
token_string = t[1]
start_row = t[2][0]
line = t[4]
# Ignore inline comments.
if not line.lstrip().startswith('#'):
continue
if token_type == tokenize.COMMENT:
stripped_line = token_string.lstrip('#').strip()
if (
' ' in stripped_line and
'#' not in stripped_line and
check_syntax(stripped_line)
):
line_numbers.append(start_row)
except (SyntaxError, tokenize.TokenError):
pass
return line_numbers
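# For example, a line '# x = 1' is reported as likely commented-out
# code, while '# TODO cleanup' is not, since 'TODO cleanup' fails
# check_syntax.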
def shorten_comment(line, max_line_length, last_comment=False):
"""Return trimmed or split long comment line.
If there are no comments immediately following it, do a text wrap.
Doing this wrapping on all comments in general would lead to jagged
comment text.
"""
assert len(line) > max_line_length
line = line.rstrip()
# PEP 8 recommends 72 characters for comment text.
indentation = _get_indentation(line) + '# '
max_line_length = min(max_line_length,
len(indentation) + 72)
MIN_CHARACTER_REPEAT = 5
if (
len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and
not line[-1].isalnum()
):
# Trim comments that end with things like ---------
return line[:max_line_length] + '\n'
elif last_comment and re.match(r'\s*#+\s*\w+', line):
import textwrap
split_lines = textwrap.wrap(line.lstrip(' \t#'),
initial_indent=indentation,
subsequent_indent=indentation,
width=max_line_length,
break_long_words=False,
break_on_hyphens=False)
return '\n'.join(split_lines) + '\n'
else:
return line + '\n'
def normalize_line_endings(lines, newline):
"""Return fixed line endings.
All lines will be modified to use the most common line ending.
"""
return [line.rstrip('\n\r') + newline for line in lines]
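# For example, normalize_line_endings(['a\r\n', 'b\n'], '\n') returns
# ['a\n', 'b\n'].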
def mutual_startswith(a, b):
return b.startswith(a) or a.startswith(b)
def code_match(code, select, ignore):
if ignore:
assert not isinstance(ignore, unicode)
for ignored_code in [c.strip() for c in ignore]:
if mutual_startswith(code.lower(), ignored_code.lower()):
return False
if select:
assert not isinstance(select, unicode)
for selected_code in [c.strip() for c in select]:
if mutual_startswith(code.lower(), selected_code.lower()):
return True
return False
return True
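# Examples:
#   mutual_startswith('e7', 'e711')               ->  True
#   code_match('E711', select=[], ignore=['E7'])  ->  False
#   code_match('E711', select=['E7'], ignore=[])  ->  True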
def fix_code(source, options=None, encoding=None, apply_config=False):
"""Return fixed source code.
"encoding" will be used to decode "source" if it is a byte string.
"""
if not options:
options = parse_args([''], apply_config=apply_config)
if not isinstance(source, unicode):
source = source.decode(encoding or get_encoding())
sio = io.StringIO(source)
return fix_lines(sio.readlines(), options=options)
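# Typical library usage (a sketch; the exact output depends on the
# default options built by parse_args([''])):
#   fixed = fix_code('x=1;  y=2\n')   # roughly 'x = 1\ny = 2\n'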
def fix_lines(source_lines, options, filename=''):
"""Return fixed source code."""
# Transform everything to line feed. Then change them back to original
# before returning fixed source code.
original_newline = find_newline(source_lines)
tmp_source = ''.join(normalize_line_endings(source_lines, '\n'))
# Keep a history to break out of cycles.
previous_hashes = set()
if options.line_range:
fixed_source = apply_local_fixes(tmp_source, options)
else:
# Apply global fixes only once (for efficiency).
fixed_source = apply_global_fixes(tmp_source,
options,
filename=filename)
passes = 0
long_line_ignore_cache = set()
while hash(fixed_source) not in previous_hashes:
if options.pep8_passes >= 0 and passes > options.pep8_passes:
break
passes += 1
previous_hashes.add(hash(fixed_source))
tmp_source = copy.copy(fixed_source)
fix = FixPEP8(
filename,
options,
contents=tmp_source,
long_line_ignore_cache=long_line_ignore_cache)
fixed_source = fix.fix()
sio = io.StringIO(fixed_source)
return ''.join(normalize_line_endings(sio.readlines(), original_newline))
def fix_file(filename, options=None, output=None, apply_config=False):
if not options:
options = parse_args([filename], apply_config=apply_config)
original_source = readlines_from_file(filename)
fixed_source = original_source
if options.in_place or output:
encoding = detect_encoding(filename)
if output:
output = LineEndingWrapper(wrap_output(output, encoding=encoding))
fixed_source = fix_lines(fixed_source, options, filename=filename)
if options.diff:
new = io.StringIO(fixed_source)
new = new.readlines()
diff = get_diff_text(original_source, new, filename)
if output:
output.write(diff)
output.flush()
else:
return diff
elif options.in_place:
fp = open_with_encoding(filename, encoding=encoding,
mode='w')
fp.write(fixed_source)
fp.close()
else:
if output:
output.write(fixed_source)
output.flush()
else:
return fixed_source
def global_fixes():
"""Yield multiple (code, function) tuples."""
for function in list(globals().values()):
if inspect.isfunction(function):
arguments = inspect.getargspec(function)[0]
if arguments[:1] != ['source']:
continue
code = extract_code_from_function(function)
if code:
yield (code, function)
def apply_global_fixes(source, options, where='global', filename=''):
"""Run global fixes on source code.
These are fixes that only need be done once (unlike those in
FixPEP8, which are dependent on pep8).
"""
if any(code_match(code, select=options.select, ignore=options.ignore)
for code in ['E101', 'E111']):
source = reindent(source,
indent_size=options.indent_size)
for (code, function) in global_fixes():
if code_match(code, select=options.select, ignore=options.ignore):
if options.verbose:
print('---> Applying {0} fix for {1}'.format(where,
code.upper()),
file=sys.stderr)
source = function(source,
aggressive=options.aggressive)
source = fix_2to3(source,
aggressive=options.aggressive,
select=options.select,
ignore=options.ignore,
filename=filename)
return source
def apply_local_fixes(source, options):
"""Ananologus to apply_global_fixes, but runs only those which makes sense
for the given line_range.
Do as much as we can without breaking code.
"""
def find_ge(a, x):
"""Find leftmost item greater than or equal to x."""
i = bisect.bisect_left(a, x)
if i != len(a):
return (i, a[i])
return (len(a) - 1, a[-1])
def find_le(a, x):
"""Find rightmost value less than or equal to x."""
i = bisect.bisect_right(a, x)
if i:
return (i - 1, a[i - 1])
return (0, a[0])
def local_fix(source, start_log, end_log,
start_lines, end_lines, indents, last_line):
"""apply_global_fixes to the source between start_log and end_log.
The subsource must be the correct syntax of a complete python program
(but all lines may share an indentation). The subsource's shared indent
is removed, fixes are applied and the indent prepended back. Taking
care to not reindent strings.
last_line is the strict cut off (options.line_range[1]), so that
lines after last_line are not modified.
"""
if end_log < start_log:
return source
ind = indents[start_log]
indent = _get_indentation(source[start_lines[start_log]])
sl = slice(start_lines[start_log], end_lines[end_log] + 1)
subsource = source[sl]
msl = multiline_string_lines(''.join(subsource),
include_docstrings=False)
# Remove indent from subsource.
if ind:
for line_no in start_lines[start_log:end_log + 1]:
pos = line_no - start_lines[start_log]
subsource[pos] = subsource[pos][ind:]
# Remove indent from comments.
for (i, line) in enumerate(subsource):
if i + 1 not in msl and re.match(r'\s*#', line):
if line.index('#') >= ind:
subsource[i] = line[ind:]
# Fix indentation of subsource.
fixed_subsource = apply_global_fixes(''.join(subsource),
options,
where='local')
fixed_subsource = fixed_subsource.splitlines(True)
# Add back indent for non multi-line strings lines.
msl = multiline_string_lines(''.join(fixed_subsource),
include_docstrings=False)
for (i, line) in enumerate(fixed_subsource):
if i + 1 not in msl:
fixed_subsource[i] = indent + line if line != '\n' else line
# We make a special case for the final line: if it's a multiline
# *and* the cut-off is somewhere inside it, we take the fixed
# subset up until last_line. This assumes that the number of lines
# does not change within this multiline construct.
changed_lines = len(fixed_subsource)
if (
start_lines[end_log] != end_lines[end_log] and
end_lines[end_log] > last_line
):
after_end = end_lines[end_log] - last_line
fixed_subsource = (fixed_subsource[:-after_end] +
source[sl][-after_end:])
changed_lines -= after_end
options.line_range[1] = (options.line_range[0] +
changed_lines - 1)
return (source[:start_lines[start_log]] +
fixed_subsource +
source[end_lines[end_log] + 1:])
def is_continued_stmt(line,
continued_stmts=frozenset(['else', 'elif',
'finally', 'except'])):
return re.split('[ :]', line.strip(), 1)[0] in continued_stmts
assert options.line_range
(start, end) = options.line_range
start -= 1
end -= 1
last_line = end # We shouldn't modify lines after this cut-off.
try:
logical = _find_logical(source)
except (SyntaxError, tokenize.TokenError):
return ''.join(source)
if not logical[0]:
# No logical lines (only blank lines); fall back to global fixes.
return apply_global_fixes(source, options)
(start_lines, indents) = zip(*logical[0])
(end_lines, _) = zip(*logical[1])
source = source.splitlines(True)
(start_log, start) = find_ge(start_lines, start)
(end_log, end) = find_le(start_lines, end)
# Look behind one line; if it's indented less than the current indent,
# we can move to this previous line, knowing that its indentation
# level will not be changed.
if (
start_log > 0 and
indents[start_log - 1] < indents[start_log] and
not is_continued_stmt(source[start_log - 1])
):
start_log -= 1
start = start_lines[start_log]
while start < end:
if is_continued_stmt(source[start]):
start_log += 1
start = start_lines[start_log]
continue
ind = indents[start_log]
for t in itertools.takewhile(lambda t: t[1][1] >= ind,
enumerate(logical[0][start_log:])):
(n_log, n) = start_log + t[0], t[1][0]
# Start shares indent up to n.
if n <= end:
source = local_fix(source, start_log, n_log,
start_lines, end_lines,
indents, last_line)
start_log = n_log if n == end else n_log + 1
start = start_lines[start_log]
continue
else:
# Look at the line after end and see if it allows us to reindent.
(after_end_log, after_end) = find_ge(start_lines, end + 1)
if indents[after_end_log] > indents[start_log]:
(start_log, start) = find_ge(start_lines, start + 1)
continue
if (
indents[after_end_log] == indents[start_log] and
is_continued_stmt(source[after_end])
):
# Find n, the beginning of the last continued statement.
# Apply fix to previous block if there is one.
only_block = True
for n, n_ind in logical[0][start_log:end_log + 1][::-1]:
if n_ind == ind and not is_continued_stmt(source[n]):
n_log = start_lines.index(n)
source = local_fix(source, start_log, n_log - 1,
start_lines, end_lines,
indents, last_line)
start_log = n_log + 1
start = start_lines[start_log]
only_block = False
break
if only_block:
(end_log, end) = find_le(start_lines, end - 1)
continue
source = local_fix(source, start_log, end_log,
start_lines, end_lines,
indents, last_line)
break
return ''.join(source)
def extract_code_from_function(function):
"""Return code handled by function."""
if not function.__name__.startswith('fix_'):
return None
code = re.sub('^fix_', '', function.__name__)
if not code:
return None
try:
int(code[1:])
except ValueError:
return None
return code
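# For example, a function named fix_e711 yields the code 'e711', while
# helpers like fix_whitespace return None because the suffix after the
# first character is not numeric.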
def create_parser():
"""Return command-line parser."""
# Do import locally to be friendly to those who use autopep8 as a library
# and are supporting Python 2.6.
import argparse
parser = argparse.ArgumentParser(description=docstring_summary(__doc__),
prog='autopep8')
parser.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('-v', '--verbose', action='count', dest='verbose',
default=0,
help='print verbose messages; '
'multiple -v result in more verbose messages')
parser.add_argument('-d', '--diff', action='store_true', dest='diff',
help='print the diff for the fixed source')
parser.add_argument('-i', '--in-place', action='store_true',
help='make changes to files in place')
parser.add_argument('--global-config', metavar='filename',
default=DEFAULT_CONFIG,
help='path to a global pep8 config file; if this file '
'does not exist then this is ignored '
'(default: {0})'.format(DEFAULT_CONFIG))
parser.add_argument('--ignore-local-config', action='store_true',
help="don't look for and apply local config files; "
'if not passed, defaults are updated with any '
"config files in the project's root directory")
parser.add_argument('-r', '--recursive', action='store_true',
help='run recursively over directories; '
'must be used with --in-place or --diff')
parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1,
help='number of parallel jobs; '
'match CPU count if value is less than 1')
parser.add_argument('-p', '--pep8-passes', metavar='n',
default=-1, type=int,
help='maximum number of additional pep8 passes '
'(default: infinite)')
parser.add_argument('-a', '--aggressive', action='count', default=0,
help='enable non-whitespace changes; '
'multiple -a result in more aggressive changes')
parser.add_argument('--experimental', action='store_true',
help='enable experimental fixes')
parser.add_argument('--exclude', metavar='globs',
help='exclude file/directory names that match these '
'comma-separated globs')
parser.add_argument('--list-fixes', action='store_true',
help='list codes for fixes; '
'used by --ignore and --select')
parser.add_argument('--ignore', metavar='errors', default='',
help='do not fix these errors/warnings '
'(default: {0})'.format(DEFAULT_IGNORE))
parser.add_argument('--select', metavar='errors', default='',
help='fix only these errors/warnings (e.g. E4,W)')
parser.add_argument('--max-line-length', metavar='n', default=79, type=int,
help='set maximum allowed line length '
'(default: %(default)s)')
parser.add_argument('--range', metavar='line', dest='line_range',
default=None, type=int, nargs=2,
help='only fix errors found within this inclusive '
'range of line numbers (e.g. 1 99); '
'line numbers are indexed at 1')
parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE,
type=int, metavar='n',
help='number of spaces per indent level '
'(default %(default)s)')
parser.add_argument('files', nargs='*',
help="files to format or '-' for standard in")
return parser
def parse_args(arguments, apply_config=False):
"""Parse command-line options."""
parser = create_parser()
args = parser.parse_args(arguments)
if not args.files and not args.list_fixes:
parser.error('incorrect number of arguments')
args.files = [decode_filename(name) for name in args.files]
if apply_config:
parser = read_config(args, parser)
args = parser.parse_args(arguments)
args.files = [decode_filename(name) for name in args.files]
if '-' in args.files:
if len(args.files) > 1:
parser.error('cannot mix stdin and regular files')
if args.diff:
parser.error('--diff cannot be used with standard input')
if args.in_place:
parser.error('--in-place cannot be used with standard input')
if args.recursive:
parser.error('--recursive cannot be used with standard input')
if len(args.files) > 1 and not (args.in_place or args.diff):
parser.error('autopep8 only takes one filename as argument '
'unless the "--in-place" or "--diff" args are '
'used')
if args.recursive and not (args.in_place or args.diff):
parser.error('--recursive must be used with --in-place or --diff')
if args.exclude and not args.recursive:
parser.error('--exclude is only relevant when used with --recursive')
if args.in_place and args.diff:
parser.error('--in-place and --diff are mutually exclusive')
if args.max_line_length <= 0:
parser.error('--max-line-length must be greater than 0')
if args.select:
args.select = _split_comma_separated(args.select)
if args.ignore:
args.ignore = _split_comma_separated(args.ignore)
elif not args.select:
if args.aggressive:
# Enable everything by default if aggressive.
args.select = ['E', 'W']
else:
args.ignore = _split_comma_separated(DEFAULT_IGNORE)
if args.exclude:
args.exclude = _split_comma_separated(args.exclude)
else:
args.exclude = []
if args.jobs < 1:
# Do not import multiprocessing globally in case it is not supported
# on the platform.
import multiprocessing
args.jobs = multiprocessing.cpu_count()
if args.jobs > 1 and not args.in_place:
parser.error('parallel jobs requires --in-place')
if args.line_range:
if args.line_range[0] <= 0:
parser.error('--range must be positive numbers')
if args.line_range[0] > args.line_range[1]:
parser.error('First value of --range should be less than or equal '
'to the second')
return args
def read_config(args, parser):
"""Read both user configuration and local configuration."""
try:
from configparser import ConfigParser as SafeConfigParser
from configparser import Error
except ImportError:
from ConfigParser import SafeConfigParser
from ConfigParser import Error
config = SafeConfigParser()
try:
config.read(args.global_config)
if not args.ignore_local_config:
parent = tail = args.files and os.path.abspath(
os.path.commonprefix(args.files))
while tail:
if config.read([os.path.join(parent, fn)
for fn in PROJECT_CONFIG]):
break
(parent, tail) = os.path.split(parent)
defaults = dict((k.lstrip('-').replace('-', '_'), v)
for k, v in config.items('pep8'))
parser.set_defaults(**defaults)
except Error:
# Ignore for now.
pass
return parser
def _split_comma_separated(string):
"""Return a set of strings."""
return set(filter(None, string.split(',')))
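# Hedged example: empty fragments produced by stray commas are dropped
# by the filter(None, ...) call.
#
#     _split_comma_separated('E1,,W6')  # -> set(['E1', 'W6'])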
def decode_filename(filename):
"""Return Unicode filename."""
if isinstance(filename, unicode):
return filename
else:
return filename.decode(sys.getfilesystemencoding())
def supported_fixes():
"""Yield pep8 error codes that autopep8 fixes.
Each item we yield is a tuple of the code followed by its
description.
"""
yield ('E101', docstring_summary(reindent.__doc__))
instance = FixPEP8(filename=None, options=None, contents='')
for attribute in dir(instance):
code = re.match('fix_([ew][0-9][0-9][0-9])', attribute)
if code:
yield (
code.group(1).upper(),
re.sub(r'\s+', ' ',
docstring_summary(getattr(instance, attribute).__doc__))
)
for (code, function) in sorted(global_fixes()):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(function.__doc__)))
for code in sorted(CODE_TO_2TO3):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__)))
def docstring_summary(docstring):
"""Return summary of docstring."""
return docstring.split('\n')[0]
def line_shortening_rank(candidate, indent_word, max_line_length,
experimental=False):
"""Return rank of candidate.
This is for sorting candidates.
"""
if not candidate.strip():
return 0
rank = 0
lines = candidate.split('\n')
offset = 0
if (
not lines[0].lstrip().startswith('#') and
lines[0].rstrip()[-1] not in '([{'
):
for (opening, closing) in ('()', '[]', '{}'):
# Don't penalize empty containers that aren't split up. Things like
# this "foo(\n )" aren't particularly good.
opening_loc = lines[0].find(opening)
closing_loc = lines[0].find(closing)
if opening_loc >= 0:
if closing_loc < 0 or closing_loc != opening_loc + 1:
offset = max(offset, 1 + opening_loc)
current_longest = max(offset + len(x.strip()) for x in lines)
rank += 4 * max(0, current_longest - max_line_length)
rank += len(lines)
# Too much variation in line length is ugly.
rank += 2 * standard_deviation(len(line) for line in lines)
bad_starting_symbol = {
'(': ')',
'[': ']',
'{': '}'}.get(lines[0][-1])
if len(lines) > 1:
if (
bad_starting_symbol and
lines[1].lstrip().startswith(bad_starting_symbol)
):
rank += 20
for lineno, current_line in enumerate(lines):
current_line = current_line.strip()
if current_line.startswith('#'):
continue
for bad_start in ['.', '%', '+', '-', '/']:
if current_line.startswith(bad_start):
rank += 100
# Do not tolerate operators on their own line.
if current_line == bad_start:
rank += 1000
if current_line.endswith(('(', '[', '{', '.')):
# Avoid lonely opening. They result in longer lines.
if len(current_line) <= len(indent_word):
rank += 100
# Avoid the ugliness of ", (\n".
if (
current_line.endswith('(') and
current_line[:-1].rstrip().endswith(',')
):
rank += 100
# Also avoid the ugliness of "foo.\nbar"
if current_line.endswith('.'):
rank += 100
if has_arithmetic_operator(current_line):
rank += 100
if current_line.endswith(('%', '(', '[', '{')):
rank -= 20
# Try to break list comprehensions at the "for".
if current_line.startswith('for '):
rank -= 50
if current_line.endswith('\\'):
# If a line ends in \-newline, it may be part of a
# multiline string. In that case, we would like to know
# how long that line is without the \-newline. If it's
# longer than the maximum, or has comments, then we assume
# that the \-newline is an okay candidate and only
# penalize it a bit.
total_len = len(current_line)
lineno += 1
while lineno < len(lines):
total_len += len(lines[lineno])
if lines[lineno].lstrip().startswith('#'):
total_len = max_line_length
break
if not lines[lineno].endswith('\\'):
break
lineno += 1
if total_len < max_line_length:
rank += 10
else:
rank += 100 if experimental else 1
# Prefer breaking at commas rather than colon.
if ',' in current_line and current_line.endswith(':'):
rank += 10
rank += 10 * count_unbalanced_brackets(current_line)
return max(0, rank)
def standard_deviation(numbers):
"""Return standard devation."""
numbers = list(numbers)
if not numbers:
return 0
mean = sum(numbers) / len(numbers)
return (sum((n - mean) ** 2 for n in numbers) /
len(numbers)) ** .5
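# Hedged example: this is the population standard deviation (it
# divides by len(numbers), not len(numbers) - 1).
#
#     standard_deviation([2, 4, 4, 4, 5, 5, 7, 9])  # -> 2.0
#     standard_deviation([])                        # -> 0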
def has_arithmetic_operator(line):
"""Return True if line contains any arithmetic operators."""
for operator in pep8.ARITHMETIC_OP:
if operator in line:
return True
return False
def count_unbalanced_brackets(line):
"""Return number of unmatched open/close brackets."""
count = 0
for opening, closing in ['()', '[]', '{}']:
count += abs(line.count(opening) - line.count(closing))
return count
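# Hedged examples: only per-type counts are compared, so bracket
# ordering is ignored.
#
#     count_unbalanced_brackets('foo(bar[0]')  # -> 1, the lone '(' is unmatched
#     count_unbalanced_brackets(')(')          # -> 0, the counts cancel out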
def split_at_offsets(line, offsets):
"""Split line at offsets.
Return list of strings.
"""
result = []
previous_offset = 0
current_offset = 0
for current_offset in sorted(offsets):
if current_offset < len(line) and previous_offset != current_offset:
result.append(line[previous_offset:current_offset].strip())
previous_offset = current_offset
result.append(line[current_offset:])
return result
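# Hedged example: interior pieces are stripped, the final piece is
# kept verbatim.
#
#     split_at_offsets('abc def', [3])    # -> ['abc', ' def']
#     split_at_offsets('abcdef', [2, 4])  # -> ['ab', 'cd', 'ef']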
class LineEndingWrapper(object):
r"""Replace line endings to work with sys.stdout.
It seems that sys.stdout expects only '\n' as the line ending, no matter
the platform. Otherwise, we get repeated line endings.
"""
def __init__(self, output):
self.__output = output
def write(self, s):
self.__output.write(s.replace('\r\n', '\n').replace('\r', '\n'))
def flush(self):
self.__output.flush()
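# A hedged usage sketch: wrap a stream so mixed line endings are
# normalized before they reach it.
#
#     wrapped = LineEndingWrapper(sys.stdout)
#     wrapped.write('one\r\ntwo\r')  # the underlying stream sees 'one\ntwo\n'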
def match_file(filename, exclude):
"""Return True if file is okay for modifying/recursing."""
base_name = os.path.basename(filename)
if base_name.startswith('.'):
return False
for pattern in exclude:
if fnmatch.fnmatch(base_name, pattern):
return False
if fnmatch.fnmatch(filename, pattern):
return False
if not os.path.isdir(filename) and not is_python_file(filename):
return False
return True
def find_files(filenames, recursive, exclude):
"""Yield filenames."""
while filenames:
name = filenames.pop(0)
if recursive and os.path.isdir(name):
for root, directories, children in os.walk(name):
filenames += [os.path.join(root, f) for f in children
if match_file(os.path.join(root, f),
exclude)]
directories[:] = [d for d in directories
if match_file(os.path.join(root, d),
exclude)]
else:
yield name
def _fix_file(parameters):
"""Helper function for optionally running fix_file() in parallel."""
if parameters[1].verbose:
print('[file:{0}]'.format(parameters[0]), file=sys.stderr)
try:
fix_file(*parameters)
except IOError as error:
print(unicode(error), file=sys.stderr)
def fix_multiple_files(filenames, options, output=None):
"""Fix list of files.
Optionally fix files recursively.
"""
filenames = find_files(filenames, options.recursive, options.exclude)
if options.jobs > 1:
import multiprocessing
pool = multiprocessing.Pool(options.jobs)
pool.map(_fix_file,
[(name, options) for name in filenames])
else:
for name in filenames:
_fix_file((name, options, output))
def is_python_file(filename):
"""Return True if filename is Python file."""
if filename.endswith('.py'):
return True
try:
with open_with_encoding(filename) as f:
first_line = f.readlines(1)[0]
except (IOError, IndexError):
return False
if not PYTHON_SHEBANG_REGEX.match(first_line):
return False
return True
def is_probably_part_of_multiline(line):
"""Return True if line is likely part of a multiline string.
When multiline strings are involved, pep8 reports the error as being
at the start of the multiline string, which doesn't work for us.
"""
return (
'"""' in line or
"'''" in line or
line.rstrip().endswith('\\')
)
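# Hedged examples:
#
#     is_probably_part_of_multiline("s = '''abc")  # -> True, opens a multiline
#     is_probably_part_of_multiline('x = (1 +')    # -> False, no quotes/backslash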
def wrap_output(output, encoding):
"""Return output with specified encoding."""
return codecs.getwriter(encoding)(output.buffer
if hasattr(output, 'buffer')
else output)
def get_encoding():
"""Return preferred encoding."""
return locale.getpreferredencoding() or sys.getdefaultencoding()
def main(apply_config=True):
"""Tool main."""
try:
# Exit on broken pipe.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
args = parse_args(sys.argv[1:], apply_config=apply_config)
if args.list_fixes:
for code, description in sorted(supported_fixes()):
print('{code} - {description}'.format(
code=code, description=description))
return 0
if args.files == ['-']:
assert not args.in_place
encoding = sys.stdin.encoding or get_encoding()
# LineEndingWrapper is unnecessary here due to the symmetry between
# standard in and standard out.
wrap_output(sys.stdout, encoding=encoding).write(
fix_code(sys.stdin.read(), args, encoding=encoding))
else:
if args.in_place or args.diff:
args.files = list(set(args.files))
else:
assert len(args.files) == 1
assert not args.recursive
fix_multiple_files(args.files, args, sys.stdout)
except KeyboardInterrupt:
return 1 # pragma: no cover
class CachedTokenizer(object):
"""A one-element cache around tokenize.generate_tokens().
Original code written by Ned Batchelder, in coverage.py.
"""
def __init__(self):
self.last_text = None
self.last_tokens = None
def generate_tokens(self, text):
"""A stand-in for tokenize.generate_tokens()."""
if text != self.last_text:
string_io = io.StringIO(text)
self.last_tokens = list(
tokenize.generate_tokens(string_io.readline)
)
self.last_text = text
return self.last_tokens
_cached_tokenizer = CachedTokenizer()
generate_tokens = _cached_tokenizer.generate_tokens
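# Hedged note: repeated calls with identical text return the very same
# cached token list, e.g.
#
#     a = generate_tokens('x = 1\n')
#     b = generate_tokens('x = 1\n')
#     a is b  # -> True, served from the one-element cache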
if __name__ == '__main__':
sys.exit(main()) | apache-2.0 |
AppliedMicro/ENGLinuxLatest | tools/perf/scripts/python/sched-migration.py | 1910 | 11965 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm, common_callchain,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid):
pass
def trace_unhandled(event_name, context, event_fields_dict):
pass
| gpl-2.0 |
zhangziang/django-allauth | allauth/socialaccount/app_settings.py | 61 | 2394 | class AppSettings(object):
def __init__(self, prefix):
self.prefix = prefix
def _setting(self, name, dflt):
from django.conf import settings
getter = getattr(settings,
'ALLAUTH_SETTING_GETTER',
lambda name, dflt: getattr(settings, name, dflt))
return getter(self.prefix + name, dflt)
@property
def QUERY_EMAIL(self):
"""
Request e-mail address from 3rd party account provider?
E.g. using OpenID AX
"""
from allauth.account import app_settings as account_settings
return self._setting("QUERY_EMAIL",
account_settings.EMAIL_REQUIRED)
@property
def AUTO_SIGNUP(self):
"""
Attempt to bypass the signup form by using fields (e.g. username,
email) retrieved from the social account provider. If a conflict
arises due to a duplicate e-mail, the signup form will still kick in.
"""
return self._setting("AUTO_SIGNUP", True)
@property
def PROVIDERS(self):
"""
Provider specific settings
"""
return self._setting("PROVIDERS", {})
@property
def EMAIL_REQUIRED(self):
"""
The user is required to hand over an e-mail address when signing up
"""
from allauth.account import app_settings as account_settings
return self._setting("EMAIL_REQUIRED", account_settings.EMAIL_REQUIRED)
@property
def EMAIL_VERIFICATION(self):
"""
E-mail verification method; defaults to the account app's setting.
"""
from allauth.account import app_settings as account_settings
return self._setting("EMAIL_VERIFICATION",
account_settings.EMAIL_VERIFICATION)
@property
def ADAPTER(self):
return self._setting('ADAPTER',
'allauth.socialaccount.adapter'
'.DefaultSocialAccountAdapter')
@property
def FORMS(self):
return self._setting('FORMS', {})
@property
def STORE_TOKENS(self):
return self._setting('STORE_TOKENS', True)
# Ugly? Guido recommends this himself ...
# http://mail.python.org/pipermail/python-ideas/2012-May/014969.html
import sys
app_settings = AppSettings('SOCIALACCOUNT_')
app_settings.__name__ = __name__
sys.modules[__name__] = app_settings
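# Hedged usage sketch: because the module object is replaced above,
# importers receive the AppSettings instance directly.
#
#     from allauth.socialaccount import app_settings
#     app_settings.AUTO_SIGNUP  # looked up as SOCIALACCOUNT_AUTO_SIGNUP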
| mit |
Guneet-Dhillon/mxnet | tests/python/unittest/test_symbol.py | 7 | 9547 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import os
import re
import mxnet as mx
import numpy as np
from common import models
from mxnet.test_utils import discard_stderr
import pickle as pkl
def test_symbol_basic():
mlist = []
mlist.append(models.mlp2())
for m in mlist:
m.list_arguments()
m.list_outputs()
def test_symbol_compose():
data = mx.symbol.Variable('data')
net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
net1 = mx.symbol.FullyConnected(data=net1, name='fc2', num_hidden=100)
net1.list_arguments() == ['data',
'fc1_weight', 'fc1_bias',
'fc2_weight', 'fc2_bias']
net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
net2 = mx.symbol.Activation(data=net2, act_type='relu')
net2 = mx.symbol.FullyConnected(data=net2, name='fc4', num_hidden=20)
#print(net2.debug_str())
composed = net2(fc3_data=net1, name='composed')
#print(composed.debug_str())
multi_out = mx.symbol.Group([composed, net1])
assert len(multi_out.list_outputs()) == 2
def test_symbol_copy():
data = mx.symbol.Variable('data')
data_2 = copy.deepcopy(data)
data_3 = copy.copy(data)
assert data.tojson() == data_2.tojson()
assert data.tojson() == data_3.tojson()
def test_symbol_internal():
data = mx.symbol.Variable('data')
oldfc = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
net1 = mx.symbol.FullyConnected(data=oldfc, name='fc2', num_hidden=100)
assert net1.list_arguments() == ['data', 'fc1_weight', 'fc1_bias', 'fc2_weight', 'fc2_bias']
internal = net1.get_internals()
fc1 = internal['fc1_output']
assert fc1.list_arguments() == oldfc.list_arguments()
def test_symbol_children():
data = mx.symbol.Variable('data')
oldfc = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
net1 = mx.symbol.FullyConnected(data=oldfc, name='fc2', num_hidden=100)
assert net1.get_children().list_outputs() == ['fc1_output', 'fc2_weight', 'fc2_bias']
assert net1.get_children().get_children().list_outputs() == ['data', 'fc1_weight', 'fc1_bias']
assert net1.get_children()['fc2_weight'].list_arguments() == ['fc2_weight']
assert net1.get_children()['fc2_weight'].get_children() is None
data = mx.sym.Variable('data')
sliced = mx.sym.SliceChannel(data, num_outputs=3, name='slice')
concat = mx.sym.Concat(*list(sliced))
assert concat.get_children().list_outputs() == \
['slice_output0', 'slice_output1', 'slice_output2']
assert sliced.get_children().list_outputs() == ['data']
def test_symbol_pickle():
mlist = [models.mlp2(), models.conv()]
data = pkl.dumps(mlist)
mlist2 = pkl.loads(data)
for x, y in zip(mlist, mlist2):
assert x.tojson() == y.tojson()
def test_symbol_saveload():
sym = models.mlp2()
fname = 'tmp_sym.json'
sym.save(fname)
data2 = mx.symbol.load(fname)
# compare the JSON forms; save/load preserves symbol ordering
assert sym.tojson() == data2.tojson()
os.remove(fname)
def test_symbol_infer_type():
data = mx.symbol.Variable('data')
f32data = mx.symbol.Cast(data=data, dtype='float32')
fc1 = mx.symbol.FullyConnected(data = f32data, name='fc1', num_hidden=128)
mlp = mx.symbol.SoftmaxOutput(data = fc1, name = 'softmax')
arg, out, aux = mlp.infer_type(data=np.float16)
assert arg == [np.float16, np.float32, np.float32, np.float32]
assert out == [np.float32]
assert aux == []
def test_symbol_infer_shape():
num_hidden = 128
num_dim = 64
num_sample = 10
data = mx.symbol.Variable('data')
prev = mx.symbol.Variable('prevstate')
x2h = mx.symbol.FullyConnected(data=data, name='x2h', num_hidden=num_hidden)
h2h = mx.symbol.FullyConnected(data=prev, name='h2h', num_hidden=num_hidden)
out = mx.symbol.Activation(data=mx.sym.elemwise_add(x2h, h2h), name='out', act_type='relu')
# shape inference will fail because information is not available for h2h
ret = out.infer_shape(data=(num_sample, num_dim))
assert ret == (None, None, None)
arg, out_shapes, aux_shapes = out.infer_shape_partial(data=(num_sample, num_dim))
arg_shapes = dict(zip(out.list_arguments(), arg))
assert arg_shapes['data'] == (num_sample, num_dim)
assert arg_shapes['x2h_weight'] == (num_hidden, num_dim)
assert arg_shapes['h2h_weight'] == ()
# now we can do full shape inference
state_shape = out_shapes[0]
arg, out_shapes, aux_shapes = out.infer_shape(data=(num_sample, num_dim), prevstate=state_shape)
arg_shapes = dict(zip(out.list_arguments(), arg))
assert arg_shapes['data'] == (num_sample, num_dim)
assert arg_shapes['x2h_weight'] == (num_hidden, num_dim)
assert arg_shapes['h2h_weight'] == (num_hidden, num_hidden)
def test_symbol_infer_shape_var():
"Test specifying shape information when constructing a variable"
shape = (2, 3)
a = mx.symbol.Variable('a', shape=shape)
b = mx.symbol.Variable('b')
c = mx.symbol.elemwise_add(a, b)
arg_shapes, out_shapes, aux_shapes = c.infer_shape()
assert arg_shapes[0] == shape
assert arg_shapes[1] == shape
assert out_shapes[0] == shape
overwrite_shape = (5, 6)
arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=overwrite_shape)
assert arg_shapes[0] == overwrite_shape
assert arg_shapes[1] == overwrite_shape
assert out_shapes[0] == overwrite_shape
def check_symbol_consistency(sym1, sym2, ctx):
assert sym1.list_arguments() == sym2.list_arguments()
assert sym1.list_auxiliary_states() == sym2.list_auxiliary_states()
assert sym1.list_outputs() == sym2.list_outputs()
mx.test_utils.check_consistency([sym1, sym2], ctx_list=[ctx, ctx])
def test_load_000800():
with mx.AttrScope(ctx_group='stage1'):
data = mx.symbol.Variable('data', lr_mult=0.2)
weight = mx.sym.Variable(name='fc1_weight', lr_mult=1.2)
fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128, wd_mult=0.3)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
set_stage1 = set(act1.list_arguments())
with mx.AttrScope(ctx_group='stage2'):
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64, lr_mult=0.01)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
fc3 = mx.symbol.BatchNorm(fc3, name='batchnorm0')
sym1 = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sym2 = mx.sym.load(os.path.join(curr_path, 'save_000800.json'))
attr1 = sym1.attr_dict()
attr2 = sym2.attr_dict()
for k, v1 in attr1.items():
assert k in attr2, k
v2 = attr2[k]
for kk, vv1 in v1.items():
if kk.startswith('__') and kk.endswith('__'):
assert kk in v2 and v2[kk] == vv1, k + str(v1) + str(v2)
check_symbol_consistency(sym1, sym2,
{'ctx': mx.cpu(0), 'group2ctx': {'stage1' : mx.cpu(1), 'stage2' : mx.cpu(2)}, 'data': (1,200)})
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(2*a)
exe = b.simple_bind(ctx=mx.cpu(), a=(10,10))
def test_zero_prop():
data = mx.symbol.Variable('data')
for i in range(10):
data = data * data
exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256))
big = int(re.search(r'Total (\d+) MB allocated', exe.debug_str()).group(1))
exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256), grad_req='null')
small1 = int(re.search(r'Total (\d+) MB allocated', exe.debug_str()).group(1))
data = mx.sym.stop_gradient(data)
exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256))
small2 = int(re.search(r'Total (\d+) MB allocated', exe.debug_str()).group(1))
assert big > small2
assert small1 == small2
def test_zero_prop2():
x = mx.sym.Variable('x')
idx = mx.sym.Variable('idx')
y = mx.sym.batch_take(x, idx)
z = mx.sym.stop_gradient(y)
exe = z.simple_bind(ctx=mx.cpu(), x=(10, 10), idx=(10,),
type_dict={'x': np.float32, 'idx': np.int32})
exe.forward()
exe.backward()
# The following bind() should throw an exception. We discard the expected stderr
# output for this operation only in order to keep the test logs clean.
with discard_stderr():
try:
y.simple_bind(ctx=mx.cpu(), x=(10, 10), idx=(10,),
type_dict={'x': np.float32, 'idx': np.int32})
except:
return
assert False
if __name__ == '__main__':
import nose
nose.runmodule()
| apache-2.0 |
kopchik/qtile | libqtile/widget/crashme.py | 10 | 2352 | # Copyright (c) 2012 Florian Mounier
# Copyright (c) 2012 roger
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -*- coding: utf-8 -*-
from .. import bar
from . import base
class _CrashMe(base._TextBox):
"""
A developer widget to force a crash in qtile.
Pressing the left mouse button causes a zero division error.
Pressing the right mouse button causes a cairo draw error.
"""
orientations = base.ORIENTATION_HORIZONTAL
def __init__(self, width=bar.CALCULATED, **config):
"""
- width: A fixed width, or bar.CALCULATED to calculate the width
automatically (which is recommended).
"""
base._TextBox.__init__(self, "Crash me !", width, **config)
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
self.layout = self.drawer.textlayout(
self.text,
self.foreground,
self.font,
self.fontsize,
self.fontshadow,
markup=True
)
def button_press(self, x, y, button):
if button == 1:
1 / 0
elif button == 3:
self.text = '<span>\xC3GError'
self.bar.draw()
| mit |
pquentin/django | django/core/handlers/wsgi.py | 82 | 9759 | from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
import warnings
from io import BytesIO
from threading import Lock
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
# For backwards compatibility -- lots of code uses this in the wild!
from django.http.response import REASON_PHRASES as STATUS_CODE_TEXT # NOQA
from django.utils import datastructures, six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
'''
LimitedStream wraps another stream in order to not allow reading from it
past a specified number of bytes.
'''
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = b''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b''
return result
def readline(self, size=None):
while b'\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
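# A hedged usage sketch: reads never pass the declared limit, even if
# the wrapped stream holds more data.
#
#     stream = LimitedStream(BytesIO(b'abcdef'), 4)
#     stream.read()  # -> b'abcd'
#     stream.read()  # -> b''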
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = get_script_name(environ)
path_info = get_path_info(environ)
if not path_info:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
path_info = '/'
self.environ = environ
self.path_info = path_info
# be careful to only replace the first slash in the path because of
# http://test/something and http://test//something being different as
# stated in http://www.ietf.org/rfc/rfc2396.txt
self.path = '%s/%s' % (script_name.rstrip('/'),
path_info.replace('/', '', 1))
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
_, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
if 'charset' in content_params:
try:
codecs.lookup(content_params['charset'])
except LookupError:
pass
else:
self.encoding = content_params['charset']
self._post_parse_error = False
try:
content_length = int(environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
self._read_started = False
self.resolver_match = None
def _get_scheme(self):
return self.environ.get('wsgi.url_scheme')
def _get_request(self):
warnings.warn('`request.REQUEST` is deprecated, use `request.GET` or '
'`request.POST` instead.', RemovedInDjango19Warning, 2)
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
@cached_property
def GET(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
return http.QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
return http.parse_cookie(raw_cookie)
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
FILES = property(_get_files)
REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
with self.initLock:
try:
# Check that middleware is still uninitialized.
if self._request_middleware is None:
self.load_middleware()
except:
# Unload whatever middleware we got
self._request_middleware = None
raise
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__, environ=environ)
try:
request = self.request_class(environ)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
response._handler_class = self.__class__
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
start_response(force_str(status), response_headers)
if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'):
response = environ['wsgi.file_wrapper'](response.file_to_stream)
return response
def get_path_info(environ):
"""
Returns the HTTP request's PATH_INFO as a unicode string.
"""
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')
return path_info.decode(UTF_8)
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
if settings.FORCE_SCRIPT_NAME is not None:
return force_text(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
if not script_url:
script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')
if script_url:
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
script_name = script_url[:-len(path_info)]
else:
script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as bytes.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
# is the default. Re-encode to recover the original bytestring.
return value.encode(ISO_8859_1) if six.PY3 else value
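# Hedged example (Python 3): a UTF-8 path arrives latin-1-decoded in
# the environ, so re-encoding recovers the original bytes.
#
#     environ = {'PATH_INFO': b'/caf\xc3\xa9'.decode('iso-8859-1')}
#     get_bytes_from_wsgi(environ, 'PATH_INFO', '')  # -> b'/caf\xc3\xa9'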
def get_str_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as str.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = get_bytes_from_wsgi(environ, key, default)
return value.decode(UTF_8, errors='replace') if six.PY3 else value
| bsd-3-clause |
otherness-space/myProject002 | my_project_002/lib/python2.7/site-packages/django/core/management/commands/diffsettings.py | 114 | 1264 | from django.core.management.base import NoArgsCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
"Converts a module namespace to a Python dictionary. Used by get_settings_diff."
return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)])
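# Hedged example: names with a leading underscore are omitted and
# values are repr()'d, so a module defining DEBUG = True and
# _private = 1 maps to {'DEBUG': 'True'}.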
class Command(NoArgsCommand):
help = """Displays differences between the current settings.py and Django's
default settings. Settings that don't appear in the defaults are
followed by "###"."""
requires_model_validation = False
def handle_noargs(self, **options):
# Inspired by Postfix's "postconf -n".
from django.conf import settings, global_settings
# Because settings are imported lazily, we need to explicitly load them.
settings._setup()
user_settings = module_to_dict(settings._wrapped)
default_settings = module_to_dict(global_settings)
output = []
for key in sorted(user_settings.keys()):
if key not in default_settings:
output.append("%s = %s ###" % (key, user_settings[key]))
elif user_settings[key] != default_settings[key]:
output.append("%s = %s" % (key, user_settings[key]))
return '\n'.join(output)
| mit |
parthea/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/task.py | 6 | 2644 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point for CloudML training.
CloudML training requires a tarball package and a python module to run. This file
provides that entry point (the "main" method) and the list of args the program accepts.
"""
import argparse
import json
import logging
import os
import tensorflow as tf
from . import _model
from . import _trainer
from . import _util
def main(_):
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_dir',
type=str,
help='The input dir path for training and evaluation data.')
parser.add_argument(
'--job-dir',
dest='job_dir',
type=str,
help='The GCS path to which checkpoints and other outputs should be saved.')
parser.add_argument(
'--max_steps',
type=int,
help='Maximum number of training steps to run.')
parser.add_argument(
'--batch_size',
type=int,
help='Number of examples to be processed per mini-batch.')
parser.add_argument(
'--checkpoint',
type=str,
default=_util._DEFAULT_CHECKPOINT_GSURL,
help='Pretrained inception checkpoint path.')
args, _ = parser.parse_known_args()
labels = _util.get_labels(args.input_dir)
model = _model.Model(labels, 0.5, args.checkpoint)
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
# Print the job data as provided by the service.
logging.info('Original job data: %s', env.get('job', {}))
task_data = env.get('task', None) or {'type': 'master', 'index': 0}
task = type('TaskSpec', (object,), task_data)
cluster_data = env.get('cluster', None)
cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
if not cluster or not task or task.type == 'master' or task.type == 'worker':
_trainer.Trainer(args.input_dir, args.batch_size, args.max_steps,
args.job_dir, model, cluster, task).run_training()
elif task.type == 'ps':
server = _trainer.start_server(cluster, task)
server.join()
else:
raise ValueError('invalid task_type %s' % (task.type,))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
tf.app.run()
| apache-2.0 |
pekkosk/hotbit | hotbit/io/__init__.py | 1 | 2274 | """
Input module. Contains functions to read element, Slater-Koster and repulsion
data.
"""
def read_element(filename, symbol, format=None):
"""
Read element data from files.
Parameters:
-----------
fileobj: filename of file-object to read from
symbol: chemical symbol of the element
"""
if format is None:
format = filetype(filename)
if format == 'elm':
from hotbit.io.native import read_element_from_elm
return read_element_from_elm(filename, symbol)
if format == 'skf':
from hotbit.io.dftb import read_element_from_skf
return read_element_from_skf(filename, symbol)
raise RuntimeError('File format "'+format+'" not recognized!')
def read_HS(filename, symboli, symbolj, format=None):
"""
Read Slater-Koster tables from files.
Parameters:
-----------
fileobj: filename of file-object to read from
symboli: chemical symbol of the first element
symbolj: chemical symbol of the second element
"""
if format is None:
format = filetype(filename)
if format == 'par':
from hotbit.io.native import read_HS_from_par
return read_HS_from_par(filename, symboli, symbolj)
if format == 'skf':
from hotbit.io.dftb import read_HS_from_skf
return read_HS_from_skf(filename, symboli, symbolj)
raise RuntimeError('File format "'+format+'" not recognized!')
def read_repulsion(filename, format=None):
"""
Read repulsion data from files.
Parameters:
-----------
fileobj: filename of file-object to read from
"""
if format is None:
format = filetype(filename)
if format == 'par':
from hotbit.io.native import read_repulsion_from_par
return read_repulsion_from_par(filename)
if format == 'skf':
from hotbit.io.dftb import read_repulsion_from_skf
return read_repulsion_from_skf(filename)
raise RuntimeError('File format "'+format+'" not recognized!')
def filetype(filename):
if filename.lower().endswith('.elm'):
return 'elm'
if filename.lower().endswith('.par'):
return 'par'
if filename.lower().endswith('.skf') or filename.lower().endswith('.spl'):
return 'skf'
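# Hedged usage sketch (filenames are illustrative): the readers above
# dispatch on the detected format.
#
#     filetype('C.elm')           # -> 'elm'
#     read_element('C.elm', 'C')  # delegates to read_element_from_elm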
| gpl-2.0 |
PatKayongo/patkayongo.github.io | node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/_openedgebuiltins.py | 370 | 40661 | # -*- coding: utf-8 -*-
"""
pygments.lexers._openedgebuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the OpenEdgeLexer.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
OPENEDGEKEYWORDS = [
'ABSOLUTE', 'ABS', 'ABSO', 'ABSOL', 'ABSOLU', 'ABSOLUT', 'ACCELERATOR',
'ACCUM', 'ACCUMULATE', 'ACCUM', 'ACCUMU', 'ACCUMUL', 'ACCUMULA',
'ACCUMULAT', 'ACTIVE-FORM', 'ACTIVE-WINDOW', 'ADD', 'ADD-BUFFER',
'ADD-CALC-COLUMN', 'ADD-COLUMNS-FROM', 'ADD-EVENTS-PROCEDURE',
'ADD-FIELDS-FROM', 'ADD-FIRST', 'ADD-INDEX-FIELD', 'ADD-LAST',
'ADD-LIKE-COLUMN', 'ADD-LIKE-FIELD', 'ADD-LIKE-INDEX', 'ADD-NEW-FIELD',
'ADD-NEW-INDEX', 'ADD-SCHEMA-LOCATION', 'ADD-SUPER-PROCEDURE', 'ADM-DATA',
'ADVISE', 'ALERT-BOX', 'ALIAS', 'ALL', 'ALLOW-COLUMN-SEARCHING',
'ALLOW-REPLICATION', 'ALTER', 'ALWAYS-ON-TOP', 'AMBIGUOUS', 'AMBIG',
'AMBIGU', 'AMBIGUO', 'AMBIGUOU', 'ANALYZE', 'ANALYZ', 'AND', 'ANSI-ONLY',
'ANY', 'ANYWHERE', 'APPEND', 'APPL-ALERT-BOXES', 'APPL-ALERT',
'APPL-ALERT-', 'APPL-ALERT-B', 'APPL-ALERT-BO', 'APPL-ALERT-BOX',
'APPL-ALERT-BOXE', 'APPL-CONTEXT-ID', 'APPLICATION', 'APPLY',
'APPSERVER-INFO', 'APPSERVER-PASSWORD', 'APPSERVER-USERID', 'ARRAY-MESSAGE',
'AS', 'ASC', 'ASCENDING', 'ASCE', 'ASCEN', 'ASCEND', 'ASCENDI', 'ASCENDIN',
'ASK-OVERWRITE', 'ASSEMBLY', 'ASSIGN', 'ASYNCHRONOUS',
'ASYNC-REQUEST-COUNT', 'ASYNC-REQUEST-HANDLE', 'AT', 'ATTACHED-PAIRLIST',
'ATTR-SPACE', 'ATTR', 'ATTRI', 'ATTRIB', 'ATTRIBU', 'ATTRIBUT',
'AUDIT-CONTROL', 'AUDIT-ENABLED', 'AUDIT-EVENT-CONTEXT', 'AUDIT-POLICY',
'AUTHENTICATION-FAILED', 'AUTHORIZATION', 'AUTO-COMPLETION', 'AUTO-COMP',
'AUTO-COMPL', 'AUTO-COMPLE', 'AUTO-COMPLET', 'AUTO-COMPLETI',
'AUTO-COMPLETIO', 'AUTO-ENDKEY', 'AUTO-END-KEY', 'AUTO-GO', 'AUTO-INDENT',
'AUTO-IND', 'AUTO-INDE', 'AUTO-INDEN', 'AUTOMATIC', 'AUTO-RESIZE',
'AUTO-RETURN', 'AUTO-RET', 'AUTO-RETU', 'AUTO-RETUR', 'AUTO-SYNCHRONIZE',
'AUTO-ZAP', 'AUTO-Z', 'AUTO-ZA', 'AVAILABLE', 'AVAIL', 'AVAILA', 'AVAILAB',
'AVAILABL', 'AVAILABLE-FORMATS', 'AVERAGE', 'AVE', 'AVER', 'AVERA',
'AVERAG', 'AVG', 'BACKGROUND', 'BACK', 'BACKG', 'BACKGR', 'BACKGRO',
'BACKGROU', 'BACKGROUN', 'BACKWARDS', 'BACKWARD', 'BASE64-DECODE',
'BASE64-ENCODE', 'BASE-ADE', 'BASE-KEY', 'BATCH-MODE', 'BATCH', 'BATCH-',
'BATCH-M', 'BATCH-MO', 'BATCH-MOD', 'BATCH-SIZE', 'BEFORE-HIDE', 'BEFORE-H',
'BEFORE-HI', 'BEFORE-HID', 'BEGIN-EVENT-GROUP', 'BEGINS', 'BELL', 'BETWEEN',
'BGCOLOR', 'BGC', 'BGCO', 'BGCOL', 'BGCOLO', 'BIG-ENDIAN', 'BINARY', 'BIND',
'BIND-WHERE', 'BLANK', 'BLOCK-ITERATION-DISPLAY', 'BORDER-BOTTOM-CHARS',
'BORDER-B', 'BORDER-BO', 'BORDER-BOT', 'BORDER-BOTT', 'BORDER-BOTTO',
'BORDER-BOTTOM-PIXELS', 'BORDER-BOTTOM-P', 'BORDER-BOTTOM-PI',
'BORDER-BOTTOM-PIX', 'BORDER-BOTTOM-PIXE', 'BORDER-BOTTOM-PIXEL',
'BORDER-LEFT-CHARS', 'BORDER-L', 'BORDER-LE', 'BORDER-LEF', 'BORDER-LEFT',
'BORDER-LEFT-', 'BORDER-LEFT-C', 'BORDER-LEFT-CH', 'BORDER-LEFT-CHA',
'BORDER-LEFT-CHAR', 'BORDER-LEFT-PIXELS', 'BORDER-LEFT-P', 'BORDER-LEFT-PI',
'BORDER-LEFT-PIX', 'BORDER-LEFT-PIXE', 'BORDER-LEFT-PIXEL',
'BORDER-RIGHT-CHARS', 'BORDER-R', 'BORDER-RI', 'BORDER-RIG', 'BORDER-RIGH',
'BORDER-RIGHT', 'BORDER-RIGHT-', 'BORDER-RIGHT-C', 'BORDER-RIGHT-CH',
'BORDER-RIGHT-CHA', 'BORDER-RIGHT-CHAR', 'BORDER-RIGHT-PIXELS',
'BORDER-RIGHT-P', 'BORDER-RIGHT-PI', 'BORDER-RIGHT-PIX',
'BORDER-RIGHT-PIXE', 'BORDER-RIGHT-PIXEL', 'BORDER-TOP-CHARS', 'BORDER-T',
'BORDER-TO', 'BORDER-TOP', 'BORDER-TOP-', 'BORDER-TOP-C', 'BORDER-TOP-CH',
'BORDER-TOP-CHA', 'BORDER-TOP-CHAR', 'BORDER-TOP-PIXELS', 'BORDER-TOP-P',
'BORDER-TOP-PI', 'BORDER-TOP-PIX', 'BORDER-TOP-PIXE', 'BORDER-TOP-PIXEL',
'BOX', 'BOX-SELECTABLE', 'BOX-SELECT', 'BOX-SELECTA', 'BOX-SELECTAB',
'BOX-SELECTABL', 'BREAK', 'BROWSE', 'BUFFER', 'BUFFER-CHARS',
'BUFFER-COMPARE', 'BUFFER-COPY', 'BUFFER-CREATE', 'BUFFER-DELETE',
'BUFFER-FIELD', 'BUFFER-HANDLE', 'BUFFER-LINES', 'BUFFER-NAME',
'BUFFER-RELEASE', 'BUFFER-VALUE', 'BUTTON', 'BUTTONS', 'BUTTON', 'BY',
'BY-POINTER', 'BY-VARIANT-POINTER', 'CACHE', 'CACHE-SIZE', 'CALL',
'CALL-NAME', 'CALL-TYPE', 'CANCEL-BREAK', 'CANCEL-BUTTON', 'CAN-CREATE',
'CAN-DELETE', 'CAN-DO', 'CAN-FIND', 'CAN-QUERY', 'CAN-READ', 'CAN-SET',
'CAN-WRITE', 'CAPS', 'CAREFUL-PAINT', 'CASE', 'CASE-SENSITIVE', 'CASE-SEN',
'CASE-SENS', 'CASE-SENSI', 'CASE-SENSIT', 'CASE-SENSITI', 'CASE-SENSITIV',
'CAST', 'CATCH', 'CDECL', 'CENTERED', 'CENTER', 'CENTERE', 'CHAINED',
'CHARACTER_LENGTH', 'CHARSET', 'CHECK', 'CHECKED', 'CHOOSE', 'CHR', 'CLASS',
'CLASS-TYPE', 'CLEAR', 'CLEAR-APPL-CONTEXT', 'CLEAR-LOG', 'CLEAR-SELECTION',
'CLEAR-SELECT', 'CLEAR-SELECTI', 'CLEAR-SELECTIO', 'CLEAR-SORT-ARROWS',
'CLEAR-SORT-ARROW', 'CLIENT-CONNECTION-ID', 'CLIENT-PRINCIPAL',
'CLIENT-TTY', 'CLIENT-TYPE', 'CLIENT-WORKSTATION', 'CLIPBOARD', 'CLOSE',
'CLOSE-LOG', 'CODE', 'CODEBASE-LOCATOR', 'CODEPAGE', 'CODEPAGE-CONVERT',
'COLLATE', 'COL-OF', 'COLON', 'COLON-ALIGNED', 'COLON-ALIGN',
'COLON-ALIGNE', 'COLOR', 'COLOR-TABLE', 'COLUMN', 'COL', 'COLU', 'COLUM',
'COLUMN-BGCOLOR', 'COLUMN-DCOLOR', 'COLUMN-FGCOLOR', 'COLUMN-FONT',
'COLUMN-LABEL', 'COLUMN-LAB', 'COLUMN-LABE', 'COLUMN-MOVABLE', 'COLUMN-OF',
'COLUMN-PFCOLOR', 'COLUMN-READ-ONLY', 'COLUMN-RESIZABLE', 'COLUMNS',
'COLUMN-SCROLLING', 'COMBO-BOX', 'COMMAND', 'COMPARES', 'COMPILE',
'COMPILER', 'COMPLETE', 'COM-SELF', 'CONFIG-NAME', 'CONNECT', 'CONNECTED',
'CONSTRUCTOR', 'CONTAINS', 'CONTENTS', 'CONTEXT', 'CONTEXT-HELP',
'CONTEXT-HELP-FILE', 'CONTEXT-HELP-ID', 'CONTEXT-POPUP', 'CONTROL',
'CONTROL-BOX', 'CONTROL-FRAME', 'CONVERT', 'CONVERT-3D-COLORS',
'CONVERT-TO-OFFSET', 'CONVERT-TO-OFFS', 'CONVERT-TO-OFFSE', 'COPY-DATASET',
'COPY-LOB', 'COPY-SAX-ATTRIBUTES', 'COPY-TEMP-TABLE', 'COUNT', 'COUNT-OF',
'CPCASE', 'CPCOLL', 'CPINTERNAL', 'CPLOG', 'CPPRINT', 'CPRCODEIN',
'CPRCODEOUT', 'CPSTREAM', 'CPTERM', 'CRC-VALUE', 'CREATE', 'CREATE-LIKE',
'CREATE-LIKE-SEQUENTIAL', 'CREATE-NODE-NAMESPACE',
'CREATE-RESULT-LIST-ENTRY', 'CREATE-TEST-FILE', 'CURRENT', 'CURRENT_DATE',
'CURRENT-CHANGED', 'CURRENT-COLUMN', 'CURRENT-ENVIRONMENT',
'CURRENT-ENV', 'CURRENT-ENVI', 'CURRENT-ENVIR', 'CURRENT-ENVIRO',
'CURRENT-ENVIRON', 'CURRENT-ENVIRONM', 'CURRENT-ENVIRONME',
'CURRENT-ENVIRONMEN', 'CURRENT-ITERATION', 'CURRENT-LANGUAGE',
'CURRENT-LANG', 'CURRENT-LANGU', 'CURRENT-LANGUA', 'CURRENT-LANGUAG',
'CURRENT-QUERY', 'CURRENT-RESULT-ROW', 'CURRENT-ROW-MODIFIED',
'CURRENT-VALUE', 'CURRENT-WINDOW', 'CURSOR', 'CURS', 'CURSO', 'CURSOR-CHAR',
'CURSOR-LINE', 'CURSOR-OFFSET', 'DATABASE', 'DATA-BIND',
'DATA-ENTRY-RETURN', 'DATA-ENTRY-RET', 'DATA-ENTRY-RETU',
'DATA-ENTRY-RETUR', 'DATA-RELATION', 'DATA-REL', 'DATA-RELA', 'DATA-RELAT',
'DATA-RELATI', 'DATA-RELATIO', 'DATASERVERS', 'DATASET', 'DATASET-HANDLE',
'DATA-SOURCE', 'DATA-SOURCE-COMPLETE-MAP', 'DATA-SOURCE-MODIFIED',
'DATA-SOURCE-ROWID', 'DATA-TYPE', 'DATA-T', 'DATA-TY', 'DATA-TYP',
'DATE-FORMAT', 'DATE-F', 'DATE-FO', 'DATE-FOR', 'DATE-FORM', 'DATE-FORMA',
'DAY', 'DBCODEPAGE', 'DBCOLLATION', 'DBNAME', 'DBPARAM', 'DB-REFERENCES',
'DBRESTRICTIONS', 'DBREST', 'DBRESTR', 'DBRESTRI', 'DBRESTRIC',
'DBRESTRICT', 'DBRESTRICTI', 'DBRESTRICTIO', 'DBRESTRICTION', 'DBTASKID',
'DBTYPE', 'DBVERSION', 'DBVERS', 'DBVERSI', 'DBVERSIO', 'DCOLOR', 'DDE',
'DDE-ERROR', 'DDE-ID', 'DDE-I', 'DDE-ITEM', 'DDE-NAME', 'DDE-TOPIC',
'DEBLANK', 'DEBUG', 'DEBU', 'DEBUG-ALERT', 'DEBUGGER', 'DEBUG-LIST',
'DECIMALS', 'DECLARE', 'DECLARE-NAMESPACE', 'DECRYPT', 'DEFAULT',
'DEFAULT-BUFFER-HANDLE', 'DEFAULT-BUTTON', 'DEFAULT-B', 'DEFAULT-BU',
'DEFAULT-BUT', 'DEFAULT-BUTT', 'DEFAULT-BUTTO', 'DEFAULT-COMMIT',
'DEFAULT-EXTENSION', 'DEFAULT-EX', 'DEFAULT-EXT', 'DEFAULT-EXTE',
'DEFAULT-EXTEN', 'DEFAULT-EXTENS', 'DEFAULT-EXTENSI', 'DEFAULT-EXTENSIO',
'DEFAULT-NOXLATE', 'DEFAULT-NOXL', 'DEFAULT-NOXLA', 'DEFAULT-NOXLAT',
'DEFAULT-VALUE', 'DEFAULT-WINDOW', 'DEFINED', 'DEFINE-USER-EVENT-MANAGER',
'DELETE', 'DEL', 'DELE', 'DELET', 'DELETE-CHARACTER', 'DELETE-CHAR',
'DELETE-CHARA', 'DELETE-CHARAC', 'DELETE-CHARACT', 'DELETE-CHARACTE',
'DELETE-CURRENT-ROW', 'DELETE-LINE', 'DELETE-RESULT-LIST-ENTRY',
'DELETE-SELECTED-ROW', 'DELETE-SELECTED-ROWS', 'DELIMITER', 'DESC',
'DESCENDING', 'DESC', 'DESCE', 'DESCEN', 'DESCEND', 'DESCENDI', 'DESCENDIN',
'DESELECT-FOCUSED-ROW', 'DESELECTION', 'DESELECT-ROWS',
'DESELECT-SELECTED-ROW', 'DESTRUCTOR', 'DIALOG-BOX', 'DICTIONARY', 'DICT',
'DICTI', 'DICTIO', 'DICTION', 'DICTIONA', 'DICTIONAR', 'DIR', 'DISABLE',
'DISABLE-AUTO-ZAP', 'DISABLED', 'DISABLE-DUMP-TRIGGERS',
'DISABLE-LOAD-TRIGGERS', 'DISCONNECT', 'DISCON', 'DISCONN', 'DISCONNE',
'DISCONNEC', 'DISP', 'DISPLAY', 'DISP', 'DISPL', 'DISPLA',
'DISPLAY-MESSAGE', 'DISPLAY-TYPE', 'DISPLAY-T', 'DISPLAY-TY', 'DISPLAY-TYP',
'DISTINCT', 'DO', 'DOMAIN-DESCRIPTION', 'DOMAIN-NAME', 'DOMAIN-TYPE', 'DOS',
'DOUBLE', 'DOWN', 'DRAG-ENABLED', 'DROP', 'DROP-DOWN', 'DROP-DOWN-LIST',
'DROP-FILE-NOTIFY', 'DROP-TARGET', 'DUMP', 'DYNAMIC', 'DYNAMIC-FUNCTION',
'EACH', 'ECHO', 'EDGE-CHARS', 'EDGE', 'EDGE-', 'EDGE-C', 'EDGE-CH',
'EDGE-CHA', 'EDGE-CHAR', 'EDGE-PIXELS', 'EDGE-P', 'EDGE-PI', 'EDGE-PIX',
'EDGE-PIXE', 'EDGE-PIXEL', 'EDIT-CAN-PASTE', 'EDIT-CAN-UNDO', 'EDIT-CLEAR',
'EDIT-COPY', 'EDIT-CUT', 'EDITING', 'EDITOR', 'EDIT-PASTE', 'EDIT-UNDO',
'ELSE', 'EMPTY', 'EMPTY-TEMP-TABLE', 'ENABLE', 'ENABLED-FIELDS', 'ENCODE',
'ENCRYPT', 'ENCRYPT-AUDIT-MAC-KEY', 'ENCRYPTION-SALT', 'END',
'END-DOCUMENT', 'END-ELEMENT', 'END-EVENT-GROUP', 'END-FILE-DROP', 'ENDKEY',
'END-KEY', 'END-MOVE', 'END-RESIZE', 'END-ROW-RESIZE', 'END-USER-PROMPT',
'ENTERED', 'ENTRY', 'EQ', 'ERROR', 'ERROR-COLUMN', 'ERROR-COL',
'ERROR-COLU', 'ERROR-COLUM', 'ERROR-ROW', 'ERROR-STACK-TRACE',
'ERROR-STATUS', 'ERROR-STAT', 'ERROR-STATU', 'ESCAPE', 'ETIME',
'EVENT-GROUP-ID', 'EVENT-PROCEDURE', 'EVENT-PROCEDURE-CONTEXT', 'EVENTS',
'EVENT', 'EVENT-TYPE', 'EVENT-T', 'EVENT-TY', 'EVENT-TYP', 'EXCEPT',
'EXCLUSIVE-ID', 'EXCLUSIVE-LOCK', 'EXCLUSIVE', 'EXCLUSIVE-', 'EXCLUSIVE-L',
'EXCLUSIVE-LO', 'EXCLUSIVE-LOC', 'EXCLUSIVE-WEB-USER', 'EXECUTE', 'EXISTS',
'EXP', 'EXPAND', 'EXPANDABLE', 'EXPLICIT', 'EXPORT', 'EXPORT-PRINCIPAL',
'EXTENDED', 'EXTENT', 'EXTERNAL', 'FALSE', 'FETCH', 'FETCH-SELECTED-ROW',
'FGCOLOR', 'FGC', 'FGCO', 'FGCOL', 'FGCOLO', 'FIELD', 'FIELDS', 'FIELD',
'FILE', 'FILE-CREATE-DATE', 'FILE-CREATE-TIME', 'FILE-INFORMATION',
'FILE-INFO', 'FILE-INFOR', 'FILE-INFORM', 'FILE-INFORMA', 'FILE-INFORMAT',
'FILE-INFORMATI', 'FILE-INFORMATIO', 'FILE-MOD-DATE', 'FILE-MOD-TIME',
'FILENAME', 'FILE-NAME', 'FILE-OFFSET', 'FILE-OFF', 'FILE-OFFS',
'FILE-OFFSE', 'FILE-SIZE', 'FILE-TYPE', 'FILL', 'FILLED', 'FILL-IN',
'FILTERS', 'FINAL', 'FINALLY', 'FIND', 'FIND-BY-ROWID',
'FIND-CASE-SENSITIVE', 'FIND-CURRENT', 'FINDER', 'FIND-FIRST',
'FIND-GLOBAL', 'FIND-LAST', 'FIND-NEXT-OCCURRENCE', 'FIND-PREV-OCCURRENCE',
'FIND-SELECT', 'FIND-UNIQUE', 'FIND-WRAP-AROUND', 'FIRST',
'FIRST-ASYNCH-REQUEST', 'FIRST-CHILD', 'FIRST-COLUMN', 'FIRST-FORM',
'FIRST-OBJECT', 'FIRST-OF', 'FIRST-PROCEDURE', 'FIRST-PROC', 'FIRST-PROCE',
'FIRST-PROCED', 'FIRST-PROCEDU', 'FIRST-PROCEDUR', 'FIRST-SERVER',
'FIRST-TAB-ITEM', 'FIRST-TAB-I', 'FIRST-TAB-IT', 'FIRST-TAB-ITE',
'FIT-LAST-COLUMN', 'FIXED-ONLY', 'FLAT-BUTTON', 'FLOAT', 'FOCUS',
'FOCUSED-ROW', 'FOCUSED-ROW-SELECTED', 'FONT', 'FONT-TABLE', 'FOR',
'FORCE-FILE', 'FOREGROUND', 'FORE', 'FOREG', 'FOREGR', 'FOREGRO',
'FOREGROU', 'FOREGROUN', 'FORM', 'FORMAT', 'FORM', 'FORMA', 'FORMATTED',
'FORMATTE', 'FORM-LONG-INPUT', 'FORWARD', 'FORWARDS', 'FORWARD', 'FRAGMENT',
'FRAGMEN', 'FRAME', 'FRAM', 'FRAME-COL', 'FRAME-DB', 'FRAME-DOWN',
'FRAME-FIELD', 'FRAME-FILE', 'FRAME-INDEX', 'FRAME-INDE', 'FRAME-LINE',
'FRAME-NAME', 'FRAME-ROW', 'FRAME-SPACING', 'FRAME-SPA', 'FRAME-SPAC',
'FRAME-SPACI', 'FRAME-SPACIN', 'FRAME-VALUE', 'FRAME-VAL', 'FRAME-VALU',
'FRAME-X', 'FRAME-Y', 'FREQUENCY', 'FROM', 'FROM-CHARS', 'FROM-C',
'FROM-CH', 'FROM-CHA', 'FROM-CHAR', 'FROM-CURRENT', 'FROM-CUR', 'FROM-CURR',
'FROM-CURRE', 'FROM-CURREN', 'FROM-PIXELS', 'FROM-P', 'FROM-PI', 'FROM-PIX',
'FROM-PIXE', 'FROM-PIXEL', 'FULL-HEIGHT-CHARS', 'FULL-HEIGHT',
'FULL-HEIGHT-', 'FULL-HEIGHT-C', 'FULL-HEIGHT-CH', 'FULL-HEIGHT-CHA',
'FULL-HEIGHT-CHAR', 'FULL-HEIGHT-PIXELS', 'FULL-HEIGHT-P', 'FULL-HEIGHT-PI',
'FULL-HEIGHT-PIX', 'FULL-HEIGHT-PIXE', 'FULL-HEIGHT-PIXEL', 'FULL-PATHNAME',
'FULL-PATHN', 'FULL-PATHNA', 'FULL-PATHNAM', 'FULL-WIDTH-CHARS',
'FULL-WIDTH', 'FULL-WIDTH-', 'FULL-WIDTH-C', 'FULL-WIDTH-CH',
'FULL-WIDTH-CHA', 'FULL-WIDTH-CHAR', 'FULL-WIDTH-PIXELS', 'FULL-WIDTH-P',
'FULL-WIDTH-PI', 'FULL-WIDTH-PIX', 'FULL-WIDTH-PIXE', 'FULL-WIDTH-PIXEL',
'FUNCTION', 'FUNCTION-CALL-TYPE', 'GATEWAYS', 'GATEWAY', 'GE',
'GENERATE-MD5', 'GENERATE-PBE-KEY', 'GENERATE-PBE-SALT',
'GENERATE-RANDOM-KEY', 'GENERATE-UUID', 'GET', 'GET-ATTR-CALL-TYPE',
'GET-ATTRIBUTE-NODE', 'GET-BINARY-DATA', 'GET-BLUE-VALUE', 'GET-BLUE',
'GET-BLUE-', 'GET-BLUE-V', 'GET-BLUE-VA', 'GET-BLUE-VAL', 'GET-BLUE-VALU',
'GET-BROWSE-COLUMN', 'GET-BUFFER-HANDLE', 'GETBYTE', 'GET-BYTE',
'GET-CALLBACK-PROC-CONTEXT', 'GET-CALLBACK-PROC-NAME', 'GET-CGI-LIST',
'GET-CGI-LONG-VALUE', 'GET-CGI-VALUE', 'GET-CODEPAGES', 'GET-COLLATIONS',
'GET-CONFIG-VALUE', 'GET-CURRENT', 'GET-DOUBLE', 'GET-DROPPED-FILE',
'GET-DYNAMIC', 'GET-ERROR-COLUMN', 'GET-ERROR-ROW', 'GET-FILE',
'GET-FILE-NAME', 'GET-FILE-OFFSET', 'GET-FILE-OFFSE', 'GET-FIRST',
'GET-FLOAT', 'GET-GREEN-VALUE', 'GET-GREEN', 'GET-GREEN-', 'GET-GREEN-V',
'GET-GREEN-VA', 'GET-GREEN-VAL', 'GET-GREEN-VALU',
'GET-INDEX-BY-NAMESPACE-NAME', 'GET-INDEX-BY-QNAME', 'GET-INT64',
'GET-ITERATION', 'GET-KEY-VALUE', 'GET-KEY-VAL', 'GET-KEY-VALU', 'GET-LAST',
'GET-LOCALNAME-BY-INDEX', 'GET-LONG', 'GET-MESSAGE', 'GET-NEXT',
'GET-NUMBER', 'GET-POINTER-VALUE', 'GET-PREV', 'GET-PRINTERS',
'GET-PROPERTY', 'GET-QNAME-BY-INDEX', 'GET-RED-VALUE', 'GET-RED',
'GET-RED-', 'GET-RED-V', 'GET-RED-VA', 'GET-RED-VAL', 'GET-RED-VALU',
'GET-REPOSITIONED-ROW', 'GET-RGB-VALUE', 'GET-SELECTED-WIDGET',
'GET-SELECTED', 'GET-SELECTED-', 'GET-SELECTED-W', 'GET-SELECTED-WI',
'GET-SELECTED-WID', 'GET-SELECTED-WIDG', 'GET-SELECTED-WIDGE', 'GET-SHORT',
'GET-SIGNATURE', 'GET-SIZE', 'GET-STRING', 'GET-TAB-ITEM',
'GET-TEXT-HEIGHT-CHARS', 'GET-TEXT-HEIGHT', 'GET-TEXT-HEIGHT-',
'GET-TEXT-HEIGHT-C', 'GET-TEXT-HEIGHT-CH', 'GET-TEXT-HEIGHT-CHA',
'GET-TEXT-HEIGHT-CHAR', 'GET-TEXT-HEIGHT-PIXELS', 'GET-TEXT-HEIGHT-P',
'GET-TEXT-HEIGHT-PI', 'GET-TEXT-HEIGHT-PIX', 'GET-TEXT-HEIGHT-PIXE',
'GET-TEXT-HEIGHT-PIXEL', 'GET-TEXT-WIDTH-CHARS', 'GET-TEXT-WIDTH',
'GET-TEXT-WIDTH-', 'GET-TEXT-WIDTH-C', 'GET-TEXT-WIDTH-CH',
'GET-TEXT-WIDTH-CHA', 'GET-TEXT-WIDTH-CHAR', 'GET-TEXT-WIDTH-PIXELS',
'GET-TEXT-WIDTH-P', 'GET-TEXT-WIDTH-PI', 'GET-TEXT-WIDTH-PIX',
'GET-TEXT-WIDTH-PIXE', 'GET-TEXT-WIDTH-PIXEL', 'GET-TYPE-BY-INDEX',
'GET-TYPE-BY-NAMESPACE-NAME', 'GET-TYPE-BY-QNAME', 'GET-UNSIGNED-LONG',
'GET-UNSIGNED-SHORT', 'GET-URI-BY-INDEX', 'GET-VALUE-BY-INDEX',
'GET-VALUE-BY-NAMESPACE-NAME', 'GET-VALUE-BY-QNAME', 'GET-WAIT-STATE',
'GLOBAL', 'GO-ON', 'GO-PENDING', 'GO-PEND', 'GO-PENDI', 'GO-PENDIN',
'GRANT', 'GRAPHIC-EDGE', 'GRAPHIC-E', 'GRAPHIC-ED', 'GRAPHIC-EDG',
'GRID-FACTOR-HORIZONTAL', 'GRID-FACTOR-H', 'GRID-FACTOR-HO',
'GRID-FACTOR-HOR', 'GRID-FACTOR-HORI', 'GRID-FACTOR-HORIZ',
'GRID-FACTOR-HORIZO', 'GRID-FACTOR-HORIZON', 'GRID-FACTOR-HORIZONT',
'GRID-FACTOR-HORIZONTA', 'GRID-FACTOR-VERTICAL', 'GRID-FACTOR-V',
'GRID-FACTOR-VE', 'GRID-FACTOR-VER', 'GRID-FACTOR-VERT',
'GRID-FACTOR-VERTI', 'GRID-FACTOR-VERTIC', 'GRID-FACTOR-VERTICA',
'GRID-SNAP', 'GRID-UNIT-HEIGHT-CHARS', 'GRID-UNIT-HEIGHT',
'GRID-UNIT-HEIGHT-', 'GRID-UNIT-HEIGHT-C', 'GRID-UNIT-HEIGHT-CH',
'GRID-UNIT-HEIGHT-CHA', 'GRID-UNIT-HEIGHT-PIXELS', 'GRID-UNIT-HEIGHT-P',
'GRID-UNIT-HEIGHT-PI', 'GRID-UNIT-HEIGHT-PIX', 'GRID-UNIT-HEIGHT-PIXE',
'GRID-UNIT-HEIGHT-PIXEL', 'GRID-UNIT-WIDTH-CHARS', 'GRID-UNIT-WIDTH',
'GRID-UNIT-WIDTH-', 'GRID-UNIT-WIDTH-C', 'GRID-UNIT-WIDTH-CH',
'GRID-UNIT-WIDTH-CHA', 'GRID-UNIT-WIDTH-CHAR', 'GRID-UNIT-WIDTH-PIXELS',
'GRID-UNIT-WIDTH-P', 'GRID-UNIT-WIDTH-PI', 'GRID-UNIT-WIDTH-PIX',
'GRID-UNIT-WIDTH-PIXE', 'GRID-UNIT-WIDTH-PIXEL', 'GRID-VISIBLE', 'GROUP',
'GT', 'GUID', 'HANDLER', 'HAS-RECORDS', 'HAVING', 'HEADER', 'HEIGHT-CHARS',
'HEIGHT', 'HEIGHT-', 'HEIGHT-C', 'HEIGHT-CH', 'HEIGHT-CHA', 'HEIGHT-CHAR',
'HEIGHT-PIXELS', 'HEIGHT-P', 'HEIGHT-PI', 'HEIGHT-PIX', 'HEIGHT-PIXE',
'HEIGHT-PIXEL', 'HELP', 'HEX-DECODE', 'HEX-ENCODE', 'HIDDEN', 'HIDE',
'HORIZONTAL', 'HORI', 'HORIZ', 'HORIZO', 'HORIZON', 'HORIZONT', 'HORIZONTA',
'HOST-BYTE-ORDER', 'HTML-CHARSET', 'HTML-END-OF-LINE', 'HTML-END-OF-PAGE',
'HTML-FRAME-BEGIN', 'HTML-FRAME-END', 'HTML-HEADER-BEGIN',
'HTML-HEADER-END', 'HTML-TITLE-BEGIN', 'HTML-TITLE-END', 'HWND', 'ICON',
'IF', 'IMAGE', 'IMAGE-DOWN', 'IMAGE-INSENSITIVE', 'IMAGE-SIZE',
'IMAGE-SIZE-CHARS', 'IMAGE-SIZE-C', 'IMAGE-SIZE-CH', 'IMAGE-SIZE-CHA',
'IMAGE-SIZE-CHAR', 'IMAGE-SIZE-PIXELS', 'IMAGE-SIZE-P', 'IMAGE-SIZE-PI',
'IMAGE-SIZE-PIX', 'IMAGE-SIZE-PIXE', 'IMAGE-SIZE-PIXEL', 'IMAGE-UP',
'IMMEDIATE-DISPLAY', 'IMPLEMENTS', 'IMPORT', 'IMPORT-PRINCIPAL', 'IN',
'INCREMENT-EXCLUSIVE-ID', 'INDEX', 'INDEXED-REPOSITION', 'INDEX-HINT',
'INDEX-INFORMATION', 'INDICATOR', 'INFORMATION', 'INFO', 'INFOR', 'INFORM',
'INFORMA', 'INFORMAT', 'INFORMATI', 'INFORMATIO', 'IN-HANDLE',
'INHERIT-BGCOLOR', 'INHERIT-BGC', 'INHERIT-BGCO', 'INHERIT-BGCOL',
'INHERIT-BGCOLO', 'INHERIT-FGCOLOR', 'INHERIT-FGC', 'INHERIT-FGCO',
'INHERIT-FGCOL', 'INHERIT-FGCOLO', 'INHERITS', 'INITIAL', 'INIT', 'INITI',
'INITIA', 'INITIAL-DIR', 'INITIAL-FILTER', 'INITIALIZE-DOCUMENT-TYPE',
'INITIATE', 'INNER-CHARS', 'INNER-LINES', 'INPUT', 'INPUT-OUTPUT',
'INPUT-O', 'INPUT-OU', 'INPUT-OUT', 'INPUT-OUTP', 'INPUT-OUTPU',
'INPUT-VALUE', 'INSERT', 'INSERT-ATTRIBUTE', 'INSERT-BACKTAB', 'INSERT-B',
'INSERT-BA', 'INSERT-BAC', 'INSERT-BACK', 'INSERT-BACKT', 'INSERT-BACKTA',
'INSERT-FILE', 'INSERT-ROW', 'INSERT-STRING', 'INSERT-TAB', 'INSERT-T',
'INSERT-TA', 'INTERFACE', 'INTERNAL-ENTRIES', 'INTO', 'INVOKE', 'IS',
'IS-ATTR-SPACE', 'IS-ATTR', 'IS-ATTR-', 'IS-ATTR-S', 'IS-ATTR-SP',
'IS-ATTR-SPA', 'IS-ATTR-SPAC', 'IS-CLASS', 'IS-CLAS', 'IS-LEAD-BYTE',
'IS-ATTR', 'IS-OPEN', 'IS-PARAMETER-SET', 'IS-ROW-SELECTED', 'IS-SELECTED',
'ITEM', 'ITEMS-PER-ROW', 'JOIN', 'JOIN-BY-SQLDB', 'KBLABEL',
'KEEP-CONNECTION-OPEN', 'KEEP-FRAME-Z-ORDER', 'KEEP-FRAME-Z',
'KEEP-FRAME-Z-', 'KEEP-FRAME-Z-O', 'KEEP-FRAME-Z-OR', 'KEEP-FRAME-Z-ORD',
'KEEP-FRAME-Z-ORDE', 'KEEP-MESSAGES', 'KEEP-SECURITY-CACHE',
'KEEP-TAB-ORDER', 'KEY', 'KEYCODE', 'KEY-CODE', 'KEYFUNCTION', 'KEYFUNC',
'KEYFUNCT', 'KEYFUNCTI', 'KEYFUNCTIO', 'KEY-FUNCTION', 'KEY-FUNC',
'KEY-FUNCT', 'KEY-FUNCTI', 'KEY-FUNCTIO', 'KEYLABEL', 'KEY-LABEL', 'KEYS',
'KEYWORD', 'KEYWORD-ALL', 'LABEL', 'LABEL-BGCOLOR', 'LABEL-BGC',
'LABEL-BGCO', 'LABEL-BGCOL', 'LABEL-BGCOLO', 'LABEL-DCOLOR', 'LABEL-DC',
'LABEL-DCO', 'LABEL-DCOL', 'LABEL-DCOLO', 'LABEL-FGCOLOR', 'LABEL-FGC',
'LABEL-FGCO', 'LABEL-FGCOL', 'LABEL-FGCOLO', 'LABEL-FONT', 'LABEL-PFCOLOR',
'LABEL-PFC', 'LABEL-PFCO', 'LABEL-PFCOL', 'LABEL-PFCOLO', 'LABELS',
'LANDSCAPE', 'LANGUAGES', 'LANGUAGE', 'LARGE', 'LARGE-TO-SMALL', 'LAST',
'LAST-ASYNCH-REQUEST', 'LAST-BATCH', 'LAST-CHILD', 'LAST-EVENT',
'LAST-EVEN', 'LAST-FORM', 'LASTKEY', 'LAST-KEY', 'LAST-OBJECT', 'LAST-OF',
'LAST-PROCEDURE', 'LAST-PROCE', 'LAST-PROCED', 'LAST-PROCEDU',
'LAST-PROCEDUR', 'LAST-SERVER', 'LAST-TAB-ITEM', 'LAST-TAB-I',
'LAST-TAB-IT', 'LAST-TAB-ITE', 'LC', 'LDBNAME', 'LE', 'LEAVE',
'LEFT-ALIGNED', 'LEFT-ALIGN', 'LEFT-ALIGNE', 'LEFT-TRIM', 'LENGTH',
'LIBRARY', 'LIKE', 'LIKE-SEQUENTIAL', 'LINE', 'LINE-COUNTER', 'LINE-COUNT',
'LINE-COUNTE', 'LIST-EVENTS', 'LISTING', 'LISTI', 'LISTIN',
'LIST-ITEM-PAIRS', 'LIST-ITEMS', 'LIST-PROPERTY-NAMES', 'LIST-QUERY-ATTRS',
'LIST-SET-ATTRS', 'LIST-WIDGETS', 'LITERAL-QUESTION', 'LITTLE-ENDIAN',
'LOAD', 'LOAD-DOMAINS', 'LOAD-ICON', 'LOAD-IMAGE', 'LOAD-IMAGE-DOWN',
'LOAD-IMAGE-INSENSITIVE', 'LOAD-IMAGE-UP', 'LOAD-MOUSE-POINTER',
'LOAD-MOUSE-P', 'LOAD-MOUSE-PO', 'LOAD-MOUSE-POI', 'LOAD-MOUSE-POIN',
'LOAD-MOUSE-POINT', 'LOAD-MOUSE-POINTE', 'LOAD-PICTURE', 'LOAD-SMALL-ICON',
'LOCAL-NAME', 'LOCATOR-COLUMN-NUMBER', 'LOCATOR-LINE-NUMBER',
'LOCATOR-PUBLIC-ID', 'LOCATOR-SYSTEM-ID', 'LOCATOR-TYPE', 'LOCKED',
'LOCK-REGISTRATION', 'LOG', 'LOG-AUDIT-EVENT', 'LOGIN-EXPIRATION-TIMESTAMP',
'LOGIN-HOST', 'LOGIN-STATE', 'LOG-MANAGER', 'LOGOUT', 'LOOKAHEAD', 'LOOKUP',
'LT', 'MACHINE-CLASS', 'MANDATORY', 'MANUAL-HIGHLIGHT', 'MAP',
'MARGIN-EXTRA', 'MARGIN-HEIGHT-CHARS', 'MARGIN-HEIGHT', 'MARGIN-HEIGHT-',
'MARGIN-HEIGHT-C', 'MARGIN-HEIGHT-CH', 'MARGIN-HEIGHT-CHA',
'MARGIN-HEIGHT-CHAR', 'MARGIN-HEIGHT-PIXELS', 'MARGIN-HEIGHT-P',
'MARGIN-HEIGHT-PI', 'MARGIN-HEIGHT-PIX', 'MARGIN-HEIGHT-PIXE',
'MARGIN-HEIGHT-PIXEL', 'MARGIN-WIDTH-CHARS', 'MARGIN-WIDTH',
'MARGIN-WIDTH-', 'MARGIN-WIDTH-C', 'MARGIN-WIDTH-CH', 'MARGIN-WIDTH-CHA',
'MARGIN-WIDTH-CHAR', 'MARGIN-WIDTH-PIXELS', 'MARGIN-WIDTH-P',
'MARGIN-WIDTH-PI', 'MARGIN-WIDTH-PIX', 'MARGIN-WIDTH-PIXE',
'MARGIN-WIDTH-PIXEL', 'MARK-NEW', 'MARK-ROW-STATE', 'MATCHES', 'MAX',
'MAX-BUTTON', 'MAX-CHARS', 'MAX-DATA-GUESS', 'MAX-HEIGHT',
'MAX-HEIGHT-CHARS', 'MAX-HEIGHT-C', 'MAX-HEIGHT-CH', 'MAX-HEIGHT-CHA',
'MAX-HEIGHT-CHAR', 'MAX-HEIGHT-PIXELS', 'MAX-HEIGHT-P', 'MAX-HEIGHT-PI',
'MAX-HEIGHT-PIX', 'MAX-HEIGHT-PIXE', 'MAX-HEIGHT-PIXEL', 'MAXIMIZE',
'MAXIMUM', 'MAX', 'MAXI', 'MAXIM', 'MAXIMU', 'MAXIMUM-LEVEL', 'MAX-ROWS',
'MAX-SIZE', 'MAX-VALUE', 'MAX-VAL', 'MAX-VALU', 'MAX-WIDTH',
'MAX-WIDTH-CHARS', 'MAX-WIDTH', 'MAX-WIDTH-', 'MAX-WIDTH-C', 'MAX-WIDTH-CH',
'MAX-WIDTH-CHA', 'MAX-WIDTH-CHAR', 'MAX-WIDTH-PIXELS', 'MAX-WIDTH-P',
'MAX-WIDTH-PI', 'MAX-WIDTH-PIX', 'MAX-WIDTH-PIXE', 'MAX-WIDTH-PIXEL',
'MD5-DIGEST', 'MEMBER', 'MEMPTR-TO-NODE-VALUE', 'MENU', 'MENUBAR',
'MENU-BAR', 'MENU-ITEM', 'MENU-KEY', 'MENU-K', 'MENU-KE', 'MENU-MOUSE',
'MENU-M', 'MENU-MO', 'MENU-MOU', 'MENU-MOUS', 'MERGE-BY-FIELD', 'MESSAGE',
'MESSAGE-AREA', 'MESSAGE-AREA-FONT', 'MESSAGE-LINES', 'METHOD', 'MIN',
'MIN-BUTTON', 'MIN-COLUMN-WIDTH-CHARS', 'MIN-COLUMN-WIDTH-C',
'MIN-COLUMN-WIDTH-CH', 'MIN-COLUMN-WIDTH-CHA', 'MIN-COLUMN-WIDTH-CHAR',
'MIN-COLUMN-WIDTH-PIXELS', 'MIN-COLUMN-WIDTH-P', 'MIN-COLUMN-WIDTH-PI',
'MIN-COLUMN-WIDTH-PIX', 'MIN-COLUMN-WIDTH-PIXE', 'MIN-COLUMN-WIDTH-PIXEL',
'MIN-HEIGHT-CHARS', 'MIN-HEIGHT', 'MIN-HEIGHT-', 'MIN-HEIGHT-C',
'MIN-HEIGHT-CH', 'MIN-HEIGHT-CHA', 'MIN-HEIGHT-CHAR', 'MIN-HEIGHT-PIXELS',
'MIN-HEIGHT-P', 'MIN-HEIGHT-PI', 'MIN-HEIGHT-PIX', 'MIN-HEIGHT-PIXE',
'MIN-HEIGHT-PIXEL', 'MINIMUM', 'MIN', 'MINI', 'MINIM', 'MINIMU', 'MIN-SIZE',
'MIN-VALUE', 'MIN-VAL', 'MIN-VALU', 'MIN-WIDTH-CHARS', 'MIN-WIDTH',
'MIN-WIDTH-', 'MIN-WIDTH-C', 'MIN-WIDTH-CH', 'MIN-WIDTH-CHA',
'MIN-WIDTH-CHAR', 'MIN-WIDTH-PIXELS', 'MIN-WIDTH-P', 'MIN-WIDTH-PI',
'MIN-WIDTH-PIX', 'MIN-WIDTH-PIXE', 'MIN-WIDTH-PIXEL', 'MODIFIED', 'MODULO',
'MOD', 'MODU', 'MODUL', 'MONTH', 'MOUSE', 'MOUSE-POINTER', 'MOUSE-P',
'MOUSE-PO', 'MOUSE-POI', 'MOUSE-POIN', 'MOUSE-POINT', 'MOUSE-POINTE',
'MOVABLE', 'MOVE-AFTER-TAB-ITEM', 'MOVE-AFTER', 'MOVE-AFTER-',
'MOVE-AFTER-T', 'MOVE-AFTER-TA', 'MOVE-AFTER-TAB', 'MOVE-AFTER-TAB-',
'MOVE-AFTER-TAB-I', 'MOVE-AFTER-TAB-IT', 'MOVE-AFTER-TAB-ITE',
'MOVE-BEFORE-TAB-ITEM', 'MOVE-BEFOR', 'MOVE-BEFORE', 'MOVE-BEFORE-',
'MOVE-BEFORE-T', 'MOVE-BEFORE-TA', 'MOVE-BEFORE-TAB', 'MOVE-BEFORE-TAB-',
'MOVE-BEFORE-TAB-I', 'MOVE-BEFORE-TAB-IT', 'MOVE-BEFORE-TAB-ITE',
'MOVE-COLUMN', 'MOVE-COL', 'MOVE-COLU', 'MOVE-COLUM', 'MOVE-TO-BOTTOM',
'MOVE-TO-B', 'MOVE-TO-BO', 'MOVE-TO-BOT', 'MOVE-TO-BOTT', 'MOVE-TO-BOTTO',
'MOVE-TO-EOF', 'MOVE-TO-TOP', 'MOVE-TO-T', 'MOVE-TO-TO', 'MPE',
'MULTI-COMPILE', 'MULTIPLE', 'MULTIPLE-KEY', 'MULTITASKING-INTERVAL',
'MUST-EXIST', 'NAME', 'NAMESPACE-PREFIX', 'NAMESPACE-URI', 'NATIVE', 'NE',
'NEEDS-APPSERVER-PROMPT', 'NEEDS-PROMPT', 'NEW', 'NEW-INSTANCE', 'NEW-ROW',
'NEXT', 'NEXT-COLUMN', 'NEXT-PROMPT', 'NEXT-ROWID', 'NEXT-SIBLING',
'NEXT-TAB-ITEM', 'NEXT-TAB-I', 'NEXT-TAB-IT', 'NEXT-TAB-ITE', 'NEXT-VALUE',
'NO', 'NO-APPLY', 'NO-ARRAY-MESSAGE', 'NO-ASSIGN', 'NO-ATTR-LIST',
'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-L', 'NO-ATTR-LI', 'NO-ATTR-LIS',
'NO-ATTR-SPACE', 'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-S', 'NO-ATTR-SP',
'NO-ATTR-SPA', 'NO-ATTR-SPAC', 'NO-AUTO-VALIDATE', 'NO-BIND-WHERE',
'NO-BOX', 'NO-CONSOLE', 'NO-CONVERT', 'NO-CONVERT-3D-COLORS',
'NO-CURRENT-VALUE', 'NO-DEBUG', 'NODE-VALUE-TO-MEMPTR', 'NO-DRAG',
'NO-ECHO', 'NO-EMPTY-SPACE', 'NO-ERROR', 'NO-FILL', 'NO-F', 'NO-FI',
'NO-FIL', 'NO-FOCUS', 'NO-HELP', 'NO-HIDE', 'NO-INDEX-HINT',
'NO-INHERIT-BGCOLOR', 'NO-INHERIT-BGC', 'NO-INHERIT-BGCO',
'NO-INHERIT-BGCOL', 'NO-INHERIT-BGCOLO', 'NO-INHERIT-FGCOLOR',
'NO-INHERIT-FGC', 'NO-INHERIT-FGCO',
'NO-INHERIT-FGCOL', 'NO-INHERIT-FGCOLO', 'NO-JOIN-BY-SQLDB', 'NO-LABELS',
'NO-LABE', 'NO-LOBS', 'NO-LOCK', 'NO-LOOKAHEAD', 'NO-MAP', 'NO-MESSAGE',
'NO-MES', 'NO-MESS', 'NO-MESSA', 'NO-MESSAG', 'NONAMESPACE-SCHEMA-LOCATION',
'NONE', 'NO-PAUSE', 'NO-PREFETCH', 'NO-PREFE', 'NO-PREFET', 'NO-PREFETC',
'NORMALIZE', 'NO-ROW-MARKERS', 'NO-SCROLLBAR-VERTICAL',
'NO-SEPARATE-CONNECTION', 'NO-SEPARATORS', 'NOT', 'NO-TAB-STOP',
'NOT-ACTIVE', 'NO-UNDERLINE', 'NO-UND', 'NO-UNDE', 'NO-UNDER', 'NO-UNDERL',
'NO-UNDERLI', 'NO-UNDERLIN', 'NO-UNDO', 'NO-VALIDATE', 'NO-VAL', 'NO-VALI',
'NO-VALID', 'NO-VALIDA', 'NO-VALIDAT', 'NOW', 'NO-WAIT', 'NO-WORD-WRAP',
'NULL', 'NUM-ALIASES', 'NUM-ALI', 'NUM-ALIA', 'NUM-ALIAS', 'NUM-ALIASE',
'NUM-BUFFERS', 'NUM-BUTTONS', 'NUM-BUT', 'NUM-BUTT', 'NUM-BUTTO',
'NUM-BUTTON', 'NUM-COLUMNS', 'NUM-COL', 'NUM-COLU', 'NUM-COLUM',
'NUM-COLUMN', 'NUM-COPIES', 'NUM-DBS', 'NUM-DROPPED-FILES', 'NUM-ENTRIES',
'NUMERIC', 'NUMERIC-FORMAT', 'NUMERIC-F', 'NUMERIC-FO', 'NUMERIC-FOR',
'NUMERIC-FORM', 'NUMERIC-FORMA', 'NUM-FIELDS', 'NUM-FORMATS', 'NUM-ITEMS',
'NUM-ITERATIONS', 'NUM-LINES', 'NUM-LOCKED-COLUMNS', 'NUM-LOCKED-COL',
'NUM-LOCKED-COLU', 'NUM-LOCKED-COLUM', 'NUM-LOCKED-COLUMN', 'NUM-MESSAGES',
'NUM-PARAMETERS', 'NUM-REFERENCES', 'NUM-REPLACED', 'NUM-RESULTS',
'NUM-SELECTED-ROWS', 'NUM-SELECTED-WIDGETS', 'NUM-SELECTED',
'NUM-SELECTED-', 'NUM-SELECTED-W', 'NUM-SELECTED-WI', 'NUM-SELECTED-WID',
'NUM-SELECTED-WIDG', 'NUM-SELECTED-WIDGE', 'NUM-SELECTED-WIDGET',
'NUM-TABS', 'NUM-TO-RETAIN', 'NUM-VISIBLE-COLUMNS', 'OCTET-LENGTH', 'OF',
'OFF', 'OK', 'OK-CANCEL', 'OLD', 'ON', 'ON-FRAME-BORDER', 'ON-FRAME',
'ON-FRAME-', 'ON-FRAME-B', 'ON-FRAME-BO', 'ON-FRAME-BOR', 'ON-FRAME-BORD',
'ON-FRAME-BORDE', 'OPEN', 'OPSYS', 'OPTION', 'OR', 'ORDERED-JOIN',
'ORDINAL', 'OS-APPEND', 'OS-COMMAND', 'OS-COPY', 'OS-CREATE-DIR',
'OS-DELETE', 'OS-DIR', 'OS-DRIVES', 'OS-DRIVE', 'OS-ERROR', 'OS-GETENV',
'OS-RENAME', 'OTHERWISE', 'OUTPUT', 'OVERLAY', 'OVERRIDE', 'OWNER', 'PAGE',
'PAGE-BOTTOM', 'PAGE-BOT', 'PAGE-BOTT', 'PAGE-BOTTO', 'PAGED',
'PAGE-NUMBER', 'PAGE-NUM', 'PAGE-NUMB', 'PAGE-NUMBE', 'PAGE-SIZE',
'PAGE-TOP', 'PAGE-WIDTH', 'PAGE-WID', 'PAGE-WIDT', 'PARAMETER', 'PARAM',
'PARAME', 'PARAMET', 'PARAMETE', 'PARENT', 'PARSE-STATUS', 'PARTIAL-KEY',
'PASCAL', 'PASSWORD-FIELD', 'PATHNAME', 'PAUSE', 'PBE-HASH-ALGORITHM',
'PBE-HASH-ALG', 'PBE-HASH-ALGO', 'PBE-HASH-ALGOR', 'PBE-HASH-ALGORI',
'PBE-HASH-ALGORIT', 'PBE-HASH-ALGORITH', 'PBE-KEY-ROUNDS', 'PDBNAME',
'PERSISTENT', 'PERSIST', 'PERSISTE', 'PERSISTEN',
'PERSISTENT-CACHE-DISABLED', 'PFCOLOR', 'PFC', 'PFCO', 'PFCOL', 'PFCOLO',
'PIXELS', 'PIXELS-PER-COLUMN', 'PIXELS-PER-COL', 'PIXELS-PER-COLU',
'PIXELS-PER-COLUM', 'PIXELS-PER-ROW', 'POPUP-MENU', 'POPUP-M', 'POPUP-ME',
'POPUP-MEN', 'POPUP-ONLY', 'POPUP-O', 'POPUP-ON', 'POPUP-ONL', 'PORTRAIT',
'POSITION', 'PRECISION', 'PREFER-DATASET', 'PREPARED', 'PREPARE-STRING',
'PREPROCESS', 'PREPROC', 'PREPROCE', 'PREPROCES', 'PRESELECT', 'PRESEL',
'PRESELE', 'PRESELEC', 'PREV', 'PREV-COLUMN', 'PREV-SIBLING',
'PREV-TAB-ITEM', 'PREV-TAB-I', 'PREV-TAB-IT', 'PREV-TAB-ITE', 'PRIMARY',
'PRINTER', 'PRINTER-CONTROL-HANDLE', 'PRINTER-HDC', 'PRINTER-NAME',
'PRINTER-PORT', 'PRINTER-SETUP', 'PRIVATE', 'PRIVATE-DATA', 'PRIVATE-D',
'PRIVATE-DA', 'PRIVATE-DAT', 'PRIVILEGES', 'PROCEDURE', 'PROCE', 'PROCED',
'PROCEDU', 'PROCEDUR', 'PROCEDURE-CALL-TYPE', 'PROCESS', 'PROC-HANDLE',
'PROC-HA', 'PROC-HAN', 'PROC-HAND', 'PROC-HANDL', 'PROC-STATUS', 'PROC-ST',
'PROC-STA', 'PROC-STAT', 'PROC-STATU', 'PROC-TEXT', 'PROC-TEXT-BUFFE',
'PROFILER', 'PROGRAM-NAME', 'PROGRESS', 'PROGRESS-SOURCE', 'PROGRESS-S',
'PROGRESS-SO', 'PROGRESS-SOU', 'PROGRESS-SOUR', 'PROGRESS-SOURC', 'PROMPT',
'PROMPT-FOR', 'PROMPT-F', 'PROMPT-FO', 'PROMSGS', 'PROPATH', 'PROPERTY',
'PROTECTED', 'PROVERSION', 'PROVERS', 'PROVERSI', 'PROVERSIO', 'PROXY',
'PROXY-PASSWORD', 'PROXY-USERID', 'PUBLIC', 'PUBLIC-ID', 'PUBLISH',
'PUBLISHED-EVENTS', 'PUT', 'PUTBYTE', 'PUT-BYTE', 'PUT-DOUBLE', 'PUT-FLOAT',
'PUT-INT64', 'PUT-KEY-VALUE', 'PUT-KEY-VAL', 'PUT-KEY-VALU', 'PUT-LONG',
'PUT-SHORT', 'PUT-STRING', 'PUT-UNSIGNED-LONG', 'QUERY', 'QUERY-CLOSE',
'QUERY-OFF-END', 'QUERY-OPEN', 'QUERY-PREPARE', 'QUERY-TUNING', 'QUESTION',
'QUIT', 'QUOTER', 'RADIO-BUTTONS', 'RADIO-SET', 'RANDOM', 'RAW-TRANSFER',
'RCODE-INFORMATION', 'RCODE-INFO', 'RCODE-INFOR', 'RCODE-INFORM',
'RCODE-INFORMA', 'RCODE-INFORMAT', 'RCODE-INFORMATI', 'RCODE-INFORMATIO',
'READ-AVAILABLE', 'READ-EXACT-NUM', 'READ-FILE', 'READKEY', 'READ-ONLY',
'READ-XML', 'READ-XMLSCHEMA', 'REAL', 'RECORD-LENGTH', 'RECTANGLE', 'RECT',
'RECTA', 'RECTAN', 'RECTANG', 'RECTANGL', 'RECURSIVE', 'REFERENCE-ONLY',
'REFRESH', 'REFRESHABLE', 'REFRESH-AUDIT-POLICY', 'REGISTER-DOMAIN',
'RELEASE', 'REMOTE', 'REMOVE-EVENTS-PROCEDURE', 'REMOVE-SUPER-PROCEDURE',
'REPEAT', 'REPLACE', 'REPLACE-SELECTION-TEXT', 'REPOSITION',
'REPOSITION-BACKWARD', 'REPOSITION-FORWARD', 'REPOSITION-MODE',
'REPOSITION-TO-ROW', 'REPOSITION-TO-ROWID', 'REQUEST', 'RESET', 'RESIZABLE',
'RESIZA', 'RESIZAB', 'RESIZABL', 'RESIZE', 'RESTART-ROW', 'RESTART-ROWID',
'RETAIN', 'RETAIN-SHAPE', 'RETRY', 'RETRY-CANCEL', 'RETURN',
'RETURN-INSERTED', 'RETURN-INS', 'RETURN-INSE', 'RETURN-INSER',
'RETURN-INSERT', 'RETURN-INSERTE', 'RETURNS', 'RETURN-TO-START-DIR',
'RETURN-TO-START-DI', 'RETURN-VALUE', 'RETURN-VAL', 'RETURN-VALU',
'RETURN-VALUE-DATA-TYPE', 'REVERSE-FROM', 'REVERT', 'REVOKE', 'RGB-VALUE',
'RIGHT-ALIGNED', 'RIGHT-ALIGN', 'RIGHT-ALIGNE', 'RIGHT-TRIM', 'R-INDEX',
'ROLES', 'ROUND', 'ROUTINE-LEVEL', 'ROW', 'ROW-HEIGHT-CHARS', 'HEIGHT',
'ROW-HEIGHT-PIXELS', 'HEIGHT-P', 'ROW-MARKERS', 'ROW-OF', 'ROW-RESIZABLE',
'RULE', 'RUN', 'RUN-PROCEDURE', 'SAVE', 'SAVE-AS', 'SAVE-FILE',
'SAX-COMPLETE', 'SAX-COMPLE', 'SAX-COMPLET', 'SAX-PARSE', 'SAX-PARSE-FIRST',
'SAX-PARSE-NEXT', 'SAX-PARSER-ERROR', 'SAX-RUNNING', 'SAX-UNINITIALIZED',
'SAX-WRITE-BEGIN', 'SAX-WRITE-COMPLETE', 'SAX-WRITE-CONTENT',
'SAX-WRITE-ELEMENT', 'SAX-WRITE-ERROR', 'SAX-WRITE-IDLE', 'SAX-WRITER',
'SAX-WRITE-TAG', 'SCHEMA', 'SCHEMA-LOCATION', 'SCHEMA-MARSHAL',
'SCHEMA-PATH', 'SCREEN', 'SCREEN-IO', 'SCREEN-LINES', 'SCREEN-VALUE',
'SCREEN-VAL', 'SCREEN-VALU', 'SCROLL', 'SCROLLABLE', 'SCROLLBAR-HORIZONTAL',
'SCROLLBAR-H', 'SCROLLBAR-HO', 'SCROLLBAR-HOR', 'SCROLLBAR-HORI',
'SCROLLBAR-HORIZ', 'SCROLLBAR-HORIZO', 'SCROLLBAR-HORIZON',
'SCROLLBAR-HORIZONT', 'SCROLLBAR-HORIZONTA', 'SCROLL-BARS',
'SCROLLBAR-VERTICAL', 'SCROLLBAR-V', 'SCROLLBAR-VE', 'SCROLLBAR-VER',
'SCROLLBAR-VERT', 'SCROLLBAR-VERTI', 'SCROLLBAR-VERTIC',
'SCROLLBAR-VERTICA', 'SCROLL-DELTA', 'SCROLLED-ROW-POSITION',
'SCROLLED-ROW-POS', 'SCROLLED-ROW-POSI', 'SCROLLED-ROW-POSIT',
'SCROLLED-ROW-POSITI', 'SCROLLED-ROW-POSITIO', 'SCROLLING', 'SCROLL-OFFSET',
'SCROLL-TO-CURRENT-ROW', 'SCROLL-TO-ITEM', 'SCROLL-TO-I', 'SCROLL-TO-IT',
'SCROLL-TO-ITE', 'SCROLL-TO-SELECTED-ROW', 'SDBNAME', 'SEAL',
'SEAL-TIMESTAMP', 'SEARCH', 'SEARCH-SELF', 'SEARCH-TARGET', 'SECTION',
'SECURITY-POLICY', 'SEEK', 'SELECT', 'SELECTABLE', 'SELECT-ALL', 'SELECTED',
'SELECT-FOCUSED-ROW', 'SELECTION', 'SELECTION-END', 'SELECTION-LIST',
'SELECTION-START', 'SELECTION-TEXT', 'SELECT-NEXT-ROW', 'SELECT-PREV-ROW',
'SELECT-ROW', 'SELF', 'SEND', 'SEND-SQL-STATEMENT', 'SEND-SQL', 'SENSITIVE',
'SEPARATE-CONNECTION', 'SEPARATOR-FGCOLOR', 'SEPARATORS', 'SERVER',
'SERVER-CONNECTION-BOUND', 'SERVER-CONNECTION-BOUND-REQUEST',
'SERVER-CONNECTION-CONTEXT', 'SERVER-CONNECTION-ID',
'SERVER-OPERATING-MODE', 'SESSION', 'SESSION-ID', 'SET', 'SET-APPL-CONTEXT',
'SET-ATTR-CALL-TYPE', 'SET-ATTRIBUTE-NODE', 'SET-BLUE-VALUE', 'SET-BLUE',
'SET-BLUE-', 'SET-BLUE-V', 'SET-BLUE-VA', 'SET-BLUE-VAL', 'SET-BLUE-VALU',
'SET-BREAK', 'SET-BUFFERS', 'SET-CALLBACK', 'SET-CLIENT', 'SET-COMMIT',
'SET-CONTENTS', 'SET-CURRENT-VALUE', 'SET-DB-CLIENT', 'SET-DYNAMIC',
'SET-EVENT-MANAGER-OPTION', 'SET-GREEN-VALUE', 'SET-GREEN', 'SET-GREEN-',
'SET-GREEN-V', 'SET-GREEN-VA', 'SET-GREEN-VAL', 'SET-GREEN-VALU',
'SET-INPUT-SOURCE', 'SET-OPTION', 'SET-OUTPUT-DESTINATION', 'SET-PARAMETER',
'SET-POINTER-VALUE', 'SET-PROPERTY', 'SET-RED-VALUE', 'SET-RED', 'SET-RED-',
'SET-RED-V', 'SET-RED-VA', 'SET-RED-VAL', 'SET-RED-VALU',
'SET-REPOSITIONED-ROW', 'SET-RGB-VALUE', 'SET-ROLLBACK', 'SET-SELECTION',
'SET-SIZE', 'SET-SORT-ARROW', 'SETUSERID', 'SETUSER', 'SETUSERI',
'SET-WAIT-STATE', 'SHA1-DIGEST', 'SHARED', 'SHARE-LOCK', 'SHARE', 'SHARE-',
'SHARE-L', 'SHARE-LO', 'SHARE-LOC', 'SHOW-IN-TASKBAR', 'SHOW-STATS',
'SHOW-STAT', 'SIDE-LABEL-HANDLE', 'SIDE-LABEL-H', 'SIDE-LABEL-HA',
'SIDE-LABEL-HAN', 'SIDE-LABEL-HAND', 'SIDE-LABEL-HANDL', 'SIDE-LABELS',
'SIDE-LAB', 'SIDE-LABE', 'SIDE-LABEL', 'SILENT', 'SIMPLE', 'SINGLE', 'SIZE',
'SIZE-CHARS', 'SIZE-C', 'SIZE-CH', 'SIZE-CHA', 'SIZE-CHAR', 'SIZE-PIXELS',
'SIZE-P', 'SIZE-PI', 'SIZE-PIX', 'SIZE-PIXE', 'SIZE-PIXEL', 'SKIP',
'SKIP-DELETED-RECORD', 'SLIDER', 'SMALL-ICON', 'SMALLINT', 'SMALL-TITLE',
'SOME', 'SORT', 'SORT-ASCENDING', 'SORT-NUMBER', 'SOURCE',
'SOURCE-PROCEDURE', 'SPACE', 'SQL', 'SQRT', 'SSL-SERVER-NAME', 'STANDALONE',
'START', 'START-DOCUMENT', 'START-ELEMENT', 'START-MOVE', 'START-RESIZE',
'START-ROW-RESIZE', 'STATE-DETAIL', 'STATIC', 'STATUS', 'STATUS-AREA',
'STATUS-AREA-FONT', 'STDCALL', 'STOP', 'STOP-PARSING', 'STOPPED', 'STOPPE',
'STORED-PROCEDURE', 'STORED-PROC', 'STORED-PROCE', 'STORED-PROCED',
'STORED-PROCEDU', 'STORED-PROCEDUR', 'STREAM', 'STREAM-HANDLE', 'STREAM-IO',
'STRETCH-TO-FIT', 'STRICT', 'STRING', 'STRING-VALUE', 'STRING-XREF',
'SUB-AVERAGE', 'SUB-AVE', 'SUB-AVER', 'SUB-AVERA', 'SUB-AVERAG',
'SUB-COUNT', 'SUB-MAXIMUM', 'SUB-MAX', 'SUB-MAXI', 'SUB-MAXIM',
'SUB-MAXIMU', 'SUB-MENU', 'SUB-MINIMUM', 'SUB-MIN', 'SUBSCRIBE',
'SUBSTITUTE', 'SUBST', 'SUBSTI', 'SUBSTIT', 'SUBSTITU', 'SUBSTITUT',
'SUBSTRING', 'SUBSTR', 'SUBSTRI', 'SUBSTRIN', 'SUB-TOTAL', 'SUBTYPE', 'SUM',
'SUPER', 'SUPER-PROCEDURES', 'SUPPRESS-NAMESPACE-PROCESSING',
'SUPPRESS-WARNINGS', 'SUPPRESS-W', 'SUPPRESS-WA', 'SUPPRESS-WAR',
'SUPPRESS-WARN', 'SUPPRESS-WARNI', 'SUPPRESS-WARNIN', 'SUPPRESS-WARNING',
'SYMMETRIC-ENCRYPTION-ALGORITHM', 'SYMMETRIC-ENCRYPTION-IV',
'SYMMETRIC-ENCRYPTION-KEY', 'SYMMETRIC-SUPPORT', 'SYSTEM-ALERT-BOXES',
'SYSTEM-ALERT', 'SYSTEM-ALERT-', 'SYSTEM-ALERT-B', 'SYSTEM-ALERT-BO',
'SYSTEM-ALERT-BOX', 'SYSTEM-ALERT-BOXE', 'SYSTEM-DIALOG', 'SYSTEM-HELP',
'SYSTEM-ID', 'TABLE', 'TABLE-HANDLE', 'TABLE-NUMBER', 'TAB-POSITION',
'TAB-STOP', 'TARGET', 'TARGET-PROCEDURE', 'TEMP-DIRECTORY', 'TEMP-DIR',
'TEMP-DIRE', 'TEMP-DIREC', 'TEMP-DIRECT', 'TEMP-DIRECTO', 'TEMP-DIRECTOR',
'TEMP-TABLE', 'TEMP-TABLE-PREPARE', 'TERM', 'TERMINAL', 'TERM', 'TERMI',
'TERMIN', 'TERMINA', 'TERMINATE', 'TEXT', 'TEXT-CURSOR', 'TEXT-SEG-GROW',
'TEXT-SELECTED', 'THEN', 'THIS-OBJECT', 'THIS-PROCEDURE', 'THREE-D',
'THROW', 'THROUGH', 'THRU', 'TIC-MARKS', 'TIME', 'TIME-SOURCE', 'TITLE',
'TITLE-BGCOLOR', 'TITLE-BGC', 'TITLE-BGCO', 'TITLE-BGCOL', 'TITLE-BGCOLO',
'TITLE-DCOLOR', 'TITLE-DC', 'TITLE-DCO', 'TITLE-DCOL', 'TITLE-DCOLO',
'TITLE-FGCOLOR', 'TITLE-FGC', 'TITLE-FGCO', 'TITLE-FGCOL', 'TITLE-FGCOLO',
'TITLE-FONT', 'TITLE-FO', 'TITLE-FON', 'TO', 'TODAY', 'TOGGLE-BOX',
'TOOLTIP', 'TOOLTIPS', 'TOPIC', 'TOP-NAV-QUERY', 'TOP-ONLY', 'TO-ROWID',
'TOTAL', 'TRAILING', 'TRANS', 'TRANSACTION', 'TRANSACTION-MODE',
'TRANS-INIT-PROCEDURE', 'TRANSPARENT', 'TRIGGER', 'TRIGGERS', 'TRIM',
'TRUE', 'TRUNCATE', 'TRUNC', 'TRUNCA', 'TRUNCAT', 'TYPE', 'TYPE-OF',
'UNBOX', 'UNBUFFERED', 'UNBUFF', 'UNBUFFE', 'UNBUFFER', 'UNBUFFERE',
'UNDERLINE', 'UNDERL', 'UNDERLI', 'UNDERLIN', 'UNDO', 'UNFORMATTED',
'UNFORM', 'UNFORMA', 'UNFORMAT', 'UNFORMATT', 'UNFORMATTE', 'UNION',
'UNIQUE', 'UNIQUE-ID', 'UNIQUE-MATCH', 'UNIX', 'UNLESS-HIDDEN', 'UNLOAD',
'UNSIGNED-LONG', 'UNSUBSCRIBE', 'UP', 'UPDATE', 'UPDATE-ATTRIBUTE', 'URL',
'URL-DECODE', 'URL-ENCODE', 'URL-PASSWORD', 'URL-USERID', 'USE',
'USE-DICT-EXPS', 'USE-FILENAME', 'USE-INDEX', 'USER', 'USE-REVVIDEO',
'USERID', 'USER-ID', 'USE-TEXT', 'USE-UNDERLINE', 'USE-WIDGET-POOL',
'USING', 'V6DISPLAY', 'V6FRAME', 'VALIDATE', 'VALIDATE-EXPRESSION',
'VALIDATE-MESSAGE', 'VALIDATE-SEAL', 'VALIDATION-ENABLED', 'VALID-EVENT',
'VALID-HANDLE', 'VALID-OBJECT', 'VALUE', 'VALUE-CHANGED', 'VALUES',
'VARIABLE', 'VAR', 'VARI', 'VARIA', 'VARIAB', 'VARIABL', 'VERBOSE',
'VERSION', 'VERTICAL', 'VERT', 'VERTI', 'VERTIC', 'VERTICA', 'VIEW',
'VIEW-AS', 'VIEW-FIRST-COLUMN-ON-REOPEN', 'VIRTUAL-HEIGHT-CHARS',
'VIRTUAL-HEIGHT', 'VIRTUAL-HEIGHT-', 'VIRTUAL-HEIGHT-C',
'VIRTUAL-HEIGHT-CH', 'VIRTUAL-HEIGHT-CHA', 'VIRTUAL-HEIGHT-CHAR',
'VIRTUAL-HEIGHT-PIXELS', 'VIRTUAL-HEIGHT-P', 'VIRTUAL-HEIGHT-PI',
'VIRTUAL-HEIGHT-PIX', 'VIRTUAL-HEIGHT-PIXE', 'VIRTUAL-HEIGHT-PIXEL',
'VIRTUAL-WIDTH-CHARS', 'VIRTUAL-WIDTH', 'VIRTUAL-WIDTH-', 'VIRTUAL-WIDTH-C',
'VIRTUAL-WIDTH-CH', 'VIRTUAL-WIDTH-CHA', 'VIRTUAL-WIDTH-CHAR',
'VIRTUAL-WIDTH-PIXELS', 'VIRTUAL-WIDTH-P', 'VIRTUAL-WIDTH-PI',
'VIRTUAL-WIDTH-PIX', 'VIRTUAL-WIDTH-PIXE', 'VIRTUAL-WIDTH-PIXEL', 'VISIBLE',
'VOID', 'WAIT', 'WAIT-FOR', 'WARNING', 'WEB-CONTEXT', 'WEEKDAY', 'WHEN',
'WHERE', 'WHILE', 'WIDGET', 'WIDGET-ENTER', 'WIDGET-E', 'WIDGET-EN',
'WIDGET-ENT', 'WIDGET-ENTE', 'WIDGET-ID', 'WIDGET-LEAVE', 'WIDGET-L',
'WIDGET-LE', 'WIDGET-LEA', 'WIDGET-LEAV', 'WIDGET-POOL', 'WIDTH',
'WIDTH-CHARS', 'WIDTH', 'WIDTH-', 'WIDTH-C', 'WIDTH-CH', 'WIDTH-CHA',
'WIDTH-CHAR', 'WIDTH-PIXELS', 'WIDTH-P', 'WIDTH-PI', 'WIDTH-PIX',
'WIDTH-PIXE', 'WIDTH-PIXEL', 'WINDOW', 'WINDOW-MAXIMIZED', 'WINDOW-MAXIM',
'WINDOW-MAXIMI', 'WINDOW-MAXIMIZ', 'WINDOW-MAXIMIZE', 'WINDOW-MINIMIZED',
'WINDOW-MINIM', 'WINDOW-MINIMI', 'WINDOW-MINIMIZ', 'WINDOW-MINIMIZE',
'WINDOW-NAME', 'WINDOW-NORMAL', 'WINDOW-STATE', 'WINDOW-STA', 'WINDOW-STAT',
'WINDOW-SYSTEM', 'WITH', 'WORD-INDEX', 'WORD-WRAP',
'WORK-AREA-HEIGHT-PIXELS', 'WORK-AREA-WIDTH-PIXELS', 'WORK-AREA-X',
'WORK-AREA-Y', 'WORKFILE', 'WORK-TABLE', 'WORK-TAB', 'WORK-TABL', 'WRITE',
'WRITE-CDATA', 'WRITE-CHARACTERS', 'WRITE-COMMENT', 'WRITE-DATA-ELEMENT',
'WRITE-EMPTY-ELEMENT', 'WRITE-ENTITY-REF', 'WRITE-EXTERNAL-DTD',
'WRITE-FRAGMENT', 'WRITE-MESSAGE', 'WRITE-PROCESSING-INSTRUCTION',
'WRITE-STATUS', 'WRITE-XML', 'WRITE-XMLSCHEMA', 'X', 'XCODE',
'XML-DATA-TYPE', 'XML-NODE-TYPE', 'XML-SCHEMA-PATH',
'XML-SUPPRESS-NAMESPACE-PROCESSING', 'X-OF', 'XREF', 'XREF-XML', 'Y',
'YEAR', 'YEAR-OFFSET', 'YES', 'YES-NO', 'YES-NO-CANCEL', 'Y-OF'
]
| mit |
ktan2020/legacy-automation | win/Lib/site-packages/requests/packages/charade/latin1prober.py | 50 | 5387 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
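# Worked lookup (a sketch mirroring the indexing done in feed() below):
# an ASCII small letter followed by an accented capital vowel is the
# cell Latin1ClassModel[ASS * CLASS_NUM + ACV], which is 1 ("very
# unlikely" for windows-1252), while ASS -> ASV is 3 ("very likely").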
class Latin1Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self.reset()
def reset(self):
self._mLastCharClass = OTH
self._mFreqCounter = [0] * FREQ_CAT_NUM
CharSetProber.reset(self)
def get_charset_name(self):
return "windows-1252"
def feed(self, aBuf):
aBuf = self.filter_with_english_letters(aBuf)
for c in aBuf:
charClass = Latin1_CharToClass[wrap_ord(c)]
freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
+ charClass]
if freq == 0:
self._mState = eNotMe
break
self._mFreqCounter[freq] += 1
self._mLastCharClass = charClass
return self.get_state()
def get_confidence(self):
if self.get_state() == eNotMe:
return 0.01
total = sum(self._mFreqCounter)
if total < 0.01:
confidence = 0.0
else:
confidence = ((float(self._mFreqCounter[3]) / total)
- (self._mFreqCounter[1] * 20.0 / total))
if confidence < 0.0:
confidence = 0.0
# lower the confidence of latin1 so that other more accurate
# detector can take priority.
confidence = confidence * 0.5
return confidence
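# Minimal usage sketch (in practice the detector framework in this
# package drives the prober; the latin-1 byte string is illustrative):
#
#     prober = Latin1Prober()
#     prober.feed(b'na\xefve caf\xe9')
#     print(prober.get_charset_name(), prober.get_confidence())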
| mit |
pschmitt/home-assistant | homeassistant/components/water_heater/reproduce_state.py | 16 | 3681 | """Reproduce a Water heater state."""
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import (
ATTR_AWAY_MODE,
ATTR_OPERATION_MODE,
ATTR_TEMPERATURE,
DOMAIN,
SERVICE_SET_AWAY_MODE,
SERVICE_SET_OPERATION_MODE,
SERVICE_SET_TEMPERATURE,
STATE_ECO,
STATE_ELECTRIC,
STATE_GAS,
STATE_HEAT_PUMP,
STATE_HIGH_DEMAND,
STATE_PERFORMANCE,
)
_LOGGER = logging.getLogger(__name__)
VALID_STATES = {
STATE_ECO,
STATE_ELECTRIC,
STATE_GAS,
STATE_HEAT_PUMP,
STATE_HIGH_DEMAND,
STATE_OFF,
STATE_ON,
STATE_PERFORMANCE,
}
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in VALID_STATES:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if (
cur_state.state == state.state
and cur_state.attributes.get(ATTR_TEMPERATURE)
== state.attributes.get(ATTR_TEMPERATURE)
and cur_state.attributes.get(ATTR_AWAY_MODE)
== state.attributes.get(ATTR_AWAY_MODE)
):
return
service_data = {ATTR_ENTITY_ID: state.entity_id}
if state.state != cur_state.state:
if state.state == STATE_ON:
service = SERVICE_TURN_ON
elif state.state == STATE_OFF:
service = SERVICE_TURN_OFF
else:
service = SERVICE_SET_OPERATION_MODE
service_data[ATTR_OPERATION_MODE] = state.state
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
if (
state.attributes.get(ATTR_TEMPERATURE)
!= cur_state.attributes.get(ATTR_TEMPERATURE)
and state.attributes.get(ATTR_TEMPERATURE) is not None
):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: state.entity_id,
ATTR_TEMPERATURE: state.attributes.get(ATTR_TEMPERATURE),
},
context=context,
blocking=True,
)
if (
state.attributes.get(ATTR_AWAY_MODE) != cur_state.attributes.get(ATTR_AWAY_MODE)
and state.attributes.get(ATTR_AWAY_MODE) is not None
):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AWAY_MODE,
{
ATTR_ENTITY_ID: state.entity_id,
ATTR_AWAY_MODE: state.attributes.get(ATTR_AWAY_MODE),
},
context=context,
blocking=True,
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Water heater states."""
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
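# Usage sketch (entity id and attribute values are illustrative; the
# real caller is Home Assistant's scene/state-reproduction machinery,
# and this must run inside a coroutine):
#
#     from homeassistant.core import State
#     await async_reproduce_states(
#         hass, [State("water_heater.tank", "eco", {"temperature": 50})]
#     )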
| apache-2.0 |
wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/numpy/testing/noseclasses.py | 76 | 14350 | # These classes implement a doctest runner plugin for nose, a "known failure"
# error class, and a customized TestProgram for NumPy.
# Because this module imports nose directly, it should not
# be used except by nosetester.py to avoid a general NumPy
# dependency on nose.
from __future__ import division, absolute_import, print_function
import os
import doctest
import nose
from nose.plugins import doctests as npd
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from nose.plugins.base import Plugin
from nose.util import src
import numpy
from .nosetester import get_package_name
import inspect
# Some of the classes in this module begin with 'Numpy' to clearly distinguish
# them from the plethora of very similar names from nose/unittest/doctest
#-----------------------------------------------------------------------------
# Modified version of the one in the stdlib, that fixes a python bug (doctests
# not found in extension modules, http://bugs.python.org/issue3158)
class NumpyDocTestFinder(doctest.DocTestFinder):
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
#print '_fm C1' # dbg
return True
elif inspect.isfunction(object):
#print '_fm C2' # dbg
return module.__dict__ is object.__globals__
elif inspect.isbuiltin(object):
#print '_fm C2-1' # dbg
return module.__name__ == object.__module__
elif inspect.isclass(object):
#print '_fm C3' # dbg
return module.__name__ == object.__module__
elif inspect.ismethod(object):
# This one may be a bug in cython that fails to correctly set the
# __module__ attribute of methods, but since the same error is easy
# to make by extension code writers, having this safety in place
# isn't such a bad idea
#print '_fm C3-1' # dbg
return module.__name__ == object.__self__.__class__.__module__
elif inspect.getmodule(object) is not None:
#print '_fm C4' # dbg
#print 'C4 mod',module,'obj',object # dbg
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
#print '_fm C5' # dbg
return module.__name__ == object.__module__
elif isinstance(object, property):
#print '_fm C6' # dbg
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
doctest.DocTestFinder._find(self, tests, obj, name, module,
source_lines, globs, seen)
# Below we re-run pieces of the above method with manual modifications,
# because the original code is buggy and fails to correctly identify
# doctests in extension modules.
# Local shorthands
from inspect import isroutine, isclass, ismodule, isfunction, \
ismethod
# Look for tests in a module's contained objects.
if ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname1 = '%s.%s' % (name, valname)
if ( (isroutine(val) or isclass(val))
and self._from_module(module, val) ):
self._find(tests, val, valname1, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if isclass(obj) and self._recurse:
#print 'RECURSE into class:',obj # dbg
for valname, val in obj.__dict__.items():
#valname1 = '%s.%s' % (name, valname) # dbg
#print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((isfunction(val) or isclass(val) or
ismethod(val) or isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# second-chance checker; if the default comparison doesn't
# pass, then see if the expected output string contains flags that
# tell us to ignore the output
class NumpyOutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
ret = doctest.OutputChecker.check_output(self, want, got,
optionflags)
if not ret:
if "#random" in want:
return True
# it would be useful to normalize endianness so that
# bigendian machines don't fail all the tests (and there are
# actually some bigendian examples in the doctests). Let's try
# making them all little endian
got = got.replace("'>", "'<")
want= want.replace("'>", "'<")
# try to normalize out 32 and 64 bit default int sizes
for sz in [4, 8]:
got = got.replace("'<i%d'"%sz, "int")
want= want.replace("'<i%d'"%sz, "int")
ret = doctest.OutputChecker.check_output(self, want,
got, optionflags)
return ret
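# Sketch of the "#random" escape hatch the checker honors; the doctest
# fragment below is illustrative, not from this file:
#
#     >>> np.random.rand(2)
#     array([ 0.417022,  0.72032449]) #random: differs on every run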
# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
# its constructor that blocks non-default arguments from being passed
# down into doctest.DocTestCase
class NumpyDocTestCase(npd.DocTestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None, obj=None, result_var='_'):
self._result_var = result_var
self._nose_obj = obj
doctest.DocTestCase.__init__(self, test,
optionflags=optionflags,
setUp=setUp, tearDown=tearDown,
checker=checker)
print_state = numpy.get_printoptions()
class NumpyDoctest(npd.Doctest):
name = 'numpydoctest' # call nosetests with --with-numpydoctest
score = 1000 # load late, after doctest builtin
# always use whitespace and ellipsis options for doctests
doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
# files that should be ignored for doctests
doctest_ignore = ['generate_numpy_api.py',
'setup.py']
# Custom classes; class variables to allow subclassing
doctest_case_class = NumpyDocTestCase
out_check_class = NumpyOutputChecker
test_finder_class = NumpyDocTestFinder
# Don't use the standard doctest option handler; hard-code the option values
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
# Test doctests in 'test' files / directories. Standard plugin default
# is False
self.doctest_tests = True
# Variable name; if defined, doctest results stored in this variable in
# the top-level namespace. None is the standard default
self.doctest_result_var = None
def configure(self, options, config):
# parent method sets enabled flag from command line --with-numpydoctest
Plugin.configure(self, options, config)
self.finder = self.test_finder_class()
self.parser = doctest.DocTestParser()
if self.enabled:
# Pull standard doctest out of plugin list; there's no reason to run
# both. In practice the Unplugger plugin above would cover us when
# run from a standard numpy.test() call; this is just in case
# someone wants to run our plugin outside the numpy.test() machinery
config.plugins.plugins = [p for p in config.plugins.plugins
if p.name != 'doctest']
def set_test_context(self, test):
""" Configure `test` object to set test context
We set the numpy / scipy standard doctest namespace
Parameters
----------
test : test object
with ``globs`` dictionary defining namespace
Returns
-------
None
Notes
-----
`test` object modified in place
"""
# set the namespace for tests
pkg_name = get_package_name(os.path.dirname(test.filename))
# Each doctest should execute in an environment equivalent to
# starting Python and executing "import numpy as np", and,
# for SciPy packages, an additional import of the local
# package (so that scipy.linalg.basic.py's doctests have an
# implicit "from scipy import linalg" as well.
#
# Note: __file__ allows the doctest in NoseTester to run
# without producing an error
test.globs = {'__builtins__':__builtins__,
'__file__':'__main__',
'__name__':'__main__',
'np':numpy}
# add appropriate scipy import for SciPy tests
if 'scipy' in pkg_name:
p = pkg_name.split('.')
p2 = p[-1]
test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
# Override test loading to customize test context (with set_test_context
# method), set standard docstring options, and install our own test output
# checker
def loadTestsFromModule(self, module):
if not self.matches(module.__name__):
npd.log.debug("Doctest doesn't want module %s", module)
return
try:
tests = self.finder.find(module)
except AttributeError:
# nose allows module.__test__ = False; doctest does not and
# throws AttributeError
return
if not tests:
return
tests.sort()
module_file = src(module.__file__)
for test in tests:
if not test.examples:
continue
if not test.filename:
test.filename = module_file
# Set test namespace; test altered in place
self.set_test_context(test)
yield self.doctest_case_class(test,
optionflags=self.doctest_optflags,
checker=self.out_check_class(),
result_var=self.doctest_result_var)
# Add an afterContext method to nose.plugins.doctests.Doctest in order
# to restore print options to the original state after each doctest
def afterContext(self):
numpy.set_printoptions(**print_state)
# Ignore NumPy-specific build files that shouldn't be searched for tests
def wantFile(self, file):
bn = os.path.basename(file)
if bn in self.doctest_ignore:
return False
return npd.Doctest.wantFile(self, file)
class Unplugger(object):
""" Nose plugin to remove named plugin late in loading
By default it removes the "doctest" plugin.
"""
name = 'unplugger'
enabled = True # always enabled
score = 4000 # load late in order to be after builtins
def __init__(self, to_unplug='doctest'):
self.to_unplug = to_unplug
def options(self, parser, env):
pass
def configure(self, options, config):
# Pull named plugin out of plugins list
config.plugins.plugins = [p for p in config.plugins.plugins
if p.name != self.to_unplug]
class KnownFailureTest(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
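# Sketch: a test opts into KNOWNFAIL handling simply by raising this
# exception (test name and message are illustrative):
#
#     def test_known_regression():
#         raise KnownFailureTest("fails until the upstream bug is fixed")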
class KnownFailure(ErrorClassPlugin):
'''Plugin that installs a KNOWNFAIL error class for the
KnownFailureTest exception. When KnownFailureTest is raised,
the exception will be logged in the knownfail attribute of the
result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
exception will not be counted as an error or failure.'''
enabled = True
knownfail = ErrorClass(KnownFailureTest,
label='KNOWNFAIL',
isfailure=False)
def options(self, parser, env=os.environ):
env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
parser.add_option('--no-knownfail', action='store_true',
dest='noKnownFail', default=env.get(env_opt, False),
help='Disable special handling of KnownFailureTest '
'exceptions')
def configure(self, options, conf):
if not self.can_configure:
return
self.conf = conf
disable = getattr(options, 'noKnownFail', False)
if disable:
self.enabled = False
# Class allows us to save the results of the tests in runTests - see runTests
# method docstring for details
class NumpyTestProgram(nose.core.TestProgram):
def runTests(self):
"""Run Tests. Returns true on success, false on failure, and
sets self.success to the same value.
Because nose currently discards the test result object, but we need
to return it to the user, override TestProgram.runTests to retain
the result
"""
if self.testRunner is None:
self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
verbosity=self.config.verbosity,
config=self.config)
plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
if plug_runner is not None:
self.testRunner = plug_runner
self.result = self.testRunner.run(self.test)
self.success = self.result.wasSuccessful()
return self.success
| mit |
darius/mccarthy-to-bryant | problems.py | 2 | 1351 | """
Use BDDs to solve SAT problems from DIMACS files.
TODO: try the tableau method too
"""
import bddsat
import dimacs
import sat
# Some problems from http://toughsat.appspot.com/
filenames = ['problems/trivial.dimacs',
'problems/factoring6.dimacs',
'problems/factoring2.dimacs',
'problems/subsetsum_random.dimacs',
]
def main():
for filename in filenames:
print(filename)
_, problem = dimacs.load(filename)
solution = bddsat.solve(problem)
print(solution)
if solution is not None:
assert sat.is_satisfied(problem, solution)
print
## main()
#. problems/trivial.dimacs
#. {1: 1, 2: 1}
#.
#. problems/factoring6.dimacs
#. {1: 0, 2: 1, 3: 1, 4: 1, 5: 0, 6: 1, 7: 0, 8: 1, 9: 0, 10: 1, 11: 1, 12: 0, 13: 1, 14: 0}
#.
#. problems/factoring2.dimacs
#. {1: 0, 2: 1, 3: 0, 4: 1, 5: 1, 6: 0, 7: 0, 8: 1, 9: 0, 10: 1, 11: 0, 12: 0, 13: 1, 14: 0, 15: 0, 16: 0, 17: 0, 18: 1, 19: 0, 20: 1, 21: 0, 22: 0, 23: 0, 24: 1, 25: 0, 26: 0, 27: 0, 28: 0, 29: 0}
#.
#. problems/subsetsum_random.dimacs
#. {1: 0, 2: 0, 3: 1, 4: 1, 5: 0, 6: 0, 7: 1, 8: 1, 9: 1, 10: 0, 11: 0, 12: 0, 13: 0, 14: 0, 15: 1, 16: 1, 17: 0, 18: 0, 19: 0, 20: 0, 21: 0, 22: 0, 23: 0, 24: 0, 25: 1, 26: 1, 27: 0, 28: 0, 29: 0, 30: 0}
#.
if __name__ == '__main__':
main()
| gpl-3.0 |
Volcanoscar/omim | 3party/protobuf/python/google/protobuf/message.py | 78 | 10275 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): We should just make these methods all "pure-virtual" and move
# all implementation out, into reflection.py for now.
"""Contains an abstract base class for protocol messages."""
__author__ = '[email protected] (Will Robinson)'
class Error(Exception): pass
class DecodeError(Error): pass
class EncodeError(Error): pass
class Message(object):
"""Abstract base class for protocol messages.
Protocol message classes are almost always generated by the protocol
compiler. These generated types subclass Message and implement the methods
shown below.
TODO(robinson): Link to an HTML document here.
TODO(robinson): Document that instances of this class will also
have an Extensions attribute with __getitem__ and __setitem__.
Again, not sure how to best convey this.
TODO(robinson): Document that the class must also have a static
RegisterExtension(extension_field) method.
Not sure how to best express at this point.
"""
# TODO(robinson): Document these fields and methods.
__slots__ = []
DESCRIPTOR = None
def __deepcopy__(self, memo=None):
clone = type(self)()
clone.MergeFrom(self)
return clone
def __eq__(self, other_msg):
"""Recursively compares two messages by value and structure."""
raise NotImplementedError
def __ne__(self, other_msg):
# Can't just say self != other_msg, since that would infinitely recurse. :)
return not self == other_msg
def __hash__(self):
raise TypeError('unhashable object')
def __str__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def __unicode__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def MergeFrom(self, other_msg):
"""Merges the contents of the specified message into current message.
This method merges the contents of the specified message into the current
message. Singular fields that are set in the specified message overwrite
the corresponding fields in the current message. Repeated fields are
appended. Singular sub-messages and groups are recursively merged.
Args:
other_msg: Message to merge into the current message.
"""
raise NotImplementedError
def CopyFrom(self, other_msg):
"""Copies the content of the specified message into the current message.
The method clears the current message and then merges the specified
message using MergeFrom.
Args:
other_msg: Message to copy into the current one.
"""
if self is other_msg:
return
self.Clear()
self.MergeFrom(other_msg)
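  # Illustrative sketch (not from the protobuf sources): for a
  # hypothetical generated subclass ``Person`` with a singular ``name``
  # field and a repeated ``phones`` field, the two methods differ as:
  #
  #   a = Person(); a.name = 'alice'; a.phones.append('555-0100')
  #   b = Person(); b.name = 'bob';   b.phones.append('555-0199')
  #   b.MergeFrom(a)  # b.name -> 'alice'; b.phones holds both numbers
  #   b.CopyFrom(a)   # b is cleared first, then made identical to a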
def Clear(self):
"""Clears all data that was set in the message."""
raise NotImplementedError
def SetInParent(self):
"""Mark this as present in the parent.
This normally happens automatically when you assign a field of a
sub-message, but sometimes you want to make the sub-message
present while keeping it empty. If you find yourself using this,
you may want to reconsider your design."""
raise NotImplementedError
def IsInitialized(self):
"""Checks if the message is initialized.
Returns:
The method returns True if the message is initialized (i.e. all of its
required fields are set).
"""
raise NotImplementedError
# TODO(robinson): MergeFromString() should probably return None and be
# implemented in terms of a helper that returns the # of bytes read. Our
# deserialization routines would use the helper when recursively
# deserializing, but the end user would almost always just want the no-return
# MergeFromString().
def MergeFromString(self, serialized):
"""Merges serialized protocol buffer data into this message.
When we find a field in |serialized| that is already present
in this message:
- If it's a "repeated" field, we append to the end of our list.
- Else, if it's a scalar, we overwrite our field.
- Else, (it's a nonrepeated composite), we recursively merge
into the existing composite.
TODO(robinson): Document handling of unknown fields.
Args:
serialized: Any object that allows us to call buffer(serialized)
to access a string of bytes using the buffer interface.
TODO(robinson): When we switch to a helper, this will return None.
Returns:
The number of bytes read from |serialized|.
For non-group messages, this will always be len(serialized),
but for messages which are actually groups, this will
generally be less than len(serialized), since we must
stop when we reach an END_GROUP tag. Note that if
we *do* stop because of an END_GROUP tag, the number
of bytes returned does not include the bytes
for the END_GROUP tag information.
"""
raise NotImplementedError
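  # Illustrative sketch (hypothetical generated subclass ``MyMessage``):
  #
  #   data = source_msg.SerializeToString()
  #   n = msg.MergeFromString(data)
  #   assert n == len(data)  # holds for ordinary (non-group) messages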
def ParseFromString(self, serialized):
"""Parse serialized protocol buffer data into this message.
Like MergeFromString(), except we clear the object first and
do not return the value that MergeFromString returns.
"""
self.Clear()
self.MergeFromString(serialized)
def SerializeToString(self):
"""Serializes the protocol message to a binary string.
Returns:
A binary string representation of the message if all of the required
fields in the message are set (i.e. the message is initialized).
Raises:
message.EncodeError if the message isn't initialized.
"""
raise NotImplementedError
def SerializePartialToString(self):
"""Serializes the protocol message to a binary string.
This method is similar to SerializeToString but doesn't check if the
message is initialized.
Returns:
A string representation of the partial message.
"""
raise NotImplementedError
# TODO(robinson): Decide whether we like these better
# than auto-generated has_foo() and clear_foo() methods
# on the instances themselves. This way is less consistent
# with C++, but it makes reflection-type access easier and
# reduces the number of magically autogenerated things.
#
# TODO(robinson): Be sure to document (and test) exactly
# which field names are accepted here. Are we case-sensitive?
# What do we do with fields that share names with Python keywords
# like 'lambda' and 'yield'?
#
# nnorwitz says:
# """
# Typically (in python), an underscore is appended to names that are
# keywords. So they would become lambda_ or yield_.
# """
def ListFields(self):
"""Returns a list of (FieldDescriptor, value) tuples for all
fields in the message which are not empty. A singular field is non-empty
if HasField() would return true, and a repeated field is non-empty if
it contains at least one element. The fields are ordered by field
number"""
raise NotImplementedError
def HasField(self, field_name):
"""Checks if a certain field is set for the message. Note if the
field_name is not defined in the message descriptor, ValueError will be
raised."""
raise NotImplementedError
def ClearField(self, field_name):
raise NotImplementedError
def HasExtension(self, extension_handle):
raise NotImplementedError
def ClearExtension(self, extension_handle):
raise NotImplementedError
def ByteSize(self):
"""Returns the serialized size of this message.
Recursively calls ByteSize() on all contained messages.
"""
raise NotImplementedError
def _SetListener(self, message_listener):
"""Internal method used by the protocol message implementation.
Clients should not call this directly.
Sets a listener that this message will call on certain state transitions.
The purpose of this method is to register back-edges from children to
parents at runtime, for the purpose of setting "has" bits and
byte-size-dirty bits in the parent and ancestor objects whenever a child or
descendant object is modified.
If the client wants to disconnect this Message from the object tree, she
explicitly sets callback to None.
If message_listener is None, unregisters any existing listener. Otherwise,
message_listener must implement the MessageListener interface in
internal/message_listener.py, and we discard any listener registered
via a previous _SetListener() call.
"""
raise NotImplementedError
def __getstate__(self):
"""Support the pickle protocol."""
return dict(serialized=self.SerializePartialToString())
def __setstate__(self, state):
"""Support the pickle protocol."""
self.__init__()
self.ParseFromString(state['serialized'])
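  # Illustrative sketch (hypothetical generated subclass ``MyMessage``):
  # pickling round-trips through the wire format stored by __getstate__:
  #
  #   import pickle
  #   clone = pickle.loads(pickle.dumps(msg))  # re-parses the bytes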
| apache-2.0 |
Darthkpo/xtt | openpyxl/xml/tests/test_incremental_xmlfile.py | 1 | 11428 | from __future__ import absolute_import
"""
Tests for the incremental XML serialisation API.
From lxml
"""
from io import BytesIO
import unittest
import tempfile, os, sys
from .common_imports import etree, HelperTestCase, skipIf
from .. import xmlfile as etree
import pytest
from openpyxl.tests.helper import compare_xml
import xml.etree.ElementTree
# _parse_file needs parse routine - take it from ElementTree
etree.parse = xml.etree.ElementTree.parse
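# openpyxl's xmlfile module only writes XML, so the _parse_file() helpers
# below re-read the generated output with the stdlib parser borrowed here.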
class _XmlFileTestCaseBase(HelperTestCase):
_file = None # to be set by specific subtypes below
def setUp(self):
self._file = BytesIO()
def test_element(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
pass
self.assertXml('<test></test>')
def test_element_write_text(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
xf.write('toast')
self.assertXml('<test>toast</test>')
def test_element_nested(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
with xf.element('toast'):
with xf.element('taste'):
xf.write('conTent')
self.assertXml('<test><toast><taste>conTent</taste></toast></test>')
def test_element_nested_with_text(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
xf.write('con')
with xf.element('toast'):
xf.write('tent')
with xf.element('taste'):
xf.write('inside')
xf.write('tnet')
xf.write('noc')
self.assertXml('<test>con<toast>tent<taste>inside</taste>'
'tnet</toast>noc</test>')
def test_write_Element(self):
with etree.xmlfile(self._file) as xf:
xf.write(etree.Element('test'))
self.assertXml('<test/>')
def test_write_Element_repeatedly(self):
element = etree.Element('test')
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
for i in range(100):
xf.write(element)
tree = self._parse_file()
self.assertTrue(tree is not None)
self.assertEqual(100, len(tree.getroot()))
self.assertEqual(set(['test']), set(el.tag for el in tree.getroot()))
def test_namespace_nsmap(self):
with etree.xmlfile(self._file) as xf:
with xf.element('{nsURI}test', nsmap={'x': 'nsURI'}):
pass
self.assertXml('<x:test xmlns:x="nsURI"></x:test>')
def test_namespace_nested_nsmap(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test', nsmap={'x': 'nsURI'}):
with xf.element('{nsURI}toast'):
pass
self.assertXml('<test xmlns:x="nsURI"><x:toast></x:toast></test>')
def test_anonymous_namespace(self):
with etree.xmlfile(self._file) as xf:
with xf.element('{nsURI}test'):
pass
self.assertXml('<ns0:test xmlns:ns0="nsURI"></ns0:test>')
def test_namespace_nested_anonymous(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
with xf.element('{nsURI}toast'):
pass
self.assertXml('<test><ns0:toast xmlns:ns0="nsURI"></ns0:toast></test>')
def test_default_namespace(self):
with etree.xmlfile(self._file) as xf:
with xf.element('{nsURI}test', nsmap={None: 'nsURI'}):
pass
self.assertXml('<test xmlns="nsURI"></test>')
def test_nested_default_namespace(self):
with etree.xmlfile(self._file) as xf:
with xf.element('{nsURI}test', nsmap={None: 'nsURI'}):
with xf.element('{nsURI}toast'):
pass
self.assertXml('<test xmlns="nsURI"><toast></toast></test>')
@pytest.mark.xfail
def test_pi(self):
with etree.xmlfile(self._file) as xf:
xf.write(etree.ProcessingInstruction('pypi'))
with xf.element('test'):
pass
self.assertXml('<?pypi ?><test></test>')
@pytest.mark.xfail
def test_comment(self):
with etree.xmlfile(self._file) as xf:
xf.write(etree.Comment('a comment'))
with xf.element('test'):
pass
self.assertXml('<!--a comment--><test></test>')
def test_attribute(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test', attrib={'k': 'v'}):
pass
self.assertXml('<test k="v"></test>')
def test_escaping(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
xf.write('Comments: <!-- text -->\n')
xf.write('Entities: &')
self.assertXml(
'<test>Comments: <!-- text -->\nEntities: &amp;</test>')
@pytest.mark.xfail
def test_encoding(self):
with etree.xmlfile(self._file, encoding='utf16') as xf:
with xf.element('test'):
xf.write('toast')
self.assertXml('<test>toast</test>', encoding='utf16')
@pytest.mark.xfail
def test_buffering(self):
with etree.xmlfile(self._file, buffered=False) as xf:
with xf.element('test'):
self.assertXml("<test>")
xf.write('toast')
self.assertXml("<test>toast")
with xf.element('taste'):
self.assertXml("<test>toast<taste>")
xf.write('some', etree.Element("more"), "toast")
self.assertXml("<test>toast<taste>some<more/>toast")
self.assertXml("<test>toast<taste>some<more/>toast</taste>")
xf.write('end')
self.assertXml("<test>toast<taste>some<more/>toast</taste>end")
self.assertXml("<test>toast<taste>some<more/>toast</taste>end</test>")
self.assertXml("<test>toast<taste>some<more/>toast</taste>end</test>")
@pytest.mark.xfail
def test_flush(self):
with etree.xmlfile(self._file, buffered=True) as xf:
with xf.element('test'):
self.assertXml("")
xf.write('toast')
self.assertXml("")
with xf.element('taste'):
self.assertXml("")
xf.flush()
self.assertXml("<test>toast<taste>")
self.assertXml("<test>toast<taste>")
self.assertXml("<test>toast<taste>")
self.assertXml("<test>toast<taste></taste></test>")
def test_failure_preceding_text(self):
try:
with etree.xmlfile(self._file) as xf:
xf.write('toast')
except etree.LxmlSyntaxError:
self.assertTrue(True)
else:
self.assertTrue(False)
def test_failure_trailing_text(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
pass
try:
xf.write('toast')
except etree.LxmlSyntaxError:
self.assertTrue(True)
else:
self.assertTrue(False)
def test_failure_trailing_Element(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
pass
try:
xf.write(etree.Element('test'))
except etree.LxmlSyntaxError:
self.assertTrue(True)
else:
self.assertTrue(False)
@pytest.mark.xfail
def test_closing_out_of_order_in_error_case(self):
cm_exit = None
try:
with etree.xmlfile(self._file) as xf:
x = xf.element('test')
cm_exit = x.__exit__
x.__enter__()
raise ValueError('123')
except ValueError:
self.assertTrue(cm_exit)
try:
cm_exit(ValueError, ValueError("huhu"), None)
except etree.LxmlSyntaxError:
self.assertTrue(True)
else:
self.assertTrue(False)
else:
self.assertTrue(False)
def _read_file(self):
pos = self._file.tell()
self._file.seek(0)
try:
return self._file.read()
finally:
self._file.seek(pos)
def _parse_file(self):
pos = self._file.tell()
self._file.seek(0)
try:
return etree.parse(self._file)
finally:
self._file.seek(pos)
def tearDown(self):
if self._file is not None:
self._file.close()
def assertXml(self, expected, encoding='utf8'):
diff = compare_xml(self._read_file().decode(encoding), expected)
assert diff is None, diff
class BytesIOXmlFileTestCase(_XmlFileTestCaseBase):
def setUp(self):
self._file = BytesIO()
def test_filelike_close(self):
with etree.xmlfile(self._file, close=True) as xf:
with xf.element('test'):
pass
self.assertRaises(ValueError, self._file.getvalue)
class TempXmlFileTestCase(_XmlFileTestCaseBase):
def setUp(self):
self._file = tempfile.TemporaryFile()
class TempPathXmlFileTestCase(_XmlFileTestCaseBase):
def setUp(self):
self._tmpfile = tempfile.NamedTemporaryFile(delete=False)
self._file = self._tmpfile.name
def tearDown(self):
try:
self._tmpfile.close()
finally:
if os.path.exists(self._tmpfile.name):
os.unlink(self._tmpfile.name)
def _read_file(self):
self._tmpfile.seek(0)
return self._tmpfile.read()
def _parse_file(self):
self._tmpfile.seek(0)
return etree.parse(self._tmpfile)
@skipIf(True, "temp file behaviour is too platform specific here")
def test_buffering(self):
pass
@skipIf(True, "temp file behaviour is too platform specific here")
def test_flush(self):
pass
class SimpleFileLikeXmlFileTestCase(_XmlFileTestCaseBase):
class SimpleFileLike(object):
def __init__(self, target):
self._target = target
self.write = target.write
self.tell = target.tell
self.seek = target.seek
self.closed = False
def close(self):
assert not self.closed
self.closed = True
self._target.close()
def setUp(self):
self._target = BytesIO()
self._file = self.SimpleFileLike(self._target)
def _read_file(self):
return self._target.getvalue()
def _parse_file(self):
pos = self._file.tell()
self._target.seek(0)
try:
return etree.parse(self._target)
finally:
self._target.seek(pos)
def test_filelike_not_closing(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
pass
self.assertFalse(self._file.closed)
def test_filelike_close(self):
with etree.xmlfile(self._file, close=True) as xf:
with xf.element('test'):
pass
self.assertTrue(self._file.closed)
self._file = None # prevent closing in tearDown()
| mit |
fujunwei/chromium-crosswalk | build/android/pylib/instrumentation/test_package.py | 71 | 1334 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Class representing instrumentation test apk and jar."""
import os
from pylib.instrumentation import test_jar
from pylib.utils import apk_helper
class TestPackage(test_jar.TestJar):
def __init__(self, apk_path, jar_path, test_support_apk_path):
test_jar.TestJar.__init__(self, jar_path)
if not os.path.exists(apk_path):
raise Exception('%s not found, please build it' % apk_path)
self._apk_path = apk_path
self._apk_name = os.path.splitext(os.path.basename(apk_path))[0]
self._package_name = apk_helper.GetPackageName(self._apk_path)
self._test_support_apk_path = test_support_apk_path
def GetApkPath(self):
"""Returns the absolute path to the APK."""
return self._apk_path
def GetApkName(self):
"""Returns the name of the apk without the suffix."""
return self._apk_name
def GetPackageName(self):
"""Returns the package name of this APK."""
return self._package_name
# Override.
def Install(self, device):
device.Install(self.GetApkPath())
if (self._test_support_apk_path and
os.path.exists(self._test_support_apk_path)):
device.Install(self._test_support_apk_path)
| bsd-3-clause |
ghedsouza/django | tests/forms_tests/field_tests/test_base.py | 131 | 1455 | from django.forms import ChoiceField, Field, Form, Select
from django.test import SimpleTestCase
class BasicFieldsTests(SimpleTestCase):
def test_field_sets_widget_is_required(self):
self.assertTrue(Field(required=True).widget.is_required)
self.assertFalse(Field(required=False).widget.is_required)
def test_cooperative_multiple_inheritance(self):
class A:
def __init__(self):
self.class_a_var = True
super().__init__()
class ComplexField(Field, A):
def __init__(self):
super().__init__()
f = ComplexField()
self.assertTrue(f.class_a_var)
def test_field_deepcopies_widget_instance(self):
class CustomChoiceField(ChoiceField):
widget = Select(attrs={'class': 'my-custom-class'})
class TestForm(Form):
field1 = CustomChoiceField(choices=[])
field2 = CustomChoiceField(choices=[])
f = TestForm()
f.fields['field1'].choices = [('1', '1')]
f.fields['field2'].choices = [('2', '2')]
self.assertEqual(f.fields['field1'].widget.choices, [('1', '1')])
self.assertEqual(f.fields['field2'].widget.choices, [('2', '2')])
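        # Each field instance deep-copies the class-level widget, so
        # mutating field1's choices cannot leak into field2's widget.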
class DisabledFieldTests(SimpleTestCase):
def test_disabled_field_has_changed_always_false(self):
disabled_field = Field(disabled=True)
self.assertFalse(disabled_field.has_changed('x', 'y'))
| bsd-3-clause |
clione/django-kanban | src/core/userena/tests/commands.py | 7 | 4703 | from django.test import TestCase
from django.core.management import call_command
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from userena.models import UserenaSignup
from userena.managers import ASSIGNED_PERMISSIONS
from userena import settings as userena_settings
from userena.utils import get_profile_model, get_user_model
from guardian.shortcuts import remove_perm
from guardian.models import UserObjectPermission
import datetime
User = get_user_model()
class CleanExpiredTests(TestCase):
user_info = {'username': 'alice',
'password': 'swordfish',
'email': '[email protected]'}
def test_clean_expired(self):
"""
        Test if ``clean_expired`` deletes all users whose ``activation_key``
        has expired.
"""
# Create an account which is expired.
user = UserenaSignup.objects.create_user(**self.user_info)
user.date_joined -= datetime.timedelta(days=userena_settings.USERENA_ACTIVATION_DAYS + 1)
user.save()
# There should be one account now
User.objects.get(username=self.user_info['username'])
# Clean it.
call_command('clean_expired')
self.failUnlessEqual(User.objects.filter(username=self.user_info['username']).count(), 0)
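        # (In a deployed project this command would typically be run
        # periodically, e.g. from cron: ``python manage.py clean_expired``.)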
class CheckPermissionTests(TestCase):
user_info = {'username': 'alice',
'password': 'swordfish',
'email': '[email protected]'}
def test_check_permissions(self):
# Create a new account.
user = UserenaSignup.objects.create_user(**self.user_info)
user.save()
# Remove all permissions
UserObjectPermission.objects.filter(user=user).delete()
self.failUnlessEqual(UserObjectPermission.objects.filter(user=user).count(),
0)
# Check it
call_command('check_permissions')
# User should have all permissions again
user_permissions = UserObjectPermission.objects.filter(user=user).values_list('permission__codename', flat=True)
required_permissions = [u'change_user', u'delete_user', u'change_profile', u'view_profile']
for perm in required_permissions:
if perm not in user_permissions:
self.fail()
# Check it again should do nothing
call_command('check_permissions', test=True)
def test_incomplete_permissions(self):
        # Delete the necessary permissions
profile_model_obj = get_profile_model()
content_type_profile = ContentType.objects.get_for_model(profile_model_obj)
content_type_user = ContentType.objects.get_for_model(User)
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == "profile":
content_type = content_type_profile
else: content_type = content_type_user
for perm in perms:
Permission.objects.get(name=perm[1],
content_type=content_type).delete()
        # Check if they are back
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == "profile":
content_type = content_type_profile
else: content_type = content_type_user
for perm in perms:
try:
perm = Permission.objects.get(name=perm[1],
content_type=content_type)
except Permission.DoesNotExist: pass
else: self.fail("Found %s: " % perm)
# Repair them
call_command('check_permissions', test=True)
        # Check if they are back
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == "profile":
content_type = content_type_profile
else: content_type = content_type_user
for perm in perms:
try:
perm = Permission.objects.get(name=perm[1],
content_type=content_type)
except Permission.DoesNotExist:
self.fail()
def test_no_profile(self):
""" Check for warning when there is no profile """
# TODO: Dirty! Currently we check for the warning by getting a 100%
    # test coverage, meaning that it did output some warning.
user = UserenaSignup.objects.create_user(**self.user_info)
# remove the profile of this user
get_profile_model().objects.get(user=user).delete()
# run the command to check for the warning.
call_command('check_permissions', test=True)
| mit |
flh/odoo | addons/hr_holidays/report/__init__.py | 442 | 1129 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import holidays_summary_report
import available_holidays
import hr_holidays_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
IssamLaradji/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
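# fit_inverse_transform=True makes KernelPCA learn an approximate pre-image
# map, so X_back reconstructs points in the original space (fourth panel).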
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
splav/servo | components/script/dom/bindings/codegen/parser/tests/test_interface_identifier_conflicts_across_members.py | 276 | 1371 | def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface IdentifierConflictAcrossMembers1 {
const byte thing1 = 1;
readonly attribute long thing1;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface IdentifierConflictAcrossMembers2 {
readonly attribute long thing1;
const byte thing1 = 1;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface IdentifierConflictAcrossMembers3 {
getter boolean thing1(DOMString name);
readonly attribute long thing1;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
            interface IdentifierConflictAcrossMembers4 {
const byte thing1 = 1;
long thing1();
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
| mpl-2.0 |
looker/sentry | src/sentry/api/endpoints/broadcast_details.py | 2 | 3477 | from __future__ import absolute_import
import logging
from django.db import IntegrityError, transaction
from django.db.models import Q
from django.utils import timezone
from rest_framework.permissions import IsAuthenticated
from sentry.api.base import Endpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize, AdminBroadcastSerializer, BroadcastSerializer
from sentry.api.validators import AdminBroadcastValidator, BroadcastValidator
from sentry.auth.superuser import is_active_superuser
from sentry.models import Broadcast, BroadcastSeen
logger = logging.getLogger('sentry')
class BroadcastDetailsEndpoint(Endpoint):
permission_classes = (IsAuthenticated, )
def _get_broadcast(self, request, broadcast_id):
if is_active_superuser(request) and request.access.has_permission('broadcasts.admin'):
queryset = Broadcast.objects.all()
else:
queryset = Broadcast.objects.filter(
Q(date_expires__isnull=True) | Q(date_expires__gt=timezone.now()),
is_active=True,
)
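            # Non-superusers only ever see active broadcasts whose expiry
            # is unset (never expires) or still in the future.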
try:
return queryset.get(id=broadcast_id)
except Broadcast.DoesNotExist:
raise ResourceDoesNotExist
def _get_validator(self, request):
if is_active_superuser(request):
return AdminBroadcastValidator
return BroadcastValidator
def _serialize_response(self, request, broadcast):
if is_active_superuser(request):
serializer_cls = AdminBroadcastSerializer
else:
serializer_cls = BroadcastSerializer
return self.respond(serialize(broadcast, request.user, serializer=serializer_cls()))
def get(self, request, broadcast_id):
broadcast = self._get_broadcast(request, broadcast_id)
return self._serialize_response(request, broadcast)
def put(self, request, broadcast_id):
broadcast = self._get_broadcast(request, broadcast_id)
validator = self._get_validator(request)(data=request.DATA, partial=True)
if not validator.is_valid():
return self.respond(validator.errors, status=400)
result = validator.object
update_kwargs = {}
if result.get('title'):
update_kwargs['title'] = result['title']
if result.get('message'):
update_kwargs['message'] = result['message']
if result.get('link'):
update_kwargs['link'] = result['link']
if result.get('isActive') is not None:
update_kwargs['is_active'] = result['isActive']
if result.get('dateExpires', -1) != -1:
update_kwargs['date_expires'] = result['dateExpires']
if update_kwargs:
with transaction.atomic():
broadcast.update(**update_kwargs)
logger.info('broadcasts.update', extra={
'ip_address': request.META['REMOTE_ADDR'],
'user_id': request.user.id,
'broadcast_id': broadcast.id,
'data': update_kwargs,
})
if result.get('hasSeen'):
try:
with transaction.atomic():
BroadcastSeen.objects.create(
broadcast=broadcast,
user=request.user,
)
except IntegrityError:
pass
return self._serialize_response(request, broadcast)
| bsd-3-clause |
mssurajkaiga/empathy | tools/glib-gtypes-generator.py | 12 | 12756 | #!/usr/bin/python
# Generate GLib GInterfaces from the Telepathy specification.
# The master copy of this program is in the telepathy-glib repository -
# please make any changes there.
#
# Copyright (C) 2006, 2007 Collabora Limited
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import xml.dom.minidom
from libtpcodegen import file_set_contents, u
from libglibcodegen import escape_as_identifier, \
get_docstring, \
NS_TP, \
Signature, \
type_to_gtype, \
xml_escape
def types_to_gtypes(types):
return [type_to_gtype(t)[1] for t in types]
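# Illustrative mapping (assuming the usual telepathy type table):
# types_to_gtypes(['s', 'u']) would yield ['G_TYPE_STRING', 'G_TYPE_UINT'].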
class GTypesGenerator(object):
def __init__(self, dom, output, mixed_case_prefix):
self.dom = dom
self.Prefix = mixed_case_prefix
self.PREFIX_ = self.Prefix.upper() + '_'
self.prefix_ = self.Prefix.lower() + '_'
self.header = []
self.body = []
self.docs = []
self.output = output
for f in (self.header, self.body, self.docs):
f.append('/* Auto-generated, do not edit.\n *\n'
' * This file may be distributed under the same terms\n'
' * as the specification from which it was generated.\n'
' */\n\n')
# keys are e.g. 'sv', values are the key escaped
self.need_mappings = {}
# keys are the contents of the struct (e.g. 'sssu'), values are the
# key escaped
self.need_structs = {}
# keys are the contents of the struct (e.g. 'sssu'), values are the
# key escaped
self.need_struct_arrays = {}
# keys are the contents of the array (unlike need_struct_arrays!),
# values are the key escaped
self.need_other_arrays = {}
def h(self, code):
self.header.append(code)
def c(self, code):
self.body.append(code)
def d(self, code):
self.docs.append(code)
def do_mapping_header(self, mapping):
members = mapping.getElementsByTagNameNS(NS_TP, 'member')
assert len(members) == 2
impl_sig = ''.join([elt.getAttribute('type')
for elt in members])
esc_impl_sig = escape_as_identifier(impl_sig)
name = (self.PREFIX_ + 'HASH_TYPE_' +
mapping.getAttribute('name').upper())
impl = self.prefix_ + 'type_dbus_hash_' + esc_impl_sig
docstring = get_docstring(mapping) or '(Undocumented)'
self.d('/**\n * %s:\n *\n' % name.strip())
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' * This macro expands to a call to a function\n')
self.d(' * that returns the #GType of a #GHashTable\n')
self.d(' * appropriate for representing a D-Bus\n')
self.d(' * dictionary of signature\n')
self.d(' * <literal>a{%s}</literal>.\n' % impl_sig)
self.d(' *\n')
key, value = members
self.d(' * Keys (D-Bus type <literal>%s</literal>,\n'
% key.getAttribute('type'))
tp_type = key.getAttributeNS(NS_TP, 'type')
if tp_type:
self.d(' * type <literal>%s</literal>,\n' % tp_type)
self.d(' * named <literal>%s</literal>):\n'
% key.getAttribute('name'))
docstring = get_docstring(key) or '(Undocumented)'
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' * Values (D-Bus type <literal>%s</literal>,\n'
% value.getAttribute('type'))
tp_type = value.getAttributeNS(NS_TP, 'type')
if tp_type:
self.d(' * type <literal>%s</literal>,\n' % tp_type)
self.d(' * named <literal>%s</literal>):\n'
% value.getAttribute('name'))
docstring = get_docstring(value) or '(Undocumented)'
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' */\n')
self.h('#define %s (%s ())\n\n' % (name, impl))
self.need_mappings[impl_sig] = esc_impl_sig
array_name = mapping.getAttribute('array-name')
if array_name:
gtype_name = self.PREFIX_ + 'ARRAY_TYPE_' + array_name.upper()
contents_sig = 'a{' + impl_sig + '}'
esc_contents_sig = escape_as_identifier(contents_sig)
impl = self.prefix_ + 'type_dbus_array_of_' + esc_contents_sig
            self.d('/**\n * %s:\n *\n' % gtype_name)
self.d(' * Expands to a call to a function\n')
self.d(' * that returns the #GType of a #GPtrArray\n')
self.d(' * of #%s.\n' % name)
self.d(' */\n\n')
self.h('#define %s (%s ())\n\n' % (gtype_name, impl))
self.need_other_arrays[contents_sig] = esc_contents_sig
def do_struct_header(self, struct):
members = struct.getElementsByTagNameNS(NS_TP, 'member')
impl_sig = ''.join([elt.getAttribute('type') for elt in members])
esc_impl_sig = escape_as_identifier(impl_sig)
name = (self.PREFIX_ + 'STRUCT_TYPE_' +
struct.getAttribute('name').upper())
impl = self.prefix_ + 'type_dbus_struct_' + esc_impl_sig
docstring = struct.getElementsByTagNameNS(NS_TP, 'docstring')
if docstring:
docstring = docstring[0].toprettyxml()
if docstring.startswith('<tp:docstring>'):
docstring = docstring[14:]
if docstring.endswith('</tp:docstring>\n'):
docstring = docstring[:-16]
if docstring.strip() in ('<tp:docstring/>', ''):
docstring = '(Undocumented)'
else:
docstring = '(Undocumented)'
        self.d('/**\n * %s:\n *\n' % name)
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' * This macro expands to a call to a function\n')
self.d(' * that returns the #GType of a #GValueArray\n')
self.d(' * appropriate for representing a D-Bus struct\n')
self.d(' * with signature <literal>(%s)</literal>.\n'
% impl_sig)
self.d(' *\n')
for i, member in enumerate(members):
self.d(' * Member %d (D-Bus type '
'<literal>%s</literal>,\n'
% (i, member.getAttribute('type')))
tp_type = member.getAttributeNS(NS_TP, 'type')
if tp_type:
self.d(' * type <literal>%s</literal>,\n' % tp_type)
self.d(' * named <literal>%s</literal>):\n'
% member.getAttribute('name'))
docstring = get_docstring(member) or '(Undocumented)'
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' */\n\n')
self.h('#define %s (%s ())\n\n' % (name, impl))
array_name = struct.getAttribute('array-name')
if array_name != '':
array_name = (self.PREFIX_ + 'ARRAY_TYPE_' + array_name.upper())
impl = self.prefix_ + 'type_dbus_array_' + esc_impl_sig
            self.d('/**\n * %s:\n *\n' % array_name)
self.d(' * Expands to a call to a function\n')
self.d(' * that returns the #GType of a #GPtrArray\n')
self.d(' * of #%s.\n' % name)
self.d(' */\n\n')
self.h('#define %s (%s ())\n\n' % (array_name, impl))
self.need_struct_arrays[impl_sig] = esc_impl_sig
self.need_structs[impl_sig] = esc_impl_sig
def __call__(self):
mappings = self.dom.getElementsByTagNameNS(NS_TP, 'mapping')
structs = self.dom.getElementsByTagNameNS(NS_TP, 'struct')
for mapping in mappings:
self.do_mapping_header(mapping)
for sig in self.need_mappings:
self.h('GType %stype_dbus_hash_%s (void);\n\n' %
(self.prefix_, self.need_mappings[sig]))
self.c('GType\n%stype_dbus_hash_%s (void)\n{\n' %
(self.prefix_, self.need_mappings[sig]))
self.c(' static GType t = 0;\n\n')
self.c(' if (G_UNLIKELY (t == 0))\n')
# FIXME: translate sig into two GTypes
items = tuple(Signature(sig))
gtypes = types_to_gtypes(items)
self.c(' t = dbus_g_type_get_map ("GHashTable", '
'%s, %s);\n' % (gtypes[0], gtypes[1]))
self.c(' return t;\n')
self.c('}\n\n')
for struct in structs:
self.do_struct_header(struct)
for sig in self.need_structs:
self.h('GType %stype_dbus_struct_%s (void);\n\n' %
(self.prefix_, self.need_structs[sig]))
self.c('GType\n%stype_dbus_struct_%s (void)\n{\n' %
(self.prefix_, self.need_structs[sig]))
self.c(' static GType t = 0;\n\n')
self.c(' if (G_UNLIKELY (t == 0))\n')
self.c(' t = dbus_g_type_get_struct ("GValueArray",\n')
items = tuple(Signature(sig))
gtypes = types_to_gtypes(items)
for gtype in gtypes:
self.c(' %s,\n' % gtype)
self.c(' G_TYPE_INVALID);\n')
self.c(' return t;\n')
self.c('}\n\n')
for sig in self.need_struct_arrays:
self.h('GType %stype_dbus_array_%s (void);\n\n' %
(self.prefix_, self.need_struct_arrays[sig]))
self.c('GType\n%stype_dbus_array_%s (void)\n{\n' %
(self.prefix_, self.need_struct_arrays[sig]))
self.c(' static GType t = 0;\n\n')
self.c(' if (G_UNLIKELY (t == 0))\n')
self.c(' t = dbus_g_type_get_collection ("GPtrArray", '
'%stype_dbus_struct_%s ());\n' %
(self.prefix_, self.need_struct_arrays[sig]))
self.c(' return t;\n')
self.c('}\n\n')
for sig in self.need_other_arrays:
self.h('GType %stype_dbus_array_of_%s (void);\n\n' %
(self.prefix_, self.need_other_arrays[sig]))
self.c('GType\n%stype_dbus_array_of_%s (void)\n{\n' %
(self.prefix_, self.need_other_arrays[sig]))
self.c(' static GType t = 0;\n\n')
self.c(' if (G_UNLIKELY (t == 0))\n')
if sig[:2] == 'a{' and sig[-1:] == '}':
# array of mappings
self.c(' t = dbus_g_type_get_collection ('
'"GPtrArray", '
'%stype_dbus_hash_%s ());\n' %
(self.prefix_, escape_as_identifier(sig[2:-1])))
elif sig[:2] == 'a(' and sig[-1:] == ')':
# array of arrays of struct
self.c(' t = dbus_g_type_get_collection ('
'"GPtrArray", '
'%stype_dbus_array_%s ());\n' %
(self.prefix_, escape_as_identifier(sig[2:-1])))
elif sig[:1] == 'a':
# array of arrays of non-struct
self.c(' t = dbus_g_type_get_collection ('
'"GPtrArray", '
'%stype_dbus_array_of_%s ());\n' %
(self.prefix_, escape_as_identifier(sig[1:])))
else:
raise AssertionError("array of '%s' not supported" % sig)
self.c(' return t;\n')
self.c('}\n\n')
file_set_contents(self.output + '.h', u('').join(self.header).encode('utf-8'))
file_set_contents(self.output + '-body.h', u('').join(self.body).encode('utf-8'))
file_set_contents(self.output + '-gtk-doc.h', u('').join(self.docs).encode('utf-8'))
if __name__ == '__main__':
argv = sys.argv[1:]
dom = xml.dom.minidom.parse(argv[0])
GTypesGenerator(dom, argv[1], argv[2])()
| gpl-2.0 |
readevalprint/mezzanine | mezzanine/utils/docs.py | 16 | 12232 | """
Utils called from project_root/docs/conf.py when Sphinx
documentation is generated.
"""
from __future__ import division, print_function, unicode_literals
from future.builtins import map, open, str
from collections import OrderedDict
from datetime import datetime
import os.path
from shutil import copyfile, move
from string import ascii_letters as letters
from socket import gethostname
from warnings import warn
from django.template.defaultfilters import urlize
from django.utils.encoding import force_text
from django.utils.functional import Promise
from mezzanine import __version__
from mezzanine.conf import registry
from mezzanine.utils.importing import import_dotted_path, path_for_import
def deep_force_unicode(value):
"""
Recursively call force_text on value.
"""
if isinstance(value, (list, tuple, set)):
value = type(value)(map(deep_force_unicode, value))
elif isinstance(value, dict):
value = type(value)(map(deep_force_unicode, value.items()))
elif isinstance(value, Promise):
value = force_text(value)
return value
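# Illustrative behaviour: deep_force_unicode({'k': [ugettext_lazy('v')]})
# returns the same nested structure with the lazy Promise resolved to text.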
def build_settings_docs(docs_path, prefix=None):
"""
Converts names, descriptions and defaults for settings in
``mezzanine.conf.registry`` into RST format for use in docs,
optionally filtered by setting names with the given prefix.
"""
# String to use instead of setting value for dynamic defaults
dynamic = "[dynamic]"
lines = [".. THIS DOCUMENT IS AUTO GENERATED VIA conf.py"]
for name in sorted(registry.keys()):
if prefix and not name.startswith(prefix):
continue
setting = registry[name]
settings_name = "``%s``" % name
settings_label = ".. _%s:" % name
setting_default = setting["default"]
if isinstance(setting_default, str):
if gethostname() in setting_default or (
setting_default.startswith("/") and
os.path.exists(setting_default)):
setting_default = dynamic
if setting_default != dynamic:
setting_default = repr(deep_force_unicode(setting_default))
lines.extend(["", settings_label])
lines.extend(["", settings_name, "-" * len(settings_name)])
lines.extend(["",
urlize(setting["description"] or "").replace(
"<a href=\"", "`").replace(
"\" rel=\"nofollow\">", " <").replace(
"</a>", ">`_")])
if setting["choices"]:
choices = ", ".join(["%s: ``%s``" % (str(v), force_text(k))
for k, v in setting["choices"]])
lines.extend(["", "Choices: %s" % choices, ""])
lines.extend(["", "Default: ``%s``" % setting_default])
with open(os.path.join(docs_path, "settings.rst"), "w") as f:
f.write("\n".join(lines).replace("u'", "'").replace("yo'",
"you'").replace("'", "'"))
def build_deploy_docs(docs_path):
try:
from fabric.main import load_fabfile
except ImportError:
warn("Couldn't build fabfile.rst, fabric not installed")
return
project_template_path = path_for_import("mezzanine.project_template")
commands = load_fabfile(os.path.join(project_template_path, "fabfile"))[1]
lines = []
for name in sorted(commands.keys()):
doc = commands[name].__doc__.strip().split("\n")[0]
lines.append(" * ``fab %s`` - %s" % (name, doc))
with open(os.path.join(docs_path, "fabfile.rst"), "w") as f:
f.write("\n".join(lines))
# Python complains if this is inside build_changelog which uses exec.
_changeset_date = lambda cs: datetime.fromtimestamp(cs.date()[0])
def build_changelog(docs_path, package_name="mezzanine"):
"""
Converts Mercurial commits into a changelog in RST format.
"""
project_path = os.path.join(docs_path, "..")
version_file = os.path.join(package_name, "__init__.py")
version_var = "__version__"
changelog_filename = "CHANGELOG"
changelog_file = os.path.join(project_path, changelog_filename)
versions = OrderedDict()
repo = None
ignore = ("AUTHORS", "formatting", "typo", "pep8", "pep 8",
"whitespace", "README", "trans", "print debug",
"debugging", "tabs", "style", "sites", "ignore",
"tweak", "cleanup", "minor", "for changeset",
".com``", "oops", "syntax")
hotfixes = {
"40cbc47b8d8a": "1.0.9",
"a25749986abc": "1.0.10",
}
# Load the repo.
try:
from mercurial import ui, hg, error
from mercurial.commands import tag
except ImportError:
pass
else:
try:
ui = ui.ui()
repo = hg.repository(ui, project_path)
except error.RepoError:
return
if repo is None:
return
# Go through each changeset and assign it to the versions dict.
changesets = [repo.changectx(changeset) for changeset in repo.changelog]
for cs in sorted(changesets, reverse=True, key=_changeset_date):
# Check if the file with the version number is in this changeset
# and if it is, pull it out and assign it as a variable.
files = cs.files()
new_version = False
# Commit message cleanup hacks.
description = cs.description().decode("utf-8")
description = description.rstrip(".").replace("\n", ". ")
while " " in description:
description = description.replace(" ", " ")
description = description.replace(". . ", ". ").replace("...", ",")
while ".." in description:
description = description.replace("..", ".")
description = description.replace(":.", ":").replace("n'. t", "n't")
words = description.split()
# Format var names in commit.
for i, word in enumerate(words):
if (set("._") & set(word[:-1]) and set(letters) & set(word) and
"`" not in word and not word[0].isdigit()):
last = ""
if word[-1] in ",.":
last, word = word[-1], word[:-1]
words[i] = "``%s``%s" % (word, last)
description = " ".join(words)
if version_file in files:
for line in cs[version_file].data().split("\n"):
if line.startswith(version_var):
exec(line)
if locals()[version_var] == "0.1.0":
locals()[version_var] = "1.0.0"
break
versions[locals()[version_var]] = {
"changes": [],
"date": _changeset_date(cs).strftime("%b %d, %Y")
}
new_version = len(files) == 1
# Tag new versions.
hotfix = hotfixes.get(cs.hex()[:12])
if hotfix or new_version:
if hotfix:
version_tag = hotfix
else:
try:
version_tag = locals()[version_var]
except KeyError:
version_tag = None
if version_tag and version_tag not in cs.tags():
try:
tag(ui, repo, version_tag, rev=cs.hex())
print("Tagging version %s" % version_tag)
except:
pass
# Ignore changesets that are merges, bumped the version, closed
# a branch, regenerated the changelog itself, contain an ignore
# word, or contain too few words to be meaningful.
merge = len(cs.parents()) > 1
branch_closed = len(files) == 0
changelog_update = changelog_filename in files
ignored = [w for w in ignore if w.lower() in description.lower()]
too_few_words = len(description.split()) <= 3
if (merge or new_version or branch_closed or changelog_update or
ignored or too_few_words):
continue
# Ensure we have a current version and if so, add this changeset's
# description to it.
version = None
try:
version = locals()[version_var]
except KeyError:
if not hotfix:
continue
user = cs.user().decode("utf-8").split("<")[0].strip()
entry = "%s - %s" % (description, user)
if hotfix or entry not in versions[version]["changes"]:
if hotfix:
versions[hotfix] = {
"changes": [entry],
"date": _changeset_date(cs).strftime("%b %d, %Y"),
}
else:
versions[version]["changes"].insert(0, entry)
# Write out the changelog.
with open(changelog_file, "w") as f:
for version, version_info in versions.items():
header = "Version %s (%s)" % (version, version_info["date"])
f.write("%s\n" % header)
f.write("%s\n" % ("-" * len(header)))
f.write("\n")
if version_info["changes"]:
for change in version_info["changes"]:
f.write(" * %s\n" % change)
else:
f.write(" * No changes listed.\n")
f.write("\n")
def build_modelgraph(docs_path, package_name="mezzanine"):
"""
Creates a diagram of all the models for mezzanine and the given
package name, generates a smaller version and add it to the
docs directory for use in model-graph.rst
"""
to_path = os.path.join(docs_path, "img", "graph.png")
build_path = os.path.join(docs_path, "build", "_images")
resized_path = os.path.join(os.path.dirname(to_path), "graph-small.png")
settings = import_dotted_path(package_name +
".project_template.project_name.settings")
apps = [a.rsplit(".")[1] for a in settings.INSTALLED_APPS
if a.startswith("mezzanine.") or a.startswith(package_name + ".")]
try:
from django_extensions.management.commands import graph_models
except ImportError:
warn("Couldn't build model_graph, django_extensions not installed")
else:
options = {"inheritance": True, "outputfile": "graph.png",
"layout": "dot"}
try:
graph_models.Command().execute(*apps, **options)
except Exception as e:
warn("Couldn't build model_graph, graph_models failed on: %s" % e)
else:
try:
move("graph.png", to_path)
except OSError as e:
warn("Couldn't build model_graph, move failed on: %s" % e)
# docs/img/graph.png should exist in the repo - move it to the build path.
try:
if not os.path.exists(build_path):
os.makedirs(build_path)
copyfile(to_path, os.path.join(build_path, "graph.png"))
except OSError as e:
warn("Couldn't build model_graph, copy to build failed on: %s" % e)
try:
from PIL import Image
image = Image.open(to_path)
        # Rescale the pixel data to 800px wide, keeping the aspect ratio.
        width = 800
        height = image.size[1] * width // image.size[0]
        image = image.resize((width, height), Image.ANTIALIAS)
        image.save(resized_path, "PNG", quality=100)
except Exception as e:
warn("Couldn't build model_graph, resize failed on: %s" % e)
return
# Copy the dashboard screenshot to the build dir too. This doesn't
# really belong anywhere, so we do it here since this is the only
# spot we deal with doc images.
d = "dashboard.png"
copyfile(os.path.join(docs_path, "img", d), os.path.join(build_path, d))
def build_requirements(docs_path, package_name="mezzanine"):
"""
Updates the requirements file with Mezzanine's version number.
"""
mezz_string = "Mezzanine=="
project_path = os.path.join(docs_path, "..")
requirements_file = os.path.join(project_path, package_name,
"project_template", "requirements.txt")
with open(requirements_file, "r") as f:
requirements = f.readlines()
with open(requirements_file, "w") as f:
f.write("Mezzanine==%s\n" % __version__)
for requirement in requirements:
if requirement.strip() and not requirement.startswith(mezz_string):
f.write(requirement)
| bsd-2-clause |
geimer/easybuild-easyconfigs | test/easyconfigs/suite.py | 8 | 2616 | #!/usr/bin/python
##
# Copyright 2012-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
This script is a collection of all the testcases for easybuild-easyconfigs.
Usage: "python -m easybuild.easyconfigs.test.suite.py" or "./easybuild/easyconfigs/test/suite.py"
@author: Toon Willems (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import shutil
import sys
import tempfile
import unittest
from vsc import fancylogger
import easybuild.tools.build_log # initialize EasyBuild logging, so we disable it
import test.easyconfigs.easyconfigs as e
# disable all logging to significantly speed up tests
fancylogger.disableDefaultHandlers()
fancylogger.setLogLevelError()
os.environ['EASYBUILD_TMP_LOGDIR'] = tempfile.mkdtemp(prefix='easyconfigs_test_')
# call suite() for each module and then run them all
SUITE = unittest.TestSuite([x.suite() for x in [e]])
# uses XMLTestRunner if possible, so we can output an XML file that can be supplied to Jenkins
xml_msg = ""
try:
import xmlrunner # requires unittest-xml-reporting package
xml_dir = 'test-reports'
res = xmlrunner.XMLTestRunner(output=xml_dir, verbosity=1).run(SUITE)
xml_msg = ", XML output of tests available in %s directory" % xml_dir
except ImportError, err:
sys.stderr.write("WARNING: xmlrunner module not available, falling back to using unittest...\n\n")
res = unittest.TextTestRunner().run(SUITE)
shutil.rmtree(os.environ['EASYBUILD_TMP_LOGDIR'])
del os.environ['EASYBUILD_TMP_LOGDIR']
if not res.wasSuccessful():
sys.stderr.write("ERROR: Not all tests were successful.\n")
sys.exit(2)
| gpl-2.0 |
sdrdl/sdipylib | sdipylib/geo.py | 1 | 1843 | """Support functions for geographic operations"""
def aspect(df):
"""Return the aspect ratio of a Geopandas dataset"""
tb = df.total_bounds
return abs((tb[0] - tb[2]) / (tb[1] - tb[3]))
def scale(df, x):
"""Given an x dimension, return the x and y dimensions to maintain the dataframe aspect ratio"""
return (x, x / aspect(df))
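# Worked example: for total bounds spanning 20 units in x and 10 in y,
# aspect(df) is 2.0, so scale(df, 8) returns (8, 4.0) -- an 8x4 figure.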
def aspect_fig_size(df, width, subplots='111', **kwargs):
"""
Create a matplotlib figure and axis with a given X width and a height
to keep the boundary box aspect ratio.
:param df: Geopandas GeoDataFrame, from which to calculate the aspect ratio
:param width: X dimension, in inches, of the plot
:param subplots: A Matplotlib subplots string
:param kwargs: Other arguments for plt.figure
:return:
"""
import matplotlib.pylab as plt
fig = plt.figure(figsize = scale(df, width), **kwargs)
ax = fig.add_subplot(subplots)
return fig, ax
def total_centroid(df):
return list(reversed(df.geometry.unary_union.centroid.coords[0]))
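# Note: shapely coordinates are (x, y) == (lon, lat) while folium expects
# (lat, lon); the reversed() above performs that swap.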
def folium_map(df, data_column, tiles='Stamen Toner', fill_color='RdYlGn', zoom_start=12, **kwargs):
import folium
mapa = folium.Map(location=total_centroid(df),
tiles=tiles, zoom_start=zoom_start)
if not df.crs:
df.crs = {'init' :'epsg:4326'}
#threshold_scale = np.linspace(_['non_min_r'].min(),
# _['non_min_r'].max(), 6, dtype=float).tolist()
choro_args = dict(
fill_color=fill_color,
fill_opacity=.6,
line_weight=.7
)
mapa.choropleth(geo_data=df.reset_index(),
data=df.reset_index(),
key_on='feature.properties.geoid',
columns=['geoid',data_column],
**choro_args
)
return mapa
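# Minimal usage sketch (assumes a GeoDataFrame ``tracts`` carrying a
# 'geoid' column, as the choropleth key_on above expects, plus a numeric
# 'pop' column):
#
#   m = folium_map(tracts, 'pop', zoom_start=11)
#   m.save('choropleth.html')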
| bsd-2-clause |
samueldotj/TeeRISC-Simulator | util/stats/barchart.py | 90 | 12472 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Lisa Hsu
import matplotlib, pylab
from matplotlib.font_manager import FontProperties
from matplotlib.numerix import array, arange, reshape, shape, transpose, zeros
from matplotlib.numerix import Float
from matplotlib.ticker import NullLocator
matplotlib.interactive(False)
from chart import ChartOptions
class BarChart(ChartOptions):
def __init__(self, default=None, **kwargs):
super(BarChart, self).__init__(default, **kwargs)
self.inputdata = None
self.chartdata = None
self.inputerr = None
self.charterr = None
def gen_colors(self, count):
cmap = matplotlib.cm.get_cmap(self.colormap)
if count == 1:
return cmap([ 0.5 ])
if count < 5:
return cmap(arange(5) / float(4))[:count]
return cmap(arange(count) / float(count - 1))
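    # e.g. (illustrative) count == 3 samples the colormap at 0.0, 0.25 and
    # 0.5 (the first three of five evenly spaced points); count >= 5
    # samples count points spread evenly from 0.0 to 1.0.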
    # The input data format is deliberately intuitive, so it does not
    # match the data format that the graph function takes. The conversion
    # from input data format to chart data format depends on the
    # dimensionality of the input data. Check here for the
    # dimensionality and correctness of the input data.
def set_data(self, data):
if data is None:
self.inputdata = None
self.chartdata = None
return
data = array(data)
dim = len(shape(data))
if dim not in (1, 2, 3):
raise AttributeError, "Input data must be a 1, 2, or 3d matrix"
self.inputdata = data
# If the input data is a 1d matrix, then it describes a
# standard bar chart.
if dim == 1:
self.chartdata = array([[data]])
# If the input data is a 2d matrix, then it describes a bar
# chart with groups. The matrix being an array of groups of
# bars.
if dim == 2:
self.chartdata = transpose([data], axes=(2,0,1))
# If the input data is a 3d matrix, then it describes an array
# of groups of bars with each bar being an array of stacked
# values.
if dim == 3:
self.chartdata = transpose(data, axes=(1,2,0))
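        # The 1d and 2d cases are promoted to 3d above; for 3d input,
        # transpose(axes=(1, 2, 0)) maps shape (a, b, c) to (b, c, a).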
def get_data(self):
return self.inputdata
data = property(get_data, set_data)
def set_err(self, err):
if err is None:
self.inputerr = None
self.charterr = None
return
err = array(err)
dim = len(shape(err))
if dim not in (1, 2, 3):
raise AttributeError, "Input err must be a 1, 2, or 3d matrix"
self.inputerr = err
if dim == 1:
self.charterr = array([[err]])
if dim == 2:
self.charterr = transpose([err], axes=(2,0,1))
if dim == 3:
self.charterr = transpose(err, axes=(1,2,0))
def get_err(self):
return self.inputerr
err = property(get_err, set_err)
# Graph the chart data.
# Input is a 3d matrix that describes a plot that has multiple
# groups, multiple bars in each group, and multiple values stacked
# in each bar. The underlying bar() function expects a sequence of
# bars in the same stack location and same group location, so the
# organization of the matrix is that the inner most sequence
# represents one of these bar groups, then those are grouped
# together to make one full stack of bars in each group, and then
# the outer most layer describes the groups. Here is an example
# data set and how it gets plotted as a result.
#
# e.g. data = [[[10,11,12], [13,14,15], [16,17,18], [19,20,21]],
# [[22,23,24], [25,26,27], [28,29,30], [31,32,33]]]
#
# will plot like this:
#
# 19 31 20 32 21 33
# 16 28 17 29 18 30
# 13 25 14 26 15 27
# 10 22 11 23 12 24
#
    # Because this arrangement is rather counterintuitive, the rearrange
    # function takes various matrices and arranges them to fit this
    # profile.
    #
    # This code also handles the case where one of the dimensions of the
    # matrix is one wide.
    #
def graph(self):
if self.chartdata is None:
raise AttributeError, "Data not set for bar chart!"
dim = len(shape(self.inputdata))
cshape = shape(self.chartdata)
if self.charterr is not None and shape(self.charterr) != cshape:
raise AttributeError, 'Dimensions of error and data do not match'
if dim == 1:
colors = self.gen_colors(cshape[2])
colors = [ [ colors ] * cshape[1] ] * cshape[0]
if dim == 2:
colors = self.gen_colors(cshape[0])
colors = [ [ [ c ] * cshape[2] ] * cshape[1] for c in colors ]
if dim == 3:
colors = self.gen_colors(cshape[1])
colors = [ [ [ c ] * cshape[2] for c in colors ] ] * cshape[0]
colors = array(colors)
self.figure = pylab.figure(figsize=self.chart_size)
outer_axes = None
inner_axes = None
if self.xsubticks is not None:
color = self.figure.get_facecolor()
self.metaaxes = self.figure.add_axes(self.figure_size,
axisbg=color, frameon=False)
for tick in self.metaaxes.xaxis.majorTicks:
tick.tick1On = False
tick.tick2On = False
self.metaaxes.set_yticklabels([])
self.metaaxes.set_yticks([])
size = [0] * 4
size[0] = self.figure_size[0]
size[1] = self.figure_size[1] + .12
size[2] = self.figure_size[2]
size[3] = self.figure_size[3] - .12
self.axes = self.figure.add_axes(size)
outer_axes = self.metaaxes
inner_axes = self.axes
else:
self.axes = self.figure.add_axes(self.figure_size)
outer_axes = self.axes
inner_axes = self.axes
bars_in_group = len(self.chartdata)
width = 1.0 / ( bars_in_group + 1)
center = width / 2
bars = []
for i,stackdata in enumerate(self.chartdata):
bottom = array([0.0] * len(stackdata[0]), Float)
stack = []
for j,bardata in enumerate(stackdata):
bardata = array(bardata)
ind = arange(len(bardata)) + i * width + center
yerr = None
if self.charterr is not None:
yerr = self.charterr[i][j]
bar = self.axes.bar(ind, bardata, width, bottom=bottom,
color=colors[i][j], yerr=yerr)
if self.xsubticks is not None:
self.metaaxes.bar(ind, [0] * len(bardata), width)
stack.append(bar)
bottom += bardata
bars.append(stack)
if self.xlabel is not None:
outer_axes.set_xlabel(self.xlabel)
if self.ylabel is not None:
inner_axes.set_ylabel(self.ylabel)
if self.yticks is not None:
ymin, ymax = self.axes.get_ylim()
nticks = float(len(self.yticks))
ticks = arange(nticks) / (nticks - 1) * (ymax - ymin) + ymin
inner_axes.set_yticks(ticks)
inner_axes.set_yticklabels(self.yticks)
elif self.ylim is not None:
inner_axes.set_ylim(self.ylim)
if self.xticks is not None:
outer_axes.set_xticks(arange(cshape[2]) + .5)
outer_axes.set_xticklabels(self.xticks)
if self.xsubticks is not None:
numticks = (cshape[0] + 1) * cshape[2]
inner_axes.set_xticks(arange(numticks) * width + 2 * center)
xsubticks = list(self.xsubticks) + [ '' ]
inner_axes.set_xticklabels(xsubticks * cshape[2], fontsize=7,
rotation=30)
if self.legend is not None:
if dim == 1:
lbars = bars[0][0]
if dim == 2:
lbars = [ bars[i][0][0] for i in xrange(len(bars))]
if dim == 3:
number = len(bars[0])
lbars = [ bars[0][number - j - 1][0] for j in xrange(number)]
if self.fig_legend:
self.figure.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
else:
self.axes.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
if self.title is not None:
self.axes.set_title(self.title)
def savefig(self, name):
self.figure.savefig(name)
def savecsv(self, name):
f = file(name, 'w')
data = array(self.inputdata)
dim = len(data.shape)
if dim == 1:
#if self.xlabel:
# f.write(', '.join(list(self.xlabel)) + '\n')
f.write(', '.join([ '%f' % val for val in data]) + '\n')
if dim == 2:
#if self.xlabel:
# f.write(', '.join([''] + list(self.xlabel)) + '\n')
for i,row in enumerate(data):
ylabel = []
#if self.ylabel:
# ylabel = [ self.ylabel[i] ]
f.write(', '.join(ylabel + [ '%f' % v for v in row]) + '\n')
if dim == 3:
f.write("don't do 3D csv files\n")
pass
f.close()
if __name__ == '__main__':
from random import randrange
import random, sys
dim = 3
number = 5
args = sys.argv[1:]
if len(args) > 3:
sys.exit("invalid number of arguments")
elif len(args) > 0:
myshape = [ int(x) for x in args ]
else:
myshape = [ 3, 4, 8 ]
# generate a data matrix of the given shape
size = reduce(lambda x,y: x*y, myshape)
#data = [ random.randrange(size - i) + 10 for i in xrange(size) ]
data = [ float(i)/100.0 for i in xrange(size) ]
data = reshape(data, myshape)
# setup some test bar charts
if True:
chart1 = BarChart()
chart1.data = data
chart1.xlabel = 'Benchmark'
chart1.ylabel = 'Bandwidth (GBps)'
chart1.legend = [ 'x%d' % x for x in xrange(myshape[-1]) ]
chart1.xticks = [ 'xtick%d' % x for x in xrange(myshape[0]) ]
chart1.title = 'this is the title'
if len(myshape) > 2:
chart1.xsubticks = [ '%d' % x for x in xrange(myshape[1]) ]
chart1.graph()
chart1.savefig('/tmp/test1.png')
chart1.savefig('/tmp/test1.ps')
chart1.savefig('/tmp/test1.eps')
chart1.savecsv('/tmp/test1.csv')
if False:
chart2 = BarChart()
chart2.data = data
chart2.colormap = 'gray'
chart2.graph()
chart2.savefig('/tmp/test2.png')
chart2.savefig('/tmp/test2.ps')
# pylab.show()
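# A minimal illustrative sketch (not part of the original file): the simplest
# 1d use of BarChart, mirroring the test code above; the values and output
# path are made up.
def _example_simple_chart():
    chart = BarChart()
    chart.data = [1.0, 2.0, 3.0]       # 1d input -> one bar per value
    chart.xticks = ['a', 'b', 'c']
    chart.ylabel = 'value'
    chart.graph()
    chart.savefig('/tmp/simple.png')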
| bsd-3-clause |
yjxtogo/horizon | openstack_dashboard/dashboards/project/stacks/urls.py | 56 | 1765 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.stacks import views
urlpatterns = patterns(
'',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^select_template$',
views.SelectTemplateView.as_view(),
name='select_template'),
url(r'^launch$', views.CreateStackView.as_view(), name='launch'),
url(r'^preview_template$',
views.PreviewTemplateView.as_view(), name='preview_template'),
url(r'^preview$', views.PreviewStackView.as_view(), name='preview'),
url(r'^preview_details$',
views.PreviewStackDetailsView.as_view(), name='preview_details'),
url(r'^stack/(?P<stack_id>[^/]+)/$',
views.DetailView.as_view(), name='detail'),
url(r'^(?P<stack_id>[^/]+)/change_template$',
views.ChangeTemplateView.as_view(), name='change_template'),
url(r'^(?P<stack_id>[^/]+)/edit_stack$',
views.EditStackView.as_view(), name='edit_stack'),
url(r'^stack/(?P<stack_id>[^/]+)/(?P<resource_name>[^/]+)/$',
views.ResourceView.as_view(), name='resource'),
url(r'^get_d3_data/(?P<stack_id>[^/]+)/$',
views.JSONView.as_view(), name='d3_data'),
)
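# Illustrative only (not part of the original file): callers would typically
# reverse these named patterns; the 'horizon:project:stacks' namespace is an
# assumption based on this module's location in the dashboard tree.
#   from django.core.urlresolvers import reverse
#   reverse('horizon:project:stacks:detail', args=[stack_id])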
| apache-2.0 |
splav/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/tests/browsers/test_sauce.py | 11 | 6268 | import sys
import mock
import pytest
from os.path import join, dirname
sys.path.insert(0, join(dirname(__file__), "..", "..", ".."))
sauce = pytest.importorskip("wptrunner.browsers.sauce")
from wptserve.config import ConfigBuilder
def test_sauceconnect_success():
with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
mock.patch.object(sauce.subprocess, "Popen") as Popen,\
mock.patch.object(sauce.os.path, "exists") as exists:
# Act as if it's still running
Popen.return_value.poll.return_value = None
Popen.return_value.returncode = None
# Act as if file created
exists.return_value = True
sauce_connect = sauce.SauceConnect(
sauce_user="aaa",
sauce_key="bbb",
sauce_tunnel_id="ccc",
sauce_connect_binary="ddd",
sauce_connect_args=[])
with ConfigBuilder(browser_host="example.net") as env_config:
sauce_connect(None, env_config)
with sauce_connect:
pass
@pytest.mark.parametrize("readyfile,returncode", [
(True, 0),
(True, 1),
(True, 2),
(False, 0),
(False, 1),
(False, 2),
])
def test_sauceconnect_failure_exit(readyfile, returncode):
with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
mock.patch.object(sauce.subprocess, "Popen") as Popen,\
mock.patch.object(sauce.os.path, "exists") as exists,\
mock.patch.object(sauce.time, "sleep") as sleep:
Popen.return_value.poll.return_value = returncode
Popen.return_value.returncode = returncode
exists.return_value = readyfile
sauce_connect = sauce.SauceConnect(
sauce_user="aaa",
sauce_key="bbb",
sauce_tunnel_id="ccc",
sauce_connect_binary="ddd",
sauce_connect_args=[])
with ConfigBuilder(browser_host="example.net") as env_config:
sauce_connect(None, env_config)
with pytest.raises(sauce.SauceException):
with sauce_connect:
pass
# Given we appear to exit immediately with these mocks, sleep shouldn't be called
sleep.assert_not_called()
def test_sauceconnect_cleanup():
"""Ensure that execution pauses when the process is closed while exiting
    the context manager. This allows Sauce Connect to close any active
tunnels."""
with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
mock.patch.object(sauce.subprocess, "Popen") as Popen,\
mock.patch.object(sauce.os.path, "exists") as exists,\
mock.patch.object(sauce.time, "sleep") as sleep:
Popen.return_value.poll.return_value = True
Popen.return_value.returncode = None
exists.return_value = True
sauce_connect = sauce.SauceConnect(
sauce_user="aaa",
sauce_key="bbb",
sauce_tunnel_id="ccc",
sauce_connect_binary="ddd",
sauce_connect_args=[])
with ConfigBuilder(browser_host="example.net") as env_config:
sauce_connect(None, env_config)
with sauce_connect:
Popen.return_value.poll.return_value = None
sleep.assert_not_called()
sleep.assert_called()
def test_sauceconnect_failure_never_ready():
with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
mock.patch.object(sauce.subprocess, "Popen") as Popen,\
mock.patch.object(sauce.os.path, "exists") as exists,\
mock.patch.object(sauce.time, "sleep") as sleep:
Popen.return_value.poll.return_value = None
Popen.return_value.returncode = None
exists.return_value = False
sauce_connect = sauce.SauceConnect(
sauce_user="aaa",
sauce_key="bbb",
sauce_tunnel_id="ccc",
sauce_connect_binary="ddd",
sauce_connect_args=[])
with ConfigBuilder(browser_host="example.net") as env_config:
sauce_connect(None, env_config)
with pytest.raises(sauce.SauceException):
with sauce_connect:
pass
# We should sleep while waiting for it to create the readyfile
sleep.assert_called()
# Check we actually kill it after termination fails
Popen.return_value.terminate.assert_called()
Popen.return_value.kill.assert_called()
def test_sauceconnect_tunnel_domains():
with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
mock.patch.object(sauce.subprocess, "Popen") as Popen,\
mock.patch.object(sauce.os.path, "exists") as exists:
Popen.return_value.poll.return_value = None
Popen.return_value.returncode = None
exists.return_value = True
sauce_connect = sauce.SauceConnect(
sauce_user="aaa",
sauce_key="bbb",
sauce_tunnel_id="ccc",
sauce_connect_binary="ddd",
sauce_connect_args=[])
with ConfigBuilder(browser_host="example.net",
alternate_hosts={"alt": "example.org"},
subdomains={"a", "b"},
not_subdomains={"x", "y"}) as env_config:
sauce_connect(None, env_config)
with sauce_connect:
Popen.assert_called_once()
args, kwargs = Popen.call_args
cmd = args[0]
assert "--tunnel-domains" in cmd
i = cmd.index("--tunnel-domains")
rest = cmd[i+1:]
assert len(rest) >= 1
if len(rest) > 1:
assert rest[1].startswith("-"), "--tunnel-domains takes a comma separated list (not a space separated list)"
assert set(rest[0].split(",")) == {'example.net',
'a.example.net',
'b.example.net',
'example.org',
'a.example.org',
'b.example.org'}
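# Illustrative invocation (not part of the original file): these are plain
# pytest tests, and sauce is importorskip'd above, so something like
#   pytest test_sauce.py -v
# would skip cleanly when wptrunner's sauce module is unavailable.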
| mpl-2.0 |
HPENetworking/HPEIMCUtils | PythonUtilities/Gather_IMC_Data/Gather_System_Device_Categories/gather_system_device_categories.py | 1 | 1524 |
#!/usr/bin/env python3
# author: @netmanchris
"""
Copyright 2016 Hewlett Packard Enterprise Development LP.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" This file will take the GET the contents of the HPE IMC Network Assets module and dump them into a CSV file called
all_assets.csv where each line of the CSV file represents one physical or logical asset as discovered by the HPE IMC
platform.
This library uses the pyhpeimc python wrapper around the IMC RESTful API to automatically push the new performance tasks
with minimal effort on the part of the user."""
import csv
from pyhpeimc.auth import *
from pyhpeimc.plat.device import *
auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
all_device_categories = get_system_category(auth.creds, auth.url)
keys = all_device_categories[0].keys()
with open('all_device_categories.csv', 'w') as csvfile:
    dict_writer = csv.DictWriter(csvfile, keys)
    dict_writer.writeheader()
    dict_writer.writerows(all_device_categories)
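# A minimal sketch (illustrative, not in the original) of reading the export
# back for further processing; the filename matches the one written above.
def _read_exported_categories():
    with open('all_device_categories.csv') as f:
        return list(csv.DictReader(f))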
| apache-2.0 |
mhostetter/gnuradio | gr-audio/examples/python/noise.py | 58 | 1968 | #!/usr/bin/env python
#
# Copyright 2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio import digital
from gnuradio.eng_option import eng_option
from optparse import OptionParser
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
parser = OptionParser(option_class=eng_option)
parser.add_option("-O", "--audio-output", type="string", default="",
help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
help="set sample rate to RATE (48000)")
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
sample_rate = int(options.sample_rate)
ampl = 0.1
src = digital.glfsr_source_b(32) # Pseudorandom noise source
b2f = digital.chunks_to_symbols_bf([ampl, -ampl], 1)
dst = audio.sink(sample_rate, options.audio_output)
self.connect(src, b2f, dst)
if __name__ == '__main__':
try:
my_top_block().run()
except KeyboardInterrupt:
pass
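# Illustrative invocation (not part of the original file), using the two
# options parsed above; the ALSA device name follows the example in the
# option's own help text:
#   python noise.py -r 48000 -O hw:0,0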
| gpl-3.0 |
trezorg/django | tests/regressiontests/select_related_regress/models.py | 92 | 2390 | from django.db import models
class Building(models.Model):
name = models.CharField(max_length=10)
def __unicode__(self):
return u"Building: %s" % self.name
class Device(models.Model):
building = models.ForeignKey('Building')
name = models.CharField(max_length=10)
def __unicode__(self):
return u"device '%s' in building %s" % (self.name, self.building)
class Port(models.Model):
device = models.ForeignKey('Device')
port_number = models.CharField(max_length=10)
def __unicode__(self):
return u"%s/%s" % (self.device.name, self.port_number)
class Connection(models.Model):
start = models.ForeignKey(Port, related_name='connection_start',
unique=True)
end = models.ForeignKey(Port, related_name='connection_end', unique=True)
def __unicode__(self):
return u"%s to %s" % (self.start, self.end)
# Another non-tree hierarchy that exercises code paths similar to the above
# example, but in a slightly different configuration.
class TUser(models.Model):
name = models.CharField(max_length=200)
class Person(models.Model):
user = models.ForeignKey(TUser, unique=True)
class Organizer(models.Model):
person = models.ForeignKey(Person)
class Student(models.Model):
person = models.ForeignKey(Person)
class Class(models.Model):
org = models.ForeignKey(Organizer)
class Enrollment(models.Model):
std = models.ForeignKey(Student)
cls = models.ForeignKey(Class)
# Models for testing bug #8036.
class Country(models.Model):
name = models.CharField(max_length=50)
class State(models.Model):
name = models.CharField(max_length=50)
country = models.ForeignKey(Country)
class ClientStatus(models.Model):
name = models.CharField(max_length=50)
class Client(models.Model):
name = models.CharField(max_length=50)
state = models.ForeignKey(State, null=True)
status = models.ForeignKey(ClientStatus)
class SpecialClient(Client):
value = models.IntegerField()
# Some model inheritance exercises
class Parent(models.Model):
name = models.CharField(max_length=10)
def __unicode__(self):
return self.name
class Child(Parent):
value = models.IntegerField()
class Item(models.Model):
name = models.CharField(max_length=10)
child = models.ForeignKey(Child, null=True)
def __unicode__(self):
return self.name
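# Illustrative queries these models were written to exercise (assumptions,
# not part of the original file): select_related() across the non-tree
# relations above, e.g.
#   Connection.objects.select_related('start__device__building',
#                                     'end__device__building')
#   SpecialClient.objects.select_related('state__country', 'status')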
| bsd-3-clause |
synaptek/libchromiumcontent | tools/generate_filenames_gypi.py | 3 | 2342 | #!/usr/bin/env python
import glob
import os
import sys
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
SHARED_LIBRARY_SUFFIX = {
'darwin': 'dylib',
'linux': 'so',
'win32': 'dll',
}[TARGET_PLATFORM]
STATIC_LIBRARY_SUFFIX = {
'darwin': 'a',
'linux': 'a',
'win32': 'lib',
}[TARGET_PLATFORM]
EXCLUDE_SHARED_LIBRARIES = {
'darwin': [
],
'linux': [
],
'win32': [
'd3dcompiler_47.dll',
'libEGL.dll',
'libGLESv2.dll',
],
}[TARGET_PLATFORM]
EXCLUDE_STATIC_LIBRARIES = {
'darwin': [
'libv8_nosnapshot.a',
],
'linux': [
'libprotobuf_full_do_not_use.a',
'libgenperf_libs.a',
'libv8_nosnapshot.a',
],
'win32': [
'libEGL.dll.lib',
'libGLESv2.dll.lib',
],
}[TARGET_PLATFORM]
GYPI_TEMPLATE = """\
{
'variables': {
'libchromiumcontent_root_dir': %(src)s,
'libchromiumcontent_shared_libraries': %(shared_libraries)s,
'libchromiumcontent_shared_v8_libraries': %(shared_v8_libraries)s,
'libchromiumcontent_static_libraries': %(static_libraries)s,
'libchromiumcontent_static_v8_libraries': %(static_v8_libraries)s,
},
}
"""
def main(target_file, shared_src, static_src):
  (shared_libraries, shared_v8_libraries) = search_files(
      shared_src, SHARED_LIBRARY_SUFFIX, EXCLUDE_SHARED_LIBRARIES)
  (static_libraries, static_v8_libraries) = search_files(
      static_src, STATIC_LIBRARY_SUFFIX, EXCLUDE_STATIC_LIBRARIES)
content = GYPI_TEMPLATE % {
'src': repr(os.path.abspath(os.path.dirname(target_file))),
'shared_libraries': shared_libraries,
'shared_v8_libraries': shared_v8_libraries,
'static_libraries': static_libraries,
'static_v8_libraries': static_v8_libraries,
}
with open(target_file, 'wb+') as f:
f.write(content)
def search_files(src, suffix, exclude):
files = glob.glob(os.path.join(src, '*.' + suffix))
files = [f for f in files if os.path.basename(f) not in exclude]
return ([os.path.abspath(f) for f in files if not is_v8_library(f)],
[os.path.abspath(f) for f in files if is_v8_library(f)])
def is_v8_library(p):
return (os.path.basename(p).startswith(('v8', 'libv8')) or
os.path.basename(p).startswith(('icu', 'libicu')))
if __name__ == '__main__':
sys.exit(main(sys.argv[1], sys.argv[2], sys.argv[3]))
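# Illustrative invocation (the paths are assumptions, not from the original):
#   python generate_filenames_gypi.py filenames.gypi ./shared ./static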
| mit |
boryszef/moltools-python | tests/test_trajectory_xyz.py | 1 | 9849 | import unittest
import tempfile
import random
import os
import stat
import numpy
import mdarray as mt
# Return random string of spaces and tabs
def rndb():
blanks = [' ', '\t']
l = [ random.choice(blanks) for i in range(random.randint(0, 5)) ]
random.shuffle(l)
return "".join(l)
atomicMasses = {
'H':(1,1.008), 'C':(6,12.011), 'N':(7,14.007), 'O':(8,15.999),
'P':(15,30.973762), 'S':(16,32.06), 'Cl':(17,35.45), 'Na':(11,22.98976928),
'Cu':(29,63.546), 'Fe':(26,55.845) }
characters = "".join([chr(x) for x in range(ord('A'), ord('z')+1)])
class TestTrajectoryXYZ(unittest.TestCase):
def setUp(self):
# Create tempdir for XYZ files
self.tmpDir = tempfile.mkdtemp()
# Keep for comparison
self.data = []
# Write ten, very messy xyz files
self.nFiles = 10
for i in range(self.nFiles):
structure = {}
nAtoms = random.randint(1, 20)
structure['nAtoms'] = nAtoms
nFrames = random.randint(1, 10)
structure['nFrames'] = nFrames
xyz = open("%s/%d.xyz" % (self.tmpDir, i), "w")
asym = list(atomicMasses.keys())
symbols = [random.choice(asym) for x in range(nAtoms)]
structure['symbols'] = symbols[:]
structure['coordinates'] = []
structure['comments'] = []
for f in range(nFrames):
xyz.write("%s%d%s\n" % (rndb(), nAtoms, rndb()))
comLen = random.randint(0, 100)
comment = "".join([random.choice(characters) for x in range(comLen)])
structure['comments'].append(comment[:])
xyz.write("%s\n" % comment)
crd = numpy.random.uniform(-100, 100, (nAtoms, 3))
structure['coordinates'].append(crd.copy())
for a in range(nAtoms):
xyz.write("%s%s%s %.6f%s %.6f%s %.6f%s\n" % (rndb(),
symbols[a], rndb(), crd[a,0], rndb(), crd[a,1], rndb(),
crd[a,2], rndb()))
xyz.close()
self.data.append(structure)
# Use various units
nAtoms = 10
units = { "angs" : 1, "bohr" : 0.529177209, "nm" : 10 }
self.units_crd = numpy.random.uniform(-10, 10, (nAtoms, 3))
for u,f in units.items():
xyz = open("%s/%s.xyz" % (self.tmpDir, u), "w")
xyz.write("%d\n\n" % nAtoms)
for a in range(nAtoms):
xyz.write("He %.7f %.7f %.7f\n" % tuple(self.units_crd[a,:]/f))
xyz.close()
# Write file with extra data
nAtoms = 10
crd = numpy.random.uniform(-10, 10, (nAtoms, 4))
self.extra_data = crd[:,3]
xyz = open("%s/extra.xyz" % self.tmpDir, "w")
xyz.write("%d\n\n" % nAtoms)
for a in range(nAtoms):
xyz.write("Ar %.3f %.3f %.3f %.6f\n" % tuple(crd[a,:]))
xyz.close()
def tearDown(self):
# Remove files
for f in os.listdir(self.tmpDir):
if not f.startswith("."): os.remove(self.tmpDir+"/"+f)
# Remove directory
os.rmdir(self.tmpDir)
def test_initResult(self):
for i in range(self.nFiles):
absolute = "%s/%d.xyz" % (self.tmpDir, i)
traj = mt.Trajectory(absolute)
self.assertEqual(traj.fileName, absolute)
self.assertEqual(traj.nAtoms, self.data[i]['nAtoms'])
symbols = self.data[i]['symbols']
for a in range(self.data[i]['nAtoms']):
self.assertEqual(traj.symbols[a], symbols[a])
m = atomicMasses[symbols[a]]
self.assertAlmostEqual(traj.masses[a], m[1])
self.assertEqual(traj.aNumbers[a], m[0])
def test_readXYZ(self):
for i in range(self.nFiles):
absolute = "%s/%d.xyz" % (self.tmpDir, i)
traj = mt.Trajectory(absolute)
frameNo = 0
frame = traj.read()
while frame:
diff = frame['coordinates'] - self.data[i]['coordinates'][frameNo]
maxDiff = numpy.max(numpy.abs(diff))
self.assertTrue(maxDiff <= 1e-6)
comment = frame['comment']
self.assertEqual(comment, self.data[i]['comments'][frameNo])
frameNo += 1
frame = traj.read()
self.assertEqual(frameNo, len(self.data[i]['coordinates']))
def test_Units(self):
for u in ["angs", "bohr", "nm"]:
absolute = "%s/%s.xyz" % (self.tmpDir, u)
traj = mt.Trajectory(absolute, units=u)
frame = traj.read()
diff = frame['coordinates'] - self.units_crd
maxDiff = numpy.max(numpy.abs(diff))
self.assertTrue(maxDiff <= 1e-6)
def test_initExceptions(self):
self.assertRaises(TypeError, mt.Trajectory)
self.assertRaises(FileNotFoundError, mt.Trajectory, 'nonexistent.xyz')
denied = self.tmpDir+"/denied.xyz"
open(denied, "w").close()
os.chmod(denied, 0)
self.assertRaises(PermissionError, mt.Trajectory, denied)
os.chmod(denied, stat.S_IRWXU)
xyz = self.tmpDir+"/angs.xyz"
# Wrong format
self.assertRaises(ValueError, mt.Trajectory, xyz, format='fmt')
# Wrong mode
self.assertRaises(ValueError, mt.Trajectory, xyz, 'mode')
self.assertRaises(ValueError, mt.Trajectory, xyz, mode='mode')
# Wrong units
self.assertRaises(ValueError, mt.Trajectory, xyz, units='units')
# Symbols in 'read' mode
self.assertRaises(ValueError, mt.Trajectory, xyz, 'r', [])
self.assertRaises(ValueError, mt.Trajectory, xyz, 'r', symbols=[])
# 'append' mode but no symbols
self.assertRaises(ValueError, mt.Trajectory, xyz, 'a')
self.assertRaises(ValueError, mt.Trajectory, xyz, mode='a')
empty = self.tmpDir+"/empty"
open(empty, "w").close()
# Empty file and no format given
self.assertRaises(OSError, mt.Trajectory, empty)
nonempty = self.tmpDir+"/nonempty"
f = open(nonempty, "w")
f.write("x\n")
f.close()
# Guessing format fails
self.assertRaises(RuntimeError, mt.Trajectory, nonempty)
self.assertRaises(RuntimeError, mt.Trajectory, nonempty, 'a', [])
# Writing to an existing file
self.assertRaises(FileExistsError, mt.Trajectory, nonempty, 'w', [], format='XYZ')
# Wrong format
self.assertRaises(ValueError, mt.Trajectory, nonempty, 'a', [], format='fmt')
# Wrong units
self.assertRaises(ValueError, mt.Trajectory, nonempty, 'a', [], format='XYZ', units='units')
def test_WriteXYZ(self):
sym = ['C']
comment = 'blah'
crd = [1.1111111111,2,3]
for dtype in [ numpy.float16, numpy.float32,
numpy.float64, numpy.float128 ]:
x = numpy.array([crd], dtype=dtype)
xyz = self.tmpDir+"/out.xyz"
traj = mt.Trajectory(xyz, 'w', sym, format='XYZ', units='angs')
traj.write(x, comment=comment)
del traj
traj = mt.Trajectory(xyz)
self.assertEqual(traj.nAtoms, 1)
self.assertEqual(traj.symbols, sym)
frame = traj.read()
self.assertEqual(frame['comment'], comment)
diff = crd - frame['coordinates'][0,:]
maxDiff = numpy.max(numpy.abs(diff))
# 1e-8 is the precision of write_frame_to_xyz due to the format % 12.8f
epsilon = max(1e-8, numpy.finfo(dtype).resolution)
self.assertTrue(maxDiff <= epsilon)
# Try (and fail) to read more frames
frame = traj.read()
self.assertIsNone(frame)
os.remove(xyz)
def test_writeExceptions(self):
sym = ['C']
comment = 'blah'
crd = [1,2,3]
xyz = self.tmpDir+"/out.xyz"
# No symbols given
self.assertRaises(ValueError, mt.Trajectory, xyz, 'w', format='XYZ')
self.assertRaises(ValueError, mt.Trajectory, xyz, mode='w', format='XYZ')
traj = mt.Trajectory(xyz, 'w', sym, format='XYZ')
# Wrong shape of the matrix
x = numpy.array([], dtype=numpy.float)
self.assertRaises(RuntimeError, traj.write, x, comment=comment)
x = numpy.array([[[]]], dtype=numpy.float)
self.assertRaises(RuntimeError, traj.write, x, comment=comment)
# Wrong dimensions of the matrix
x = numpy.array([crd, crd], dtype=numpy.float)
self.assertRaises(RuntimeError, traj.write, x, comment=comment)
# Try to read in write mode
self.assertRaises(RuntimeError, traj.read)
del traj
def test_writeMultiple(self):
nFrames = 10
sym = ['C', 'O']
comment = 'blah'
crd = [[1,2,3], [4,5,6]]
x = numpy.array(crd, dtype=numpy.float)
xyz = self.tmpDir+"/out.xyz"
traj = mt.Trajectory(xyz, 'w', sym, format='XYZ', units='angs')
for i in range(nFrames):
traj.write(x, comment=str(i))
del traj
traj = mt.Trajectory(xyz)
count = 0
frame = traj.read()
while frame:
self.assertEqual(frame['comment'], str(count))
count += 1
frame = traj.read()
self.assertEqual(traj.lastFrame, nFrames-1)
def test_readExtraData(self):
traj = mt.Trajectory(self.tmpDir+"/extra.xyz")
frame = traj.read()
self.assertTrue('extra' in frame)
diff = frame['extra'] - self.extra_data
maxDiff = numpy.max(numpy.abs(diff))
epsilon = max(1e-6, numpy.finfo(numpy.float).resolution)
self.assertTrue(maxDiff <= epsilon)
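# Illustrative invocation (not part of the original file):
#   python -m unittest -v test_trajectory_xyz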
| gpl-3.0 |
inveniosoftware/invenio-upgrader | invenio_upgrader/upgrades/invenio_upgrader_2015_09_23_legacy_removal.py | 3 | 5314 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Remove legacy upgrade recipes."""
import warnings
from invenio_db import db
from sqlalchemy.sql import text
from invenio_upgrader import UpgradeBase, op
class LegacyRemoval(UpgradeBase):
"""Remove legacy upgrade recipes."""
_depends_on = []
legacy_upgrades = [
'invenio_2012_10_29_idxINDEX_new_indexer_column',
'invenio_2012_10_31_WebAuthorProfile_bibformat_dependency_update',
'invenio_2012_10_31_tablesorter_location',
'invenio_2012_11_01_lower_user_email',
'invenio_2012_11_04_circulation_and_linkback_updates',
'invenio_2012_11_07_xtrjob_last_recid',
'invenio_2012_11_15_bibdocfile_model',
'invenio_2012_11_15_hstRECORD_marcxml_longblob',
'invenio_2012_11_21_aiduserinputlog_userid_check',
'invenio_2012_11_27_new_selfcite_tables',
'invenio_2012_12_05_oaiHARVEST_arguments_blob',
'invenio_2012_12_06_new_citation_dict_table',
'invenio_2012_12_11_new_citation_errors_table',
'invenio_2013_01_08_new_goto_table',
'invenio_2013_01_12_bibrec_master_format',
'invenio_2013_02_01_oaiREPOSITORY_last_updated',
'invenio_2013_02_06_new_collectionboxname_table',
'invenio_2013_03_07_crcILLREQUEST_overdue_letter',
'invenio_2013_03_18_aidPERSONIDDATA_last_updated',
'invenio_2013_03_18_bibauthorid_search_engine_tables',
'invenio_2013_03_18_wapCACHE_object_value_longblob',
'invenio_2013_03_20_idxINDEX_synonym_kb',
'invenio_2013_03_20_new_self_citation_dict_table',
'invenio_2013_03_21_idxINDEX_stopwords',
'invenio_2013_03_25_idxINDEX_html_markup',
'invenio_2013_03_26_new_citation_log_table',
'invenio_2013_03_28_bibindex_bibrank_type_index',
'invenio_2013_03_28_idxINDEX_tokenizer',
'invenio_2013_03_29_idxINDEX_stopwords_update',
'invenio_2013_04_11_bibformat_2nd_pass',
'invenio_2013_04_30_new_plotextractor_websubmit_function',
'invenio_2013_06_11_rnkDOWNLOADS_file_format',
'invenio_2013_06_20_new_bibcheck_rules_table',
'invenio_2013_06_24_new_bibsched_status_table',
'invenio_2013_08_20_bibauthority_updates',
'invenio_2013_08_22_hstRECORD_affected_fields',
'invenio_2013_08_22_new_index_itemcount',
'invenio_2013_09_02_new_bibARXIVPDF',
'invenio_2013_09_10_new_param_websubmit_function',
'invenio_2013_09_13_new_bibEDITCACHE',
'invenio_2013_09_16_aidPERSONIDDATA_datablob',
'invenio_2013_09_25_virtual_indexes',
'invenio_2013_09_26_webauthorlist',
'invenio_2013_09_30_indexer_interface',
'invenio_2013_10_11_bibHOLDINGPEN_longblob',
'invenio_2013_10_18_crcLIBRARY_type',
'invenio_2013_10_18_new_index_filetype',
'invenio_2013_10_25_delete_recjson_cache',
'invenio_2013_10_25_new_param_websubmit_function',
'invenio_2013_11_12_new_param_websubmit_function',
'invenio_2013_12_04_seqSTORE_larger_value',
'invenio_2013_12_05_new_index_doi',
'invenio_2014_01_22_queue_table_virtual_index',
'invenio_2014_01_22_redis_sessions',
'invenio_2014_01_24_seqSTORE_larger_value',
'invenio_2014_03_13_new_index_filename',
'invenio_2014_06_02_oaiHARVEST_arguments_cfg_namechange',
'invenio_2014_08_12_format_code_varchar20',
'invenio_2014_08_13_tag_recjsonvalue',
'invenio_2014_08_31_next_collection_tree',
'invenio_2014_09_09_tag_recjsonvalue_not_null',
'invenio_2014_11_04_format_recjson',
'invenio_2015_01_13_hide_holdings',
'invenio_2015_03_03_tag_value',
'invenio_2015_04_15_collection_names_with_slashes',
'invenio_release_1_1_0',
'invenio_release_1_2_0',
'submit_2015_03_03_fix_models',
'submit_2015_04_30_fix_models_sbmFORMATEXTENSION_sbmGFILERESULT',
'invenio_2015_07_14_innodb',
]
def do_upgrade(self):
"""Implement your upgrades here."""
sql = text('delete from upgrade where upgrade = :upgrade')
for upgrade in self.legacy_upgrades:
db.engine.execute(sql, upgrade=upgrade)
def pre_upgrade(self):
"""Run pre-upgrade checks (optional)."""
sql = text('select 1 from upgrade where upgrade = :upgrade')
for upgrade in self.legacy_upgrades:
if not db.engine.execute(sql, upgrade=upgrade).fetchall():
warnings.warn("Upgrade '{}' was not applied.".format(upgrade))
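# Illustrative effect (not part of the original recipe): for each name in
# legacy_upgrades, do_upgrade() above issues the equivalent of
#   DELETE FROM upgrade WHERE upgrade = '<name>';
# while pre_upgrade() warns about any recipe that was never applied.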
| gpl-2.0 |
Naeka/vosae-app | www/core/views.py | 1 | 4175 | # -*- coding:Utf-8 -*-
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext, loader
from django.views.generic import TemplateView
from django.utils.formats import get_format
from django.conf import settings
from django.http import (
HttpResponseRedirect,
HttpResponseForbidden,
HttpResponseNotFound,
HttpResponseServerError,
Http404
)
import urllib2
from core.models import (
Tenant,
VosaeUser,
VosaeFile
)
def error_403(request):
t = loader.get_template('core/errors/403.html')
return HttpResponseForbidden(t.render(RequestContext(request)))
def error_404(request):
t = loader.get_template('core/errors/404.html')
return HttpResponseNotFound(t.render(RequestContext(request)))
def error_500(request):
t = loader.get_template('core/errors/500.html')
return HttpResponseServerError(t.render(RequestContext(request)))
def root(request):
if not request.user.is_authenticated():
return redirect('signin')
return HttpResponseRedirect(settings.WEB_ENDPOINT)
def download_file(request, tenant_slug, file_id, public_token=None, stream=False):
try:
request.tenant = Tenant.objects.get(slug=tenant_slug)
if not request.user.is_anonymous():
request.user.groups.get(name=tenant_slug)
request.vosae_user = VosaeUser.objects.get(tenant=request.tenant, email=request.user.email)
        else:
            # If anonymous user, public_token is required
            assert public_token is not None
            # Anonymous downloads have no associated VosaeUser; set it to
            # None so the permission check below is skipped cleanly.
            request.vosae_user = None
except:
raise Http404()
if request.vosae_user and not request.vosae_user.has_perm("see_vosaefile"):
return HttpResponseForbidden()
try:
kwargs = {'tenant': request.tenant, 'id': file_id}
if public_token:
kwargs.update(public_token=public_token)
vosae_file = VosaeFile.objects.get(**kwargs)
if not public_token:
# Since public token is provided, file is assumed as publicly accessible
# and authorization checks are useless.
for perm in vosae_file.permissions:
if not request.vosae_user.has_perm(perm):
return HttpResponseForbidden()
response_headers = {'response-content-disposition': 'inline; filename="{0}"'.format(str(urllib2.quote(vosae_file.name.encode('utf8'))))}
s3url = vosae_file.file.key.generate_url(20, response_headers=response_headers if stream else None)
return HttpResponseRedirect(s3url)
except:
raise Http404()
def spec(request):
# During specs tests we use local static files (even during Travis builds)
original_settings_staticfiles_storage = settings.STATICFILES_STORAGE
settings.STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# Force the auth user email during specs
request.user.email = "[email protected]"
formats = {
# Defaults
"DECIMAL_SEPARATOR": get_format("DECIMAL_SEPARATOR"),
"THOUSAND_SEPARATOR": get_format("THOUSAND_SEPARATOR"),
"NUMBER_GROUPING": get_format("NUMBER_GROUPING"),
# Custom
"MON_DECIMAL_POINT": get_format("MON_DECIMAL_POINT"),
"MON_THOUSANDS_SEP": get_format("MON_THOUSANDS_SEP"),
"N_CS_PRECEDES": get_format("N_CS_PRECEDES"),
"P_CS_PRECEDES": get_format("P_CS_PRECEDES"),
"N_SEP_BY_SPACE": get_format("N_SEP_BY_SPACE"),
"P_SEP_BY_SPACE": get_format("P_SEP_BY_SPACE"),
"N_SIGN_POSN": get_format("N_SIGN_POSN"),
"P_SIGN_POSN": get_format("P_SIGN_POSN"),
"NEGATIVE_SIGN": get_format("NEGATIVE_SIGN"),
"POSITIVE_SIGN": get_format("POSITIVE_SIGN"),
}
response = render_to_response("core/spec.html", {'formats': formats}, context_instance=RequestContext(request))
settings.STATICFILES_STORAGE = original_settings_staticfiles_storage
return response
class TextTemplateView(TemplateView):
def render_to_response(self, context, **response_kwargs):
response_kwargs['content_type'] = 'text/plain'
return super(TemplateView, self).render_to_response(context, **response_kwargs)
| agpl-3.0 |
olituks/sentinella | frontend/library/web2py/applications/admin/languages/pt-br.py | 17 | 18517 | # -*- coding: utf-8 -*-
{
'!langcode!': 'pt-br',
'!langname!': 'Português Brasileiro',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "campo1=\'novo_valor\'". Não é permitido atualizar ou apagar resultados de um JOIN',
'%s %%{row} deleted': '%s registros apagados',
'%s %%{row} updated': '%s registros atualizados',
'%Y-%m-%d': '%d/%m/%Y',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'(requires internet access, experimental)': '(requer acesso à internet, experimental)',
'(something like "it-it")': '(algo como "it-it")',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'@markmin\x01Searching: **%s** %%{file}': 'Buscando: **%s** arquivos',
'A new version of web2py is available': 'Está disponível uma nova versão do web2py',
'A new version of web2py is available: %s': 'Está disponível uma nova versão do web2py: %s',
'About': 'sobre',
'About application': 'Sobre a aplicação',
'additional code for your application': 'código adicional para sua aplicação',
'Additional code for your application': 'Código adicional para sua aplicação',
'admin disabled because no admin password': ' admin desabilitado por falta de senha definida',
'admin disabled because not supported on google app engine': 'admin desabilitado porque não é suportado no GAE',
'admin disabled because unable to access password file': 'admin desabilitado porque não foi possível ler o arquivo de senha',
'Admin is disabled because insecure channel': 'Admin desabilitado pois o canal não é seguro',
'Admin is disabled because unsecure channel': 'Admin desabilitado pois o canal não é seguro',
'Admin language': 'Idioma do Admin',
'administrative interface': 'interface administrativa',
'Administrator Password:': 'Senha de administrador:',
'and rename it (required):': 'e renomeie (requerido):',
'and rename it:': ' e renomeie:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'admin desabilitado porque o canal não é seguro',
'application "%s" uninstalled': 'aplicação "%s" desinstalada',
'application compiled': 'aplicação compilada',
'application is compiled and cannot be designed': 'A aplicação está compilada e não pode ser modificada',
'Application name:': 'Nome da aplicação:',
'Are you sure you want to delete file "%s"?': 'Tem certeza que deseja apagar o arquivo "%s"?',
'Are you sure you want to delete plugin "%s"?': 'Tem certeza que deseja apagar o plugin "%s"?',
'Are you sure you want to delete this object?': 'Tem certeza que deseja apagar esse objeto?',
'Are you sure you want to uninstall application "%s"': 'Tem certeza que deseja desinstalar a aplicação "%s"?',
'Are you sure you want to uninstall application "%s"?': 'Tem certeza que deseja desinstalar a aplicação "%s"?',
'Are you sure you want to upgrade web2py now?': 'Tem certeza que deseja atualizar o web2py agora?',
'arguments': 'argumentos',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENÇÃO: o login requer uma conexão segura (HTTPS) ou executar de localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENÇÃO OS TESTES NÃO SÃO THREAD SAFE, NÃO EFETUE MÚLTIPLOS TESTES AO MESMO TEMPO.',
'ATTENTION: you cannot edit the running application!': 'ATENÇÃO: Não pode modificar a aplicação em execução!',
'Available databases and tables': 'Bancos de dados e tabelas disponíveis',
'back': 'voltar',
'Basics': 'Informações básicas',
'Begin': 'Iniciar',
'browse': 'navegar',
'cache': 'cache',
'cache, errors and sessions cleaned': 'cache, erros e sessões eliminadas',
'can be a git repo': 'pode ser um repositório git',
'Cannot be empty': 'Não pode ser vazio',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Não é possível compilar: Existem erros em sua aplicação. Depure, corrija os errros e tente novamente',
'Cannot compile: there are errors in your app:': 'Não é possível compilar: Existem erros em sua aplicação',
'cannot create file': 'Não é possível criar o arquivo',
'cannot upload file "%(filename)s"': 'não é possível fazer upload do arquivo "%(filename)s"',
'Change admin password': 'mudar senha de administrador',
'Change Password': 'Mudar Senha',
'check all': 'marcar todos',
'Check for upgrades': 'Verificar se existem atualizações',
'Check to delete': 'Marque para apagar',
'Checking for upgrades...': 'Buscando atualizações...',
'Clean': 'Limpo',
'click here for online examples': 'clique para ver exemplos online',
'click here for the administrative interface': 'Clique aqui para acessar a interface administrativa',
'Click row to expand traceback': 'Clique na linha para expandir o traceback',
'click to check for upgrades': 'clique aqui para verificar se existem atualizações',
'click to open': 'clique para abrir',
'Client IP': 'IP do cliente',
'code': 'código',
'collapse/expand all': 'fechar/abrir todos',
'commit (mercurial)': 'commit (mercurial)',
'Compile': 'Compilar',
'compiled application removed': 'a aplicação compilada foi removida',
'Controllers': 'Controladores',
'controllers': 'controladores',
'Count': 'Contagem',
'Create': 'Criar',
'create file with filename:': 'criar um arquivo com o nome:',
'Create new application using the Wizard': 'Criar nova aplicação utilizando o Assistente',
'create new application:': 'nome da nova aplicação:',
'Create new simple application': 'Crie uma nova aplicação',
'created by': 'criado por',
'crontab': 'crontab',
'Current request': 'Requisição atual',
'Current response': 'Resposta atual',
'Current session': 'Sessão atual',
'currently running': 'Executando',
'currently saved or': 'Atualmente salvo ou',
'customize me!': 'Modifique-me!',
'data uploaded': 'Dados enviados',
'database': 'banco de dados',
'database %s select': 'Select no banco de dados %s',
'database administration': 'administração do banco de dados',
'Date and Time': 'Data e Hora',
'db': 'db',
'Debug': 'Debug',
'defines tables': 'define as tabelas',
'Delete': 'Apague',
'delete': 'apagar',
'delete all checked': 'apagar marcados',
'delete plugin': 'apagar plugin',
'Delete:': 'Apague:',
'Deploy': 'Publicar',
'Deploy on Google App Engine': 'Publicar no Google App Engine',
'Deploy to OpenShift': 'Publicar no OpenShift',
'Description': 'Descrição',
'design': 'projeto',
'DESIGN': 'Projeto',
'Design for': 'Projeto de',
'Detailed traceback description': 'Descrição detalhada do traceback',
'direction: ltr': 'direção: ltr',
'Disable': 'Desabilitar',
'done!': 'feito!',
'Download .w2p': 'Download .w2p',
'download layouts': 'download de layouts',
'download plugins': 'download de plugins',
'E-mail': 'E-mail',
'EDIT': 'EDITAR',
'Edit': 'Editar',
'edit all': 'editar todos',
'Edit application': 'Editar aplicação',
'edit controller': 'editar controlador',
'Edit current record': 'Editar o registro atual',
'Edit Profile': 'Editar Perfil',
'edit views:': 'editar visões:',
'Editing file': 'Editando arquivo',
'Editing file "%s"': 'Editando arquivo "%s"',
'Editing Language file': 'Editando arquivo de idioma',
'Enterprise Web Framework': 'Framework Web Corporativo',
'Error': 'Erro',
'Error logs for "%(app)s"': 'Logs de erro para "%(app)s"',
'Error snapshot': 'Momento do Erro',
'Error ticket': 'Tiquete de Erro',
'Errors': 'Erros',
'Exception instance attributes': 'Atributos de instância da Exception',
'export as csv file': 'exportar como arquivo CSV',
'exposes': 'expõe',
'extends': 'estende',
'failed to reload module': 'Falha ao recarregar o módulo',
'failed to reload module because:': 'falha ao recarregar o módulo porque:',
'File': 'Arquivo',
'file "%(filename)s" created': 'arquivo "%(filename)s" criado',
'file "%(filename)s" deleted': 'arquivo "%(filename)s" apagado',
'file "%(filename)s" uploaded': 'arquivo "%(filename)s" enviado',
'file "%(filename)s" was not deleted': 'arquivo "%(filename)s" não foi apagado',
'file "%s" of %s restored': 'arquivo "%s" de %s restaurado',
'file changed on disk': 'arquivo modificado no disco',
'file does not exist': 'arquivo não existe',
'file saved on %(time)s': 'arquivo salvo em %(time)s',
'file saved on %s': 'arquivo salvo em %s',
'filter': 'filtro',
'First name': 'Nome',
'Frames': 'Frames',
'Functions with no doctests will result in [passed] tests.': 'Funções sem doctests resultarão em testes [aceitos].',
'Generate': 'Gerar',
'go!': 'vai!',
'Group ID': 'ID do Grupo',
'Hello World': 'Olá Mundo',
'Help': 'Ajuda',
'htmledit': 'htmledit',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'Se o relatório acima contém um número de ticket, isso indica uma falha no controlador em execução, antes de tentar executar os doctests. Isto acontece geralmente por erro de identação ou um erro fora do código da função.\nO título em verde indica que os testes (se definidos) passaram. Neste caso o resultado dos testes não são mostrados.',
'Import/Export': 'Importar/Exportar',
'includes': 'inclui',
'insert new': 'inserir novo',
'insert new %s': 'inserir novo %s',
'inspect attributes': 'inspeciona atributos',
'Install': 'instalar',
'Installed applications': 'Aplicações instaladas',
'internal error': 'erro interno',
'Internal State': 'Estado Interno',
'Invalid action': 'Ação inválida',
'Invalid email': 'E-mail inválido',
'invalid password': 'senha inválida',
'Invalid Query': 'Consulta inválida',
'invalid request': 'solicitação inválida',
'invalid ticket': 'ticket inválido',
'language file "%(filename)s" created/updated': 'arquivo de idioma "%(filename)s" criado/atualizado',
'Language files (static strings) updated': 'Arquivos de idioma (textos estáticos) atualizados',
'languages': 'idiomas',
'Languages': 'Idiomas',
'languages updated': 'idiomas atualizados',
'Last name': 'Sobrenome',
'Last saved on:': 'Salvo pela última vez em:',
'License for': 'Licença para',
'loading...': 'carregando...',
'locals': 'locals',
'Login': 'Entrar',
'login': 'início de sessão',
'Login to the Administrative Interface': 'Entrar na interface adminitrativa',
'Logout': 'finalizar sessão',
'Lost Password': 'Perdi a senha',
'Manage': 'Gerenciar',
'manage': 'gerenciar',
'merge': 'juntar',
'Models': 'Modelos',
'models': 'modelos',
'Modules': 'Módulos',
'modules': 'módulos',
'Name': 'Nome',
'new application "%s" created': 'nova aplicação "%s" criada',
'New Application Wizard': 'Assistente para novas aplicações ',
'New application wizard': 'Assistente para novas aplicações',
'new plugin installed': 'novo plugin instalado',
'New Record': 'Novo registro',
'new record inserted': 'novo registro inserido',
'New simple application': 'Nova aplicação básica',
'next 100 rows': 'próximos 100 registros',
'NO': 'NÃO',
'No databases in this application': 'Não existem bancos de dados nesta aplicação',
'no match': 'não encontrado',
'no package selected': 'nenhum pacote selecionado',
'Or Get from URL:': 'Ou baixa da URL:',
'or import from csv file': 'ou importar de um arquivo CSV',
'or provide app url:': 'ou forneça a url de uma aplicação:',
'or provide application url:': 'ou forneça a url de uma aplicação:',
'Origin': 'Origem',
'Original/Translation': 'Original/Tradução',
'Overwrite installed app': 'Sobrescrever aplicação instalada',
'Pack all': 'Criar pacote',
'Pack compiled': 'Criar pacote compilado',
'Pack custom': 'Customizar pacote',
'pack plugin': 'empacotar plugin',
'PAM authenticated user, cannot change password here': 'usuário autenticado por PAM não pode alterar a senha aqui',
'Password': 'Senha',
'password changed': 'senha alterada',
'Peeking at file': 'Visualizando arquivo',
'plugin "%(plugin)s" deleted': 'plugin "%(plugin)s" apagado',
'Plugin "%s" in application': 'Plugin "%s" na aplicação',
'plugins': 'plugins',
'Plugins': 'Plugins',
'Powered by': 'Este site utiliza',
'previous 100 rows': '100 registros anteriores',
'Query:': 'Consulta:',
'record': 'registro',
'record does not exist': 'o registro não existe',
'record id': 'id do registro',
'Record ID': 'ID do Registro',
'Register': 'Registrar-se',
'Registration key': 'Chave de registro',
'Reload routes': 'Recarregar routes',
'Remove compiled': 'Eliminar compilados',
'request': 'request',
'Resolve Conflict file': 'Arquivo de resolução de conflito',
'response': 'response',
'restart': 'reiniciar',
'restore': 'restaurar',
'revert': 'reverter',
'Role': 'Papel',
'Rows in table': 'Registros na tabela',
'Rows selected': 'Registros selecionados',
'Running on %s': 'Rodando em %s',
'save': 'salvar',
'Saved file hash:': 'Hash do arquivo salvo:',
'Select Files to Package': 'Selecione arquivos para empacotar',
'selected': 'selecionado(s)',
'session': 'session',
'session expired': 'sessão expirada',
'shell': 'Terminal',
'Site': 'Site',
'skip to generate': 'pular para a gerar a aplicação',
'some files could not be removed': 'alguns arquivos não puderam ser removidos',
'Start a new app': 'Inicie uma nova aplicação',
'Start wizard': 'Iniciar assistente',
'state': 'estado',
'static': 'estáticos',
'Static files': 'Arquivos estáticos',
'Step': 'Passo',
'Submit': 'Enviar',
'submit': 'enviar',
'Sure you want to delete this object?': 'Tem certeza que deseja apagar este objeto?',
'table': 'tabela',
'Table name': 'Nome da tabela',
'test': 'testar',
'Testing application': 'Testando a aplicação',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'A "consulta" é uma condição como "db.tabela.campo1==\'valor\'". Algo como "db.tabela1.campo1==db.tabela2.campo2" resulta em um JOIN SQL.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'A lógica da aplicação, cada URL é mapeada para uma função exposta pelo controlador',
'The application logic, each URL path is mapped in one exposed function in the controller': 'A lógica da aplicação, cada URL é mapeada para uma função exposta pelo controlador',
'the data representation, define database tables and sets': 'A representação dos dados, define tabelas do banco de dados e conjuntos',
'The data representation, define database tables and sets': 'A representação dos dados, define tabelas do banco de dados e conjuntos',
'The presentations layer, views are also known as templates': 'A camada de apresentação, as visões também são chamadas de templates',
'the presentations layer, views are also known as templates': 'A camada de apresentação, as visões também são chamadas de templates',
'There are no controllers': 'Não existem controladores',
'There are no models': 'Não existem modelos',
'There are no modules': 'Não existem módulos',
'There are no plugins': 'Não existem plugins',
'There are no static files': 'Não existem arquicos estáticos',
'There are no translators, only default language is supported': 'Não há tradutores, somente a linguagem padrão é suportada',
'There are no views': 'Não existem visões',
'These files are served without processing, your images go here': 'Estes arquivos são servidos sem processamento, suas imagens ficam aqui',
'these files are served without processing, your images go here': 'Estes arquivos são servidos sem processamento, suas imagens ficam aqui',
'This is the %(filename)s template': 'Este é o template %(filename)s',
'Ticket': 'Ticket',
'Ticket ID': 'Número do Ticket',
'Timestamp': 'Momento de geração',
'TM': 'MR',
'to previous version.': 'para a versão anterior.',
'To create a plugin, name a file/folder plugin_[name]': 'Para criar um plugin, nomeie um arquivo/pasta como plugin_[nome]',
'Traceback': 'Traceback',
'translation strings for the application': 'textos traduzidos para a aplicação',
'Translation strings for the application': 'textos traduzidos para a aplicação',
'try': 'tente',
'try something like': 'tente algo como',
'Try the mobile interface': 'Experimente a interface para smartphones e tablets',
'Unable to check for upgrades': 'Não é possível checar as atualizações',
'unable to create application "%s"': 'não é possível criar a aplicação "%s"',
'unable to delete file "%(filename)s"': 'não é possível criar o arquivo "%(filename)s"',
'unable to delete file plugin "%(plugin)s"': 'não é possível criar o plugin "%(plugin)s"',
'Unable to download': 'Não é possível efetuar o download',
'Unable to download app': 'Não é possível baixar a aplicação',
'Unable to download app because:': 'Não é possível baixar a aplicação porque:',
'Unable to download because': 'Não é possível baixar porque',
'unable to parse csv file': 'não é possível analisar o arquivo CSV',
'unable to uninstall "%s"': 'não é possível desinstalar "%s"',
'unable to upgrade because "%s"': 'não é possível atualizar porque "%s"',
'uncheck all': 'desmarcar todos',
'Uninstall': 'Desinstalar',
'update': 'alterar',
'update all languages': 'alterar todos os idiomas',
'Update:': 'Alterar:',
'upgrade now to %s': 'Atualize agora para %s',
'upgrade web2py now': 'atualize o web2py agora',
'upload': 'upload',
'Upload a package:': 'Faça upload de um pacote:',
'Upload and install packed application': 'Faça upload e instale uma aplicação empacotada',
'upload application:': 'Fazer upload de uma aplicação:',
'Upload existing application': 'Faça upload de uma aplicação existente',
'upload file:': 'Enviar arquivo:',
'upload plugin file:': 'Enviar arquivo de plugin:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, e ~(...) para NOT, para criar consultas mais complexas.',
'Use an url:': 'Use uma url:',
'User ID': 'ID do Usuário',
'variables': 'variáveis',
'Version': 'Versão',
'versioning': 'versionamento',
'Versioning': 'Versionamento',
'view': 'visão',
'Views': 'Visões',
'views': 'visões',
'Web Framework': 'Web Framework',
'web2py is up to date': 'web2py está atualizado',
'web2py Recent Tweets': 'Tweets Recentes de @web2py',
'web2py upgraded; please restart it': 'web2py atualizado; favor reiniciar',
'Welcome to web2py': 'Bem-vindo ao web2py',
'YES': 'SIM',
}
| lgpl-2.1 |
PyEatingContest/crispy-node | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/xcode.py | 426 | 56534 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import gyp.xcode_ninja
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
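# As an illustration (not an exhaustive mapping), a gyp path written as
# '<(PRODUCT_DIR)/foo' ends up here as '$(BUILT_PRODUCTS_DIR)/foo', leaving
# the final resolution to Xcode's own build-setting expansion.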
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'ios_app_extension',
'ios_watch_app',
'ios_watchkit_extension',
'mac_bundle',
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
'mac_xctest_bundle',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
]
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
'$(SDKROOT)/usr/lib',
'$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
if len(configuration_names) == 0:
configuration_names = ['Default']
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
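# For example, CreateXCConfigurationList(['Debug', 'Release']) produces a
# list whose defaultConfigurationName is 'Debug'; an empty input falls back
# to a single 'Default' configuration.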
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
# various targets in the file. It is very heavily advised to keep each
# target in an entire project (even across multiple project files) using
# the same set of configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings and config files. This
# is intended to be used very sparingly. Really, almost everything should
# go into target-specific build settings sections. The project-wide
# settings are only intended to be used in cases where Xcode attempts to
# resolve variable references in a project context as opposed to a target
# context, such as when resolving sourceTree references while building up
    # the tree view for UI display.
# Any values set globally are applied to all configurations, then any
# per-configuration values are applied.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in self.build_file_dict:
config_ref = self.project.AddOrGetFileInRootGroup(
self.build_file_dict['xcode_config_file'])
xccl.SetBaseConfiguration(config_ref)
build_file_configurations = self.build_file_dict.get('configurations', {})
if build_file_configurations:
for config_name in configurations:
build_file_configuration_named = \
build_file_configurations.get(config_name, {})
if build_file_configuration_named:
xcc = xccl.ConfigurationNamed(config_name)
for xck, xcv in build_file_configuration_named.get('xcode_settings',
{}).iteritems():
xcc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in build_file_configuration_named:
config_ref = self.project.AddOrGetFileInRootGroup(
build_file_configurations[config_name]['xcode_config_file'])
xcc.SetBaseConfiguration(config_ref)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
# targets is full list of targets in the project.
targets = []
    # does it define its own "all"?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
        has_custom_all = True
# If this target has a 'run_as' attribute, add its target to the
# targets, and add it to the test targets.
if target.get('run_as'):
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
command = target['run_as']
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
        # Some tests end up using sockets, files on disk, etc. and can get
        # confused if more than one test runs at a time. The generator
# flag 'xcode_serialize_all_test_runs' controls the forcing of all
# tests serially. It defaults to True. To get serial runs this
# little bit of python does the same as the linux flock utility to
# make sure only one runs at a time.
command_prefix = ''
if serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
    # and generate a second target that will run the test runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
if hasattr(dependency_xct, 'test_runner'):
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
def AddSourceToTarget(source, type, pbxp, xct):
# TODO(mark): Perhaps source_extensions and library_extensions can be made a
# little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's', 'swift']
# .o is conceptually more of a "source" than a "library," but Xcode thinks
# of "sources" as things to compile and "libraries" (or "frameworks") as
# things to link with. Adding an object file to an Xcode target's frameworks
# phase works properly.
library_extensions = ['a', 'dylib', 'framework', 'o']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext:
ext = ext[1:].lower()
if ext in source_extensions and type != 'none':
xct.SourcesPhase().AddFile(source)
elif ext in library_extensions and type != 'none':
xct.FrameworksPhase().AddFile(source)
else:
# Files that aren't added to a sources or frameworks build phase can still
# go into the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
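# Illustrative behavior of AddSourceToTarget: 'foo.mm' is added to the
# Sources phase, 'libbar.a' to the Frameworks phase, and anything else
# (e.g. 'README.txt') only to the project's root group.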
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
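  # ('Private', 'Public')[is_public] indexes the tuple with the boolean,
  # so is_public=True selects the 'Public' header attribute.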
settings = '{ATTRIBUTES = (%s, ); }' % ('Private', 'Public')[is_public]
xct.HeadersPhase().AddFile(header, settings)
_xcode_variable_re = re.compile(r'(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
  if not matches:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
    if variable not in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
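# Illustrative call (hypothetical values):
#   ExpandXcodeVariables('$(INPUT_FILE_BASE).cc', {'INPUT_FILE_BASE': 'one'})
# returns 'one.cc'; a $(VAR) whose VAR is not in the dict is left untouched.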
_xcode_define_re = re.compile(r'([\\\"\' ])')
def EscapeXcodeDefine(s):
"""We must escape the defines that we give to XCode so that it knows not to
split on spaces and to respect backslash and quote literals. However, we
must not quote the define, or Xcode will incorrectly intepret variables
especially $(inherited)."""
return re.sub(_xcode_define_re, r'\\\1', s)
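# Illustrative call (hypothetical value): EscapeXcodeDefine('A="x y"')
# returns A=\"x\ y\", so Xcode sees a single define token instead of two.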
def PerformBuild(data, configurations, params):
options = params['options']
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
for config in configurations:
arguments = ['xcodebuild', '-project', xcodeproj_path]
arguments += ['-configuration', config]
print "Building [%s]: %s" % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
# Optionally configure each spec to use ninja as the external builder.
ninja_wrapper = params.get('flavor') == 'ninja'
if ninja_wrapper:
(target_list, target_dicts, data) = \
gyp.xcode_ninja.CreateWrapper(target_list, target_dicts, data, params)
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
skip_excluded_files = \
not generator_flags.get('xcode_list_excluded_files', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
if parallel_builds:
pbxp.SetProperty('attributes',
{'BuildIndependentTargetsInParallel': 'YES'})
# Add gyp/gypi files to project
if not generator_flags.get('standalone'):
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. The type with
# "+bundle" appended will be used if the target has "mac_bundle" set.
# loadable_modules not in a mac_bundle are mapped to
# com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
# to create a single-file mh_bundle.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.googlecode.gyp.xcode.bundle',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test',
'shared_library+bundle': 'com.apple.product-type.framework',
'executable+extension+bundle': 'com.apple.product-type.app-extension',
'executable+watch+extension+bundle':
'com.apple.product-type.watchkit-extension',
'executable+watch+bundle': 'com.apple.product-type.application.watchapp',
}
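    # Key resolution example: a 'shared_library' target with mac_bundle=1
    # uses the 'shared_library+bundle' entry and becomes a framework.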
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_xctest = int(spec.get('mac_xctest_bundle', 0))
is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest
is_app_extension = int(spec.get('ios_app_extension', 0))
is_watchkit_extension = int(spec.get('ios_watchkit_extension', 0))
is_watch_app = int(spec.get('ios_watch_app', 0))
if type != 'none':
type_bundle_key = type
if is_xctest:
type_bundle_key += '+xctest'
assert type == 'loadable_module', (
'mac_xctest_bundle targets must have type loadable_module '
'(target %s)' % target_name)
elif is_app_extension:
assert is_bundle, ('ios_app_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+extension+bundle'
elif is_watchkit_extension:
assert is_bundle, ('ios_watchkit_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+extension+bundle'
elif is_watch_app:
assert is_bundle, ('ios_watch_app flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+bundle'
elif is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
assert not is_bundle, (
'mac_bundle targets cannot have type none (target "%s")' %
target_name)
assert not is_xctest, (
'mac_xctest_bundle targets cannot have type none (target "%s")' %
target_name)
target_product_name = spec.get('product_name')
if target_product_name is not None:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_outdir=spec.get('product_dir'),
force_prefix=spec.get('product_prefix'),
force_extension=spec.get('product_extension'))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
# The Xcode "issues" don't affect xcode-ninja builds, since the dependency
# logic all happens in ninja. Don't bother creating the extra targets in
# that case.
if type != 'none' and (spec_actions or spec_rules) and not ninja_wrapper:
      support_xccl = CreateXCConfigurationList(configuration_names)
support_target_suffix = generator_flags.get(
'support_target_suffix', ' Support')
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + support_target_suffix,
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, type, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
    # Apple radar IDs. I hope that these shortcomings are addressed; I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
    #          variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
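      # e.g. a rule declaring 'extension': 'proto' is found later via
      # rules_by_ext['proto'] when sources are matched by extension.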
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_dirname, rule_source_basename = \
posixpath.split(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
'INPUT_FILE_DIRNAME': rule_source_dirname,
}
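        # For a rule_source of 'dir/one.ext' (illustrative), this yields
        # INPUT_FILE_BASE='one', INPUT_FILE_SUFFIX='.ext',
        # INPUT_FILE_NAME='one.ext', INPUT_FILE_PATH='dir/one.ext' and
        # INPUT_FILE_DIRNAME='dir'.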
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
          # Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, type, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = ExpandXcodeVariables(message, rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
        # TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s.make' % re.sub(
            '[^a-zA-Z0-9_]', '_', '%s_%s' % (target_name, rule['rule_name']))
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# Otherwise, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
if len(concrete_output_dirs) > 0:
makefile.write('\t@mkdir -p "%s"\n' %
'" "'.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
# Mark it with note: so Xcode picks it up in build output.
makefile.write('\t@echo note: %s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
groups = ['inputs', 'inputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for group in groups:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, type, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" and "mac_framework_private_headers" if
# it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
for header in spec.get('mac_framework_private_headers', []):
AddHeaderToTarget(header, pbxp, xct, False)
# Add "mac_framework_headers". These can be valid for both frameworks
# and static libraries.
if is_bundle or type == 'static_library':
for header in spec.get('mac_framework_headers', []):
AddHeaderToTarget(header, pbxp, xct, True)
# Add "copies".
pbxcp_dict = {}
for copy_group in spec.get('copies', []):
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
code_sign = int(copy_group.get('xcode_code_sign', 0))
      settings = (None, '{ATTRIBUTES = (CodeSignOnCopy, ); }')[code_sign]
# Coalesce multiple "copies" sections in the same target with the same
# "destination" property into the same PBXCopyFilesBuildPhase, otherwise
# they'll wind up with ID collisions.
pbxcp = pbxcp_dict.get(dest, None)
if pbxcp is None:
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
pbxcp_dict[dest] = pbxcp
for file in copy_group['files']:
pbxcp.AddFile(file, settings)
# Excluded files can also go into the project file.
if not skip_excluded_files:
for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
'mac_framework_private_headers']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for action in spec.get('actions', []):
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
# Make the postbuild step depend on the output of ld or ar from this
# target. Apparently putting the script step after the link step isn't
# sufficient to ensure proper ordering in all cases. With an input
# declared but no outputs, the script step should run every time, as
# desired.
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
library_dir = posixpath.dirname(library)
if library_dir not in xcode_standard_library_dirs and (
not xct.HasBuildSetting(_library_search_paths_var) or
library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
xct.AppendBuildSetting(_library_search_paths_var, library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
for library_dir in configuration.get('library_dirs', []):
if library_dir not in xcode_standard_library_dirs and (
not xcbc.HasBuildSetting(_library_search_paths_var) or
library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)):
xcbc.AppendBuildSetting(_library_search_paths_var, library_dir)
if 'defines' in configuration:
for define in configuration['defines']:
set_define = EscapeXcodeDefine(define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in configuration:
config_ref = pbxp.AddOrGetFileInRootGroup(
configuration['xcode_config_file'])
xcbc.SetBaseConfiguration(config_ref)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| mit |
teonlamont/mne-python | mne/realtime/tests/test_stim_client_server.py | 2 | 2527 | import threading
import time
import pytest
from mne.realtime import StimServer, StimClient
from mne.externals.six.moves import queue
from mne.utils import requires_good_network, run_tests_if_main
_server = None
_have_put_in_trigger = False
_max_wait = 10.
@requires_good_network
def test_connection():
"""Test TCP/IP connection for StimServer <-> StimClient."""
global _server, _have_put_in_trigger
# have to start a thread to simulate the effect of two
# different computers since stim_server.start() is designed to
# be a blocking method
# use separate queues because timing matters
trig_queue1 = queue.Queue()
trig_queue2 = queue.Queue()
# start a thread to emulate 1st client
thread1 = threading.Thread(target=_connect_client, args=(trig_queue1,))
thread1.daemon = True
# start another thread to emulate 2nd client
thread2 = threading.Thread(target=_connect_client, args=(trig_queue2,))
thread2.daemon = True
thread1.start()
thread2.start()
with StimServer(port=4218, n_clients=2) as stim_server:
_server = stim_server
stim_server.start(timeout=10.0) # don't allow test to hang
# Add the trigger to the queue for both clients
stim_server.add_trigger(20)
_have_put_in_trigger = True # monkey patch
        # the assertions must be in the test_connection() method
# Hence communication between threads is necessary
trig1 = trig_queue1.get(timeout=_max_wait)
trig2 = trig_queue2.get(timeout=_max_wait)
assert trig1 == 20
# test if both clients receive the same trigger
assert trig1 == trig2
# test timeout for stim_server
with StimServer(port=4218) as stim_server:
pytest.raises(StopIteration, stim_server.start, 0.1)
def _connect_client(trig_queue):
"""Instantiate the StimClient."""
# just wait till the main thread reaches stim_server.start()
t0 = time.time()
while (time.time() - t0 < _max_wait and
(_server is None or not _server._running)):
time.sleep(0.01)
assert _server is not None and _server._running
# instantiate StimClient
stim_client = StimClient('localhost', port=4218)
# wait for script to reach stim_server.add_trigger()
t0 = time.time()
while (time.time() - t0 < _max_wait and not _have_put_in_trigger):
time.sleep(0.01)
assert _have_put_in_trigger
trig_queue.put(stim_client.get_trigger())
stim_client.close()
run_tests_if_main()
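# run_tests_if_main() lets this module double as a script: executing the file
# directly runs the tests above, while importing it only defines them.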
| bsd-3-clause |
pchaigno/grreat | gui/api_object_renderers.py | 2 | 11656 | #!/usr/bin/env python
"""This module contains RESTful API renderers for AFF4 objects and RDFValues."""
import itertools
import numbers
import re
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.lib.rdfvalues import structs
class ApiObjectRenderer(object):
"""Baseclass for restful API objects rendering classes."""
__metaclass__ = registry.MetaclassRegistry
# API renderers can render RDFValues and AFF4Objects. Each renderer has
# to be bound either to a single AFF4 type or to a rdfvalue type.
aff4_type = None
rdfvalue_type = None
def __init__(self):
if self.aff4_type and self.rdfvalue_type:
raise ValueError("Can't have both aff4_type and rdfvalue_type set.")
if not self.aff4_type and not self.rdfvalue_type:
raise ValueError("Have to set either aff4_type or rdfvalue_type.")
_type_list_cache = {}
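  # Maps a class name to its MRO as a list of class names, e.g. (illustrative)
  # 'RDFString' -> ['RDFString', ..., 'RDFValue', 'object'].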
def GetTypeList(self, value):
try:
return ApiObjectRenderer._type_list_cache[value.__class__.__name__]
except KeyError:
type_list = [klass.__name__ for klass in value.__class__.__mro__]
ApiObjectRenderer._type_list_cache[value.__class__.__name__] = type_list
return type_list
def _ToPrimitive(self, value, request):
"""Function used to convert values to JSON-friendly data structure.
Args:
      value: A value to convert. May be either a plain Python value,
an RDFValue or an Enum.
request: Request parameters dictionary.
Returns:
JSON-friendly data: a string, a number, a dict or an array.
"""
limit_lists = int(request.get("limit_lists", 0))
no_lists = request.get("no_lists", False)
# We use RenderObject (main function of this module) to render
# all AFF4 and RDF values.
if isinstance(value, rdfvalue.RDFValue):
return RenderObject(value, request)
# Repeated fields get converted to lists with each value
# being recursively converted with _ToPrimitive().
if isinstance(value, structs.RepeatedFieldHelper):
if no_lists:
return []
return list(self._ToPrimitive(v, request) for v in value)
# Plain dictionaries are converted to dictionaries with each value
# being recursively converted with _ToPrimitive().
elif isinstance(value, dict):
result = {}
for k, v in value.items():
if isinstance(v, structs.RepeatedFieldHelper):
if no_lists:
continue
if limit_lists and len(v) > 10:
result[k] = self._ToPrimitive(v[:limit_lists], request)
result[k + "_fetch_more_url"] = "to_be_implemented"
else:
result[k] = self._ToPrimitive(v, request)
else:
result[k] = self._ToPrimitive(v, request)
return result
# Enums are converted to strings representing the name of the value.
elif isinstance(value, structs.Enum):
return value.name
# Make sure string values are properly encoded, otherwise we may have
# problems with JSON-encoding them.
elif isinstance(value, basestring):
return utils.SmartUnicode(value)
# Numbers are returned as-is.
elif isinstance(value, numbers.Number):
return value
    # Everything else is returned as a string.
else:
return utils.SmartUnicode(value)
def RenderObject(self, obj, request):
"""Renders given object as plain JSON-friendly data structure."""
raise NotImplementedError()
class RDFValueApiObjectRenderer(ApiObjectRenderer):
"""Renderer for a generic rdfvalue."""
rdfvalue_type = "RDFValue"
def RenderObject(self, value, request):
with_type_info = request.get("with_type_info", False)
result = value.SerializeToDataStore()
if isinstance(result, basestring):
result = utils.SmartUnicode(result)
if with_type_info:
result = dict(type=value.__class__.__name__,
mro=self.GetTypeList(value),
value=result,
age=value.age.AsSecondsFromEpoch())
return result
class RDFValueArrayApiObjectRenderer(RDFValueApiObjectRenderer):
"""Renderer for RDFValueArray."""
rdfvalue_type = "RDFValueArray"
def RenderObject(self, value, request):
return list(self._ToPrimitive(v, request) for v in value)
class FlowStateApiObjectRenderer(RDFValueApiObjectRenderer):
"""Renderer for FlowState."""
rdfvalue_type = "FlowState"
def RenderObject(self, value, request):
return self._ToPrimitive(value.data, request)
class DataBlobApiObjectRenderer(RDFValueApiObjectRenderer):
"""Renderer for DataBlob."""
rdfvalue_type = "DataBlob"
def RenderObject(self, value, request):
return self._ToPrimitive(value.GetValue(), request)
class EmbeddedRDFValueApiObjectRenderer(RDFValueApiObjectRenderer):
"""Renderer for EmbeddedRDFValue."""
rdfvalue_type = "EmbeddedRDFValue"
def RenderObject(self, value, request):
return self._ToPrimitive(value.payload, request)
class RDFProtoStructApiObjectRenderer(ApiObjectRenderer):
"""Renderer for RDFProtoStructs."""
rdfvalue_type = "RDFProtoStruct"
translator = {}
descriptors_cache = {}
def RenderObject(self, value, request):
with_type_info = request.get("with_type_info", False)
with_descriptors = request.get("with_descriptors", False)
result = self._ToPrimitive(value.AsDict(), request)
for key in result.keys():
if key in self.translator:
result[key] = self.translator[key](self, value, request)
# If type information is needed, converted value is placed in the
# resulting dictionary under the 'value' key.
if with_type_info:
result = dict(type=value.__class__.__name__,
mro=self.GetTypeList(value),
value=result,
age=value.age.AsSecondsFromEpoch())
if with_descriptors:
try:
descriptors, order = self.descriptors_cache[value.__class__.__name__]
except KeyError:
descriptors = {}
order = []
for descriptor, _ in value.ListFields():
order.append(descriptor.name)
descriptors[descriptor.name] = {
"friendly_name": descriptor.friendly_name,
"description": descriptor.description
}
self.descriptors_cache[value.__class__.__name__] = (descriptors,
order)
result["descriptors"] = descriptors
result["fields_order"] = order
return result
class GrrMessageApiObjectRenderer(RDFProtoStructApiObjectRenderer):
"""Renderer for GrrMessage objects."""
rdfvalue_type = "GrrMessage"
def RenderPayload(self, value, request):
return self._ToPrimitive(value.payload, request)
translator = dict(args=RenderPayload)
class AFF4ObjectApiObjectRenderer(ApiObjectRenderer):
"""Renderer for a generic AFF4 object."""
aff4_type = "AFF4Object"
def RenderObject(self, aff4_object, request):
"""Render given aff4 object into JSON-serializable data structure."""
with_type_info = request.get("with_type_info", False)
with_descriptors = request.get("with_descriptors", False)
attributes = {}
for attribute, values in aff4_object.synced_attributes.items():
attributes[attribute.predicate] = []
for value in values:
# This value is really a LazyDecoder() instance. We need to get at the
# real data here.
value = value.ToRDFValue()
if aff4_object.age_policy != aff4.NEWEST_TIME:
attributes[attribute.predicate].append(self._ToPrimitive(value,
request))
else:
attributes[attribute.predicate] = self._ToPrimitive(value, request)
result = dict(aff4_class=aff4_object.__class__.__name__,
urn=utils.SmartUnicode(aff4_object.urn),
attributes=attributes,
age_policy=aff4_object.age_policy)
if with_type_info and with_descriptors:
descriptors = {}
for attribute, _ in aff4_object.synced_attributes.items():
descriptors[attribute.predicate] = {
"description": attribute.description
}
result["descriptors"] = descriptors
return result
class RDFValueCollectionApiObjectRenderer(AFF4ObjectApiObjectRenderer):
"""Renderer for RDFValueCollections."""
aff4_type = "RDFValueCollection"
def RenderObject(self, aff4_object, request):
offset = int(request.get("offset", 0))
count = int(request.get("count", 10000))
with_total_count = request.get("with_total_count", False)
filter_value = request.get("filter", "")
if filter_value:
index = 0
items = []
for item in aff4_object.GenerateItems():
serialized_item = item.SerializeToString()
if re.search(re.escape(filter_value), serialized_item, re.I):
if index >= offset:
items.append(item)
index += 1
if len(items) >= count:
break
else:
items = itertools.islice(aff4_object.GenerateItems(),
offset, offset + count)
rendered_object = super(RDFValueCollectionApiObjectRenderer,
self).RenderObject(aff4_object, request)
rendered_object["offset"] = offset
rendered_object["items"] = [self._ToPrimitive(item, request)
for item in items]
if with_total_count:
if hasattr(aff4_object, "CalculateLength"):
total_count = aff4_object.CalculateLength()
else:
total_count = len(aff4_object)
rendered_object["total_count"] = total_count
return rendered_object
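  # Hedged usage sketch: the request dict drives paging and filtering here.
  # For example (hypothetical values):
  #   request = {"offset": 20, "count": 50, "filter": "error",
  #              "with_total_count": True}
  # returns matched items 20..69, where a match means the item's serialized
  # form contains "error" case-insensitively, plus a "total_count" key for
  # the whole (unfiltered) collection.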
class VFSGRRClientApiObjectRenderer(AFF4ObjectApiObjectRenderer):
"""Renderer for VFSGRRClient objects."""
aff4_type = "VFSGRRClient"
def RenderObject(self, aff4_object, request):
rendered_object = super(VFSGRRClientApiObjectRenderer, self).RenderObject(
aff4_object, request)
rendered_object["summary"] = self._ToPrimitive(aff4_object.GetSummary(),
request)
return rendered_object
RENDERERS_CACHE = {}
def RenderObject(obj, request=None):
"""Handler for the /api/aff4 requests."""
if request is None:
request = {}
if isinstance(obj, aff4.AFF4Object):
is_aff4 = True
key = "aff4." + obj.__class__.__name__
elif isinstance(obj, rdfvalue.RDFValue):
is_aff4 = False
key = "rdfvalue." + obj.__class__.__name__
else:
raise ValueError("Can't render object that's neither AFF4Object nor "
"RDFValue: %s." % utils.SmartStr(obj))
try:
renderer_cls = RENDERERS_CACHE[key]
except KeyError:
candidates = []
for candidate in ApiObjectRenderer.classes.values():
if is_aff4 and candidate.aff4_type:
candidate_class = aff4.AFF4Object.classes[candidate.aff4_type]
elif candidate.rdfvalue_type:
candidate_class = rdfvalue.RDFValue.classes[candidate.rdfvalue_type]
else:
continue
if aff4.issubclass(obj.__class__, candidate_class):
candidates.append((candidate, candidate_class))
if not candidates:
raise RuntimeError("No renderer found for object %s." %
obj.__class__.__name__)
candidates = sorted(candidates,
key=lambda candidate: len(candidate[1].mro()))
renderer_cls = candidates[-1][0]
RENDERERS_CACHE[key] = renderer_cls
api_renderer = renderer_cls()
rendered_data = api_renderer.RenderObject(obj, request)
return rendered_data
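# Minimal usage sketch (the client URN and token below are hypothetical):
#   client = aff4.FACTORY.Open("aff4:/C.0000000000000001", token=token)
#   rendered = RenderObject(client, {"with_type_info": True})
# The dispatch above prefers the most specific renderer: candidates are
# sorted by the length of the handled class's MRO, so a VFSGRRClient picks
# VFSGRRClientApiObjectRenderer over the generic AFF4ObjectApiObjectRenderer.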
| apache-2.0 |
buntyke/Flask | microblog/flask/lib/python2.7/site-packages/sqlalchemy/testing/suite/test_insert.py | 115 | 8132 | from .. import fixtures, config
from ..config import requirements
from .. import exclusions
from ..assertions import eq_
from .. import engines
from sqlalchemy import Integer, String, select, literal_column, literal
from ..schema import Table, Column
class LastrowidTest(fixtures.TablesTest):
run_deletes = 'each'
__backend__ = True
__requires__ = 'implements_get_lastrowid', 'autoincrement_insert'
__engine_options__ = {"implicit_returning": False}
@classmethod
def define_tables(cls, metadata):
Table('autoinc_pk', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(50))
)
Table('manual_pk', metadata,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('data', String(50))
)
def _assert_round_trip(self, table, conn):
row = conn.execute(table.select()).first()
eq_(
row,
(config.db.dialect.default_sequence_base, "some data")
)
def test_autoincrement_on_insert(self):
config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
self._assert_round_trip(self.tables.autoinc_pk, config.db)
def test_last_inserted_id(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
eq_(
r.inserted_primary_key,
[pk]
)
# failed on pypy1.9 but seems to be OK on pypy 2.1
# @exclusions.fails_if(lambda: util.pypy,
# "lastrowid not maintained after "
# "connection close")
@requirements.dbapi_lastrowid
def test_native_lastrowid_autoinc(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
lastrowid = r.lastrowid
pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
eq_(
lastrowid, pk
)
class InsertBehaviorTest(fixtures.TablesTest):
run_deletes = 'each'
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table('autoinc_pk', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(50))
)
Table('manual_pk', metadata,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('data', String(50))
)
Table('includes_defaults', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(50)),
Column('x', Integer, default=5),
Column('y', Integer,
default=literal_column("2", type_=Integer) + literal(2)))
def test_autoclose_on_insert(self):
if requirements.returning.enabled:
engine = engines.testing_engine(
options={'implicit_returning': False})
else:
engine = config.db
r = engine.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
assert r._soft_closed
assert not r.closed
assert r.is_insert
assert not r.returns_rows
@requirements.returning
def test_autoclose_on_insert_implicit_returning(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
assert r._soft_closed
assert not r.closed
assert r.is_insert
assert not r.returns_rows
@requirements.empty_inserts
def test_empty_insert(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
)
assert r._soft_closed
assert not r.closed
r = config.db.execute(
self.tables.autoinc_pk.select().
where(self.tables.autoinc_pk.c.id != None)
)
assert len(r.fetchall())
@requirements.insert_from_select
def test_insert_from_select(self):
table = self.tables.manual_pk
config.db.execute(
table.insert(),
[
dict(id=1, data="data1"),
dict(id=2, data="data2"),
dict(id=3, data="data3"),
]
)
config.db.execute(
table.insert(inline=True).
from_select(("id", "data",),
select([table.c.id + 5, table.c.data]).
where(table.c.data.in_(["data2", "data3"]))
),
)
eq_(
config.db.execute(
select([table.c.data]).order_by(table.c.data)
).fetchall(),
[("data1", ), ("data2", ), ("data2", ),
("data3", ), ("data3", )]
)
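        # The INSERT..FROM SELECT above copies the data2/data3 rows with ids
        # shifted by 5, which is why those values appear twice in the ordered
        # result.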
@requirements.insert_from_select
def test_insert_from_select_with_defaults(self):
table = self.tables.includes_defaults
config.db.execute(
table.insert(),
[
dict(id=1, data="data1"),
dict(id=2, data="data2"),
dict(id=3, data="data3"),
]
)
config.db.execute(
table.insert(inline=True).
from_select(("id", "data",),
select([table.c.id + 5, table.c.data]).
where(table.c.data.in_(["data2", "data3"]))
),
)
eq_(
config.db.execute(
select([table]).order_by(table.c.data, table.c.id)
).fetchall(),
[(1, 'data1', 5, 4), (2, 'data2', 5, 4),
(7, 'data2', 5, 4), (3, 'data3', 5, 4), (8, 'data3', 5, 4)]
)
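        # The trailing (5, 4) on every row comes from the column defaults:
        # x defaults to the plain value 5, and y to the SQL expression
        # literal_column("2", ...) + literal(2), which evaluates to 4.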
class ReturningTest(fixtures.TablesTest):
run_create_tables = 'each'
__requires__ = 'returning', 'autoincrement_insert'
__backend__ = True
__engine_options__ = {"implicit_returning": True}
def _assert_round_trip(self, table, conn):
row = conn.execute(table.select()).first()
eq_(
row,
(config.db.dialect.default_sequence_base, "some data")
)
@classmethod
def define_tables(cls, metadata):
Table('autoinc_pk', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(50))
)
@requirements.fetch_rows_post_commit
def test_explicit_returning_pk_autocommit(self):
engine = config.db
table = self.tables.autoinc_pk
r = engine.execute(
table.insert().returning(
table.c.id),
data="some data"
)
pk = r.first()[0]
fetched_pk = config.db.scalar(select([table.c.id]))
eq_(fetched_pk, pk)
def test_explicit_returning_pk_no_autocommit(self):
engine = config.db
table = self.tables.autoinc_pk
with engine.begin() as conn:
r = conn.execute(
table.insert().returning(
table.c.id),
data="some data"
)
pk = r.first()[0]
fetched_pk = config.db.scalar(select([table.c.id]))
eq_(fetched_pk, pk)
    def test_autoincrement_on_insert_implicit_returning(self):
config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
self._assert_round_trip(self.tables.autoinc_pk, config.db)
def test_last_inserted_id_implicit_returning(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
eq_(
r.inserted_primary_key,
[pk]
)
__all__ = ('LastrowidTest', 'InsertBehaviorTest', 'ReturningTest')
| mit |
svn2github/google-protobuf | python/google/protobuf/internal/reflection_test.py | 23 | 120400 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unittest for reflection.py, which also indirectly tests the output of the
pure-Python protocol compiler.
"""
__author__ = '[email protected] (Will Robinson)'
import copy
import gc
import operator
import struct
from google.apputils import basetest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import text_format
from google.protobuf.internal import api_implementation
from google.protobuf.internal import more_extensions_pb2
from google.protobuf.internal import more_messages_pb2
from google.protobuf.internal import wire_format
from google.protobuf.internal import test_util
from google.protobuf.internal import decoder
class _MiniDecoder(object):
"""Decodes a stream of values from a string.
Once upon a time we actually had a class called decoder.Decoder. Then we
got rid of it during a redesign that made decoding much, much faster overall.
But a couple tests in this file used it to check that the serialized form of
a message was correct. So, this class implements just the methods that were
used by said tests, so that we don't have to rewrite the tests.
"""
def __init__(self, bytes):
self._bytes = bytes
self._pos = 0
def ReadVarint(self):
result, self._pos = decoder._DecodeVarint(self._bytes, self._pos)
return result
ReadInt32 = ReadVarint
ReadInt64 = ReadVarint
ReadUInt32 = ReadVarint
ReadUInt64 = ReadVarint
def ReadSInt64(self):
return wire_format.ZigZagDecode(self.ReadVarint())
ReadSInt32 = ReadSInt64
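  # For reference, ZigZag encoding interleaves signed values onto unsigned
  # varints: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... so ZigZagDecode(1) == -1
  # and, for sint32, ZigZagDecode(4294967294) == 2147483647.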
def ReadFieldNumberAndWireType(self):
return wire_format.UnpackTag(self.ReadVarint())
def ReadFloat(self):
result = struct.unpack("<f", self._bytes[self._pos:self._pos+4])[0]
self._pos += 4
return result
def ReadDouble(self):
result = struct.unpack("<d", self._bytes[self._pos:self._pos+8])[0]
self._pos += 8
return result
def EndOfStream(self):
return self._pos == len(self._bytes)
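  # A hedged usage sketch (bytes chosen by hand; the canonical varint demo
  # for a message with field 1 set to 150):
  #   d = _MiniDecoder(b'\x08\x96\x01')
  #   assert d.ReadFieldNumberAndWireType() == (1, 0)  # field 1, wire type 0
  #   assert d.ReadVarint() == 150
  #   assert d.EndOfStream()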
class ReflectionTest(basetest.TestCase):
def assertListsEqual(self, values, others):
self.assertEqual(len(values), len(others))
for i in range(len(values)):
self.assertEqual(values[i], others[i])
def testScalarConstructor(self):
# Constructor with only scalar types should succeed.
proto = unittest_pb2.TestAllTypes(
optional_int32=24,
optional_double=54.321,
optional_string='optional_string')
self.assertEqual(24, proto.optional_int32)
self.assertEqual(54.321, proto.optional_double)
self.assertEqual('optional_string', proto.optional_string)
def testRepeatedScalarConstructor(self):
# Constructor with only repeated scalar types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_int32=[1, 2, 3, 4],
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_string=["optional_string"])
self.assertEquals([1, 2, 3, 4], list(proto.repeated_int32))
self.assertEquals([1.23, 54.321], list(proto.repeated_double))
self.assertEquals([True, False, False], list(proto.repeated_bool))
self.assertEquals(["optional_string"], list(proto.repeated_string))
def testRepeatedCompositeConstructor(self):
# Constructor with only repeated composite types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
repeatedgroup=[
unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)])
self.assertEquals(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEquals(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
self.assertEquals(
[unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)],
list(proto.repeatedgroup))
def testMixedConstructor(self):
# Constructor with only mixed types should succeed.
proto = unittest_pb2.TestAllTypes(
optional_int32=24,
optional_string='optional_string',
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)])
self.assertEqual(24, proto.optional_int32)
self.assertEqual('optional_string', proto.optional_string)
self.assertEquals([1.23, 54.321], list(proto.repeated_double))
self.assertEquals([True, False, False], list(proto.repeated_bool))
self.assertEquals(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEquals(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
def testConstructorTypeError(self):
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_int32="foo")
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_string=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_nested_message=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_int32=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_int32=["foo"])
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_string=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_string=[1234])
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_nested_message=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_nested_message=[1234])
def testConstructorInvalidatesCachedByteSize(self):
message = unittest_pb2.TestAllTypes(optional_int32 = 12)
self.assertEquals(2, message.ByteSize())
message = unittest_pb2.TestAllTypes(
optional_nested_message = unittest_pb2.TestAllTypes.NestedMessage())
self.assertEquals(3, message.ByteSize())
message = unittest_pb2.TestAllTypes(repeated_int32 = [12])
self.assertEquals(3, message.ByteSize())
message = unittest_pb2.TestAllTypes(
repeated_nested_message = [unittest_pb2.TestAllTypes.NestedMessage()])
self.assertEquals(3, message.ByteSize())
def testSimpleHasBits(self):
# Test a scalar.
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.HasField('optional_int32'))
self.assertEqual(0, proto.optional_int32)
# HasField() shouldn't be true if all we've done is
# read the default value.
self.assertTrue(not proto.HasField('optional_int32'))
proto.optional_int32 = 1
# Setting a value however *should* set the "has" bit.
self.assertTrue(proto.HasField('optional_int32'))
proto.ClearField('optional_int32')
# And clearing that value should unset the "has" bit.
self.assertTrue(not proto.HasField('optional_int32'))
def testHasBitsWithSinglyNestedScalar(self):
# Helper used to test foreign messages and groups.
#
# composite_field_name should be the name of a non-repeated
# composite (i.e., foreign or group) field in TestAllTypes,
# and scalar_field_name should be the name of an integer-valued
# scalar field within that composite.
#
# I never thought I'd miss C++ macros and templates so much. :(
# This helper is semantically just:
#
# assert proto.composite_field.scalar_field == 0
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
#
# proto.composite_field.scalar_field = 10
# old_composite_field = proto.composite_field
#
# assert proto.composite_field.scalar_field == 10
# assert proto.composite_field.HasField('scalar_field')
# assert proto.HasField('composite_field')
#
# proto.ClearField('composite_field')
#
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
# assert proto.composite_field.scalar_field == 0
#
# # Now ensure that ClearField('composite_field') disconnected
# # the old field object from the object tree...
# assert old_composite_field is not proto.composite_field
# old_composite_field.scalar_field = 20
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
def TestCompositeHasBits(composite_field_name, scalar_field_name):
proto = unittest_pb2.TestAllTypes()
# First, check that we can get the scalar value, and see that it's the
      # default (0), but that proto.HasField('composite') and
# proto.composite.HasField('scalar') will still return False.
composite_field = getattr(proto, composite_field_name)
original_scalar_value = getattr(composite_field, scalar_field_name)
self.assertEqual(0, original_scalar_value)
# Assert that the composite object does not "have" the scalar.
self.assertTrue(not composite_field.HasField(scalar_field_name))
# Assert that proto does not "have" the composite field.
self.assertTrue(not proto.HasField(composite_field_name))
# Now set the scalar within the composite field. Ensure that the setting
# is reflected, and that proto.HasField('composite') and
# proto.composite.HasField('scalar') now both return True.
new_val = 20
setattr(composite_field, scalar_field_name, new_val)
self.assertEqual(new_val, getattr(composite_field, scalar_field_name))
# Hold on to a reference to the current composite_field object.
old_composite_field = composite_field
# Assert that the has methods now return true.
self.assertTrue(composite_field.HasField(scalar_field_name))
self.assertTrue(proto.HasField(composite_field_name))
# Now call the clear method...
proto.ClearField(composite_field_name)
# ...and ensure that the "has" bits are all back to False...
composite_field = getattr(proto, composite_field_name)
self.assertTrue(not composite_field.HasField(scalar_field_name))
self.assertTrue(not proto.HasField(composite_field_name))
# ...and ensure that the scalar field has returned to its default.
self.assertEqual(0, getattr(composite_field, scalar_field_name))
self.assertTrue(old_composite_field is not composite_field)
setattr(old_composite_field, scalar_field_name, new_val)
self.assertTrue(not composite_field.HasField(scalar_field_name))
self.assertTrue(not proto.HasField(composite_field_name))
self.assertEqual(0, getattr(composite_field, scalar_field_name))
# Test simple, single-level nesting when we set a scalar.
TestCompositeHasBits('optionalgroup', 'a')
TestCompositeHasBits('optional_nested_message', 'bb')
TestCompositeHasBits('optional_foreign_message', 'c')
TestCompositeHasBits('optional_import_message', 'd')
def testReferencesToNestedMessage(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
del proto
# A previous version had a bug where this would raise an exception when
# hitting a now-dead weak reference.
nested.bb = 23
def testDisconnectingNestedMessageBeforeSettingField(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
proto.ClearField('optional_nested_message') # Should disconnect from parent
self.assertTrue(nested is not proto.optional_nested_message)
nested.bb = 23
self.assertTrue(not proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
def testGetDefaultMessageAfterDisconnectingDefaultMessage(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
proto.ClearField('optional_nested_message')
del proto
del nested
# Force a garbage collect so that the underlying CMessages are freed along
# with the Messages they point to. This is to make sure we're not deleting
# default message instances.
gc.collect()
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
def testDisconnectingNestedMessageAfterSettingField(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
nested.bb = 5
self.assertTrue(proto.HasField('optional_nested_message'))
proto.ClearField('optional_nested_message') # Should disconnect from parent
self.assertEqual(5, nested.bb)
self.assertEqual(0, proto.optional_nested_message.bb)
self.assertTrue(nested is not proto.optional_nested_message)
nested.bb = 23
self.assertTrue(not proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
def testDisconnectingNestedMessageBeforeGettingField(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.HasField('optional_nested_message'))
proto.ClearField('optional_nested_message')
self.assertTrue(not proto.HasField('optional_nested_message'))
def testDisconnectingNestedMessageAfterMerge(self):
# This test exercises the code path that does not use ReleaseMessage().
# The underlying fear is that if we use ReleaseMessage() incorrectly,
# we will have memory leaks. It's hard to check that that doesn't happen,
# but at least we can exercise that code path to make sure it works.
proto1 = unittest_pb2.TestAllTypes()
proto2 = unittest_pb2.TestAllTypes()
proto2.optional_nested_message.bb = 5
proto1.MergeFrom(proto2)
self.assertTrue(proto1.HasField('optional_nested_message'))
proto1.ClearField('optional_nested_message')
self.assertTrue(not proto1.HasField('optional_nested_message'))
def testDisconnectingLazyNestedMessage(self):
# This test exercises releasing a nested message that is lazy. This test
# only exercises real code in the C++ implementation as Python does not
# support lazy parsing, but the current C++ implementation results in
# memory corruption and a crash.
if api_implementation.Type() != 'python':
return
proto = unittest_pb2.TestAllTypes()
proto.optional_lazy_message.bb = 5
proto.ClearField('optional_lazy_message')
del proto
gc.collect()
def testHasBitsWhenModifyingRepeatedFields(self):
# Test nesting when we add an element to a repeated field in a submessage.
proto = unittest_pb2.TestNestedMessageHasBits()
proto.optional_nested_message.nestedmessage_repeated_int32.append(5)
self.assertEqual(
[5], proto.optional_nested_message.nestedmessage_repeated_int32)
self.assertTrue(proto.HasField('optional_nested_message'))
# Do the same test, but with a repeated composite field within the
# submessage.
proto.ClearField('optional_nested_message')
self.assertTrue(not proto.HasField('optional_nested_message'))
proto.optional_nested_message.nestedmessage_repeated_foreignmessage.add()
self.assertTrue(proto.HasField('optional_nested_message'))
def testHasBitsForManyLevelsOfNesting(self):
# Test nesting many levels deep.
recursive_proto = unittest_pb2.TestMutualRecursionA()
self.assertTrue(not recursive_proto.HasField('bb'))
self.assertEqual(0, recursive_proto.bb.a.bb.a.bb.optional_int32)
self.assertTrue(not recursive_proto.HasField('bb'))
recursive_proto.bb.a.bb.a.bb.optional_int32 = 5
self.assertEqual(5, recursive_proto.bb.a.bb.a.bb.optional_int32)
self.assertTrue(recursive_proto.HasField('bb'))
self.assertTrue(recursive_proto.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.HasField('bb'))
self.assertTrue(recursive_proto.bb.a.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.bb.a.HasField('bb'))
self.assertTrue(not recursive_proto.bb.a.bb.a.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.bb.a.bb.HasField('optional_int32'))
def testSingularListFields(self):
proto = unittest_pb2.TestAllTypes()
proto.optional_fixed32 = 1
proto.optional_int32 = 5
proto.optional_string = 'foo'
# Access sub-message but don't set it yet.
nested_message = proto.optional_nested_message
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 5),
(proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1),
(proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo') ],
proto.ListFields())
proto.optional_nested_message.bb = 123
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 5),
(proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1),
(proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo'),
(proto.DESCRIPTOR.fields_by_name['optional_nested_message' ],
nested_message) ],
proto.ListFields())
def testRepeatedListFields(self):
proto = unittest_pb2.TestAllTypes()
proto.repeated_fixed32.append(1)
proto.repeated_int32.append(5)
proto.repeated_int32.append(11)
proto.repeated_string.extend(['foo', 'bar'])
proto.repeated_string.extend([])
proto.repeated_string.append('baz')
proto.repeated_string.extend(str(x) for x in xrange(2))
proto.optional_int32 = 21
proto.repeated_bool # Access but don't set anything; should not be listed.
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 21),
(proto.DESCRIPTOR.fields_by_name['repeated_int32' ], [5, 11]),
(proto.DESCRIPTOR.fields_by_name['repeated_fixed32'], [1]),
(proto.DESCRIPTOR.fields_by_name['repeated_string' ],
['foo', 'bar', 'baz', '0', '1']) ],
proto.ListFields())
def testSingularListExtensions(self):
proto = unittest_pb2.TestAllExtensions()
proto.Extensions[unittest_pb2.optional_fixed32_extension] = 1
proto.Extensions[unittest_pb2.optional_int32_extension ] = 5
proto.Extensions[unittest_pb2.optional_string_extension ] = 'foo'
self.assertEqual(
[ (unittest_pb2.optional_int32_extension , 5),
(unittest_pb2.optional_fixed32_extension, 1),
(unittest_pb2.optional_string_extension , 'foo') ],
proto.ListFields())
def testRepeatedListExtensions(self):
proto = unittest_pb2.TestAllExtensions()
proto.Extensions[unittest_pb2.repeated_fixed32_extension].append(1)
proto.Extensions[unittest_pb2.repeated_int32_extension ].append(5)
proto.Extensions[unittest_pb2.repeated_int32_extension ].append(11)
proto.Extensions[unittest_pb2.repeated_string_extension ].append('foo')
proto.Extensions[unittest_pb2.repeated_string_extension ].append('bar')
proto.Extensions[unittest_pb2.repeated_string_extension ].append('baz')
proto.Extensions[unittest_pb2.optional_int32_extension ] = 21
self.assertEqual(
[ (unittest_pb2.optional_int32_extension , 21),
(unittest_pb2.repeated_int32_extension , [5, 11]),
(unittest_pb2.repeated_fixed32_extension, [1]),
(unittest_pb2.repeated_string_extension , ['foo', 'bar', 'baz']) ],
proto.ListFields())
def testListFieldsAndExtensions(self):
proto = unittest_pb2.TestFieldOrderings()
test_util.SetAllFieldsAndExtensions(proto)
unittest_pb2.my_extension_int
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['my_int' ], 1),
(unittest_pb2.my_extension_int , 23),
(proto.DESCRIPTOR.fields_by_name['my_string'], 'foo'),
(unittest_pb2.my_extension_string , 'bar'),
(proto.DESCRIPTOR.fields_by_name['my_float' ], 1.0) ],
proto.ListFields())
def testDefaultValues(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.optional_int32)
self.assertEqual(0, proto.optional_int64)
self.assertEqual(0, proto.optional_uint32)
self.assertEqual(0, proto.optional_uint64)
self.assertEqual(0, proto.optional_sint32)
self.assertEqual(0, proto.optional_sint64)
self.assertEqual(0, proto.optional_fixed32)
self.assertEqual(0, proto.optional_fixed64)
self.assertEqual(0, proto.optional_sfixed32)
self.assertEqual(0, proto.optional_sfixed64)
self.assertEqual(0.0, proto.optional_float)
self.assertEqual(0.0, proto.optional_double)
self.assertEqual(False, proto.optional_bool)
self.assertEqual('', proto.optional_string)
self.assertEqual(b'', proto.optional_bytes)
self.assertEqual(41, proto.default_int32)
self.assertEqual(42, proto.default_int64)
self.assertEqual(43, proto.default_uint32)
self.assertEqual(44, proto.default_uint64)
self.assertEqual(-45, proto.default_sint32)
self.assertEqual(46, proto.default_sint64)
self.assertEqual(47, proto.default_fixed32)
self.assertEqual(48, proto.default_fixed64)
self.assertEqual(49, proto.default_sfixed32)
self.assertEqual(-50, proto.default_sfixed64)
self.assertEqual(51.5, proto.default_float)
self.assertEqual(52e3, proto.default_double)
self.assertEqual(True, proto.default_bool)
self.assertEqual('hello', proto.default_string)
self.assertEqual(b'world', proto.default_bytes)
self.assertEqual(unittest_pb2.TestAllTypes.BAR, proto.default_nested_enum)
self.assertEqual(unittest_pb2.FOREIGN_BAR, proto.default_foreign_enum)
self.assertEqual(unittest_import_pb2.IMPORT_BAR,
proto.default_import_enum)
proto = unittest_pb2.TestExtremeDefaultValues()
self.assertEqual(u'\u1234', proto.utf8_string)
def testHasFieldWithUnknownFieldName(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(ValueError, proto.HasField, 'nonexistent_field')
def testClearFieldWithUnknownFieldName(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(ValueError, proto.ClearField, 'nonexistent_field')
def testClearRemovesChildren(self):
# Make sure there aren't any implementation bugs that are only partially
# clearing the message (which can happen in the more complex C++
# implementation which has parallel message lists).
proto = unittest_pb2.TestRequiredForeign()
for i in range(10):
proto.repeated_message.add()
proto2 = unittest_pb2.TestRequiredForeign()
proto.CopyFrom(proto2)
self.assertRaises(IndexError, lambda: proto.repeated_message[5])
def testDisallowedAssignments(self):
# It's illegal to assign values directly to repeated fields
# or to nonrepeated composite fields. Ensure that this fails.
proto = unittest_pb2.TestAllTypes()
# Repeated fields.
self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', 10)
# Lists shouldn't work, either.
self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', [10])
# Composite fields.
self.assertRaises(AttributeError, setattr, proto,
'optional_nested_message', 23)
# Assignment to a repeated nested message field without specifying
# the index in the array of nested messages.
self.assertRaises(AttributeError, setattr, proto.repeated_nested_message,
'bb', 34)
# Assignment to an attribute of a repeated field.
self.assertRaises(AttributeError, setattr, proto.repeated_float,
'some_attribute', 34)
# proto.nonexistent_field = 23 should fail as well.
self.assertRaises(AttributeError, setattr, proto, 'nonexistent_field', 23)
def testSingleScalarTypeSafety(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(TypeError, setattr, proto, 'optional_int32', 1.1)
self.assertRaises(TypeError, setattr, proto, 'optional_int32', 'foo')
self.assertRaises(TypeError, setattr, proto, 'optional_string', 10)
self.assertRaises(TypeError, setattr, proto, 'optional_bytes', 10)
def testIntegerTypes(self):
def TestGetAndDeserialize(field_name, value, expected_type):
proto = unittest_pb2.TestAllTypes()
setattr(proto, field_name, value)
self.assertTrue(isinstance(getattr(proto, field_name), expected_type))
proto2 = unittest_pb2.TestAllTypes()
proto2.ParseFromString(proto.SerializeToString())
self.assertTrue(isinstance(getattr(proto2, field_name), expected_type))
TestGetAndDeserialize('optional_int32', 1, int)
TestGetAndDeserialize('optional_int32', 1 << 30, int)
TestGetAndDeserialize('optional_uint32', 1 << 30, int)
if struct.calcsize('L') == 4:
      # Python only has signed ints, so 32-bit python can't fit a uint32
# in an int.
TestGetAndDeserialize('optional_uint32', 1 << 31, long)
else:
# 64-bit python can fit uint32 inside an int
TestGetAndDeserialize('optional_uint32', 1 << 31, int)
TestGetAndDeserialize('optional_int64', 1 << 30, long)
TestGetAndDeserialize('optional_int64', 1 << 60, long)
TestGetAndDeserialize('optional_uint64', 1 << 30, long)
TestGetAndDeserialize('optional_uint64', 1 << 60, long)
def testSingleScalarBoundsChecking(self):
def TestMinAndMaxIntegers(field_name, expected_min, expected_max):
pb = unittest_pb2.TestAllTypes()
setattr(pb, field_name, expected_min)
self.assertEqual(expected_min, getattr(pb, field_name))
setattr(pb, field_name, expected_max)
self.assertEqual(expected_max, getattr(pb, field_name))
self.assertRaises(ValueError, setattr, pb, field_name, expected_min - 1)
self.assertRaises(ValueError, setattr, pb, field_name, expected_max + 1)
TestMinAndMaxIntegers('optional_int32', -(1 << 31), (1 << 31) - 1)
TestMinAndMaxIntegers('optional_uint32', 0, 0xffffffff)
TestMinAndMaxIntegers('optional_int64', -(1 << 63), (1 << 63) - 1)
TestMinAndMaxIntegers('optional_uint64', 0, 0xffffffffffffffff)
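    # For reference: -(1 << 31) == -2147483648 and (1 << 31) - 1 == 2147483647
    # (the int32 range); 0xffffffff == 4294967295 is the uint32 maximum.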
pb = unittest_pb2.TestAllTypes()
pb.optional_nested_enum = 1
self.assertEqual(1, pb.optional_nested_enum)
def testRepeatedScalarTypeSafety(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(TypeError, proto.repeated_int32.append, 1.1)
self.assertRaises(TypeError, proto.repeated_int32.append, 'foo')
self.assertRaises(TypeError, proto.repeated_string, 10)
self.assertRaises(TypeError, proto.repeated_bytes, 10)
proto.repeated_int32.append(10)
proto.repeated_int32[0] = 23
self.assertRaises(IndexError, proto.repeated_int32.__setitem__, 500, 23)
self.assertRaises(TypeError, proto.repeated_int32.__setitem__, 0, 'abc')
# Repeated enums tests.
#proto.repeated_nested_enum.append(0)
def testSingleScalarGettersAndSetters(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.optional_int32)
proto.optional_int32 = 1
self.assertEqual(1, proto.optional_int32)
proto.optional_uint64 = 0xffffffffffff
self.assertEqual(0xffffffffffff, proto.optional_uint64)
proto.optional_uint64 = 0xffffffffffffffff
self.assertEqual(0xffffffffffffffff, proto.optional_uint64)
# TODO(robinson): Test all other scalar field types.
def testSingleScalarClearField(self):
proto = unittest_pb2.TestAllTypes()
# Should be allowed to clear something that's not there (a no-op).
proto.ClearField('optional_int32')
proto.optional_int32 = 1
self.assertTrue(proto.HasField('optional_int32'))
proto.ClearField('optional_int32')
self.assertEqual(0, proto.optional_int32)
self.assertTrue(not proto.HasField('optional_int32'))
# TODO(robinson): Test all other scalar field types.
def testEnums(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(1, proto.FOO)
self.assertEqual(1, unittest_pb2.TestAllTypes.FOO)
self.assertEqual(2, proto.BAR)
self.assertEqual(2, unittest_pb2.TestAllTypes.BAR)
self.assertEqual(3, proto.BAZ)
self.assertEqual(3, unittest_pb2.TestAllTypes.BAZ)
def testEnum_Name(self):
self.assertEqual('FOREIGN_FOO',
unittest_pb2.ForeignEnum.Name(unittest_pb2.FOREIGN_FOO))
self.assertEqual('FOREIGN_BAR',
unittest_pb2.ForeignEnum.Name(unittest_pb2.FOREIGN_BAR))
self.assertEqual('FOREIGN_BAZ',
unittest_pb2.ForeignEnum.Name(unittest_pb2.FOREIGN_BAZ))
self.assertRaises(ValueError,
unittest_pb2.ForeignEnum.Name, 11312)
proto = unittest_pb2.TestAllTypes()
self.assertEqual('FOO',
proto.NestedEnum.Name(proto.FOO))
self.assertEqual('FOO',
unittest_pb2.TestAllTypes.NestedEnum.Name(proto.FOO))
self.assertEqual('BAR',
proto.NestedEnum.Name(proto.BAR))
self.assertEqual('BAR',
unittest_pb2.TestAllTypes.NestedEnum.Name(proto.BAR))
self.assertEqual('BAZ',
proto.NestedEnum.Name(proto.BAZ))
self.assertEqual('BAZ',
unittest_pb2.TestAllTypes.NestedEnum.Name(proto.BAZ))
self.assertRaises(ValueError,
proto.NestedEnum.Name, 11312)
self.assertRaises(ValueError,
unittest_pb2.TestAllTypes.NestedEnum.Name, 11312)
def testEnum_Value(self):
self.assertEqual(unittest_pb2.FOREIGN_FOO,
unittest_pb2.ForeignEnum.Value('FOREIGN_FOO'))
self.assertEqual(unittest_pb2.FOREIGN_BAR,
unittest_pb2.ForeignEnum.Value('FOREIGN_BAR'))
self.assertEqual(unittest_pb2.FOREIGN_BAZ,
unittest_pb2.ForeignEnum.Value('FOREIGN_BAZ'))
self.assertRaises(ValueError,
unittest_pb2.ForeignEnum.Value, 'FO')
proto = unittest_pb2.TestAllTypes()
self.assertEqual(proto.FOO,
proto.NestedEnum.Value('FOO'))
self.assertEqual(proto.FOO,
unittest_pb2.TestAllTypes.NestedEnum.Value('FOO'))
self.assertEqual(proto.BAR,
proto.NestedEnum.Value('BAR'))
self.assertEqual(proto.BAR,
unittest_pb2.TestAllTypes.NestedEnum.Value('BAR'))
self.assertEqual(proto.BAZ,
proto.NestedEnum.Value('BAZ'))
self.assertEqual(proto.BAZ,
unittest_pb2.TestAllTypes.NestedEnum.Value('BAZ'))
self.assertRaises(ValueError,
proto.NestedEnum.Value, 'Foo')
self.assertRaises(ValueError,
unittest_pb2.TestAllTypes.NestedEnum.Value, 'Foo')
def testEnum_KeysAndValues(self):
self.assertEqual(['FOREIGN_FOO', 'FOREIGN_BAR', 'FOREIGN_BAZ'],
unittest_pb2.ForeignEnum.keys())
self.assertEqual([4, 5, 6],
unittest_pb2.ForeignEnum.values())
self.assertEqual([('FOREIGN_FOO', 4), ('FOREIGN_BAR', 5),
('FOREIGN_BAZ', 6)],
unittest_pb2.ForeignEnum.items())
proto = unittest_pb2.TestAllTypes()
self.assertEqual(['FOO', 'BAR', 'BAZ', 'NEG'], proto.NestedEnum.keys())
self.assertEqual([1, 2, 3, -1], proto.NestedEnum.values())
self.assertEqual([('FOO', 1), ('BAR', 2), ('BAZ', 3), ('NEG', -1)],
proto.NestedEnum.items())
def testRepeatedScalars(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(5)
proto.repeated_int32.append(10)
proto.repeated_int32.append(15)
self.assertTrue(proto.repeated_int32)
self.assertEqual(3, len(proto.repeated_int32))
self.assertEqual([5, 10, 15], proto.repeated_int32)
# Test single retrieval.
self.assertEqual(5, proto.repeated_int32[0])
self.assertEqual(15, proto.repeated_int32[-1])
# Test out-of-bounds indices.
self.assertRaises(IndexError, proto.repeated_int32.__getitem__, 1234)
self.assertRaises(IndexError, proto.repeated_int32.__getitem__, -1234)
# Test incorrect types passed to __getitem__.
self.assertRaises(TypeError, proto.repeated_int32.__getitem__, 'foo')
self.assertRaises(TypeError, proto.repeated_int32.__getitem__, None)
# Test single assignment.
proto.repeated_int32[1] = 20
self.assertEqual([5, 20, 15], proto.repeated_int32)
# Test insertion.
proto.repeated_int32.insert(1, 25)
self.assertEqual([5, 25, 20, 15], proto.repeated_int32)
# Test slice retrieval.
proto.repeated_int32.append(30)
self.assertEqual([25, 20, 15], proto.repeated_int32[1:4])
self.assertEqual([5, 25, 20, 15, 30], proto.repeated_int32[:])
# Test slice assignment with an iterator
proto.repeated_int32[1:4] = (i for i in xrange(3))
self.assertEqual([5, 0, 1, 2, 30], proto.repeated_int32)
# Test slice assignment.
proto.repeated_int32[1:4] = [35, 40, 45]
self.assertEqual([5, 35, 40, 45, 30], proto.repeated_int32)
# Test that we can use the field as an iterator.
result = []
for i in proto.repeated_int32:
result.append(i)
self.assertEqual([5, 35, 40, 45, 30], result)
# Test single deletion.
del proto.repeated_int32[2]
self.assertEqual([5, 35, 45, 30], proto.repeated_int32)
# Test slice deletion.
del proto.repeated_int32[2:]
self.assertEqual([5, 35], proto.repeated_int32)
# Test extending.
proto.repeated_int32.extend([3, 13])
self.assertEqual([5, 35, 3, 13], proto.repeated_int32)
# Test clearing.
proto.ClearField('repeated_int32')
self.assertTrue(not proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(1)
self.assertEqual(1, proto.repeated_int32[-1])
# Test assignment to a negative index.
proto.repeated_int32[-1] = 2
self.assertEqual(2, proto.repeated_int32[-1])
# Test deletion at negative indices.
proto.repeated_int32[:] = [0, 1, 2, 3]
del proto.repeated_int32[-1]
self.assertEqual([0, 1, 2], proto.repeated_int32)
del proto.repeated_int32[-2]
self.assertEqual([0, 2], proto.repeated_int32)
self.assertRaises(IndexError, proto.repeated_int32.__delitem__, -3)
self.assertRaises(IndexError, proto.repeated_int32.__delitem__, 300)
del proto.repeated_int32[-2:-1]
self.assertEqual([2], proto.repeated_int32)
del proto.repeated_int32[100:10000]
self.assertEqual([2], proto.repeated_int32)
def testRepeatedScalarsRemove(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(5)
proto.repeated_int32.append(10)
proto.repeated_int32.append(5)
proto.repeated_int32.append(5)
self.assertEqual(4, len(proto.repeated_int32))
proto.repeated_int32.remove(5)
self.assertEqual(3, len(proto.repeated_int32))
self.assertEqual(10, proto.repeated_int32[0])
self.assertEqual(5, proto.repeated_int32[1])
self.assertEqual(5, proto.repeated_int32[2])
proto.repeated_int32.remove(5)
self.assertEqual(2, len(proto.repeated_int32))
self.assertEqual(10, proto.repeated_int32[0])
self.assertEqual(5, proto.repeated_int32[1])
proto.repeated_int32.remove(10)
self.assertEqual(1, len(proto.repeated_int32))
self.assertEqual(5, proto.repeated_int32[0])
# Remove a non-existent element.
self.assertRaises(ValueError, proto.repeated_int32.remove, 123)
def testRepeatedComposites(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
m0 = proto.repeated_nested_message.add()
m1 = proto.repeated_nested_message.add()
self.assertTrue(proto.repeated_nested_message)
self.assertEqual(2, len(proto.repeated_nested_message))
self.assertListsEqual([m0, m1], proto.repeated_nested_message)
self.assertTrue(isinstance(m0, unittest_pb2.TestAllTypes.NestedMessage))
# Test out-of-bounds indices.
self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
1234)
self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
-1234)
# Test incorrect types passed to __getitem__.
self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
'foo')
self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
None)
# Test slice retrieval.
m2 = proto.repeated_nested_message.add()
m3 = proto.repeated_nested_message.add()
m4 = proto.repeated_nested_message.add()
self.assertListsEqual(
[m1, m2, m3], proto.repeated_nested_message[1:4])
self.assertListsEqual(
[m0, m1, m2, m3, m4], proto.repeated_nested_message[:])
self.assertListsEqual(
[m0, m1], proto.repeated_nested_message[:2])
self.assertListsEqual(
[m2, m3, m4], proto.repeated_nested_message[2:])
self.assertEqual(
m0, proto.repeated_nested_message[0])
self.assertListsEqual(
[m0], proto.repeated_nested_message[:1])
# Test that we can use the field as an iterator.
result = []
for i in proto.repeated_nested_message:
result.append(i)
self.assertListsEqual([m0, m1, m2, m3, m4], result)
# Test single deletion.
del proto.repeated_nested_message[2]
self.assertListsEqual([m0, m1, m3, m4], proto.repeated_nested_message)
# Test slice deletion.
del proto.repeated_nested_message[2:]
self.assertListsEqual([m0, m1], proto.repeated_nested_message)
# Test extending.
n1 = unittest_pb2.TestAllTypes.NestedMessage(bb=1)
n2 = unittest_pb2.TestAllTypes.NestedMessage(bb=2)
proto.repeated_nested_message.extend([n1,n2])
self.assertEqual(4, len(proto.repeated_nested_message))
self.assertEqual(n1, proto.repeated_nested_message[2])
self.assertEqual(n2, proto.repeated_nested_message[3])
# Test clearing.
proto.ClearField('repeated_nested_message')
self.assertTrue(not proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
# Test constructing an element while adding it.
proto.repeated_nested_message.add(bb=23)
self.assertEqual(1, len(proto.repeated_nested_message))
self.assertEqual(23, proto.repeated_nested_message[0].bb)
def testRepeatedCompositeRemove(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, len(proto.repeated_nested_message))
m0 = proto.repeated_nested_message.add()
# Need to set some differentiating variable so m0 != m1 != m2:
m0.bb = len(proto.repeated_nested_message)
m1 = proto.repeated_nested_message.add()
m1.bb = len(proto.repeated_nested_message)
self.assertTrue(m0 != m1)
m2 = proto.repeated_nested_message.add()
m2.bb = len(proto.repeated_nested_message)
self.assertListsEqual([m0, m1, m2], proto.repeated_nested_message)
self.assertEqual(3, len(proto.repeated_nested_message))
proto.repeated_nested_message.remove(m0)
self.assertEqual(2, len(proto.repeated_nested_message))
self.assertEqual(m1, proto.repeated_nested_message[0])
self.assertEqual(m2, proto.repeated_nested_message[1])
    # Removing m0 again or removing None should raise a ValueError.
self.assertRaises(ValueError, proto.repeated_nested_message.remove, m0)
self.assertRaises(ValueError, proto.repeated_nested_message.remove, None)
self.assertEqual(2, len(proto.repeated_nested_message))
proto.repeated_nested_message.remove(m2)
self.assertEqual(1, len(proto.repeated_nested_message))
self.assertEqual(m1, proto.repeated_nested_message[0])
def testHandWrittenReflection(self):
# Hand written extensions are only supported by the pure-Python
# implementation of the API.
if api_implementation.Type() != 'python':
return
FieldDescriptor = descriptor.FieldDescriptor
foo_field_descriptor = FieldDescriptor(
name='foo_field', full_name='MyProto.foo_field',
index=0, number=1, type=FieldDescriptor.TYPE_INT64,
cpp_type=FieldDescriptor.CPPTYPE_INT64,
label=FieldDescriptor.LABEL_OPTIONAL, default_value=0,
containing_type=None, message_type=None, enum_type=None,
is_extension=False, extension_scope=None,
options=descriptor_pb2.FieldOptions())
mydescriptor = descriptor.Descriptor(
name='MyProto', full_name='MyProto', filename='ignored',
containing_type=None, nested_types=[], enum_types=[],
fields=[foo_field_descriptor], extensions=[],
options=descriptor_pb2.MessageOptions())
class MyProtoClass(message.Message):
DESCRIPTOR = mydescriptor
__metaclass__ = reflection.GeneratedProtocolMessageType
myproto_instance = MyProtoClass()
self.assertEqual(0, myproto_instance.foo_field)
self.assertTrue(not myproto_instance.HasField('foo_field'))
myproto_instance.foo_field = 23
self.assertEqual(23, myproto_instance.foo_field)
self.assertTrue(myproto_instance.HasField('foo_field'))
def testDescriptorProtoSupport(self):
# Hand written descriptors/reflection are only supported by the pure-Python
# implementation of the API.
if api_implementation.Type() != 'python':
return
def AddDescriptorField(proto, field_name, field_type):
AddDescriptorField.field_index += 1
new_field = proto.field.add()
new_field.name = field_name
new_field.type = field_type
new_field.number = AddDescriptorField.field_index
new_field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
AddDescriptorField.field_index = 0
desc_proto = descriptor_pb2.DescriptorProto()
desc_proto.name = 'Car'
fdp = descriptor_pb2.FieldDescriptorProto
AddDescriptorField(desc_proto, 'name', fdp.TYPE_STRING)
AddDescriptorField(desc_proto, 'year', fdp.TYPE_INT64)
AddDescriptorField(desc_proto, 'automatic', fdp.TYPE_BOOL)
AddDescriptorField(desc_proto, 'price', fdp.TYPE_DOUBLE)
# Add a repeated field
AddDescriptorField.field_index += 1
new_field = desc_proto.field.add()
new_field.name = 'owners'
new_field.type = fdp.TYPE_STRING
new_field.number = AddDescriptorField.field_index
new_field.label = descriptor_pb2.FieldDescriptorProto.LABEL_REPEATED
desc = descriptor.MakeDescriptor(desc_proto)
self.assertTrue(desc.fields_by_name.has_key('name'))
self.assertTrue(desc.fields_by_name.has_key('year'))
self.assertTrue(desc.fields_by_name.has_key('automatic'))
self.assertTrue(desc.fields_by_name.has_key('price'))
self.assertTrue(desc.fields_by_name.has_key('owners'))
class CarMessage(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = desc
prius = CarMessage()
prius.name = 'prius'
prius.year = 2010
prius.automatic = True
prius.price = 25134.75
prius.owners.extend(['bob', 'susan'])
serialized_prius = prius.SerializeToString()
new_prius = reflection.ParseMessage(desc, serialized_prius)
self.assertTrue(new_prius is not prius)
self.assertEqual(prius, new_prius)
# these are unnecessary assuming message equality works as advertised but
# explicitly check to be safe since we're mucking about in metaclass foo
self.assertEqual(prius.name, new_prius.name)
self.assertEqual(prius.year, new_prius.year)
self.assertEqual(prius.automatic, new_prius.automatic)
self.assertEqual(prius.price, new_prius.price)
self.assertEqual(prius.owners, new_prius.owners)
def testTopLevelExtensionsForOptionalScalar(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.optional_int32_extension
self.assertTrue(not extendee_proto.HasExtension(extension))
self.assertEqual(0, extendee_proto.Extensions[extension])
# As with normal scalar fields, just doing a read doesn't actually set the
# "has" bit.
self.assertTrue(not extendee_proto.HasExtension(extension))
# Actually set the thing.
extendee_proto.Extensions[extension] = 23
self.assertEqual(23, extendee_proto.Extensions[extension])
self.assertTrue(extendee_proto.HasExtension(extension))
# Ensure that clearing works as well.
extendee_proto.ClearExtension(extension)
self.assertEqual(0, extendee_proto.Extensions[extension])
self.assertTrue(not extendee_proto.HasExtension(extension))
def testTopLevelExtensionsForRepeatedScalar(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.repeated_string_extension
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
extendee_proto.Extensions[extension].append('foo')
self.assertEqual(['foo'], extendee_proto.Extensions[extension])
string_list = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
self.assertTrue(string_list is not extendee_proto.Extensions[extension])
# Shouldn't be allowed to do Extensions[extension] = 'a'
self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
extension, 'a')
def testTopLevelExtensionsForOptionalMessage(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.optional_foreign_message_extension
self.assertTrue(not extendee_proto.HasExtension(extension))
self.assertEqual(0, extendee_proto.Extensions[extension].c)
# As with normal (non-extension) fields, merely reading from the
# thing shouldn't set the "has" bit.
self.assertTrue(not extendee_proto.HasExtension(extension))
extendee_proto.Extensions[extension].c = 23
self.assertEqual(23, extendee_proto.Extensions[extension].c)
self.assertTrue(extendee_proto.HasExtension(extension))
# Save a reference here.
foreign_message = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
self.assertTrue(foreign_message is not extendee_proto.Extensions[extension])
# Setting a field on foreign_message now shouldn't set
# any "has" bits on extendee_proto.
foreign_message.c = 42
self.assertEqual(42, foreign_message.c)
self.assertTrue(foreign_message.HasField('c'))
self.assertTrue(not extendee_proto.HasExtension(extension))
# Shouldn't be allowed to do Extensions[extension] = 'a'
self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
extension, 'a')
def testTopLevelExtensionsForRepeatedMessage(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.repeatedgroup_extension
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
group = extendee_proto.Extensions[extension].add()
group.a = 23
self.assertEqual(23, extendee_proto.Extensions[extension][0].a)
group.a = 42
self.assertEqual(42, extendee_proto.Extensions[extension][0].a)
group_list = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
self.assertTrue(group_list is not extendee_proto.Extensions[extension])
# Shouldn't be allowed to do Extensions[extension] = 'a'
self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
extension, 'a')
def testNestedExtensions(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.TestRequired.single
# We just test the non-repeated case.
self.assertTrue(not extendee_proto.HasExtension(extension))
required = extendee_proto.Extensions[extension]
self.assertEqual(0, required.a)
self.assertTrue(not extendee_proto.HasExtension(extension))
required.a = 23
self.assertEqual(23, extendee_proto.Extensions[extension].a)
self.assertTrue(extendee_proto.HasExtension(extension))
extendee_proto.ClearExtension(extension)
self.assertTrue(required is not extendee_proto.Extensions[extension])
self.assertTrue(not extendee_proto.HasExtension(extension))
def testRegisteredExtensions(self):
self.assertTrue('protobuf_unittest.optional_int32_extension' in
unittest_pb2.TestAllExtensions._extensions_by_name)
self.assertTrue(1 in unittest_pb2.TestAllExtensions._extensions_by_number)
# Make sure extensions haven't been registered into types that shouldn't
# have any.
self.assertEquals(0, len(unittest_pb2.TestAllTypes._extensions_by_name))
# If message A directly contains message B, and
# a.HasField('b') is currently False, then mutating any
# extension in B should change a.HasField('b') to True
# (and so on up the object tree).
def testHasBitsForAncestorsOfExtendedMessage(self):
# Optional scalar extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual(0, toplevel.submessage.Extensions[
more_extensions_pb2.optional_int_extension])
self.assertTrue(not toplevel.HasField('submessage'))
toplevel.submessage.Extensions[
more_extensions_pb2.optional_int_extension] = 23
self.assertEqual(23, toplevel.submessage.Extensions[
more_extensions_pb2.optional_int_extension])
self.assertTrue(toplevel.HasField('submessage'))
# Repeated scalar extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual([], toplevel.submessage.Extensions[
more_extensions_pb2.repeated_int_extension])
self.assertTrue(not toplevel.HasField('submessage'))
toplevel.submessage.Extensions[
more_extensions_pb2.repeated_int_extension].append(23)
self.assertEqual([23], toplevel.submessage.Extensions[
more_extensions_pb2.repeated_int_extension])
self.assertTrue(toplevel.HasField('submessage'))
# Optional message extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual(0, toplevel.submessage.Extensions[
more_extensions_pb2.optional_message_extension].foreign_message_int)
self.assertTrue(not toplevel.HasField('submessage'))
toplevel.submessage.Extensions[
more_extensions_pb2.optional_message_extension].foreign_message_int = 23
self.assertEqual(23, toplevel.submessage.Extensions[
more_extensions_pb2.optional_message_extension].foreign_message_int)
self.assertTrue(toplevel.HasField('submessage'))
# Repeated message extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual(0, len(toplevel.submessage.Extensions[
more_extensions_pb2.repeated_message_extension]))
self.assertTrue(not toplevel.HasField('submessage'))
foreign = toplevel.submessage.Extensions[
more_extensions_pb2.repeated_message_extension].add()
self.assertEqual(foreign, toplevel.submessage.Extensions[
more_extensions_pb2.repeated_message_extension][0])
self.assertTrue(toplevel.HasField('submessage'))
def testDisconnectionAfterClearingEmptyMessage(self):
toplevel = more_extensions_pb2.TopLevelMessage()
extendee_proto = toplevel.submessage
extension = more_extensions_pb2.optional_message_extension
extension_proto = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
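# The extension message obtained above is now disconnected from the
# parent, so mutating it must not reconnect it or affect the parent.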
extension_proto.foreign_message_int = 23
self.assertTrue(extension_proto is not extendee_proto.Extensions[extension])
def testExtensionFailureModes(self):
extendee_proto = unittest_pb2.TestAllExtensions()
# Try non-extension-handle arguments to HasExtension,
# ClearExtension(), and Extensions[]...
self.assertRaises(KeyError, extendee_proto.HasExtension, 1234)
self.assertRaises(KeyError, extendee_proto.ClearExtension, 1234)
self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__, 1234)
self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__, 1234, 5)
# Try something that *is* an extension handle, just not for
# this message...
unknown_handle = more_extensions_pb2.optional_int_extension
self.assertRaises(KeyError, extendee_proto.HasExtension,
unknown_handle)
self.assertRaises(KeyError, extendee_proto.ClearExtension,
unknown_handle)
self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__,
unknown_handle)
self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__,
unknown_handle, 5)
# Try calling HasExtension() with a valid handle, but for a
# *repeated* field. (Just as with non-extension repeated
# fields, Has*() isn't supported for extension repeated fields).
self.assertRaises(KeyError, extendee_proto.HasExtension,
unittest_pb2.repeated_string_extension)
def testStaticParseFrom(self):
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
string1 = proto1.SerializeToString()
proto2 = unittest_pb2.TestAllTypes.FromString(string1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
def testMergeFromSingularField(self):
# Test merge with just a singular field.
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto2 = unittest_pb2.TestAllTypes()
# This shouldn't get overwritten.
proto2.optional_string = 'value'
proto2.MergeFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('value', proto2.optional_string)
def testMergeFromRepeatedField(self):
# Test merge with just a repeated field.
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = unittest_pb2.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.MergeFrom(proto1)
self.assertEqual(0, proto2.repeated_int32[0])
self.assertEqual(1, proto2.repeated_int32[1])
self.assertEqual(2, proto2.repeated_int32[2])
def testMergeFromOptionalGroup(self):
# Test merge with an optional group.
proto1 = unittest_pb2.TestAllTypes()
proto1.optionalgroup.a = 12
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFrom(proto1)
self.assertEqual(12, proto2.optionalgroup.a)
def testMergeFromRepeatedNestedMessage(self):
# Test merge with a repeated nested message.
proto1 = unittest_pb2.TestAllTypes()
m = proto1.repeated_nested_message.add()
m.bb = 123
m = proto1.repeated_nested_message.add()
m.bb = 321
proto2 = unittest_pb2.TestAllTypes()
m = proto2.repeated_nested_message.add()
m.bb = 999
proto2.MergeFrom(proto1)
self.assertEqual(999, proto2.repeated_nested_message[0].bb)
self.assertEqual(123, proto2.repeated_nested_message[1].bb)
self.assertEqual(321, proto2.repeated_nested_message[2].bb)
proto3 = unittest_pb2.TestAllTypes()
proto3.repeated_nested_message.MergeFrom(proto2.repeated_nested_message)
self.assertEqual(999, proto3.repeated_nested_message[0].bb)
self.assertEqual(123, proto3.repeated_nested_message[1].bb)
self.assertEqual(321, proto3.repeated_nested_message[2].bb)
def testMergeFromAllFields(self):
# With all fields set.
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testMergeFromExtensionsSingular(self):
proto1 = unittest_pb2.TestAllExtensions()
proto1.Extensions[unittest_pb2.optional_int32_extension] = 1
proto2 = unittest_pb2.TestAllExtensions()
proto2.MergeFrom(proto1)
self.assertEqual(
1, proto2.Extensions[unittest_pb2.optional_int32_extension])
def testMergeFromExtensionsRepeated(self):
proto1 = unittest_pb2.TestAllExtensions()
proto1.Extensions[unittest_pb2.repeated_int32_extension].append(1)
proto1.Extensions[unittest_pb2.repeated_int32_extension].append(2)
proto2 = unittest_pb2.TestAllExtensions()
proto2.Extensions[unittest_pb2.repeated_int32_extension].append(0)
proto2.MergeFrom(proto1)
self.assertEqual(
3, len(proto2.Extensions[unittest_pb2.repeated_int32_extension]))
self.assertEqual(
0, proto2.Extensions[unittest_pb2.repeated_int32_extension][0])
self.assertEqual(
1, proto2.Extensions[unittest_pb2.repeated_int32_extension][1])
self.assertEqual(
2, proto2.Extensions[unittest_pb2.repeated_int32_extension][2])
def testMergeFromExtensionsNestedMessage(self):
proto1 = unittest_pb2.TestAllExtensions()
ext1 = proto1.Extensions[
unittest_pb2.repeated_nested_message_extension]
m = ext1.add()
m.bb = 222
m = ext1.add()
m.bb = 333
proto2 = unittest_pb2.TestAllExtensions()
ext2 = proto2.Extensions[
unittest_pb2.repeated_nested_message_extension]
m = ext2.add()
m.bb = 111
proto2.MergeFrom(proto1)
ext2 = proto2.Extensions[
unittest_pb2.repeated_nested_message_extension]
self.assertEqual(3, len(ext2))
self.assertEqual(111, ext2[0].bb)
self.assertEqual(222, ext2[1].bb)
self.assertEqual(333, ext2[2].bb)
def testMergeFromBug(self):
message1 = unittest_pb2.TestAllTypes()
message2 = unittest_pb2.TestAllTypes()
# Cause optional_nested_message to be instantiated within message1, even
# though it is not considered to be "present".
message1.optional_nested_message
self.assertFalse(message1.HasField('optional_nested_message'))
# Merge into message2. This should not instantiate the field in message2.
message2.MergeFrom(message1)
self.assertFalse(message2.HasField('optional_nested_message'))
def testCopyFromSingularField(self):
# Test copy with just a singular field.
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto1.optional_string = 'important-text'
proto2 = unittest_pb2.TestAllTypes()
proto2.optional_string = 'value'
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('important-text', proto2.optional_string)
def testCopyFromRepeatedField(self):
# Test copy with a repeated field.
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = unittest_pb2.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.repeated_int32[0])
self.assertEqual(2, proto2.repeated_int32[1])
def testCopyFromAllFields(self):
# With all fields set.
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = unittest_pb2.TestAllTypes()
proto2.CopyFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testCopyFromSelf(self):
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.optional_int32 = 2
proto1.optional_string = 'important-text'
proto1.CopyFrom(proto1)
self.assertEqual(1, proto1.repeated_int32[0])
self.assertEqual(2, proto1.optional_int32)
self.assertEqual('important-text', proto1.optional_string)
def testCopyFromBadType(self):
# The python implementation doesn't raise an exception in this
# case. In theory it should.
if api_implementation.Type() == 'python':
return
proto1 = unittest_pb2.TestAllTypes()
proto2 = unittest_pb2.TestAllExtensions()
self.assertRaises(TypeError, proto1.CopyFrom, proto2)
def testDeepCopy(self):
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto2 = copy.deepcopy(proto1)
self.assertEqual(1, proto2.optional_int32)
proto1.repeated_int32.append(2)
proto1.repeated_int32.append(3)
container = copy.deepcopy(proto1.repeated_int32)
self.assertEqual([2, 3], container)
# TODO(anuraag): Implement deepcopy for repeated composite / extension dict
def testClear(self):
proto = unittest_pb2.TestAllTypes()
# C++ implementation does not support lazy fields right now so leave it
# out for now.
if api_implementation.Type() == 'python':
test_util.SetAllFields(proto)
else:
test_util.SetAllNonLazyFields(proto)
# Clear the message.
proto.Clear()
self.assertEqual(proto.ByteSize(), 0)
empty_proto = unittest_pb2.TestAllTypes()
self.assertEqual(proto, empty_proto)
# Test if extensions which were set are cleared.
proto = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(proto)
# Clear the message.
proto.Clear()
self.assertEqual(proto.ByteSize(), 0)
empty_proto = unittest_pb2.TestAllExtensions()
self.assertEqual(proto, empty_proto)
def testDisconnectingBeforeClear(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
proto.Clear()
self.assertTrue(nested is not proto.optional_nested_message)
nested.bb = 23
self.assertTrue(not proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
nested.bb = 5
foreign = proto.optional_foreign_message
foreign.c = 6
proto.Clear()
self.assertTrue(nested is not proto.optional_nested_message)
self.assertTrue(foreign is not proto.optional_foreign_message)
self.assertEqual(5, nested.bb)
self.assertEqual(6, foreign.c)
nested.bb = 15
foreign.c = 16
self.assertFalse(proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
self.assertFalse(proto.HasField('optional_foreign_message'))
self.assertEqual(0, proto.optional_foreign_message.c)
def testOneOf(self):
proto = unittest_pb2.TestAllTypes()
proto.oneof_uint32 = 10
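# Assigning to another member of the oneof clears the previously set one.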
proto.oneof_nested_message.bb = 11
self.assertEqual(11, proto.oneof_nested_message.bb)
self.assertFalse(proto.HasField('oneof_uint32'))
nested = proto.oneof_nested_message
proto.oneof_string = 'abc'
self.assertEqual('abc', proto.oneof_string)
self.assertEqual(11, nested.bb)
self.assertFalse(proto.HasField('oneof_nested_message'))
def assertInitialized(self, proto):
self.assertTrue(proto.IsInitialized())
# Neither method should raise an exception.
proto.SerializeToString()
proto.SerializePartialToString()
def assertNotInitialized(self, proto):
self.assertFalse(proto.IsInitialized())
self.assertRaises(message.EncodeError, proto.SerializeToString)
# "Partial" serialization doesn't care if message is uninitialized.
proto.SerializePartialToString()
def testIsInitialized(self):
# Trivial cases - all optional fields and extensions.
proto = unittest_pb2.TestAllTypes()
self.assertInitialized(proto)
proto = unittest_pb2.TestAllExtensions()
self.assertInitialized(proto)
# The case of uninitialized required fields.
proto = unittest_pb2.TestRequired()
self.assertNotInitialized(proto)
proto.a = proto.b = proto.c = 2
self.assertInitialized(proto)
# The case of uninitialized submessage.
proto = unittest_pb2.TestRequiredForeign()
self.assertInitialized(proto)
proto.optional_message.a = 1
self.assertNotInitialized(proto)
proto.optional_message.b = 0
proto.optional_message.c = 0
self.assertInitialized(proto)
# Uninitialized repeated submessage.
message1 = proto.repeated_message.add()
self.assertNotInitialized(proto)
message1.a = message1.b = message1.c = 0
self.assertInitialized(proto)
# Uninitialized repeated group in an extension.
proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.TestRequired.multi
message1 = proto.Extensions[extension].add()
message2 = proto.Extensions[extension].add()
self.assertNotInitialized(proto)
message1.a = 1
message1.b = 1
message1.c = 1
self.assertNotInitialized(proto)
message2.a = 2
message2.b = 2
message2.c = 2
self.assertInitialized(proto)
# Uninitialized nonrepeated message in an extension.
proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.TestRequired.single
proto.Extensions[extension].a = 1
self.assertNotInitialized(proto)
proto.Extensions[extension].b = 2
proto.Extensions[extension].c = 3
self.assertInitialized(proto)
# Try passing an errors list.
errors = []
proto = unittest_pb2.TestRequired()
self.assertFalse(proto.IsInitialized(errors))
self.assertEqual(errors, ['a', 'b', 'c'])
@basetest.unittest.skipIf(
api_implementation.Type() != 'cpp' or api_implementation.Version() != 2,
'Errors are only available from the most recent C++ implementation.')
def testFileDescriptorErrors(self):
file_name = 'test_file_descriptor_errors.proto'
package_name = 'test_file_descriptor_errors.proto'
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.name = file_name
file_descriptor_proto.package = package_name
m1 = file_descriptor_proto.message_type.add()
m1.name = 'msg1'
# Compiles the proto into the C++ descriptor pool
descriptor.FileDescriptor(
file_name,
package_name,
serialized_pb=file_descriptor_proto.SerializeToString())
# Add a FileDescriptorProto that has duplicate symbols
another_file_name = 'another_test_file_descriptor_errors.proto'
file_descriptor_proto.name = another_file_name
m2 = file_descriptor_proto.message_type.add()
m2.name = 'msg2'
with self.assertRaises(TypeError) as cm:
descriptor.FileDescriptor(
another_file_name,
package_name,
serialized_pb=file_descriptor_proto.SerializeToString())
self.assertTrue(hasattr(cm, 'exception'), '%s not raised' %
getattr(cm.expected, '__name__', cm.expected))
self.assertIn('test_file_descriptor_errors.proto', str(cm.exception))
# Error message will say something about this definition being a
# duplicate, though we don't check the message exactly to avoid a
# dependency on the C++ logging code.
self.assertIn('test_file_descriptor_errors.msg1', str(cm.exception))
def testStringUTF8Encoding(self):
proto = unittest_pb2.TestAllTypes()
# Assignment of a unicode object to a field of type 'bytes' is not allowed.
self.assertRaises(TypeError,
setattr, proto, 'optional_bytes', u'unicode object')
# Check that the default value is of python's 'unicode' type.
self.assertEqual(type(proto.optional_string), unicode)
proto.optional_string = unicode('Testing')
self.assertEqual(proto.optional_string, str('Testing'))
# Assign a value of type 'str' which can be encoded in UTF-8.
proto.optional_string = str('Testing')
self.assertEqual(proto.optional_string, unicode('Testing'))
# Try to assign a 'str' value which contains bytes that aren't 7-bit ASCII.
self.assertRaises(ValueError,
setattr, proto, 'optional_string', b'a\x80a')
if str is bytes: # PY2
# Assign a 'str' object which contains a UTF-8 encoded string.
self.assertRaises(ValueError,
setattr, proto, 'optional_string', 'Тест')
else:
proto.optional_string = 'Тест'
# No exception thrown.
proto.optional_string = 'abc'
def testStringUTF8Serialization(self):
proto = unittest_mset_pb2.TestMessageSet()
extension_message = unittest_mset_pb2.TestMessageSetExtension2
extension = extension_message.message_set_extension
test_utf8 = u'Тест'
test_utf8_bytes = test_utf8.encode('utf-8')
# 'Test' in another language, using UTF-8 charset.
proto.Extensions[extension].str = test_utf8
# Serialize using the MessageSet wire format (this is specified in the
# .proto file).
serialized = proto.SerializeToString()
# Check byte size.
self.assertEqual(proto.ByteSize(), len(serialized))
raw = unittest_mset_pb2.RawMessageSet()
bytes_read = raw.MergeFromString(serialized)
self.assertEqual(len(serialized), bytes_read)
message2 = unittest_mset_pb2.TestMessageSetExtension2()
self.assertEqual(1, len(raw.item))
# Check that the type_id is the same as the tag ID in the .proto file.
self.assertEqual(raw.item[0].type_id, 1547769)
# Check the actual bytes on the wire.
self.assertTrue(
raw.item[0].message.endswith(test_utf8_bytes))
bytes_read = message2.MergeFromString(raw.item[0].message)
self.assertEqual(len(raw.item[0].message), bytes_read)
self.assertEqual(type(message2.str), unicode)
self.assertEqual(message2.str, test_utf8)
# The pure Python API throws an exception on MergeFromString(),
# if any of the string fields of the message can't be UTF-8 decoded.
# The C++ implementation of the API has no way to check that on
# MergeFromString and thus has no way to throw the exception.
#
# The pure Python API always returns objects of type 'unicode' (UTF-8
# encoded), or 'bytes' (in 7 bit ASCII).
badbytes = raw.item[0].message.replace(
test_utf8_bytes, len(test_utf8_bytes) * b'\xff')
unicode_decode_failed = False
try:
message2.MergeFromString(badbytes)
except UnicodeDecodeError:
unicode_decode_failed = True
string_field = message2.str
self.assertTrue(unicode_decode_failed or type(string_field) is bytes)
def testBytesInTextFormat(self):
proto = unittest_pb2.TestAllTypes(optional_bytes=b'\x00\x7f\x80\xff')
self.assertEqual(u'optional_bytes: "\\000\\177\\200\\377"\n',
unicode(proto))
def testEmptyNestedMessage(self):
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.MergeFrom(
unittest_pb2.TestAllTypes.NestedMessage())
self.assertTrue(proto.HasField('optional_nested_message'))
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.CopyFrom(
unittest_pb2.TestAllTypes.NestedMessage())
self.assertTrue(proto.HasField('optional_nested_message'))
proto = unittest_pb2.TestAllTypes()
bytes_read = proto.optional_nested_message.MergeFromString(b'')
self.assertEqual(0, bytes_read)
self.assertTrue(proto.HasField('optional_nested_message'))
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.ParseFromString(b'')
self.assertTrue(proto.HasField('optional_nested_message'))
serialized = proto.SerializeToString()
proto2 = unittest_pb2.TestAllTypes()
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
self.assertTrue(proto2.HasField('optional_nested_message'))
def testSetInParent(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.HasField('optionalgroup'))
proto.optionalgroup.SetInParent()
self.assertTrue(proto.HasField('optionalgroup'))
# Since we had so many tests for protocol buffer equality, we broke these out
# into separate TestCase classes.
class TestAllTypesEqualityTest(basetest.TestCase):
def setUp(self):
self.first_proto = unittest_pb2.TestAllTypes()
self.second_proto = unittest_pb2.TestAllTypes()
def testNotHashable(self):
self.assertRaises(TypeError, hash, self.first_proto)
def testSelfEquality(self):
self.assertEqual(self.first_proto, self.first_proto)
def testEmptyProtosEqual(self):
self.assertEqual(self.first_proto, self.second_proto)
class FullProtosEqualityTest(basetest.TestCase):
"""Equality tests using completely-full protos as a starting point."""
def setUp(self):
self.first_proto = unittest_pb2.TestAllTypes()
self.second_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(self.first_proto)
test_util.SetAllFields(self.second_proto)
def testNotHashable(self):
self.assertRaises(TypeError, hash, self.first_proto)
def testNoneNotEqual(self):
self.assertNotEqual(self.first_proto, None)
self.assertNotEqual(None, self.second_proto)
def testNotEqualToOtherMessage(self):
third_proto = unittest_pb2.TestRequired()
self.assertNotEqual(self.first_proto, third_proto)
self.assertNotEqual(third_proto, self.second_proto)
def testAllFieldsFilledEquality(self):
self.assertEqual(self.first_proto, self.second_proto)
def testNonRepeatedScalar(self):
# Nonrepeated scalar field change should cause inequality.
self.first_proto.optional_int32 += 1
self.assertNotEqual(self.first_proto, self.second_proto)
# ...as should clearing a field.
self.first_proto.ClearField('optional_int32')
self.assertNotEqual(self.first_proto, self.second_proto)
def testNonRepeatedComposite(self):
# Change a nonrepeated composite field.
self.first_proto.optional_nested_message.bb += 1
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.optional_nested_message.bb -= 1
self.assertEqual(self.first_proto, self.second_proto)
# Clear a field in the nested message.
self.first_proto.optional_nested_message.ClearField('bb')
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.optional_nested_message.bb = (
self.second_proto.optional_nested_message.bb)
self.assertEqual(self.first_proto, self.second_proto)
# Remove the nested message entirely.
self.first_proto.ClearField('optional_nested_message')
self.assertNotEqual(self.first_proto, self.second_proto)
def testRepeatedScalar(self):
# Change a repeated scalar field.
self.first_proto.repeated_int32.append(5)
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.ClearField('repeated_int32')
self.assertNotEqual(self.first_proto, self.second_proto)
def testRepeatedComposite(self):
# Change value within a repeated composite field.
self.first_proto.repeated_nested_message[0].bb += 1
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.repeated_nested_message[0].bb -= 1
self.assertEqual(self.first_proto, self.second_proto)
# Add a value to a repeated composite field.
self.first_proto.repeated_nested_message.add()
self.assertNotEqual(self.first_proto, self.second_proto)
self.second_proto.repeated_nested_message.add()
self.assertEqual(self.first_proto, self.second_proto)
def testNonRepeatedScalarHasBits(self):
# Ensure that we test "has" bits as well as value for
# nonrepeated scalar field.
self.first_proto.ClearField('optional_int32')
self.second_proto.optional_int32 = 0
self.assertNotEqual(self.first_proto, self.second_proto)
def testNonRepeatedCompositeHasBits(self):
# Ensure that we test "has" bits as well as value for
# nonrepeated composite field.
self.first_proto.ClearField('optional_nested_message')
self.second_proto.optional_nested_message.ClearField('bb')
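# A present-but-empty submessage must not compare equal to an absent one.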
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.optional_nested_message.bb = 0
self.first_proto.optional_nested_message.ClearField('bb')
self.assertEqual(self.first_proto, self.second_proto)
class ExtensionEqualityTest(basetest.TestCase):
def testExtensionEquality(self):
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
self.assertEqual(first_proto, second_proto)
test_util.SetAllExtensions(first_proto)
self.assertNotEqual(first_proto, second_proto)
test_util.SetAllExtensions(second_proto)
self.assertEqual(first_proto, second_proto)
# Ensure that we check value equality.
first_proto.Extensions[unittest_pb2.optional_int32_extension] += 1
self.assertNotEqual(first_proto, second_proto)
first_proto.Extensions[unittest_pb2.optional_int32_extension] -= 1
self.assertEqual(first_proto, second_proto)
# Ensure that we also look at "has" bits.
first_proto.ClearExtension(unittest_pb2.optional_int32_extension)
second_proto.Extensions[unittest_pb2.optional_int32_extension] = 0
self.assertNotEqual(first_proto, second_proto)
first_proto.Extensions[unittest_pb2.optional_int32_extension] = 0
self.assertEqual(first_proto, second_proto)
# Ensure that differences in cached values
# don't matter if "has" bits are both false.
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
self.assertEqual(
0, first_proto.Extensions[unittest_pb2.optional_int32_extension])
self.assertEqual(first_proto, second_proto)
class MutualRecursionEqualityTest(basetest.TestCase):
def testEqualityWithMutualRecursion(self):
first_proto = unittest_pb2.TestMutualRecursionA()
second_proto = unittest_pb2.TestMutualRecursionA()
self.assertEqual(first_proto, second_proto)
first_proto.bb.a.bb.optional_int32 = 23
self.assertNotEqual(first_proto, second_proto)
second_proto.bb.a.bb.optional_int32 = 23
self.assertEqual(first_proto, second_proto)
class ByteSizeTest(basetest.TestCase):
def setUp(self):
self.proto = unittest_pb2.TestAllTypes()
self.extended_proto = more_extensions_pb2.ExtendedMessage()
self.packed_proto = unittest_pb2.TestPackedTypes()
self.packed_extended_proto = unittest_pb2.TestPackedExtensions()
def Size(self):
return self.proto.ByteSize()
def testEmptyMessage(self):
self.assertEqual(0, self.proto.ByteSize())
def testSizedOnKwargs(self):
# Use a separate message to ensure testing right after creation.
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.ByteSize())
proto_kwargs = unittest_pb2.TestAllTypes(optional_int64=1)
# One byte for the tag, one to encode varint 1.
self.assertEqual(2, proto_kwargs.ByteSize())
def testVarints(self):
def Test(i, expected_varint_size):
self.proto.Clear()
self.proto.optional_int64 = i
# Add one byte to the expected varint size for the tag.
self.assertEqual(expected_varint_size + 1, self.Size())
Test(0, 1)
Test(1, 1)
for i, num_bytes in zip(range(7, 63, 7), range(1, 10000)):
Test((1 << i) - 1, num_bytes)
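# (1 << (7 * k)) - 1 is the largest value that fits in k varint bytes.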
Test(-1, 10)
Test(-2, 10)
Test(-(1 << 63), 10)
def testStrings(self):
self.proto.optional_string = ''
# Need one byte for tag info (tag #14), and one byte for length.
self.assertEqual(2, self.Size())
self.proto.optional_string = 'abc'
# Need one byte for tag info (tag #14), and one byte for length.
self.assertEqual(2 + len(self.proto.optional_string), self.Size())
self.proto.optional_string = 'x' * 128
# Need one byte for tag info (tag #14), and TWO bytes for length.
self.assertEqual(3 + len(self.proto.optional_string), self.Size())
def testOtherNumerics(self):
self.proto.optional_fixed32 = 1234
# One byte for tag and 4 bytes for fixed32.
self.assertEqual(5, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_fixed64 = 1234
# One byte for tag and 8 bytes for fixed64.
self.assertEqual(9, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_float = 1.234
# One byte for tag and 4 bytes for float.
self.assertEqual(5, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_double = 1.234
# One byte for tag and 8 bytes for double.
self.assertEqual(9, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_sint32 = 64
# One byte for tag and 2 bytes for zig-zag-encoded 64.
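# Zig-zag encoding maps n to (n << 1) ^ (n >> 31), so 64 becomes 128,
# which needs a two-byte varint.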
self.assertEqual(3, self.Size())
self.proto = unittest_pb2.TestAllTypes()
def testComposites(self):
# 3 bytes.
self.proto.optional_nested_message.bb = (1 << 14)
# Plus one byte for bb tag.
# Plus 1 byte for optional_nested_message serialized size.
# Plus two bytes for optional_nested_message tag.
self.assertEqual(3 + 1 + 1 + 2, self.Size())
def testGroups(self):
# 4 bytes.
self.proto.optionalgroup.a = (1 << 21)
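# Groups are framed by START_GROUP/END_GROUP tags instead of a length
# prefix.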
# Plus two bytes for |a| tag.
# Plus 2 * two bytes for START_GROUP and END_GROUP tags.
self.assertEqual(4 + 2 + 2*2, self.Size())
def testRepeatedScalars(self):
self.proto.repeated_int32.append(10) # 1 byte.
self.proto.repeated_int32.append(128) # 2 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
def testRepeatedScalarsExtend(self):
self.proto.repeated_int32.extend([10, 128]) # 3 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
def testRepeatedScalarsRemove(self):
self.proto.repeated_int32.append(10) # 1 byte.
self.proto.repeated_int32.append(128) # 2 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
self.proto.repeated_int32.remove(128)
self.assertEqual(1 + 2, self.Size())
def testRepeatedComposites(self):
# Empty message. 2 bytes tag plus 1 byte length.
foreign_message_0 = self.proto.repeated_nested_message.add()
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
foreign_message_1 = self.proto.repeated_nested_message.add()
foreign_message_1.bb = 7
self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
def testRepeatedCompositesDelete(self):
# Empty message. 2 bytes tag plus 1 byte length.
foreign_message_0 = self.proto.repeated_nested_message.add()
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
foreign_message_1 = self.proto.repeated_nested_message.add()
foreign_message_1.bb = 9
self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
del self.proto.repeated_nested_message[0]
self.assertEqual(2 + 1 + 1 + 1, self.Size())
# Now add a new message.
foreign_message_2 = self.proto.repeated_nested_message.add()
foreign_message_2.bb = 12
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
self.assertEqual(2 + 1 + 1 + 1 + 2 + 1 + 1 + 1, self.Size())
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
del self.proto.repeated_nested_message[1]
self.assertEqual(2 + 1 + 1 + 1, self.Size())
del self.proto.repeated_nested_message[0]
self.assertEqual(0, self.Size())
def testRepeatedGroups(self):
# 2-byte START_GROUP plus 2-byte END_GROUP.
group_0 = self.proto.repeatedgroup.add()
# 2-byte START_GROUP plus 2-byte |a| tag + 1-byte |a|
# plus 2-byte END_GROUP.
group_1 = self.proto.repeatedgroup.add()
group_1.a = 7
self.assertEqual(2 + 2 + 2 + 2 + 1 + 2, self.Size())
def testExtensions(self):
proto = unittest_pb2.TestAllExtensions()
self.assertEqual(0, proto.ByteSize())
extension = unittest_pb2.optional_int32_extension # Field #1, 1 byte.
proto.Extensions[extension] = 23
# 1 byte for tag, 1 byte for value.
self.assertEqual(2, proto.ByteSize())
def testCacheInvalidationForNonrepeatedScalar(self):
# Test non-extension.
self.proto.optional_int32 = 1
self.assertEqual(2, self.proto.ByteSize())
self.proto.optional_int32 = 128
self.assertEqual(3, self.proto.ByteSize())
self.proto.ClearField('optional_int32')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.optional_int_extension
self.extended_proto.Extensions[extension] = 1
self.assertEqual(2, self.extended_proto.ByteSize())
self.extended_proto.Extensions[extension] = 128
self.assertEqual(3, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForRepeatedScalar(self):
# Test non-extension.
self.proto.repeated_int32.append(1)
self.assertEqual(3, self.proto.ByteSize())
self.proto.repeated_int32.append(1)
self.assertEqual(6, self.proto.ByteSize())
self.proto.repeated_int32[1] = 128
self.assertEqual(7, self.proto.ByteSize())
self.proto.ClearField('repeated_int32')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.repeated_int_extension
repeated = self.extended_proto.Extensions[extension]
repeated.append(1)
self.assertEqual(2, self.extended_proto.ByteSize())
repeated.append(1)
self.assertEqual(4, self.extended_proto.ByteSize())
repeated[1] = 128
self.assertEqual(5, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForNonrepeatedMessage(self):
# Test non-extension.
self.proto.optional_foreign_message.c = 1
self.assertEqual(5, self.proto.ByteSize())
self.proto.optional_foreign_message.c = 128
self.assertEqual(6, self.proto.ByteSize())
self.proto.optional_foreign_message.ClearField('c')
self.assertEqual(3, self.proto.ByteSize())
self.proto.ClearField('optional_foreign_message')
self.assertEqual(0, self.proto.ByteSize())
if api_implementation.Type() == 'python':
# This is only possible in pure-Python implementation of the API.
child = self.proto.optional_foreign_message
self.proto.ClearField('optional_foreign_message')
child.c = 128
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.optional_message_extension
child = self.extended_proto.Extensions[extension]
self.assertEqual(0, self.extended_proto.ByteSize())
child.foreign_message_int = 1
self.assertEqual(4, self.extended_proto.ByteSize())
child.foreign_message_int = 128
self.assertEqual(5, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForRepeatedMessage(self):
# Test non-extension.
child0 = self.proto.repeated_foreign_message.add()
self.assertEqual(3, self.proto.ByteSize())
self.proto.repeated_foreign_message.add()
self.assertEqual(6, self.proto.ByteSize())
child0.c = 1
self.assertEqual(8, self.proto.ByteSize())
self.proto.ClearField('repeated_foreign_message')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.repeated_message_extension
child_list = self.extended_proto.Extensions[extension]
child0 = child_list.add()
self.assertEqual(2, self.extended_proto.ByteSize())
child_list.add()
self.assertEqual(4, self.extended_proto.ByteSize())
child0.foreign_message_int = 1
self.assertEqual(6, self.extended_proto.ByteSize())
child0.ClearField('foreign_message_int')
self.assertEqual(4, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testPackedRepeatedScalars(self):
self.assertEqual(0, self.packed_proto.ByteSize())
self.packed_proto.packed_int32.append(10) # 1 byte.
self.packed_proto.packed_int32.append(128) # 2 bytes.
# The tag is 2 bytes (the field number is 90), and the varint
# storing the length is 1 byte.
int_size = 1 + 2 + 3
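# That is: 1 + 2 payload bytes, plus 2 tag bytes, plus 1 length byte.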
self.assertEqual(int_size, self.packed_proto.ByteSize())
self.packed_proto.packed_double.append(4.2) # 8 bytes
self.packed_proto.packed_double.append(3.25) # 8 bytes
# 2 more tag bytes, 1 more length byte.
double_size = 8 + 8 + 3
self.assertEqual(int_size+double_size, self.packed_proto.ByteSize())
self.packed_proto.ClearField('packed_int32')
self.assertEqual(double_size, self.packed_proto.ByteSize())
def testPackedExtensions(self):
self.assertEqual(0, self.packed_extended_proto.ByteSize())
extension = self.packed_extended_proto.Extensions[
unittest_pb2.packed_fixed32_extension]
extension.extend([1, 2, 3, 4]) # 16 bytes
# Tag is 3 bytes.
self.assertEqual(19, self.packed_extended_proto.ByteSize())
# Issues to be sure to cover include:
# * Handling of unrecognized tags ("uninterpreted_bytes").
# * Handling of MessageSets.
# * Consistent ordering of tags in the wire format,
# including ordering between extensions and non-extension
# fields.
# * Consistent serialization of negative numbers, especially
# negative int32s.
# * Handling of empty submessages (with and without "has"
# bits set).
class SerializationTest(basetest.TestCase):
def testSerializeEmptyMessage(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeAllFields(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeAllExtensions(self):
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeWithOptionalGroup(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
first_proto.optionalgroup.a = 242
serialized = first_proto.SerializeToString()
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeNegativeValues(self):
first_proto = unittest_pb2.TestAllTypes()
first_proto.optional_int32 = -1
first_proto.optional_int64 = -(2 << 40)
first_proto.optional_sint32 = -3
first_proto.optional_sint64 = -(4 << 40)
first_proto.optional_sfixed32 = -5
first_proto.optional_sfixed64 = -(6 << 40)
second_proto = unittest_pb2.TestAllTypes.FromString(
first_proto.SerializeToString())
self.assertEqual(first_proto, second_proto)
def testParseTruncated(self):
# This test is only applicable for the Python implementation of the API.
if api_implementation.Type() != 'python':
return
first_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(first_proto)
serialized = first_proto.SerializeToString()
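# Re-parse prefixes of every possible length; each attempt must either
# consume exactly the given bytes or raise DecodeError.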
for truncation_point in xrange(len(serialized) + 1):
try:
second_proto = unittest_pb2.TestAllTypes()
unknown_fields = unittest_pb2.TestEmptyMessage()
pos = second_proto._InternalParse(serialized, 0, truncation_point)
# If we didn't raise an error then we read exactly the amount expected.
self.assertEqual(truncation_point, pos)
# Parsing to unknown fields should not throw if parsing to known fields
# did not.
try:
pos2 = unknown_fields._InternalParse(serialized, 0, truncation_point)
self.assertEqual(truncation_point, pos2)
except message.DecodeError:
self.fail('Parsing unknown fields failed when parsing known fields '
'did not.')
except message.DecodeError:
# Parsing unknown fields should also fail.
self.assertRaises(message.DecodeError, unknown_fields._InternalParse,
serialized, 0, truncation_point)
def testCanonicalSerializationOrder(self):
proto = more_messages_pb2.OutOfOrderFields()
# These are also their tag numbers. Even though we're setting these in
# reverse-tag order AND they're listed in reverse tag-order in the .proto
# file, they should nonetheless be serialized in tag order.
proto.optional_sint32 = 5
proto.Extensions[more_messages_pb2.optional_uint64] = 4
proto.optional_uint32 = 3
proto.Extensions[more_messages_pb2.optional_int64] = 2
proto.optional_int32 = 1
serialized = proto.SerializeToString()
self.assertEqual(proto.ByteSize(), len(serialized))
d = _MiniDecoder(serialized)
ReadTag = d.ReadFieldNumberAndWireType
self.assertEqual((1, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(1, d.ReadInt32())
self.assertEqual((2, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(2, d.ReadInt64())
self.assertEqual((3, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(3, d.ReadUInt32())
self.assertEqual((4, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(4, d.ReadUInt64())
self.assertEqual((5, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(5, d.ReadSInt32())
def testCanonicalSerializationOrderSameAsCpp(self):
# Copy of the same test we use for C++.
proto = unittest_pb2.TestFieldOrderings()
test_util.SetAllFieldsAndExtensions(proto)
serialized = proto.SerializeToString()
test_util.ExpectAllFieldsAndExtensionsInOrder(serialized)
def testMergeFromStringWhenFieldsAlreadySet(self):
first_proto = unittest_pb2.TestAllTypes()
first_proto.repeated_string.append('foobar')
first_proto.optional_int32 = 23
first_proto.optional_nested_message.bb = 42
serialized = first_proto.SerializeToString()
second_proto = unittest_pb2.TestAllTypes()
second_proto.repeated_string.append('baz')
second_proto.optional_int32 = 100
second_proto.optional_nested_message.bb = 999
bytes_parsed = second_proto.MergeFromString(serialized)
self.assertEqual(len(serialized), bytes_parsed)
# Ensure that we append to repeated fields.
self.assertEqual(['baz', 'foobar'], list(second_proto.repeated_string))
# Ensure that we overwrite nonrepeated scalars.
self.assertEqual(23, second_proto.optional_int32)
# Ensure that we recursively call MergeFromString() on
# submessages.
self.assertEqual(42, second_proto.optional_nested_message.bb)
def testMessageSetWireFormat(self):
proto = unittest_mset_pb2.TestMessageSet()
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
extension_message2 = unittest_mset_pb2.TestMessageSetExtension2
extension1 = extension_message1.message_set_extension
extension2 = extension_message2.message_set_extension
proto.Extensions[extension1].i = 123
proto.Extensions[extension2].str = 'foo'
# Serialize using the MessageSet wire format (this is specified in the
# .proto file).
serialized = proto.SerializeToString()
raw = unittest_mset_pb2.RawMessageSet()
self.assertEqual(False,
raw.DESCRIPTOR.GetOptions().message_set_wire_format)
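# RawMessageSet mirrors the MessageSet wire format explicitly: each item
# carries a type_id (the extension's field number) and the serialized
# message bytes.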
self.assertEqual(
len(serialized),
raw.MergeFromString(serialized))
self.assertEqual(2, len(raw.item))
message1 = unittest_mset_pb2.TestMessageSetExtension1()
self.assertEqual(
len(raw.item[0].message),
message1.MergeFromString(raw.item[0].message))
self.assertEqual(123, message1.i)
message2 = unittest_mset_pb2.TestMessageSetExtension2()
self.assertEqual(
len(raw.item[1].message),
message2.MergeFromString(raw.item[1].message))
self.assertEqual('foo', message2.str)
# Deserialize using the MessageSet wire format.
proto2 = unittest_mset_pb2.TestMessageSet()
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
self.assertEqual(123, proto2.Extensions[extension1].i)
self.assertEqual('foo', proto2.Extensions[extension2].str)
# Check byte size.
self.assertEqual(proto2.ByteSize(), len(serialized))
self.assertEqual(proto.ByteSize(), len(serialized))
def testMessageSetWireFormatUnknownExtension(self):
# Create a message using the message set wire format with an unknown
# message.
raw = unittest_mset_pb2.RawMessageSet()
# Add an item.
item = raw.item.add()
item.type_id = 1545008
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.i = 12345
item.message = message1.SerializeToString()
# Add a second, unknown extension.
item = raw.item.add()
item.type_id = 1545009
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.i = 12346
item.message = message1.SerializeToString()
# Add another unknown extension.
item = raw.item.add()
item.type_id = 1545010
message1 = unittest_mset_pb2.TestMessageSetExtension2()
message1.str = 'foo'
item.message = message1.SerializeToString()
serialized = raw.SerializeToString()
# Parse message using the message set wire format.
proto = unittest_mset_pb2.TestMessageSet()
self.assertEqual(
len(serialized),
proto.MergeFromString(serialized))
# Check that the message parsed well.
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
extension1 = extension_message1.message_set_extension
self.assertEqual(12345, proto.Extensions[extension1].i)
def testUnknownFields(self):
proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto)
serialized = proto.SerializeToString()
# The empty message should be parsable with all of the fields
# unknown.
proto2 = unittest_pb2.TestEmptyMessage()
# Parsing this message should succeed.
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
# Now test with a int64 field set.
proto = unittest_pb2.TestAllTypes()
proto.optional_int64 = 0x0fffffffffffffff
serialized = proto.SerializeToString()
# The empty message should be parsable with all of the fields
# unknown.
proto2 = unittest_pb2.TestEmptyMessage()
# Parsing this message should succeed.
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
def _CheckRaises(self, exc_class, callable_obj, exception):
"""This method checks if the excpetion type and message are as expected."""
try:
callable_obj()
except exc_class as ex:
# Check if the exception message is the right one.
self.assertEqual(exception, str(ex))
return
else:
raise self.failureException('%s not raised' % str(exc_class))
def testSerializeUninitialized(self):
proto = unittest_pb2.TestRequired()
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Message protobuf_unittest.TestRequired is missing required fields: '
'a,b,c')
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto2 = unittest_pb2.TestRequired()
self.assertFalse(proto2.HasField('a'))
# proto2 ParseFromString does not check that required fields are set.
proto2.ParseFromString(partial)
self.assertFalse(proto2.HasField('a'))
proto.a = 1
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Message protobuf_unittest.TestRequired is missing required fields: b,c')
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto.b = 2
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Message protobuf_unittest.TestRequired is missing required fields: c')
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto.c = 3
serialized = proto.SerializeToString()
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto2 = unittest_pb2.TestRequired()
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
self.assertEqual(1, proto2.a)
self.assertEqual(2, proto2.b)
self.assertEqual(3, proto2.c)
self.assertEqual(
len(partial),
proto2.MergeFromString(partial))
self.assertEqual(1, proto2.a)
self.assertEqual(2, proto2.b)
self.assertEqual(3, proto2.c)
def testSerializeUninitializedSubMessage(self):
proto = unittest_pb2.TestRequiredForeign()
# Sub-message doesn't exist yet, so this succeeds.
proto.SerializeToString()
proto.optional_message.a = 1
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Message protobuf_unittest.TestRequiredForeign '
'is missing required fields: '
'optional_message.b,optional_message.c')
proto.optional_message.b = 2
proto.optional_message.c = 3
proto.SerializeToString()
proto.repeated_message.add().a = 1
proto.repeated_message.add().b = 2
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Message protobuf_unittest.TestRequiredForeign is missing required fields: '
'repeated_message[0].b,repeated_message[0].c,'
'repeated_message[1].a,repeated_message[1].c')
proto.repeated_message[0].b = 2
proto.repeated_message[0].c = 3
proto.repeated_message[1].a = 1
proto.repeated_message[1].c = 3
proto.SerializeToString()
def testSerializeAllPackedFields(self):
first_proto = unittest_pb2.TestPackedTypes()
second_proto = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
bytes_read = second_proto.MergeFromString(serialized)
self.assertEqual(second_proto.ByteSize(), bytes_read)
self.assertEqual(first_proto, second_proto)
def testSerializeAllPackedExtensions(self):
first_proto = unittest_pb2.TestPackedExtensions()
second_proto = unittest_pb2.TestPackedExtensions()
test_util.SetAllPackedExtensions(first_proto)
serialized = first_proto.SerializeToString()
bytes_read = second_proto.MergeFromString(serialized)
self.assertEqual(second_proto.ByteSize(), bytes_read)
self.assertEqual(first_proto, second_proto)
def testMergePackedFromStringWhenSomeFieldsAlreadySet(self):
first_proto = unittest_pb2.TestPackedTypes()
first_proto.packed_int32.extend([1, 2])
first_proto.packed_double.append(3.0)
serialized = first_proto.SerializeToString()
second_proto = unittest_pb2.TestPackedTypes()
second_proto.packed_int32.append(3)
second_proto.packed_double.extend([1.0, 2.0])
second_proto.packed_sint32.append(4)
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual([3, 1, 2], second_proto.packed_int32)
self.assertEqual([1.0, 2.0, 3.0], second_proto.packed_double)
self.assertEqual([4], second_proto.packed_sint32)
def testPackedFieldsWireFormat(self):
proto = unittest_pb2.TestPackedTypes()
proto.packed_int32.extend([1, 2, 150, 3]) # 1 + 1 + 2 + 1 bytes
proto.packed_double.extend([1.0, 1000.0]) # 8 + 8 bytes
proto.packed_float.append(2.0) # 4 bytes, will be before double
serialized = proto.SerializeToString()
self.assertEqual(proto.ByteSize(), len(serialized))
d = _MiniDecoder(serialized)
ReadTag = d.ReadFieldNumberAndWireType
self.assertEqual((90, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
self.assertEqual(1+1+1+2, d.ReadInt32())
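# The 5-byte payload: 1, 2 and 3 take one varint byte each; 150 takes two.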
self.assertEqual(1, d.ReadInt32())
self.assertEqual(2, d.ReadInt32())
self.assertEqual(150, d.ReadInt32())
self.assertEqual(3, d.ReadInt32())
self.assertEqual((100, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
self.assertEqual(4, d.ReadInt32())
self.assertEqual(2.0, d.ReadFloat())
self.assertEqual((101, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
self.assertEqual(8+8, d.ReadInt32())
self.assertEqual(1.0, d.ReadDouble())
self.assertEqual(1000.0, d.ReadDouble())
self.assertTrue(d.EndOfStream())
def testParsePackedFromUnpacked(self):
unpacked = unittest_pb2.TestUnpackedTypes()
test_util.SetAllUnpackedFields(unpacked)
packed = unittest_pb2.TestPackedTypes()
serialized = unpacked.SerializeToString()
self.assertEqual(
len(serialized),
packed.MergeFromString(serialized))
expected = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(expected)
self.assertEqual(expected, packed)
def testParseUnpackedFromPacked(self):
packed = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(packed)
unpacked = unittest_pb2.TestUnpackedTypes()
serialized = packed.SerializeToString()
self.assertEqual(
len(serialized),
unpacked.MergeFromString(serialized))
expected = unittest_pb2.TestUnpackedTypes()
test_util.SetAllUnpackedFields(expected)
self.assertEqual(expected, unpacked)
def testFieldNumbers(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(unittest_pb2.TestAllTypes.NestedMessage.BB_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.TestAllTypes.OPTIONAL_INT32_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.TestAllTypes.OPTIONALGROUP_FIELD_NUMBER, 16)
self.assertEqual(
unittest_pb2.TestAllTypes.OPTIONAL_NESTED_MESSAGE_FIELD_NUMBER, 18)
self.assertEqual(
unittest_pb2.TestAllTypes.OPTIONAL_NESTED_ENUM_FIELD_NUMBER, 21)
self.assertEqual(unittest_pb2.TestAllTypes.REPEATED_INT32_FIELD_NUMBER, 31)
self.assertEqual(unittest_pb2.TestAllTypes.REPEATEDGROUP_FIELD_NUMBER, 46)
self.assertEqual(
unittest_pb2.TestAllTypes.REPEATED_NESTED_MESSAGE_FIELD_NUMBER, 48)
self.assertEqual(
unittest_pb2.TestAllTypes.REPEATED_NESTED_ENUM_FIELD_NUMBER, 51)
def testExtensionFieldNumbers(self):
self.assertEqual(unittest_pb2.TestRequired.single.number, 1000)
self.assertEqual(unittest_pb2.TestRequired.SINGLE_FIELD_NUMBER, 1000)
self.assertEqual(unittest_pb2.TestRequired.multi.number, 1001)
self.assertEqual(unittest_pb2.TestRequired.MULTI_FIELD_NUMBER, 1001)
self.assertEqual(unittest_pb2.optional_int32_extension.number, 1)
self.assertEqual(unittest_pb2.OPTIONAL_INT32_EXTENSION_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.optionalgroup_extension.number, 16)
self.assertEqual(unittest_pb2.OPTIONALGROUP_EXTENSION_FIELD_NUMBER, 16)
self.assertEqual(unittest_pb2.optional_nested_message_extension.number, 18)
self.assertEqual(
unittest_pb2.OPTIONAL_NESTED_MESSAGE_EXTENSION_FIELD_NUMBER, 18)
self.assertEqual(unittest_pb2.optional_nested_enum_extension.number, 21)
self.assertEqual(unittest_pb2.OPTIONAL_NESTED_ENUM_EXTENSION_FIELD_NUMBER,
21)
self.assertEqual(unittest_pb2.repeated_int32_extension.number, 31)
self.assertEqual(unittest_pb2.REPEATED_INT32_EXTENSION_FIELD_NUMBER, 31)
self.assertEqual(unittest_pb2.repeatedgroup_extension.number, 46)
self.assertEqual(unittest_pb2.REPEATEDGROUP_EXTENSION_FIELD_NUMBER, 46)
self.assertEqual(unittest_pb2.repeated_nested_message_extension.number, 48)
self.assertEqual(
unittest_pb2.REPEATED_NESTED_MESSAGE_EXTENSION_FIELD_NUMBER, 48)
self.assertEqual(unittest_pb2.repeated_nested_enum_extension.number, 51)
self.assertEqual(unittest_pb2.REPEATED_NESTED_ENUM_EXTENSION_FIELD_NUMBER,
51)
def testInitKwargs(self):
proto = unittest_pb2.TestAllTypes(
optional_int32=1,
optional_string='foo',
optional_bool=True,
optional_bytes=b'bar',
optional_nested_message=unittest_pb2.TestAllTypes.NestedMessage(bb=1),
optional_foreign_message=unittest_pb2.ForeignMessage(c=1),
optional_nested_enum=unittest_pb2.TestAllTypes.FOO,
optional_foreign_enum=unittest_pb2.FOREIGN_FOO,
repeated_int32=[1, 2, 3])
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('optional_int32'))
self.assertTrue(proto.HasField('optional_string'))
self.assertTrue(proto.HasField('optional_bool'))
self.assertTrue(proto.HasField('optional_bytes'))
self.assertTrue(proto.HasField('optional_nested_message'))
self.assertTrue(proto.HasField('optional_foreign_message'))
self.assertTrue(proto.HasField('optional_nested_enum'))
self.assertTrue(proto.HasField('optional_foreign_enum'))
self.assertEqual(1, proto.optional_int32)
self.assertEqual('foo', proto.optional_string)
self.assertEqual(True, proto.optional_bool)
self.assertEqual(b'bar', proto.optional_bytes)
self.assertEqual(1, proto.optional_nested_message.bb)
self.assertEqual(1, proto.optional_foreign_message.c)
self.assertEqual(unittest_pb2.TestAllTypes.FOO,
proto.optional_nested_enum)
self.assertEqual(unittest_pb2.FOREIGN_FOO, proto.optional_foreign_enum)
self.assertEqual([1, 2, 3], proto.repeated_int32)
def testInitArgsUnknownFieldName(self):
def InitializeEmptyMessageWithExtraKeywordArg():
unused_proto = unittest_pb2.TestEmptyMessage(unknown='unknown')
self._CheckRaises(ValueError,
InitializeEmptyMessageWithExtraKeywordArg,
'Protocol message has no "unknown" field.')
def testInitRequiredKwargs(self):
proto = unittest_pb2.TestRequired(a=1, b=1, c=1)
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('a'))
self.assertTrue(proto.HasField('b'))
self.assertTrue(proto.HasField('c'))
self.assertTrue(not proto.HasField('dummy2'))
self.assertEqual(1, proto.a)
self.assertEqual(1, proto.b)
self.assertEqual(1, proto.c)
def testInitRequiredForeignKwargs(self):
proto = unittest_pb2.TestRequiredForeign(
optional_message=unittest_pb2.TestRequired(a=1, b=1, c=1))
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('optional_message'))
self.assertTrue(proto.optional_message.IsInitialized())
self.assertTrue(proto.optional_message.HasField('a'))
self.assertTrue(proto.optional_message.HasField('b'))
self.assertTrue(proto.optional_message.HasField('c'))
self.assertTrue(not proto.optional_message.HasField('dummy2'))
self.assertEqual(unittest_pb2.TestRequired(a=1, b=1, c=1),
proto.optional_message)
self.assertEqual(1, proto.optional_message.a)
self.assertEqual(1, proto.optional_message.b)
self.assertEqual(1, proto.optional_message.c)
def testInitRepeatedKwargs(self):
proto = unittest_pb2.TestAllTypes(repeated_int32=[1, 2, 3])
self.assertTrue(proto.IsInitialized())
self.assertEqual(1, proto.repeated_int32[0])
self.assertEqual(2, proto.repeated_int32[1])
self.assertEqual(3, proto.repeated_int32[2])
class OptionsTest(basetest.TestCase):
def testMessageOptions(self):
proto = unittest_mset_pb2.TestMessageSet()
self.assertEqual(True,
proto.DESCRIPTOR.GetOptions().message_set_wire_format)
proto = unittest_pb2.TestAllTypes()
self.assertEqual(False,
proto.DESCRIPTOR.GetOptions().message_set_wire_format)
def testPackedOptions(self):
proto = unittest_pb2.TestAllTypes()
proto.optional_int32 = 1
proto.optional_double = 3.0
for field_descriptor, _ in proto.ListFields():
self.assertEqual(False, field_descriptor.GetOptions().packed)
proto = unittest_pb2.TestPackedTypes()
proto.packed_int32.append(1)
proto.packed_double.append(3.0)
for field_descriptor, _ in proto.ListFields():
self.assertEqual(True, field_descriptor.GetOptions().packed)
self.assertEqual(reflection._FieldDescriptor.LABEL_REPEATED,
field_descriptor.label)
class ClassAPITest(basetest.TestCase):
def testMakeClassWithNestedDescriptor(self):
leaf_desc = descriptor.Descriptor('leaf', 'package.parent.child.leaf', '',
containing_type=None, fields=[],
nested_types=[], enum_types=[],
extensions=[])
child_desc = descriptor.Descriptor('child', 'package.parent.child', '',
containing_type=None, fields=[],
nested_types=[leaf_desc], enum_types=[],
extensions=[])
sibling_desc = descriptor.Descriptor('sibling', 'package.parent.sibling',
'', containing_type=None, fields=[],
nested_types=[], enum_types=[],
extensions=[])
parent_desc = descriptor.Descriptor('parent', 'package.parent', '',
containing_type=None, fields=[],
nested_types=[child_desc, sibling_desc],
enum_types=[], extensions=[])
message_class = reflection.MakeClass(parent_desc)
self.assertIn('child', message_class.__dict__)
self.assertIn('sibling', message_class.__dict__)
self.assertIn('leaf', message_class.child.__dict__)
def _GetSerializedFileDescriptor(self, name):
"""Get a serialized representation of a test FileDescriptorProto.
Args:
name: All calls to this must use a unique message name, to avoid
collisions in the cpp descriptor pool.
Returns:
A string containing the serialized form of a test FileDescriptorProto.
"""
file_descriptor_str = (
'message_type {'
' name: "' + name + '"'
' field {'
' name: "flat"'
' number: 1'
' label: LABEL_REPEATED'
' type: TYPE_UINT32'
' }'
' field {'
' name: "bar"'
' number: 2'
' label: LABEL_OPTIONAL'
' type: TYPE_MESSAGE'
' type_name: "Bar"'
' }'
' nested_type {'
' name: "Bar"'
' field {'
' name: "baz"'
' number: 3'
' label: LABEL_OPTIONAL'
' type: TYPE_MESSAGE'
' type_name: "Baz"'
' }'
' nested_type {'
' name: "Baz"'
' enum_type {'
' name: "deep_enum"'
' value {'
' name: "VALUE_A"'
' number: 0'
' }'
' }'
' field {'
' name: "deep"'
' number: 4'
' label: LABEL_OPTIONAL'
' type: TYPE_UINT32'
' }'
' }'
' }'
'}')
file_descriptor = descriptor_pb2.FileDescriptorProto()
text_format.Merge(file_descriptor_str, file_descriptor)
return file_descriptor.SerializeToString()
def testParsingFlatClassWithExplicitClassDeclaration(self):
"""Test that the generated class can parse a flat message."""
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(self._GetSerializedFileDescriptor('A'))
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
class MessageClass(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = msg_descriptor
msg = MessageClass()
msg_str = (
'flat: 0 '
'flat: 1 '
'flat: 2 ')
text_format.Merge(msg_str, msg)
self.assertEqual(msg.flat, [0, 1, 2])
def testParsingFlatClass(self):
"""Test that the generated class can parse a flat message."""
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(self._GetSerializedFileDescriptor('B'))
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
msg_str = (
'flat: 0 '
'flat: 1 '
'flat: 2 ')
text_format.Merge(msg_str, msg)
self.assertEqual(msg.flat, [0, 1, 2])
def testParsingNestedClass(self):
"""Test that the generated class can parse a nested message."""
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(self._GetSerializedFileDescriptor('C'))
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
msg_str = (
'bar {'
' baz {'
' deep: 4'
' }'
'}')
text_format.Merge(msg_str, msg)
self.assertEqual(msg.bar.baz.deep, 4)
if __name__ == '__main__':
basetest.main()
| bsd-3-clause |
undoware/neutron-drive | google_appengine/google/appengine/_internal/django/core/servers/fastcgi.py | 23 | 6483 | """
FastCGI (or SCGI, or AJP1.3 ...) server that implements the WSGI protocol.
Uses the flup python package: http://www.saddi.com/software/flup/
This is an adaptation of the flup package to add FastCGI server support
to run Django apps from Web servers that support the FastCGI protocol.
This module can be run standalone or from the django-admin / manage.py
scripts using the "runfcgi" directive.
Run with the extra option "help" for a list of additional options you can
pass to this server.
"""
from google.appengine._internal.django.utils import importlib
import sys, os
__version__ = "0.1"
__all__ = ["runfastcgi"]
FASTCGI_HELP = r"""
Run this project as a fastcgi (or some other protocol supported
by flup) application. To do this, the flup package from
http://www.saddi.com/software/flup/ is required.
runfcgi [options] [fcgi settings]
Optional Fcgi settings: (setting=value)
protocol=PROTOCOL fcgi, scgi, ajp, ... (default fcgi)
host=HOSTNAME hostname to listen on.
port=PORTNUM port to listen on.
socket=FILE UNIX socket to listen on.
method=IMPL prefork or threaded (default prefork).
maxrequests=NUMBER number of requests a child handles before it is
killed and a new child is forked (0 = no limit).
maxspare=NUMBER max number of spare processes / threads.
minspare=NUMBER min number of spare processes / threads.
maxchildren=NUMBER hard limit number of processes / threads.
daemonize=BOOL whether to detach from terminal.
pidfile=FILE write the spawned process-id to this file.
workdir=DIRECTORY change to this directory when daemonizing.
debug=BOOL set to true to enable flup tracebacks.
outlog=FILE write stdout to this file.
errlog=FILE write stderr to this file.
umask=UMASK umask to use when daemonizing, in octal notation (default 022).
Examples:
Run a "standard" fastcgi process on a file-descriptor
(for Web servers which spawn your processes for you)
$ manage.py runfcgi method=threaded
Run a scgi server on a TCP host/port
$ manage.py runfcgi protocol=scgi method=prefork host=127.0.0.1 port=8025
Run a fastcgi server on a UNIX domain socket (posix platforms only)
$ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock
Run a fastCGI as a daemon and write the spawned PID in a file
$ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \
daemonize=true pidfile=/var/run/django-fcgi.pid
"""
FASTCGI_OPTIONS = {
'protocol': 'fcgi',
'host': None,
'port': None,
'socket': None,
'method': 'fork',
'daemonize': None,
'workdir': '/',
'pidfile': None,
'maxspare': 5,
'minspare': 2,
'maxchildren': 50,
'maxrequests': 0,
'debug': None,
'outlog': None,
'errlog': None,
'umask': None,
}
def fastcgi_help(message=None):
print FASTCGI_HELP
if message:
print message
return False
def runfastcgi(argset=[], **kwargs):
options = FASTCGI_OPTIONS.copy()
options.update(kwargs)
for x in argset:
if "=" in x:
k, v = x.split('=', 1)
else:
k, v = x, True
options[k.lower()] = v
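# Illustrative result (values hypothetical): argset=['method=prefork', 'daemonize']
# leaves options['method'] == 'prefork' and options['daemonize'] is True.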
if "help" in options:
return fastcgi_help()
try:
import flup
except ImportError, e:
print >> sys.stderr, "ERROR: %s" % e
print >> sys.stderr, " Unable to load the flup package. In order to run django"
print >> sys.stderr, " as a FastCGI application, you will need to get flup from"
print >> sys.stderr, " http://www.saddi.com/software/flup/ If you've already"
print >> sys.stderr, " installed flup, then make sure you have it in your PYTHONPATH."
return False
flup_module = 'server.' + options['protocol']
if options['method'] in ('prefork', 'fork'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxChildren': int(options["maxchildren"]),
'maxRequests': int(options["maxrequests"]),
}
flup_module += '_fork'
elif options['method'] in ('thread', 'threaded'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxThreads': int(options["maxchildren"]),
}
else:
return fastcgi_help("ERROR: Implementation must be one of prefork or thread.")
wsgi_opts['debug'] = options['debug'] is not None
try:
module = importlib.import_module('.%s' % flup_module, 'flup')
WSGIServer = module.WSGIServer
except:
print "Can't import flup." + flup_module
return False
# Prep up and go
from google.appengine._internal.django.core.handlers.wsgi import WSGIHandler
if options["host"] and options["port"] and not options["socket"]:
wsgi_opts['bindAddress'] = (options["host"], int(options["port"]))
elif options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = options["socket"]
elif not options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = None
else:
return fastcgi_help("Invalid combination of host, port, socket.")
if options["daemonize"] is None:
# Default to daemonizing if we're running on a socket/named pipe.
daemonize = (wsgi_opts['bindAddress'] is not None)
else:
if options["daemonize"].lower() in ('true', 'yes', 't'):
daemonize = True
elif options["daemonize"].lower() in ('false', 'no', 'f'):
daemonize = False
else:
return fastcgi_help("ERROR: Invalid option for daemonize parameter.")
daemon_kwargs = {}
if options['outlog']:
daemon_kwargs['out_log'] = options['outlog']
if options['errlog']:
daemon_kwargs['err_log'] = options['errlog']
if options['umask']:
daemon_kwargs['umask'] = int(options['umask'], 8)
if daemonize:
from google.appengine._internal.django.utils.daemonize import become_daemon
become_daemon(our_home_dir=options["workdir"], **daemon_kwargs)
if options["pidfile"]:
fp = open(options["pidfile"], "w")
fp.write("%d\n" % os.getpid())
fp.close()
WSGIServer(WSGIHandler(), **wsgi_opts).run()
if __name__ == '__main__':
runfastcgi(sys.argv[1:])
| bsd-3-clause |
eneldoserrata/marcos_openerp | addons/report_geraldo/lib/geraldo/site/newsite/site-geraldo/django/contrib/sessions/models.py | 25 | 2675 | import base64
import cPickle as pickle
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils.hashcompat import md5_constructor
class SessionManager(models.Manager):
def encode(self, session_dict):
"""
Returns the given session dictionary pickled and encoded as a string.
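A hypothetical round trip (the payload is illustrative)::

    encoded = Session.objects.encode({'user_id': 42})
    # a Session storing this data later yields get_decoded() == {'user_id': 42}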
"""
pickled = pickle.dumps(session_dict)
pickled_md5 = md5_constructor(pickled + settings.SECRET_KEY).hexdigest()
return base64.encodestring(pickled + pickled_md5)
def save(self, session_key, session_dict, expire_date):
s = self.model(session_key, self.encode(session_dict), expire_date)
if session_dict:
s.save()
else:
s.delete() # Clear sessions with no data.
return s
class Session(models.Model):
"""
Django provides full support for anonymous sessions. The session
framework lets you store and retrieve arbitrary data on a
per-site-visitor basis. It stores data on the server side and
abstracts the sending and receiving of cookies. Cookies contain a
session ID -- not the data itself.
The Django sessions framework is entirely cookie-based. It does
not fall back to putting session IDs in URLs. This is an intentional
design decision. Not only does that behavior make URLs ugly, it makes
your site vulnerable to session-ID theft via the "Referer" header.
For complete documentation on using Sessions in your code, consult
the sessions documentation that is shipped with Django (also available
on the Django website).
"""
session_key = models.CharField(_('session key'), max_length=40,
primary_key=True)
session_data = models.TextField(_('session data'))
expire_date = models.DateTimeField(_('expire date'))
objects = SessionManager()
class Meta:
db_table = 'django_session'
verbose_name = _('session')
verbose_name_plural = _('sessions')
def get_decoded(self):
encoded_data = base64.decodestring(self.session_data)
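# The md5 hexdigest is always 32 characters, so the tamper check is the
# last 32 bytes of the base64-decoded payload.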
pickled, tamper_check = encoded_data[:-32], encoded_data[-32:]
if md5_constructor(pickled + settings.SECRET_KEY).hexdigest() != tamper_check:
from django.core.exceptions import SuspiciousOperation
raise SuspiciousOperation, "User tampered with session cookie."
try:
return pickle.loads(pickled)
# Unpickling can cause a variety of exceptions. If something happens,
# just return an empty dictionary (an empty session).
except:
return {}
| agpl-3.0 |
janocat/odoo | openerp/modules/loading.py | 131 | 21198 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules (also called addons) management.
"""
import itertools
import logging
import os
import sys
import threading
import time
import openerp
import openerp.modules.db
import openerp.modules.graph
import openerp.modules.migration
import openerp.modules.registry
import openerp.osv as osv
import openerp.tools as tools
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.modules.module import initialize_sys_path, \
load_openerp_module, init_module_models, adapt_version
from module import runs_post_install
_logger = logging.getLogger(__name__)
_test_logger = logging.getLogger('openerp.tests')
def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None):
"""Migrates+Updates or Installs all module nodes from ``graph``
:param graph: graph of module nodes to load
:param status: deprecated parameter, unused, left to avoid changing signature in 8.0
:param perform_checks: whether module descriptors should be checked for validity (prints warnings
for some cases)
:param skip_modules: optional list of module names (packages) which have previously been loaded and can be skipped
:return: a (loaded_modules, processed_modules) tuple of module name lists
"""
def load_test(module_name, idref, mode):
cr.commit()
try:
_load_data(cr, module_name, idref, mode, 'test')
return True
except Exception:
_test_logger.exception(
'module %s: an exception occurred in a test', module_name)
return False
finally:
if tools.config.options['test_commit']:
cr.commit()
else:
cr.rollback()
# avoid keeping stale xml_id, etc. in cache
openerp.modules.registry.RegistryManager.clear_caches(cr.dbname)
def _get_files_of_kind(kind):
if kind == 'demo':
kind = ['demo_xml', 'demo']
elif kind == 'data':
kind = ['init_xml', 'update_xml', 'data']
if isinstance(kind, str):
kind = [kind]
files = []
for k in kind:
for f in package.data[k]:
files.append(f)
if k.endswith('_xml') and not (k == 'init_xml' and not f.endswith('.xml')):
# init_xml, update_xml and demo_xml are deprecated except
# for the case of init_xml with yaml, csv and sql files as
# we can't specify noupdate for those files.
correct_key = 'demo' if k.count('demo') else 'data'
_logger.warning(
"module %s: key '%s' is deprecated in favor of '%s' for file '%s'.",
package.name, k, correct_key, f
)
return files
def _load_data(cr, module_name, idref, mode, kind):
"""
kind: data, demo, test, init_xml, update_xml, demo_xml.
noupdate is False, unless it is demo data or it is csv data in
init mode.
"""
try:
if kind in ('demo', 'test'):
threading.currentThread().testing = True
for filename in _get_files_of_kind(kind):
_logger.info("loading %s/%s", module_name, filename)
noupdate = False
if kind in ('demo', 'demo_xml') or (filename.endswith('.csv') and kind in ('init', 'init_xml')):
noupdate = True
tools.convert_file(cr, module_name, filename, idref, mode, noupdate, kind, report)
finally:
if kind in ('demo', 'test'):
threading.currentThread().testing = False
processed_modules = []
loaded_modules = []
registry = openerp.registry(cr.dbname)
migrations = openerp.modules.migration.MigrationManager(cr, graph)
_logger.info('loading %d modules...', len(graph))
registry.clear_manual_fields()
# register, instantiate and initialize models for each modules
t0 = time.time()
t0_sql = openerp.sql_db.sql_counter
for index, package in enumerate(graph):
module_name = package.name
module_id = package.id
if skip_modules and module_name in skip_modules:
continue
migrations.migrate_module(package, 'pre')
load_openerp_module(package.name)
new_install = package.state == 'to install'
if new_install:
py_module = sys.modules['openerp.addons.%s' % (module_name,)]
pre_init = package.info.get('pre_init_hook')
if pre_init:
getattr(py_module, pre_init)(cr)
models = registry.load(cr, package)
loaded_modules.append(package.name)
if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
registry.setup_models(cr, partial=True)
init_module_models(cr, package.name, models)
idref = {}
mode = 'update'
if hasattr(package, 'init') or package.state == 'to install':
mode = 'init'
if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
# Can't put this line out of the loop: ir.module.module will be
# registered by init_module_models() above.
modobj = registry['ir.module.module']
if perform_checks:
modobj.check(cr, SUPERUSER_ID, [module_id])
if package.state=='to upgrade':
# upgrading the module information
modobj.write(cr, SUPERUSER_ID, [module_id], modobj.get_values_from_terp(package.data))
_load_data(cr, module_name, idref, mode, kind='data')
has_demo = hasattr(package, 'demo') or (package.dbdemo and package.state != 'installed')
if has_demo:
_load_data(cr, module_name, idref, mode, kind='demo')
cr.execute('update ir_module_module set demo=%s where id=%s', (True, module_id))
modobj.invalidate_cache(cr, SUPERUSER_ID, ['demo'], [module_id])
migrations.migrate_module(package, 'post')
if new_install:
post_init = package.info.get('post_init_hook')
if post_init:
getattr(py_module, post_init)(cr, registry)
registry._init_modules.add(package.name)
# validate all the views at a whole
registry['ir.ui.view']._validate_module_views(cr, SUPERUSER_ID, module_name)
if has_demo:
# launch tests only in demo mode, allowing tests to use demo data.
if tools.config.options['test_enable']:
# YAML test
report.record_result(load_test(module_name, idref, mode))
# Python tests
ir_http = registry['ir.http']
if hasattr(ir_http, '_routing_map'):
# Force routing map to be rebuilt between each module test suite
del(ir_http._routing_map)
report.record_result(openerp.modules.module.run_unit_tests(module_name, cr.dbname))
processed_modules.append(package.name)
ver = adapt_version(package.data['version'])
# Set new modules and dependencies
modobj.write(cr, SUPERUSER_ID, [module_id], {'state': 'installed', 'latest_version': ver})
# Update translations for all installed languages
modobj.update_translations(cr, SUPERUSER_ID, [module_id], None, {'overwrite': openerp.tools.config["overwrite_existing_translations"]})
package.state = 'installed'
for kind in ('init', 'demo', 'update'):
if hasattr(package, kind):
delattr(package, kind)
registry._init_modules.add(package.name)
cr.commit()
_logger.log(25, "%s modules loaded in %.2fs, %s queries", len(graph), time.time() - t0, openerp.sql_db.sql_counter - t0_sql)
registry.clear_manual_fields()
cr.commit()
return loaded_modules, processed_modules
def _check_module_names(cr, module_names):
mod_names = set(module_names)
if 'base' in mod_names:
# ignore dummy 'all' module
if 'all' in mod_names:
mod_names.remove('all')
if mod_names:
cr.execute("SELECT count(id) AS count FROM ir_module_module WHERE name in %s", (tuple(mod_names),))
if cr.dictfetchone()['count'] != len(mod_names):
# find out what module name(s) are incorrect:
cr.execute("SELECT name FROM ir_module_module")
incorrect_names = mod_names.difference([x['name'] for x in cr.dictfetchall()])
_logger.warning('invalid module names, ignored: %s', ", ".join(incorrect_names))
def load_marked_modules(cr, graph, states, force, progressdict, report, loaded_modules, perform_checks):
"""Loads modules marked with ``states``, adding them to ``graph`` and
``loaded_modules`` and returns a list of installed/upgraded modules."""
processed_modules = []
while True:
cr.execute("SELECT name from ir_module_module WHERE state IN %s" ,(tuple(states),))
module_list = [name for (name,) in cr.fetchall() if name not in graph]
if not module_list:
break
graph.add_modules(cr, module_list, force)
_logger.debug('Updating graph with %d more modules', len(module_list))
loaded, processed = load_module_graph(cr, graph, progressdict, report=report, skip_modules=loaded_modules, perform_checks=perform_checks)
processed_modules.extend(processed)
loaded_modules.extend(loaded)
if not processed:
break
return processed_modules
def load_modules(db, force_demo=False, status=None, update_module=False):
initialize_sys_path()
force = []
if force_demo:
force.append('demo')
cr = db.cursor()
try:
if not openerp.modules.db.is_initialized(cr):
_logger.info("init db")
openerp.modules.db.initialize(cr)
update_module = True # process auto-installed modules
tools.config["init"]["all"] = 1
tools.config['update']['all'] = 1
if not tools.config['without_demo']:
tools.config["demo"]['all'] = 1
# This is a brand new registry, just created in
# openerp.modules.registry.RegistryManager.new().
registry = openerp.registry(cr.dbname)
if 'base' in tools.config['update'] or 'all' in tools.config['update']:
cr.execute("update ir_module_module set state=%s where name=%s and state=%s", ('to upgrade', 'base', 'installed'))
# STEP 1: LOAD BASE (must be done before module dependencies can be computed for later steps)
graph = openerp.modules.graph.Graph()
graph.add_module(cr, 'base', force)
if not graph:
_logger.critical('module base cannot be loaded! (hint: verify addons-path)')
raise osv.osv.except_osv(_('Could not load base module'), _('module base cannot be loaded! (hint: verify addons-path)'))
# processed_modules: for cleanup step after install
# loaded_modules: to avoid double loading
report = registry._assertion_report
loaded_modules, processed_modules = load_module_graph(cr, graph, status, perform_checks=update_module, report=report)
if tools.config['load_language'] or update_module:
# some base models are used below, so make sure they are set up
registry.setup_models(cr, partial=True)
if tools.config['load_language']:
for lang in tools.config['load_language'].split(','):
tools.load_language(cr, lang)
# STEP 2: Mark other modules to be loaded/updated
if update_module:
modobj = registry['ir.module.module']
if ('base' in tools.config['init']) or ('base' in tools.config['update']):
_logger.info('updating modules list')
modobj.update_list(cr, SUPERUSER_ID)
_check_module_names(cr, itertools.chain(tools.config['init'].keys(), tools.config['update'].keys()))
mods = [k for k in tools.config['init'] if tools.config['init'][k]]
if mods:
ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'uninstalled'), ('name', 'in', mods)])
if ids:
modobj.button_install(cr, SUPERUSER_ID, ids)
mods = [k for k in tools.config['update'] if tools.config['update'][k]]
if mods:
ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'installed'), ('name', 'in', mods)])
if ids:
modobj.button_upgrade(cr, SUPERUSER_ID, ids)
cr.execute("update ir_module_module set state=%s where name=%s", ('installed', 'base'))
modobj.invalidate_cache(cr, SUPERUSER_ID, ['state'])
# STEP 3: Load marked modules (skipping base which was done in STEP 1)
# IMPORTANT: this is done in two parts, first loading all installed or
# partially installed modules (i.e. installed/to upgrade), to
# offer a consistent system to the second part: installing
# newly selected modules.
# We include the modules 'to remove' in the first step, because
# they are part of the "currently installed" modules. They will
# be dropped in STEP 6 later, before restarting the loading
# process.
# IMPORTANT 2: We have to loop here until all relevant modules have been
# processed, because in some rare cases the dependencies have
# changed, and modules that depend on an uninstalled module
# will not be processed on the first pass.
# It's especially useful for migrations.
previously_processed = -1
while previously_processed < len(processed_modules):
previously_processed = len(processed_modules)
processed_modules += load_marked_modules(cr, graph,
['installed', 'to upgrade', 'to remove'],
force, status, report, loaded_modules, update_module)
if update_module:
processed_modules += load_marked_modules(cr, graph,
['to install'], force, status, report,
loaded_modules, update_module)
registry.setup_models(cr)
# STEP 4: Finish and cleanup installations
if processed_modules:
cr.execute("""select model,name from ir_model where id NOT IN (select distinct model_id from ir_model_access)""")
for (model, name) in cr.fetchall():
if model in registry and not registry[model].is_transient() and not isinstance(registry[model], openerp.osv.orm.AbstractModel):
_logger.warning('The model %s has no access rules, consider adding one. E.g. access_%s,access_%s,model_%s,,1,0,0,0',
model, model.replace('.', '_'), model.replace('.', '_'), model.replace('.', '_'))
# Temporary warning while we remove access rights on osv_memory objects, as they have
# been replaced by owner-only access rights
cr.execute("""select distinct mod.model, mod.name from ir_model_access acc, ir_model mod where acc.model_id = mod.id""")
for (model, name) in cr.fetchall():
if model in registry and registry[model].is_transient():
_logger.warning('The transient model %s (%s) should not have explicit access rules!', model, name)
cr.execute("SELECT model from ir_model")
for (model,) in cr.fetchall():
if model in registry:
registry[model]._check_removed_columns(cr, log=True)
else:
_logger.warning("Model %s is declared but cannot be loaded! (Perhaps a module was partially removed or renamed)", model)
# Cleanup orphan records
registry['ir.model.data']._process_end(cr, SUPERUSER_ID, processed_modules)
for kind in ('init', 'demo', 'update'):
tools.config[kind] = {}
cr.commit()
# STEP 5: Cleanup menus
# Remove menu items that are not referenced by any of other
# (child) menu item, ir_values, or ir_model_data.
# TODO: This code could be a method of ir_ui_menu. Remove menus that have no actions and no children.
if update_module:
while True:
cr.execute('''delete from
ir_ui_menu
where
(id not IN (select parent_id from ir_ui_menu where parent_id is not null))
and
(id not IN (select res_id from ir_values where model='ir.ui.menu'))
and
(id not IN (select res_id from ir_model_data where model='ir.ui.menu'))''')
cr.commit()
if not cr.rowcount:
break
else:
_logger.info('removed %d unused menus', cr.rowcount)
# STEP 6: Uninstall modules to remove
if update_module:
# Remove records referenced from ir_model_data for modules to be
# removed (and removed the references from ir_model_data).
cr.execute("SELECT name, id FROM ir_module_module WHERE state=%s", ('to remove',))
modules_to_remove = dict(cr.fetchall())
if modules_to_remove:
pkgs = reversed([p for p in graph if p.name in modules_to_remove])
for pkg in pkgs:
uninstall_hook = pkg.info.get('uninstall_hook')
if uninstall_hook:
py_module = sys.modules['openerp.addons.%s' % (pkg.name,)]
getattr(py_module, uninstall_hook)(cr, registry)
registry['ir.module.module'].module_uninstall(cr, SUPERUSER_ID, modules_to_remove.values())
# Recursive reload, should only happen once, because there should be no
# modules to remove next time
cr.commit()
_logger.info('Reloading registry once more after uninstalling modules')
openerp.api.Environment.reset()
return openerp.modules.registry.RegistryManager.new(cr.dbname, force_demo, status, update_module)
# STEP 7: verify custom views on every model
if update_module:
Views = registry['ir.ui.view']
custom_view_test = True
for model in registry.models.keys():
if not Views._validate_custom_views(cr, SUPERUSER_ID, model):
custom_view_test = False
_logger.error('invalid custom view(s) for model %s', model)
report.record_result(custom_view_test)
if report.failures:
_logger.error('At least one test failed when loading the modules.')
else:
_logger.info('Modules loaded.')
# STEP 8: call _register_hook on every model
for model in registry.models.values():
model._register_hook(cr)
# STEP 9: Run the post-install tests
cr.commit()
t0 = time.time()
t0_sql = openerp.sql_db.sql_counter
if openerp.tools.config['test_enable']:
cr.execute("SELECT name FROM ir_module_module WHERE state='installed'")
for module_name in cr.fetchall():
report.record_result(openerp.modules.module.run_unit_tests(module_name[0], cr.dbname, position=runs_post_install))
_logger.log(25, "All post-tested in %.2fs, %s queries", time.time() - t0, openerp.sql_db.sql_counter - t0_sql)
finally:
cr.close()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
anryko/ansible | lib/ansible/modules/packaging/os/svr4pkg.py | 95 | 7684 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Boyd Adamson <boyd () boydadamson.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: svr4pkg
short_description: Manage Solaris SVR4 packages
description:
- Manages SVR4 packages on Solaris 10 and 11.
- These were the native packages on Solaris <= 10 and are available
as a legacy feature in Solaris 11.
- Note that this is a very basic packaging system. It will not enforce
dependencies on install or remove.
version_added: "0.9"
author: "Boyd Adamson (@brontitall)"
options:
name:
description:
- Package name, e.g. C(SUNWcsr)
required: true
state:
description:
- Whether to install (C(present)), or remove (C(absent)) a package.
- If the package is to be installed, then I(src) is required.
- The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
required: true
choices: ["present", "absent"]
src:
description:
- Specifies the location to install the package from. Required when C(state=present).
- "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)."
- If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there.
proxy:
description:
- HTTP[s] proxy to be used if C(src) is a URL.
response_file:
description:
- Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
required: false
zone:
description:
- Whether to install the package only in the current zone, or install it into all zones.
- The installation into all zones works only if you are working with the global zone.
required: false
default: "all"
choices: ["current", "all"]
version_added: "1.6"
category:
description:
- Install/Remove category instead of a single package.
required: false
type: bool
version_added: "1.6"
'''
EXAMPLES = '''
# Install a package from an already copied file
- svr4pkg:
name: CSWcommon
src: /tmp/cswpkgs.pkg
state: present
# Install a package directly from an http site
- svr4pkg:
name: CSWpkgutil
src: 'http://get.opencsw.org/now'
state: present
zone: current
# Install a package with a response file
- svr4pkg:
name: CSWggrep
src: /tmp/third-party.pkg
response_file: /tmp/ggrep.response
state: present
# Ensure that a package is not installed.
- svr4pkg:
name: SUNWgnome-sound-recorder
state: absent
# Ensure that a category is not installed.
- svr4pkg:
name: FIREFOX
state: absent
category: true
'''
import os
import tempfile
from ansible.module_utils.basic import AnsibleModule
def package_installed(module, name, category):
cmd = [module.get_bin_path('pkginfo', True)]
cmd.append('-q')
if category:
cmd.append('-c')
cmd.append(name)
rc, out, err = module.run_command(' '.join(cmd))
if rc == 0:
return True
else:
return False
def create_admin_file():
(desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
fullauto = '''
mail=
instance=unique
partial=nocheck
runlevel=quit
idepend=nocheck
rdepend=nocheck
space=quit
setuid=nocheck
conflict=nocheck
action=nocheck
networktimeout=60
networkretries=3
authentication=quit
keystore=/var/sadm/security
proxy=
basedir=default
'''
os.write(desc, fullauto.encode())  # os.write() expects bytes; the template is ASCII-only
os.close(desc)
return filename
def run_command(module, cmd):
progname = cmd[0]
cmd[0] = module.get_bin_path(progname, True)
return module.run_command(cmd)
def package_install(module, name, src, proxy, response_file, zone, category):
adminfile = create_admin_file()
cmd = ['pkgadd', '-n']
if zone == 'current':
cmd += ['-G']
cmd += ['-a', adminfile, '-d', src]
if proxy is not None:
cmd += ['-x', proxy]
if response_file is not None:
cmd += ['-r', response_file]
if category:
cmd += ['-Y']
cmd.append(name)
(rc, out, err) = run_command(module, cmd)
os.unlink(adminfile)
return (rc, out, err)
def package_uninstall(module, name, src, category):
adminfile = create_admin_file()
if category:
cmd = ['pkgrm', '-na', adminfile, '-Y', name]
else:
cmd = ['pkgrm', '-na', adminfile, name]
(rc, out, err) = run_command(module, cmd)
os.unlink(adminfile)
return (rc, out, err)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(required=True, choices=['present', 'absent']),
src=dict(default=None),
proxy=dict(default=None),
response_file=dict(default=None),
zone=dict(required=False, default='all', choices=['current', 'all']),
category=dict(default=False, type='bool')
),
supports_check_mode=True
)
state = module.params['state']
name = module.params['name']
src = module.params['src']
proxy = module.params['proxy']
response_file = module.params['response_file']
zone = module.params['zone']
category = module.params['category']
rc = None
out = ''
err = ''
result = {}
result['name'] = name
result['state'] = state
if state == 'present':
if src is None:
module.fail_json(name=name,
msg="src is required when state=present")
if not package_installed(module, name, category):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
# Stdout is normally empty but for some packages can be
# very long and is not often useful
if len(out) > 75:
out = out[:75] + '...'
elif state == 'absent':
if package_installed(module, name, category):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_uninstall(module, name, src, category)
out = out[:75]
# Returncodes as per pkgadd(1m)
# 0 Successful completion
# 1 Fatal error.
# 2 Warning.
# 3 Interruption.
# 4 Administration.
# 5 Administration. Interaction is required. Do not use pkgadd -n.
# 10 Reboot after installation of all packages.
# 20 Reboot after installation of this package.
# 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>
if rc in (0, 2, 3, 10, 20):
result['changed'] = True
# no install nor uninstall, or failed
else:
result['changed'] = False
# rc will be none when the package already was installed and no action took place
# Only return failed=False when the returncode is known to be good as there may be more
# undocumented failure return codes
if rc not in (None, 0, 2, 10, 20):
result['failed'] = True
else:
result['failed'] = False
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
phlax/pootle | pootle/apps/pootle_app/views/admin/util.py | 7 | 6759 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django import forms
from django.forms.models import modelformset_factory
from django.forms.utils import ErrorList
from django.shortcuts import render
from django.utils.safestring import mark_safe
from pootle.core.paginator import paginate
from pootle.i18n.gettext import ugettext as _
def form_set_as_table(formset, link=None, linkfield='code'):
"""Create an HTML table from the formset. The first form in the
formset is used to obtain a list of the fields that need to be
displayed.
Errors, if any, appear in a row above the form that triggered them.
If the forms are based on database models, the order of the
columns is determined by the order of the fields in the model
specification.
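A hypothetical call (the formset variable is illustrative)::

    table_body = form_set_as_table(language_formset, linkfield='code')
    # table_body holds the inner <thead>/<tfoot>/<tbody> markup for a <table>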
"""
def add_header(result, fields, form):
result.append('<tr>\n')
for field in fields:
widget = form.fields[field].widget
widget_name = widget.__class__.__name__
if widget.is_hidden or \
widget_name in ('CheckboxInput', 'SelectMultiple'):
result.append('<th class="sorttable_nosort">')
else:
result.append('<th>')
if widget_name in ('CheckboxInput',):
result.append(form[field].as_widget())
result.append(form[field].label_tag())
elif form.fields[field].label is not None and not widget.is_hidden:
result.append(unicode(form.fields[field].label))
result.append('</th>\n')
result.append('</tr>\n')
def add_footer(result, fields, form):
result.append('<tr>\n')
for field in fields:
field_obj = form.fields[field]
result.append('<td>')
if field_obj.label is not None and not field_obj.widget.is_hidden:
result.append(unicode(field_obj.label))
result.append('</td>\n')
result.append('</tr>\n')
def add_errors(result, fields, form):
# If the form has errors, then we'll add a table row with the
# errors.
if len(form.errors) > 0:
result.append('<tr>\n')
for field in fields:
result.append('<td>')
result.append(form.errors.get(field, ErrorList()).as_ul())
result.append('</td>\n')
result.append('</tr>\n')
def add_widgets(result, fields, form, link):
result.append('<tr class="item">\n')
for i, field in enumerate(fields):
result.append('<td class="%s">' % field)
# Include a hidden element containing the form's id to the
# first column.
if i == 0:
result.append(form['id'].as_hidden())
# `link` indicates whether we put the first field as a link or as
# widget
if field == linkfield and linkfield in form.initial and link:
if callable(link):
result.append(link(form.instance))
result.append(form[field].as_hidden())
else:
result.append(form[field].as_widget())
result.append('</td>\n')
result.append('</tr>\n')
result = []
try:
first_form = formset.forms[0]
# Get the fields of the form, but filter our the 'id' field,
# since we don't want to print a table column for it.
fields = [field for field in first_form.fields if field != 'id']
result.append('<thead>\n')
add_header(result, fields, first_form)
result.append('</thead>\n')
result.append('<tfoot>\n')
add_footer(result, fields, first_form)
result.append('</tfoot>\n')
result.append('<tbody>\n')
# Do not display the delete checkbox for the 'add a new entry' form.
if formset.extra_forms:
formset.forms[-1].fields['DELETE'].widget = forms.HiddenInput()
for form in formset.forms:
add_errors(result, fields, form)
add_widgets(result, fields, form, link)
result.append('</tbody>\n')
except IndexError:
result.append('<tr>\n')
result.append('<td>\n')
result.append(_('No files in this project.'))
result.append('</td>\n')
result.append('</tr>\n')
return u''.join(result)
def process_modelformset(request, model_class, queryset, **kwargs):
"""With the Django model class `model_class` and the given `queryset`,
construct a formset and process its submission.
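A hypothetical call (the model name is illustrative)::

    formset, error_msg, objects = process_modelformset(
        request, Language, queryset=None, extra=1)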
"""
# Create a formset class for the model `model_class` (i.e. it will contain
# forms whose contents are based on the fields of `model_class`);
# parameters for the construction of the forms used in the formset should
# be in kwargs.
formset_class = modelformset_factory(model_class, **kwargs)
if queryset is None:
queryset = model_class.objects.all()
# If the request is a POST, we want to possibly update our data
if request.method == 'POST' and request.POST:
# Create a formset from all the 'model_class' instances whose values
# will be updated using the contents of request.POST
objects = paginate(request, queryset)
formset = formset_class(request.POST, queryset=objects.object_list)
# Validate all the forms in the formset
if formset.is_valid():
# If all is well, Django can save all our data for us
formset.save()
else:
# Otherwise, complain to the user that something went wrong
return formset, _("There are errors in the form. Please review "
"the problems below."), objects
# Hack to force reevaluation of same query
queryset = queryset.filter()
objects = paginate(request, queryset)
return formset_class(queryset=objects.object_list), None, objects
def edit(request, template, model_class, ctx=None,
link=None, linkfield='code', queryset=None, **kwargs):
formset, msg, objects = process_modelformset(request, model_class,
queryset=queryset, **kwargs)
if ctx is None:
ctx = {}
ctx.update({
'formset_text': mark_safe(form_set_as_table(formset, link, linkfield)),
'formset': formset,
'objects': objects,
'error_msg': msg,
'can_add': kwargs.get('extra', 1) != 0,
})
return render(request, template, ctx)
| gpl-3.0 |
emiprotechnologies/connector-magento | __unported__/magentoerpconnect_export_partner/connector.py | 32 | 1031 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.connector.connector import install_in_connector
install_in_connector()
| agpl-3.0 |
liyitest/rr | openstack_dashboard/usage/views.py | 19 | 3332 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.usage import base
class UsageView(tables.DataTableView):
usage_class = None
show_terminated = True
csv_template_name = None
page_title = _("Overview")
def __init__(self, *args, **kwargs):
super(UsageView, self).__init__(*args, **kwargs)
if not issubclass(self.usage_class, base.BaseUsage):
raise AttributeError("You must specify a usage_class attribute "
"which is a subclass of BaseUsage.")
def get_template_names(self):
if self.request.GET.get('format', 'html') == 'csv':
return (self.csv_template_name or
".".join((self.template_name.rsplit('.', 1)[0], 'csv')))
return self.template_name
def get_content_type(self):
if self.request.GET.get('format', 'html') == 'csv':
return "text/csv"
return "text/html"
def get_data(self):
try:
project_id = self.kwargs.get('project_id',
self.request.user.tenant_id)
self.usage = self.usage_class(self.request, project_id)
self.usage.summarize(*self.usage.get_date_range())
self.usage.get_limits()
self.kwargs['usage'] = self.usage
return self.usage.usage_list
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve usage information.'))
return []
def get_context_data(self, **kwargs):
context = super(UsageView, self).get_context_data(**kwargs)
context['table'].kwargs['usage'] = self.usage
context['form'] = self.usage.form
context['usage'] = self.usage
try:
context['simple_tenant_usage_enabled'] = \
api.nova.extension_supported('SimpleTenantUsage', self.request)
except Exception:
context['simple_tenant_usage_enabled'] = True
return context
def render_to_response(self, context, **response_kwargs):
if self.request.GET.get('format', 'html') == 'csv':
render_class = self.csv_response_class
response_kwargs.setdefault("filename", "usage.csv")
else:
render_class = self.response_class
context = self.render_context_with_title(context)
resp = render_class(request=self.request,
template=self.get_template_names(),
context=context,
content_type=self.get_content_type(),
**response_kwargs)
return resp
| apache-2.0 |
NStep/android_kernel_motorola_msm8226_prev | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
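# Worked example (illustrative): nsecs(1, 250000000) == 1250000000, which
# nsecs_str() renders as "    1.250000000".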
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2  # a smoothed, recency-biased value rather than a true running mean
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
mattclark/osf.io | osf/models/analytics.py | 1 | 8085 | import logging
from dateutil import parser
from django.db import models, transaction
from django.db.models import Sum
from django.db.models.expressions import RawSQL
from django.utils import timezone
from framework.sessions import session
from osf.models.base import BaseModel, Guid
from osf.models.files import BaseFileNode
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
logger = logging.getLogger(__name__)
class UserActivityCounter(BaseModel):
primary_identifier_name = '_id'
_id = models.CharField(max_length=5, null=False, blank=False, db_index=True,
unique=True) # 5 in prod
action = DateTimeAwareJSONField(default=dict)
date = DateTimeAwareJSONField(default=dict)
total = models.PositiveIntegerField(default=0)
@classmethod
def get_total_activity_count(cls, user_id):
try:
return cls.objects.get(_id=user_id).total
except cls.DoesNotExist:
return 0
@classmethod
def increment(cls, user_id, action, date_string):
date = parser.parse(date_string).strftime('%Y/%m/%d')
with transaction.atomic():
# select_for_update locks the row but only inside a transaction
uac, created = cls.objects.select_for_update().get_or_create(_id=user_id)
if uac.total > 0:
uac.total += 1
else:
uac.total = 1
if action in uac.action:
uac.action[action]['total'] += 1
if date in uac.action[action]['date']:
uac.action[action]['date'][date] += 1
else:
uac.action[action]['date'][date] = 1
else:
uac.action[action] = dict(total=1, date={date: 1})
if date in uac.date:
uac.date[date]['total'] += 1
else:
uac.date[date] = dict(total=1)
uac.save()
return True
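# Illustrative call (identifiers are hypothetical):
#   UserActivityCounter.increment('ab3cd', 'some_action', '2019-07-04T12:00:00Z')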
class PageCounter(BaseModel):
primary_identifier_name = '_id'
_id = models.CharField(max_length=300, null=False, blank=False, db_index=True,
unique=True) # 272 in prod
date = DateTimeAwareJSONField(default=dict)
total = models.PositiveIntegerField(default=0)
unique = models.PositiveIntegerField(default=0)
action = models.CharField(max_length=128, null=True, blank=True)
resource = models.ForeignKey(Guid, related_name='pagecounters', null=True, blank=True)
file = models.ForeignKey('osf.BaseFileNode', null=True, blank=True, related_name='pagecounters')
version = models.IntegerField(null=True, blank=True)
DOWNLOAD_ALL_VERSIONS_ID_PATTERN = r'^download:[^:]*:{1}[^:]*$'
@classmethod
def get_all_downloads_on_date(cls, date):
"""
Queries the total number of downloads on a date
:param datetime date: the day to query; it is formatted into the page counter key form 'yyyy/mm/dd'
:return: the summed download total for that day, or None if no counters match
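Example (illustrative; assumes the datetime module is imported)::

    PageCounter.get_all_downloads_on_date(datetime.date(2020, 1, 15))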
"""
formatted_date = date.strftime('%Y/%m/%d')
# Get all PageCounters with data for the date made for all versions downloads,
# regex insures one colon so all versions are queried.
page_counters = cls.objects.filter(date__has_key=formatted_date, _id__regex=cls.DOWNLOAD_ALL_VERSIONS_ID_PATTERN)
# Get the total download numbers from the nested dict on the PageCounter by annotating it as daily_total then
# aggregating the sum.
daily_total = page_counters.annotate(daily_total=RawSQL("((date->%s->>'total')::int)", (formatted_date,))).aggregate(sum=Sum('daily_total'))['sum']
return daily_total
@staticmethod
def clean_page(page):
return page.replace(
'.', '_'
).replace(
'$', '_'
)
@staticmethod
def deconstruct_id(page):
"""
Backwards-compatible helper: unpacks a legacy _id so writes can target both
the packed _id field and the separate action, resource, file, and version fields.
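Reading the code, the packed id has the form 'action:node_guid:file_id'
with an optional ':version' suffix; e.g. a hypothetical
'download:abc12:5ca9f1b2c3d4:3' yields action 'download' and version '3'.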
"""
split = page.split(':')
action = split[0]
resource = Guid.load(split[1])
file = BaseFileNode.load(split[2])
if len(split) == 3:
version = None
else:
version = split[3]
return resource, file, action, version
@classmethod
def update_counter(cls, page, node_info):
cleaned_page = cls.clean_page(page)
date = timezone.now()
date_string = date.strftime('%Y/%m/%d')
visited_by_date = session.data.get('visited_by_date', {'date': date_string, 'pages': []})
with transaction.atomic():
# Temporary backwards compat - when creating new PageCounters, temporarily keep writing to _id field.
# After we're sure this is stable, we can stop writing to the _id field, and query on
# resource/file/action/version
resource, file, action, version = cls.deconstruct_id(cleaned_page)
model_instance, created = PageCounter.objects.select_for_update().get_or_create(_id=cleaned_page)
model_instance.resource = resource
model_instance.file = file
model_instance.action = action
model_instance.version = version
# if they visited something today
if date_string == visited_by_date['date']:
# if they haven't visited this page today
if cleaned_page not in visited_by_date['pages']:
# if the model_instance has today in it
if date_string in model_instance.date.keys():
# increment the number of unique visitors for today
model_instance.date[date_string]['unique'] += 1
else:
# set the number of unique visitors for today to 1
model_instance.date[date_string] = dict(unique=1)
# if they haven't visited something today
else:
# set their visited by date to blank
visited_by_date['date'] = date_string
visited_by_date['pages'] = []
# if the model_instance has today in it
if date_string in model_instance.date.keys():
# increment the number of unique visitors for today
model_instance.date[date_string]['unique'] += 1
else:
# set the number of unique visitors to 1
model_instance.date[date_string] = dict(unique=1)
# update their sessions
visited_by_date['pages'].append(cleaned_page)
session.data['visited_by_date'] = visited_by_date
if date_string in model_instance.date.keys():
if 'total' not in model_instance.date[date_string].keys():
model_instance.date[date_string].update(total=0)
model_instance.date[date_string]['total'] += 1
else:
model_instance.date[date_string] = dict(total=1)
# if a download counter is being updated, only perform the update
# if the user who is downloading isn't a contributor to the project
page_type = cleaned_page.split(':')[0]
if page_type in ('download', 'view') and node_info:
if node_info['contributors'].filter(guids___id__isnull=False, guids___id=session.data.get('auth_user_id')).exists():
model_instance.save()
return
visited = session.data.get('visited', [])
if page not in visited:
model_instance.unique += 1
visited.append(page)
session.data['visited'] = visited
session.save()
model_instance.total += 1
model_instance.save()
@classmethod
def get_basic_counters(cls, page):
try:
counter = cls.objects.get(_id=cls.clean_page(page))
return (counter.unique, counter.total)
except cls.DoesNotExist:
return (None, None)
| apache-2.0 |
rajrohith/blobstore | azure/storage/blob/appendblobservice.py | 1 | 27806 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from .._error import (
_validate_not_none,
_validate_type_bytes,
_validate_encryption_unsupported,
_ERROR_VALUE_NEGATIVE,
)
from .._common_conversion import (
_to_str,
_int_to_str,
_datetime_to_utc_string,
_get_content_md5,
)
from .._serialization import (
_get_data_bytes_only,
_add_metadata_headers,
)
from .._http import HTTPRequest
from ._upload_chunking import (
_AppendBlobChunkUploader,
_upload_blob_chunks,
)
from .models import (
_BlobTypes,
ResourceProperties
)
from .._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
)
from ._serialization import (
_get_path,
)
from ._deserialization import (
_parse_append_block,
_parse_base_properties,
)
from .baseblobservice import BaseBlobService
from os import path
import sys
if sys.version_info >= (3,):
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
class AppendBlobService(BaseBlobService):
'''
An append blob is comprised of blocks and is optimized for append operations.
When you modify an append blob, blocks are added to the end of the blob only,
via the append_block operation. Updating or deleting of existing blocks is not
supported. Unlike a block blob, an append blob does not expose its block IDs.
Each block in an append blob can be a different size, up to a maximum of 4 MB,
and an append blob can include up to 50,000 blocks. The maximum size of an
append blob is therefore slightly more than 195 GB (4 MB X 50,000 blocks).
:ivar int MAX_BLOCK_SIZE:
The size of the blocks put by append_blob_from_* methods. Smaller blocks
may be put if there is less data provided. The maximum block size the service
supports is 4MB.
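A minimal usage sketch (account credentials and names are hypothetical)::

    service = AppendBlobService(account_name='myaccount', account_key='...')
    service.create_container('logs')
    service.create_blob('logs', 'app.log')
    service.append_block('logs', 'app.log', b'one log line\n')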
'''
MAX_BLOCK_SIZE = 4 * 1024 * 1024
def __init__(self, account_name=None, account_key=None, sas_token=None,
is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
custom_domain=None, request_session=None, connection_string=None, socket_timeout=None):
'''
:param str account_name:
The storage account name. This is used to authenticate requests
signed with an account key and to construct the storage endpoint. It
is required unless a connection string is given, or if a custom
domain is used with anonymous authentication.
:param str account_key:
The storage account key. This is used for shared key authentication.
If neither account key or sas token is specified, anonymous access
will be used.
:param str sas_token:
A shared access signature token to use to authenticate requests
instead of the account key. If account key and sas token are both
specified, account key will be used to sign. If neither are
specified, anonymous access will be used.
:param bool is_emulated:
Whether to use the emulator. Defaults to False. If specified, will
override all other parameters besides connection string and request
session.
:param str protocol:
The protocol to use for requests. Defaults to https.
:param str endpoint_suffix:
The host base component of the url, minus the account name. Defaults
to Azure (core.windows.net). Override this to use the China cloud
(core.chinacloudapi.cn).
:param str custom_domain:
The custom domain to use. This can be set in the Azure Portal. For
example, 'www.mydomain.com'.
:param requests.Session request_session:
The session object to use for http requests.
:param str connection_string:
If specified, this will override all other parameters besides
request session. See
http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
for the connection string format.
:param int socket_timeout:
If specified, this will override the default socket timeout. The timeout specified is in seconds.
See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
'''
self.blob_type = _BlobTypes.AppendBlob
super(AppendBlobService, self).__init__(
account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
custom_domain, request_session, connection_string, socket_timeout)
def create_blob(self, container_name, blob_name, content_settings=None,
metadata=None, lease_id=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Creates a blob or overwrites an existing blob. Use if_none_match='*' to
prevent overwriting an existing blob.
See create_blob_from_* for high level
functions that handle the creation and upload of large blobs with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set blob properties.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: a dict mapping str to str
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to
perform the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {'timeout': _int_to_str(timeout)}
request.headers = {
'x-ms-blob-type': _to_str(self.blob_type),
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
_add_metadata_headers(metadata, request)
if content_settings is not None:
request.headers.update(content_settings._to_headers())
return self._perform_request(request, _parse_base_properties)
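    # A minimal usage sketch (account, container, and blob names are hypothetical),
    # assuming a configured AppendBlobService instance; passing if_none_match='*'
    # makes creation fail instead of overwriting an existing blob:
    #
    #   service = AppendBlobService(account_name='myaccount', account_key='mykey')
    #   service.create_blob('mycontainer', 'log.txt', if_none_match='*')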
def append_block(self, container_name, blob_name, block,
validate_content=False, maxsize_condition=None,
appendpos_condition=None,
lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None):
'''
Commits a new block of data to the end of an existing append blob.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param bytes block:
Content of the block in bytes.
:param bool validate_content:
If true, calculates an MD5 hash of the block content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
            bitflips on the wire if using http instead of https, as https (the default)
will already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param int appendpos_condition:
Optional conditional header, used only for the Append Block operation.
A number indicating the byte offset to compare. Append Block will
succeed only if the append position is equal to this number. If it
is not, the request will fail with the
AppendPositionConditionNotMet error
(HTTP status code 412 - Precondition Failed).
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return:
ETag, last modified, append offset, and committed block count
properties for the updated Append Blob
:rtype: :class:`~azure.storage.blob.models.AppendBlockProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('block', block)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'appendblock',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-blob-condition-maxsize': _to_str(maxsize_condition),
'x-ms-blob-condition-appendpos': _to_str(appendpos_condition),
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
request.body = _get_data_bytes_only('block', block)
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
return self._perform_request(request, _parse_append_block)
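    # Sketch of a guarded append (names hypothetical): appendpos_condition pins the
    # write to an expected byte offset and maxsize_condition caps the total blob
    # size; violating either condition fails with HTTP 412:
    #
    #   props = service.append_block('mycontainer', 'log.txt', b'entry\n',
    #                                appendpos_condition=0,
    #                                maxsize_condition=1024 * 1024)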
#----Convenience APIs----------------------------------------------
def append_blob_from_path(
self, container_name, blob_name, file_path, validate_content=False,
maxsize_condition=None, progress_callback=None, lease_id=None, timeout=None):
'''
Appends to the content of an existing blob from a file path, with automatic
chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param str file_path:
Path of the file to upload as the blob content.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
            the wire if using http instead of https, as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: callback function in format of func(current, total)
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('file_path', file_path)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
count = path.getsize(file_path)
with open(file_path, 'rb') as stream:
return self.append_blob_from_stream(
container_name,
blob_name,
stream,
count=count,
validate_content=validate_content,
maxsize_condition=maxsize_condition,
progress_callback=progress_callback,
lease_id=lease_id,
timeout=timeout)
def append_blob_from_bytes(
self, container_name, blob_name, blob, index=0, count=None,
validate_content=False, maxsize_condition=None, progress_callback=None,
lease_id=None, timeout=None):
'''
Appends to the content of an existing blob from an array of bytes, with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param bytes blob:
Content of blob as an array of bytes.
:param int index:
Start index in the array of bytes.
:param int count:
Number of bytes to upload. Set to None or negative value to upload
all bytes starting from index.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
            the wire if using http instead of https, as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: callback function in format of func(current, total)
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('blob', blob)
_validate_not_none('index', index)
_validate_type_bytes('blob', blob)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
if index < 0:
raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
if count is None or count < 0:
count = len(blob) - index
stream = BytesIO(blob)
stream.seek(index)
return self.append_blob_from_stream(
container_name,
blob_name,
stream,
count=count,
validate_content=validate_content,
maxsize_condition=maxsize_condition,
lease_id=lease_id,
progress_callback=progress_callback,
timeout=timeout)
def append_blob_from_text(
self, container_name, blob_name, text, encoding='utf-8',
validate_content=False, maxsize_condition=None, progress_callback=None,
lease_id=None, timeout=None):
'''
Appends to the content of an existing blob from str/unicode, with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param str text:
Text to upload to the blob.
:param str encoding:
Python encoding to use to convert the text to bytes.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
            the wire if using http instead of https, as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: callback function in format of func(current, total)
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('text', text)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
if not isinstance(text, bytes):
_validate_not_none('encoding', encoding)
text = text.encode(encoding)
return self.append_blob_from_bytes(
container_name,
blob_name,
text,
index=0,
count=len(text),
validate_content=validate_content,
maxsize_condition=maxsize_condition,
lease_id=lease_id,
progress_callback=progress_callback,
timeout=timeout)
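    # Sketch (names hypothetical): the text is encoded with the given codec and
    # delegated to append_blob_from_bytes, so these two calls are equivalent:
    #
    #   service.append_blob_from_text('mycontainer', 'log.txt', u'entry\n')
    #   service.append_blob_from_bytes('mycontainer', 'log.txt', u'entry\n'.encode('utf-8'))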
def append_blob_from_stream(
self, container_name, blob_name, stream, count=None,
validate_content=False, maxsize_condition=None, progress_callback=None,
lease_id=None, timeout=None):
'''
Appends to the content of an existing blob from a file/stream, with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param io.IOBase stream:
Opened stream to upload as the blob content.
:param int count:
Number of bytes to read from the stream. This is optional, but
should be supplied for optimal performance.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
            the wire if using http instead of https, as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
            Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: callback function in format of func(current, total)
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('stream', stream)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
        # _upload_blob_chunks returns the block ids for block blobs, so resource_properties
        # is passed as a parameter to get the last_modified and etag for page and append blobs.
        # This info is not needed for block blobs since _put_block_list, which is called
        # afterwards, already returns it.
resource_properties = ResourceProperties()
_upload_blob_chunks(
blob_service=self,
container_name=container_name,
blob_name=blob_name,
blob_size=count,
block_size=self.MAX_BLOCK_SIZE,
stream=stream,
max_connections=1, # upload not easily parallelizable
progress_callback=progress_callback,
validate_content=validate_content,
lease_id=lease_id,
uploader_class=_AppendBlobChunkUploader,
maxsize_condition=maxsize_condition,
timeout=timeout,
resource_properties=resource_properties
)
return resource_properties | apache-2.0 |
SaschaMester/delicium | build/android/pylib/gtest/gtest_test_instance_test.py | 74 | 2367 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from pylib.gtest import gtest_test_instance
class GtestTestInstanceTests(unittest.TestCase):
def testParseGTestListTests_simple(self):
raw_output = [
'TestCaseOne.',
' testOne',
' testTwo',
'TestCaseTwo.',
' testThree',
' testFour',
]
actual = gtest_test_instance.ParseGTestListTests(raw_output)
expected = [
'TestCaseOne.testOne',
'TestCaseOne.testTwo',
'TestCaseTwo.testThree',
'TestCaseTwo.testFour',
]
self.assertEqual(expected, actual)
def testParseGTestListTests_typeParameterized_old(self):
raw_output = [
'TPTestCase/WithTypeParam/0.',
' testOne',
' testTwo',
]
actual = gtest_test_instance.ParseGTestListTests(raw_output)
expected = [
'TPTestCase/WithTypeParam/0.testOne',
'TPTestCase/WithTypeParam/0.testTwo',
]
self.assertEqual(expected, actual)
def testParseGTestListTests_typeParameterized_new(self):
raw_output = [
'TPTestCase/WithTypeParam/0. # TypeParam = TypeParam0',
' testOne',
' testTwo',
]
actual = gtest_test_instance.ParseGTestListTests(raw_output)
expected = [
'TPTestCase/WithTypeParam/0.testOne',
'TPTestCase/WithTypeParam/0.testTwo',
]
self.assertEqual(expected, actual)
def testParseGTestListTests_valueParameterized_old(self):
raw_output = [
'VPTestCase.',
' testWithValueParam/0',
' testWithValueParam/1',
]
actual = gtest_test_instance.ParseGTestListTests(raw_output)
expected = [
'VPTestCase.testWithValueParam/0',
'VPTestCase.testWithValueParam/1',
]
self.assertEqual(expected, actual)
def testParseGTestListTests_valueParameterized_new(self):
raw_output = [
'VPTestCase.',
' testWithValueParam/0 # GetParam() = 0',
' testWithValueParam/1 # GetParam() = 1',
]
actual = gtest_test_instance.ParseGTestListTests(raw_output)
expected = [
'VPTestCase.testWithValueParam/0',
'VPTestCase.testWithValueParam/1',
]
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-3-clause |
zhaowenxiang/chisch | chisch/local_settings.py | 1 | 2131 | # -*- coding: utf-8 -*-
"""
Django local_settings for chisch project.
import by 'setting.py.
"""
# Open malicious authentication
OPEN_MRP = False
OPEN_SSM = False
# The HTTP header name used for the access token
ACCESS_TOKEN_NAME = 'Access-Token'
# Universal verification code
UNIVERSAL_VERIFY_CODE = "888888"
ALIYUN_OSS = {
'BUCKET_NAME': 'chisch',
'ACCESS_KEY_ID': 'LTAIFSaBApB2TuC4',
'ACCESS_KEY_SECRET': '0qMu5s3yHEBrxb2klSyZKnHmOPb0HZ',
'ENDPOINT': 'https://oss-cn-shenzhen.aliyuncs.com/',
'ROLE_ARN': 'acs:ram::1709927201743129:role/aliyunosstokengeneratorrole',
'TokenExpireTime': 60 * 60,
    'VIDEO_SIGN_URL_EXPIRES': 60 * 60,  # signed URL for private videos is valid for 60 minutes
'REGION': 'cn-shenzhen',
'ROLE_SESSION_NAME': 'chisch',
'BOUNDARY': '-'*10 + 'chisch' + '-'*10,
'OBJECT_KEYS_SUB_ELEMENTS': {
'upload_user_avatar': {
'path': 'image/user/avatar/',
'only_one': True,
},
'create_curriculum': {
'path': 'image/curriculum/cover/',
'only_one': True,
},
'upload_video': {
'path': 'video/',
'file_suffix': '.mp4',
'only_one': True,
},
}
}
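# A minimal sketch (assumes the oss2 SDK is available) of how the settings above
# could be consumed to obtain a bucket handle:
#
#   import oss2
#   auth = oss2.Auth(ALIYUN_OSS['ACCESS_KEY_ID'], ALIYUN_OSS['ACCESS_KEY_SECRET'])
#   bucket = oss2.Bucket(auth, ALIYUN_OSS['ENDPOINT'], ALIYUN_OSS['BUCKET_NAME'])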
# SMALL_OBJECT_UPPER_LIMIT = 1024 * 1024 * 10  # a small object is defined as no larger than 10 MB
OBJECT_LOCAL_TRANSFER_DIR = '/tmp/chisch/transfer/'  # local staging directory
OBJECT_PREFERRED_SIZE = 1024 * 1024 * 2  # chunk size, 2 MB
IMAGE_TYPE = '.png'
VIDEO_TYPE = '.mp4'
MNS = {
'ACCESS_KEY_ID': 'LTAIFSaBApB2TuC4',
'ACCESS_KEY_SECRET': '0qMu5s3yHEBrxb2klSyZKnHmOPb0HZ',
'ENDPOINT': 'https://1709927201743129.mns.cn-hangzhou.aliyuncs.com/',
'SIGN_NAME': '千寻教育',
'TEMPLATE_CODE': 'SMS_62440527',
'TOPIC': 'sms.topic-cn-hangzhou',
'QUEUE_NAME': 'verify-code-queue',
}
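# A sketch (assumes the aliyun MNS Python SDK) of reading the verify-code queue
# configured above:
#
#   from mns.account import Account
#   account = Account(MNS['ENDPOINT'], MNS['ACCESS_KEY_ID'], MNS['ACCESS_KEY_SECRET'])
#   queue = account.get_queue(MNS['QUEUE_NAME'])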
VOD = {
'USER_UNIQUE': 'tkHh5O7431CgWayx',
'SECRET_KEY': 'f8dc95d9b6d5cdfeb204a8b10ab2af1a',
'FORMAT': 'json',
'VERSION': '2017-03-21',
'ACCESS_KEY_ID': 'LTAIFSaBApB2TuC4',
'SIGNATUREMETHOD': 'HMAC-SHA1',
'SIGNATUREVERSION': '1.0',
'SERVER_URL': 'https://vod.cn-shanghai.aliyuncs.com'
}
| mit |
alivecor/tensorflow | tensorflow/contrib/framework/python/framework/__init__.py | 43 | 1458 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing TensorFlow ops whose API may change in the future."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.framework.python.framework.checkpoint_utils import *
from tensorflow.contrib.framework.python.framework.experimental import experimental
from tensorflow.contrib.framework.python.framework.tensor_util import *
# pylint: enable=wildcard-import
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_arg_values
from tensorflow.python.util.deprecation import deprecated_args
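# Usage sketch for the deprecation helpers re-exported above (the date and
# message are illustrative):
#
#   @deprecated('2017-06-30', 'Use new_fn instead.')
#   def old_fn(x):
#       return x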
| apache-2.0 |
keelhaule/alfanous | conf.py | 5 | 8206 | # -*- coding: utf-8 -*-
#
# Alfanous Quranic Search Engine documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 8 07:42:48 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Alfanous Quranic Search Engine'
copyright = u'2014, Assem Chelli'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7.00'
# The full version, including alpha/beta/rc tags.
release = '0.7.00'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AlfanousQuranicSearchEnginedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'AlfanousQuranicSearchEngine.tex', u'Alfanous Quranic Search Engine Documentation',
u'Assem Chelli', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'alfanousquranicsearchengine', u'Alfanous Quranic Search Engine Documentation',
[u'Assem Chelli'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'AlfanousQuranicSearchEngine', u'Alfanous Quranic Search Engine Documentation',
u'Assem Chelli', 'AlfanousQuranicSearchEngine', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| agpl-3.0 |
harmodio/poesiapueril | poesiapuerilbot.py | 1 | 3499 | #/usr/bin/env python3
# -*- coding: utf8 -*-
##
## Copyright (c) 2017 Jorge Harmodio
## poesiapuerilbot is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## poesiapuerilbot is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Unoporuno. If not, see <http://www.gnu.org/licenses/>.
import tweepy
import time
import urllib3
#from time import sleep
import logging
#Create variables for each key, secret, token
consumer_key = 'PzwW4qupC1qy3562g2wtCfMV1'
consumer_secret = 'wCivK4jYECDUbJCQBn9294EtGsYAgk2Z5V8wyLFM31tNjqNypW'
access_token = '869611298547421184-PfE7owxRC1RLC7bgzp14jdjvCcyjpyV'
access_token_secret = 'aU4r2FCuZtU5d6tR1LbfAbXb0r7MlcejbEso7OlSax7jI'
#Write a tweet to push to our @poesiapueril account
#tweet = 'hola\nsoy\nun\nbot\nde\n#poesíapueril'
#api.update_status(status=tweet)
logging.basicConfig(level=logging.INFO)
logging.info("Starting execution of poesiapuerilbot on " + time.asctime())
#todo: repeated_tweet check must be done with a database
repeated_tweets = []
while True:
#Setup OAuth and integrate with API
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
tweets = []
try:
for tweet in tweepy.Cursor(api.search, q='#poesíapueril').items():
tweets.append(tweet)
for tweet in tweepy.Cursor(api.search, q='"poesía pueril"').items():
tweets.append(tweet)
logging.info("Trying to retweet a list of " + str(len(tweets)) + " tweets")
for tweet in tweets:
try:
#Add \n escape character to print() to organize tweets
                logging.info('\nTweet by @' + tweet.user.screen_name)
#Retweets tweets as they are found if different from @poesiapueril
if tweet.user.screen_name=="poesiapueril":
logging.info('My tweet: excluded')
elif tweet in repeated_tweets:
logging.info('I already tweeted that one')
else:
tweet.retweet()
                    logging.info('Retweeted the tweet: ready to sleep')
time.sleep(1800)
except tweepy.TweepError as e:
logging.error(str(e.reason))
if e.args[0][0]['code']==185:
logging.info('Sleeping over a 185 error: User is over daily status limit')
#Catching 'User is over daily status limit' error: we will wait
time.sleep(1800)
elif e.args[0][0]['code']==327:
repeated_tweets.append(tweet)
logging.error('Repeated tweet')
#end of the for: sleep 30min
time.sleep(1800)
except urllib3.exceptions.ProtocolError as err:
logging.error(str(err))
except ConnectionResetError as err:
logging.error(str(err))
except OSError as err:
logging.error(str(err))
| gpl-3.0 |
stephane-martin/salt-debian-packaging | salt-2016.3.2/tests/unit/beacons/adb_beacon_test.py | 2 | 12498 | # coding: utf-8
# Python libs
from __future__ import absolute_import
# Salt libs
from salt.beacons import adb
# Salt testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch, Mock
# Globals
adb.__salt__ = {}
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ADBBeaconTestCase(TestCase):
'''
Test case for salt.beacons.adb
'''
def setUp(self):
adb.last_state = {}
adb.last_state_extra = {'no_devices': False}
def test_no_adb_command(self):
with patch('salt.utils.which') as mock:
mock.return_value = None
ret = adb.__virtual__()
mock.assert_called_once_with('adb')
self.assertFalse(ret)
def test_with_adb_command(self):
with patch('salt.utils.which') as mock:
mock.return_value = '/usr/bin/adb'
ret = adb.__virtual__()
mock.assert_called_once_with('adb')
self.assertEqual(ret, 'adb')
def test_non_dict_config(self):
config = []
log_mock = Mock()
adb.log = log_mock
ret = adb.beacon(config)
self.assertEqual(ret, [])
log_mock.info.assert_called_once_with('Configuration for adb beacon must be a dict.')
def test_empty_config(self):
config = {}
log_mock = Mock()
adb.log = log_mock
ret = adb.beacon(config)
self.assertEqual(ret, [])
log_mock.info.assert_called_once_with('Configuration for adb beacon must include a states array.')
def test_invalid_states(self):
config = {'states': ['Random', 'Failings']}
log_mock = Mock()
adb.log = log_mock
ret = adb.beacon(config)
self.assertEqual(ret, [])
log_mock.info.assert_called_once_with('Need a one of the following adb states:'
' offline, bootloader, device, host, recovery, '
'no permissions, sideload, unauthorized, unknown, missing')
def test_device_state(self):
config = {'states': ['device']}
mock = Mock(return_value='List of devices attached\nHTC\tdevice',)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
def test_device_state_change(self):
config = {'states': ['offline']}
out = [
'List of devices attached\nHTC\tdevice',
'List of devices attached\nHTC\toffline'
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [])
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'offline', 'tag': 'offline'}])
def test_multiple_devices(self):
config = {'states': ['offline', 'device']}
out = [
'List of devices attached\nHTC\tdevice',
'List of devices attached\nHTC\toffline\nNexus\tdevice'
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
ret = adb.beacon(config)
self.assertEqual(ret, [
{'device': 'HTC', 'state': 'offline', 'tag': 'offline'},
{'device': 'Nexus', 'state': 'device', 'tag': 'device'}
])
def test_no_devices_with_different_states(self):
config = {'states': ['offline'], 'no_devices_event': True}
mock = Mock(return_value='List of devices attached\nHTC\tdevice')
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [])
def test_no_devices_no_repeat(self):
config = {'states': ['offline', 'device'], 'no_devices_event': True}
out = [
'List of devices attached',
'List of devices attached'
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'tag': 'no_devices'}])
ret = adb.beacon(config)
self.assertEqual(ret, [])
def test_device_missing(self):
config = {'states': ['device', 'missing']}
out = [
'List of devices attached\nHTC\tdevice',
'List of devices attached',
'List of devices attached\nHTC\tdevice',
'List of devices attached\nHTC\tdevice'
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'missing', 'tag': 'missing'}])
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
ret = adb.beacon(config)
self.assertEqual(ret, [])
def test_with_startup(self):
config = {'states': ['device']}
mock = Mock(return_value='* daemon started successfully *\nList of devices attached\nHTC\tdevice',)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
def test_with_user(self):
config = {'states': ['device'], 'user': 'fred'}
mock = Mock(return_value='* daemon started successfully *\nList of devices attached\nHTC\tdevice',)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
mock.assert_called_once_with('adb devices', runas='fred')
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
def test_device_low_battery(self):
config = {'states': ['device'], 'battery_low': 30}
out = [
'List of devices attached\nHTC\tdevice',
'25',
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
def test_device_no_repeat(self):
config = {'states': ['device'], 'battery_low': 30}
out = [
'List of devices attached\nHTC\tdevice',
'25',
'List of devices attached\nHTC\tdevice',
'25'
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
ret = adb.beacon(config)
self.assertEqual(ret, [])
def test_device_no_repeat_capacity_increase(self):
config = {'states': ['device'], 'battery_low': 75}
out = [
'List of devices attached\nHTC\tdevice',
'25',
'List of devices attached\nHTC\tdevice',
'30'
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
ret = adb.beacon(config)
self.assertEqual(ret, [])
def test_device_no_repeat_with_not_found_state(self):
config = {'states': ['offline'], 'battery_low': 30}
out = [
'List of devices attached\nHTC\tdevice',
'25',
'List of devices attached\nHTC\tdevice',
'25'
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
ret = adb.beacon(config)
self.assertEqual(ret, [])
def test_device_battery_charged(self):
config = {'states': ['device'], 'battery_low': 30}
out = [
'List of devices attached\nHTC\tdevice',
'100',
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
def test_device_low_battery_equal(self):
config = {'states': ['device'], 'battery_low': 25}
out = [
'List of devices attached\nHTC\tdevice',
'25',
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
def test_device_battery_not_found(self):
config = {'states': ['device'], 'battery_low': 25}
out = [
'List of devices attached\nHTC\tdevice',
'/system/bin/sh: cat: /sys/class/power_supply/*/capacity: No such file or directory',
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
def test_device_repeat_multi(self):
config = {'states': ['offline'], 'battery_low': 35}
out = [
'List of devices attached\nHTC\tdevice',
'25',
'List of devices attached\nHTC\tdevice',
'40',
'List of devices attached\nHTC\tdevice',
'25',
'List of devices attached\nHTC\tdevice',
'80'
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
ret = adb.beacon(config)
self.assertEqual(ret, [])
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
ret = adb.beacon(config)
self.assertEqual(ret, [])
def test_weird_batteries(self):
config = {'states': ['device'], 'battery_low': 25}
out = [
'List of devices attached\nHTC\tdevice',
'-9000',
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
def test_multiple_batteries(self):
config = {'states': ['device'], 'battery_low': 30}
out = [
'List of devices attached\nHTC\tdevice',
'25\n40',
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
def test_multiple_low_batteries(self):
config = {'states': ['device'], 'battery_low': 30}
out = [
'List of devices attached\nHTC\tdevice',
'25\n14',
]
mock = Mock(side_effect=out)
with patch.dict(adb.__salt__, {'cmd.run': mock}):
ret = adb.beacon(config)
self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
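# For reference, a minion-side beacon configuration like the config dicts
# exercised above would look roughly like this (sketch):
#
#   beacons:
#     adb:
#       states:
#         - device
#         - missing
#       battery_low: 25
#       no_devices_event: True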
| apache-2.0 |
NCAR/mizuRoute | manage_externals/test/doc/conf.py | 1 | 5279 | # -*- coding: utf-8 -*-
#
# Manage Externals documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 29 10:53:25 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Manage Externals'
copyright = u'2017, CSEG at NCAR'
author = u'CSEG at NCAR'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ManageExternalsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ManageExternals.tex', u'Manage Externals Documentation',
u'CSEG at NCAR', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'manageexternals', u'Manage Externals Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ManageExternals', u'Manage Externals Documentation',
author, 'ManageExternals', 'One line description of project.',
'Miscellaneous'),
]
| gpl-3.0 |
davgibbs/django | tests/get_object_or_404/models.py | 409 | 1133 | """
DB-API Shortcuts
``get_object_or_404()`` is a shortcut function to be used in view functions for
performing a ``get()`` lookup and raising a ``Http404`` exception if a
``DoesNotExist`` exception was raised during the ``get()`` call.
``get_list_or_404()`` is a shortcut function to be used in view functions for
performing a ``filter()`` lookup and raising a ``Http404`` exception if a
``DoesNotExist`` exception was raised during the ``filter()`` call.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class ArticleManager(models.Manager):
def get_queryset(self):
return super(ArticleManager, self).get_queryset().filter(authors__name__icontains='sir')
@python_2_unicode_compatible
class Article(models.Model):
authors = models.ManyToManyField(Author)
title = models.CharField(max_length=50)
objects = models.Manager()
by_a_sir = ArticleManager()
def __str__(self):
return self.title
| bsd-3-clause |
c7zero/chipsec | chipsec/modules/common/secureboot/variables.py | 9 | 7895 | #CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#[email protected]
#
"""
`UEFI 2.4 spec Section 28 <http://uefi.org/>`_
Verify that all Secure Boot key/whitelist/blacklist UEFI variables are authenticated (BS+RT+AT)
and protected from unauthorized modification.
Use '-a modify' option for the module to also try to write/corrupt the variables.
"""
from chipsec.module_common import *
import chipsec.file
from chipsec.hal.uefi import *
# ############################################################
# SPECIFY PLATFORMS THIS MODULE IS APPLICABLE TO
# ############################################################
_MODULE_NAME = 'variables'
TAGS = [MTAG_SECUREBOOT]
class variables(BaseModule):
def __init__(self):
BaseModule.__init__(self)
self._uefi = UEFI( self.cs )
def is_supported( self ):
supported = self.cs.helper.EFI_supported()
if not supported: self.logger.log_skipped_check( "OS does not support UEFI Runtime API" )
return supported
def can_modify( self, name, guid, data, attrs ):
self.logger.log( " > attempting to modify variable %s:%s" % (guid,name) )
datalen = len(data)
#print_buffer( data )
baddata = chr( ord(data[0]) ^ 0xFF ) + data[1:]
#if datalen > 1: baddata = baddata[:datalen-1] + chr( ord(baddata[datalen-1]) ^ 0xFF )
status = self._uefi.set_EFI_variable( name, guid, baddata )
if StatusCode.EFI_SUCCESS != status: self.logger.log( ' < modification of %s returned error 0x%X' % (name,status) )
        else: self.logger.log( ' < modification of %s returned success' % name )
self.logger.log( ' > checking variable %s contents after modification..' % name )
newdata = self._uefi.get_EFI_variable( name, guid )
#print_buffer( newdata )
#chipsec.file.write_file( name+'_'+guid+'.bin', data )
#chipsec.file.write_file( name+'_'+guid+'.bin.bad', baddata )
#chipsec.file.write_file( name+'_'+guid+'.bin.new', newdata )
_changed = (data != newdata)
if _changed:
self.logger.log_bad( "EFI variable %s has been modified. Restoring original contents.." % name )
self._uefi.set_EFI_variable( name, guid, data )
# checking if restored correctly
restoreddata = self._uefi.get_EFI_variable( name, guid )
#print_buffer( restoreddata )
            if (restoreddata != data): self.logger.error( "Failed to restore contents of variable %s!" % name )
else: self.logger.log( " contents of variable %s have been restored" % name )
else:
self.logger.log_good( "Could not modify UEFI variable %s:%s" % (guid,name) )
return _changed
## check_secureboot_variable_attributes
# checks authentication attributes of Secure Boot EFI variables
def check_secureboot_variable_attributes( self, do_modify ):
res = ModuleResult.ERROR
not_found = 0
not_auth = 0
not_wp = 0
is_secureboot_enabled = False
sbvars = self._uefi.list_EFI_variables()
if sbvars is None:
self.logger.log_error_check( 'Could not enumerate UEFI variables (non-UEFI OS?)' )
return ModuleResult.ERROR
for name in SECURE_BOOT_VARIABLES:
if name in sbvars.keys() and sbvars[name] is not None:
if len(sbvars[name]) > 1:
self.logger.log_failed_check( 'There should only be one instance of variable %s' % name )
return ModuleResult.FAILED
for (off, buf, hdr, data, guid, attrs) in sbvars[name]:
self.logger.log( "[*] Checking protections of UEFI variable %s:%s" % (guid,name) )
# check the status of Secure Boot
if EFI_VAR_NAME_SecureBoot == name:
is_secureboot_enabled = (data is not None and len(data) == 1 and ord(data) == 0x1)
#
# Verify if the Secure Boot key/database variable is authenticated
#
if name in SECURE_BOOT_KEY_VARIABLES:
if IS_VARIABLE_ATTRIBUTE( attrs, EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS ):
self.logger.log_good( 'Variable %s:%s is authenticated (AUTHENTICATED_WRITE_ACCESS)' % (guid,name) )
elif IS_VARIABLE_ATTRIBUTE( attrs, EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS ):
self.logger.log_good( 'Variable %s:%s is authenticated (TIME_BASED_AUTHENTICATED_WRITE_ACCESS)' % (guid,name) )
else:
not_auth += 1
self.logger.log_bad( 'Variable %s:%s is not authenticated' % (guid,name) )
#
# Attempt to modify contents of the variables
#
if do_modify:
if self.can_modify( name, guid, data, attrs ): not_wp += 1
else:
not_found += 1
self.logger.log_important( 'Secure Boot variable %s is not found' % name )
continue
self.logger.log( '' )
self.logger.log( '[*] Secure Boot appears to be %sabled' % ('en' if is_secureboot_enabled else 'dis') )
if len(SECURE_BOOT_VARIABLES) == not_found:
# None of Secure Boot variables were not found
self.logger.log_skipped_check( 'None of required Secure Boot variables found. Secure Boot is not enabled' )
return ModuleResult.SKIPPED
else:
# Some Secure Boot variables exist
sb_vars_failed = (not_found > 0) or (not_auth > 0) or (not_wp > 0)
if sb_vars_failed:
if not_found > 0: self.logger.log_bad( "Some required Secure Boot variables are missing" )
if not_auth > 0: self.logger.log_bad( 'Some Secure Boot keying variables are not authenticated' )
if not_wp > 0: self.logger.log_bad( 'Some Secure Boot variables can be modified' )
if is_secureboot_enabled:
self.logger.log_failed_check( 'Not all Secure Boot UEFI variables are protected' )
return ModuleResult.FAILED
else:
self.logger.log_warn_check( 'Not all Secure Boot UEFI variables are protected' )
return ModuleResult.WARNING
else:
self.logger.log_passed_check( 'All Secure Boot UEFI variables are protected' )
return ModuleResult.PASSED
# --------------------------------------------------------------------------
# run( module_argv )
# Required function: run here all tests from this module
# --------------------------------------------------------------------------
def run( self, module_argv ):
self.logger.start_test( "Attributes of Secure Boot EFI Variables" )
do_modify = (len(module_argv) > 0 and module_argv[0] == OPT_MODIFY)
return self.check_secureboot_variable_attributes( do_modify )
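# Note: IS_VARIABLE_ATTRIBUTE used above is assumed to be the usual bitmask
# test on the variable's attribute word; a minimal sketch of such a helper
# (the definition here is illustrative, not necessarily the exact one this
# framework ships):
#
#     def IS_VARIABLE_ATTRIBUTE(attrs, attr_bit):
#         # True when the given attribute bit is set in the attribute mask
#         return (attrs & attr_bit) == attr_bit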
| gpl-2.0 |
sebfung/yellowpillowcase | vendor/cache/gems/pygments.rb-0.6.3/vendor/pygments-main/setup.py | 36 | 2931 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats is added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image \
formats that PIL supports and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/default.zip#egg=Pygments-dev
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
try:
from setuptools import setup, find_packages
have_setuptools = True
except ImportError:
try:
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
have_setuptools = True
except ImportError:
from distutils.core import setup
def find_packages(*args, **kwargs):
return [
'pygments',
'pygments.lexers',
'pygments.formatters',
'pygments.styles',
'pygments.filters',
]
have_setuptools = False
if have_setuptools:
add_keywords = dict(
entry_points = {
'console_scripts': ['pygmentize = pygments.cmdline:main'],
},
)
else:
add_keywords = dict(
scripts = ['pygmentize'],
)
setup(
name = 'Pygments',
version = '2.0pre',
url = 'http://pygments.org/',
license = 'BSD License',
author = 'Georg Brandl',
author_email = '[email protected]',
description = 'Pygments is a syntax highlighting package written in Python.',
long_description = __doc__,
keywords = 'syntax highlighting',
packages = find_packages(exclude=['ez_setup']),
platforms = 'any',
zip_safe = False,
include_package_data = True,
classifiers = [
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'Development Status :: 6 - Mature',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Text Processing :: Filters',
'Topic :: Utilities',
],
**add_keywords
)
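# The docstring above advertises Pygments as usable both as a command-line
# tool and as a library; a minimal library-usage sketch, kept commented so
# this setup script stays side-effect free (highlight, PythonLexer and
# HtmlFormatter are the standard public API):
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     from pygments.formatters import HtmlFormatter
#     print(highlight('print("hello")', PythonLexer(), HtmlFormatter()))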
| mit |
munyirik/python | cpython/Lib/test/test_threading.py | 2 | 39155 | """
Tests for the threading module.
"""
import test.support
from test.support import verbose, strip_python_stderr, import_module, cpython_only
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>',
repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
_thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertTrue(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# test suite from hanging forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertFalse('daemon' in repr(t))
t.daemon = True
self.assertTrue('daemon' in repr(t))
def test_deamon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
sys.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
self.addCleanup(t.join)
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertTrue(t._tstate_lock is None)
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# A Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
Dino0631/RedRain-Bot | lib/youtube_dl/extractor/alphaporno.py | 64 | 2724 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_iso8601,
parse_duration,
parse_filesize,
int_or_none,
)
class AlphaPornoIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?alphaporno\.com/videos/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.alphaporno.com/videos/sensual-striptease-porn-with-samantha-alexandra/',
'md5': 'feb6d3bba8848cd54467a87ad34bd38e',
'info_dict': {
'id': '258807',
'display_id': 'sensual-striptease-porn-with-samantha-alexandra',
'ext': 'mp4',
'title': 'Sensual striptease porn with Samantha Alexandra',
'thumbnail': r're:https?://.*\.jpg$',
'timestamp': 1418694611,
'upload_date': '20141216',
'duration': 387,
'filesize_approx': 54120000,
'tbr': 1145,
'categories': list,
'age_limit': 18,
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
r"video_id\s*:\s*'([^']+)'", webpage, 'video id', default=None)
video_url = self._search_regex(
r"video_url\s*:\s*'([^']+)'", webpage, 'video url')
ext = self._html_search_meta(
'encodingFormat', webpage, 'ext', default='.mp4')[1:]
title = self._search_regex(
[r'<meta content="([^"]+)" itemprop="description">',
r'class="title" itemprop="name">([^<]+)<'],
webpage, 'title')
thumbnail = self._html_search_meta('thumbnail', webpage, 'thumbnail')
timestamp = parse_iso8601(self._html_search_meta(
'uploadDate', webpage, 'upload date'))
duration = parse_duration(self._html_search_meta(
'duration', webpage, 'duration'))
filesize_approx = parse_filesize(self._html_search_meta(
'contentSize', webpage, 'file size'))
bitrate = int_or_none(self._html_search_meta(
'bitrate', webpage, 'bitrate'))
categories = self._html_search_meta(
'keywords', webpage, 'categories', default='').split(',')
age_limit = self._rta_search(webpage)
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'ext': ext,
'title': title,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'filesize_approx': filesize_approx,
'tbr': bitrate,
'categories': categories,
'age_limit': age_limit,
}
| gpl-3.0 |
Dino0631/RedRain-Bot | cogs/lib/youtube_dl/extractor/miomio.py | 61 | 5068 | # coding: utf-8
from __future__ import unicode_literals
import random
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
xpath_text,
int_or_none,
ExtractorError,
sanitized_Request,
)
class MioMioIE(InfoExtractor):
IE_NAME = 'miomio.tv'
_VALID_URL = r'https?://(?:www\.)?miomio\.tv/watch/cc(?P<id>[0-9]+)'
_TESTS = [{
# "type=video" in flashvars
'url': 'http://www.miomio.tv/watch/cc88912/',
'info_dict': {
'id': '88912',
'ext': 'flv',
'title': '【SKY】字幕 铠武昭和VS平成 假面骑士大战FEAT战队 魔星字幕组 字幕',
'duration': 5923,
},
'skip': 'Unable to load videos',
}, {
'url': 'http://www.miomio.tv/watch/cc184024/',
'info_dict': {
'id': '43729',
'title': '《动漫同人插画绘制》',
},
'playlist_mincount': 86,
'skip': 'Unable to load videos',
}, {
'url': 'http://www.miomio.tv/watch/cc173113/',
'info_dict': {
'id': '173113',
'title': 'The New Macbook 2015 上手试玩与简评'
},
'playlist_mincount': 2,
'skip': 'Unable to load videos',
}, {
# new 'h5' player
'url': 'http://www.miomio.tv/watch/cc273997/',
'md5': '0b27a4b4495055d826813f8c3a6b2070',
'info_dict': {
'id': '273997',
'ext': 'mp4',
'title': 'マツコの知らない世界【劇的進化SP!ビニール傘&冷凍食品2016】 1_2 - 16 05 31',
},
'skip': 'Unable to load videos',
}]
def _extract_mioplayer(self, webpage, video_id, title, http_headers):
xml_config = self._search_regex(
r'flashvars="type=(?:sina|video)&(.+?)&',
webpage, 'xml config')
# skipping the following page causes lags and eventually connection drop-outs
self._request_webpage(
'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/xml.php?id=%s&r=%s' % (video_id, random.randint(100, 999)),
video_id)
vid_config_request = sanitized_Request(
'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?{0}'.format(xml_config),
headers=http_headers)
# the following xml contains the actual configuration information on the video file(s)
vid_config = self._download_xml(vid_config_request, video_id)
if not int_or_none(xpath_text(vid_config, 'timelength')):
raise ExtractorError('Unable to load videos!', expected=True)
entries = []
for f in vid_config.findall('./durl'):
segment_url = xpath_text(f, 'url', 'video url')
if not segment_url:
continue
order = xpath_text(f, 'order', 'order')
segment_id = video_id
segment_title = title
if order:
segment_id += '-%s' % order
segment_title += ' part %s' % order
entries.append({
'id': segment_id,
'url': segment_url,
'title': segment_title,
'duration': int_or_none(xpath_text(f, 'length', 'duration'), 1000),
'http_headers': http_headers,
})
return entries
def _download_chinese_webpage(self, *args, **kwargs):
# Requests with English locales return garbage
headers = {
'Accept-Language': 'zh-TW,en-US;q=0.7,en;q=0.3',
}
kwargs.setdefault('headers', {}).update(headers)
return self._download_webpage(*args, **kwargs)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_chinese_webpage(
url, video_id)
title = self._html_search_meta(
'description', webpage, 'title', fatal=True)
mioplayer_path = self._search_regex(
r'src="(/mioplayer(?:_h5)?/[^"]+)"', webpage, 'ref_path')
if '_h5' in mioplayer_path:
player_url = compat_urlparse.urljoin(url, mioplayer_path)
player_webpage = self._download_chinese_webpage(
player_url, video_id,
note='Downloading player webpage', headers={'Referer': url})
entries = self._parse_html5_media_entries(player_url, player_webpage, video_id)
http_headers = {'Referer': player_url}
else:
http_headers = {'Referer': 'http://www.miomio.tv%s' % mioplayer_path}
entries = self._extract_mioplayer(webpage, video_id, title, http_headers)
if len(entries) == 1:
segment = entries[0]
segment['id'] = video_id
segment['title'] = title
segment['http_headers'] = http_headers
return segment
return {
'_type': 'multi_video',
'id': video_id,
'entries': entries,
'title': title,
'http_headers': http_headers,
}
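# Note: '_type': 'multi_video' is youtube-dl's standard marker for a playlist
# whose entries are parts of a single logical video; pages with exactly one
# segment take the len(entries) == 1 branch above and return a plain info
# dict for that segment instead.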
| gpl-3.0 |
40123242/cdw11-bg3 | static/plugin/liquid_tags/diag.py | 270 | 4096 | """
Blockdiag Tag
---------
This tag implements a liquid style tag for blockdiag [1]. You can use different
diagram types like blockdiag, seqdiag, packetdiag etc. [1]
[1] http://blockdiag.com/en/blockdiag/
Syntax
------
{% blockdiag {
<diagramm type> {
<CODE>
}
}
%}
Examples
--------
{% blockdiag {
blockdiag {
A -> B -> C;
B -> D;
}
}
%}
{% blockdiag {
actdiag {
A -> B -> C -> D -> E;
lane {
A; C; E;
}
lane {
B; D;
}
}
}
%}
{% blockdiag {
packetdiag {
0-7: Source Port
8-15: Destination Port
16-31: Sequence Number
32-47: Acknowledgment Number
}
}
%}
...
Output
------
<div class="blockdiag" style="align: center;"><img src="data:image/png;base64,_BASE64_IMAGE DATA_/></div>
"""
import io
import os
import sys
import base64
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = '{% blockdiag [diagram type] [code] %}'
DOT_BLOCK_RE = re.compile(r'^\s*(?P<diagram>\w+).*$', re.MULTILINE | re.DOTALL)
_draw_mode = 'PNG'
_publish_mode = 'PNG'
def get_diag(code, command):
""" Generate diagramm and return data """
import tempfile
import shutil
code = code + u'\n'
try:
tmpdir = tempfile.mkdtemp()
fd, diag_name = tempfile.mkstemp(dir=tmpdir)
f = os.fdopen(fd, "w")
f.write(code.encode('utf-8'))
f.close()
format = _draw_mode.lower()
draw_name = diag_name + '.' + format
saved_argv = sys.argv
argv = [diag_name, '-T', format, '-o', draw_name]
if _draw_mode == 'SVG':
argv += ['--ignore-pil']
# Run command
command.main(argv)
# Read image data from file
file_name = diag_name + '.' + _publish_mode.lower()
with io.open(file_name, 'rb') as f:
data = f.read()
finally:
for file in os.listdir(tmpdir):
os.unlink(tmpdir + "/" + file)
# os.rmdir will fail -> use shutil
shutil.rmtree(tmpdir)
return data
def diag(code, command):
if command == "blockdiag": # blockdiag
import blockdiag.command
return get_diag(code, blockdiag.command)
elif command == "diagram": # diagram
import blockdiag.command
return get_diag(code, blockdiag.command)
elif command == "seqdiag": # seqdiag
import seqdiag.command
return get_diag(code, seqdiag.command)
elif command == "actdiag": # actdiag
import actdiag.command
return get_diag(code, actdiag.command)
elif command == "nwdiag": # nwdiag
import nwdiag.command
return get_diag(code, nwdiag.command)
elif command == "packetdiag": # packetdiag
import packetdiag.command
return get_diag(code, packetdiag.command)
elif command == "rackdiag": # racketdiag
import rackdiag.command
return get_diag(code, rackdiag.command)
else: # not found
print("No such command %s" % command)
return None
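# Design note: the if/elif chain above could be collapsed into a dispatch
# table; a minimal sketch under that assumption (importlib is stdlib, the
# module paths mirror the imports used above):
#
#     import importlib
#     _DIAG_COMMANDS = {
#         'blockdiag': 'blockdiag.command', 'diagram': 'blockdiag.command',
#         'seqdiag': 'seqdiag.command', 'actdiag': 'actdiag.command',
#         'nwdiag': 'nwdiag.command', 'packetdiag': 'packetdiag.command',
#         'rackdiag': 'rackdiag.command',
#     }
#
#     def diag(code, command):
#         path = _DIAG_COMMANDS.get(command)
#         if path is None:
#             print("No such command %s" % command)
#             return None
#         return get_diag(code, importlib.import_module(path))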
@LiquidTags.register("blockdiag")
def blockdiag_parser(preprocessor, tag, markup):
""" Blockdiag parser """
m = DOT_BLOCK_RE.search(markup)
if m:
# Get diagram type and code
diagram = m.group('diagram').strip()
code = markup
# Run command
output = diag(code, diagram)
if output:
# Return Base64 encoded image
return '<div class="blockdiag" style="align: center;"><img src="data:image/png;base64,%s"></div>' % base64.b64encode(output)
else:
raise ValueError('Error processing input. '
'Expected syntax: {0}'.format(SYNTAX))
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
| agpl-3.0 |
beezee/GAE-Django-site | django/contrib/gis/geos/tests/test_geos_mutation.py | 245 | 5464 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
import django.utils.copycompat as copy
from django.contrib.gis.geos import *
from django.contrib.gis.geos.error import GEOSIndexError
from django.utils import unittest
def getItem(o,i): return o[i]
def delItem(o,i): del o[i]
def setItem(o,i,v): o[i] = v
def api_get_distance(x): return x.distance(Point(-200,-200))
def api_get_buffer(x): return x.buffer(10)
def api_get_geom_typeid(x): return x.geom_typeid
def api_get_num_coords(x): return x.num_coords
def api_get_centroid(x): return x.centroid
def api_get_empty(x): return x.empty
def api_get_valid(x): return x.valid
def api_get_simple(x): return x.simple
def api_get_ring(x): return x.ring
def api_get_boundary(x): return x.boundary
def api_get_convex_hull(x): return x.convex_hull
def api_get_extent(x): return x.extent
def api_get_area(x): return x.area
def api_get_length(x): return x.length
geos_function_tests = [ val for name, val in vars().items()
if hasattr(val, '__call__')
and name.startswith('api_get_') ]
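# geos_function_tests therefore collects every api_get_* callable defined
# above (api_get_distance, api_get_area, ...); the mutation tests below run
# this whole battery against a mutated geometry and a freshly constructed
# equivalent to check that both behave identically.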
class GEOSMutationTest(unittest.TestCase):
"""
Tests Pythonic Mutability of Python GEOS geometry wrappers
get/set/delitem on a slice, normal list methods
"""
def test00_GEOSIndexException(self):
'Testing Geometry GEOSIndexError'
p = Point(1,2)
for i in range(-2,2): p._checkindex(i)
self.assertRaises(GEOSIndexError, p._checkindex, 2)
self.assertRaises(GEOSIndexError, p._checkindex, -3)
def test01_PointMutations(self):
'Testing Point mutations'
for p in (Point(1,2,3), fromstr('POINT (1 2 3)')):
self.assertEqual(p._get_single_external(1), 2.0, 'Point _get_single_external')
# _set_single
p._set_single(0,100)
self.assertEqual(p.coords, (100.0,2.0,3.0), 'Point _set_single')
# _set_list
p._set_list(2,(50,3141))
self.assertEqual(p.coords, (50.0,3141.0), 'Point _set_list')
def test02_PointExceptions(self):
'Testing Point exceptions'
self.assertRaises(TypeError, Point, range(1))
self.assertRaises(TypeError, Point, range(4))
def test03_PointApi(self):
'Testing Point API'
q = Point(4,5,3)
for p in (Point(1,2,3), fromstr('POINT (1 2 3)')):
p[0:2] = [4,5]
for f in geos_function_tests:
self.assertEqual(f(q), f(p), 'Point ' + f.__name__)
def test04_LineStringMutations(self):
'Testing LineString mutations'
for ls in (LineString((1,0),(4,1),(6,-1)),
fromstr('LINESTRING (1 0,4 1,6 -1)')):
self.assertEqual(ls._get_single_external(1), (4.0,1.0), 'LineString _get_single_external')
# _set_single
ls._set_single(0,(-50,25))
self.assertEqual(ls.coords, ((-50.0,25.0),(4.0,1.0),(6.0,-1.0)), 'LineString _set_single')
# _set_list
ls._set_list(2, ((-50.0,25.0),(6.0,-1.0)))
self.assertEqual(ls.coords, ((-50.0,25.0),(6.0,-1.0)), 'LineString _set_list')
lsa = LineString(ls.coords)
for f in geos_function_tests:
self.assertEqual(f(lsa), f(ls), 'LineString ' + f.__name__)
def test05_Polygon(self):
'Testing Polygon mutations'
for pg in (Polygon(((1,0),(4,1),(6,-1),(8,10),(1,0)),
((5,4),(6,4),(6,3),(5,4))),
fromstr('POLYGON ((1 0,4 1,6 -1,8 10,1 0),(5 4,6 4,6 3,5 4))')):
self.assertEqual(pg._get_single_external(0),
LinearRing((1,0),(4,1),(6,-1),(8,10),(1,0)),
'Polygon _get_single_external(0)')
self.assertEqual(pg._get_single_external(1),
LinearRing((5,4),(6,4),(6,3),(5,4)),
'Polygon _get_single_external(1)')
# _set_list
pg._set_list(2, (((1,2),(10,0),(12,9),(-1,15),(1,2)),
((4,2),(5,2),(5,3),(4,2))))
self.assertEqual(pg.coords,
(((1.0,2.0),(10.0,0.0),(12.0,9.0),(-1.0,15.0),(1.0,2.0)),
((4.0,2.0),(5.0,2.0),(5.0,3.0),(4.0,2.0))),
'Polygon _set_list')
lsa = Polygon(*pg.coords)
for f in geos_function_tests:
self.assertEqual(f(lsa), f(pg), 'Polygon ' + f.__name__)
def test06_Collection(self):
'Testing Collection mutations'
for mp in (MultiPoint(*map(Point,((3,4),(-1,2),(5,-4),(2,8)))),
fromstr('MULTIPOINT (3 4,-1 2,5 -4,2 8)')):
self.assertEqual(mp._get_single_external(2), Point(5,-4), 'Collection _get_single_external')
mp._set_list(3, map(Point,((5,5),(3,-2),(8,1))))
self.assertEqual(mp.coords, ((5.0,5.0),(3.0,-2.0),(8.0,1.0)), 'Collection _set_list')
lsa = MultiPoint(*map(Point,((5,5),(3,-2),(8,1))))
for f in geos_function_tests:
self.assertEqual(f(lsa), f(mp), 'MultiPoint ' + f.__name__)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GEOSMutationTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__ == '__main__':
run()
| bsd-3-clause |
notriddle/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_terminal.py | 30 | 40391 | """
terminal reporting of the full testing process.
"""
from __future__ import absolute_import, division, print_function
import collections
import sys
import pluggy
import _pytest._code
import py
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.terminal import TerminalReporter, repr_pythonversion, getreportopt
from _pytest.terminal import build_summary_stats_line, _plugin_nameversions
DistInfo = collections.namedtuple("DistInfo", ["project_name", "version"])
class Option(object):
def __init__(self, verbose=False, fulltrace=False):
self.verbose = verbose
self.fulltrace = fulltrace
@property
def args(self):
values = []
if self.verbose:
values.append("-v")
if self.fulltrace:
values.append("--fulltrace")
return values
@pytest.fixture(
params=[
Option(verbose=False),
Option(verbose=True),
Option(verbose=-1),
Option(fulltrace=True),
],
ids=["default", "verbose", "quiet", "fulltrace"],
)
def option(request):
return request.param
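# Tests that accept the `option` fixture run once per Option instance above
# (ids: default, verbose, quiet, fulltrace) and splat option.args into
# testdir.runpytest() to exercise the reporter under each CLI flag set.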
@pytest.mark.parametrize(
"input,expected",
[
([DistInfo(project_name="test", version=1)], ["test-1"]),
([DistInfo(project_name="pytest-test", version=1)], ["test-1"]),
(
[
DistInfo(project_name="test", version=1),
DistInfo(project_name="test", version=1),
],
["test-1"],
),
],
ids=["normal", "prefix-strip", "deduplicate"],
)
def test_plugin_nameversion(input, expected):
pluginlist = [(None, x) for x in input]
result = _plugin_nameversions(pluginlist)
assert result == expected
class TestTerminal(object):
def test_pass_skip_fail(self, testdir, option):
testdir.makepyfile(
"""
import pytest
def test_ok():
pass
def test_skip():
pytest.skip("xx")
def test_func():
assert 0
"""
)
result = testdir.runpytest(*option.args)
if option.verbose:
result.stdout.fnmatch_lines(
[
"*test_pass_skip_fail.py::test_ok PASS*",
"*test_pass_skip_fail.py::test_skip SKIP*",
"*test_pass_skip_fail.py::test_func FAIL*",
]
)
else:
result.stdout.fnmatch_lines(["*test_pass_skip_fail.py .sF*"])
result.stdout.fnmatch_lines(
[" def test_func():", "> assert 0", "E assert 0"]
)
def test_internalerror(self, testdir, linecomp):
modcol = testdir.getmodulecol("def test_one(): pass")
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
excinfo = pytest.raises(ValueError, "raise ValueError('hello')")
rep.pytest_internalerror(excinfo.getrepr())
linecomp.assert_contains_lines(["INTERNALERROR> *ValueError*hello*"])
def test_writeline(self, testdir, linecomp):
modcol = testdir.getmodulecol("def test_one(): pass")
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
rep.write_fspath_result(modcol.nodeid, ".")
rep.write_line("hello world")
lines = linecomp.stringio.getvalue().split("\n")
assert not lines[0]
assert lines[1].endswith(modcol.name + " .")
assert lines[2] == "hello world"
def test_show_runtest_logstart(self, testdir, linecomp):
item = testdir.getitem("def test_func(): pass")
tr = TerminalReporter(item.config, file=linecomp.stringio)
item.config.pluginmanager.register(tr)
location = item.reportinfo()
tr.config.hook.pytest_runtest_logstart(
nodeid=item.nodeid, location=location, fspath=str(item.fspath)
)
linecomp.assert_contains_lines(["*test_show_runtest_logstart.py*"])
def test_runtest_location_shown_before_test_starts(self, testdir):
testdir.makepyfile(
"""
def test_1():
import time
time.sleep(20)
"""
)
child = testdir.spawn_pytest("")
child.expect(".*test_runtest_location.*py")
child.sendeof()
child.kill(15)
def test_itemreport_subclasses_show_subclassed_file(self, testdir):
testdir.makepyfile(
test_p1="""
class BaseTests(object):
def test_p1(self):
pass
class TestClass(BaseTests):
pass
"""
)
p2 = testdir.makepyfile(
test_p2="""
from test_p1 import BaseTests
class TestMore(BaseTests):
pass
"""
)
result = testdir.runpytest(p2)
result.stdout.fnmatch_lines(["*test_p2.py .*", "*1 passed*"])
result = testdir.runpytest("-v", p2)
result.stdout.fnmatch_lines(
["*test_p2.py::TestMore::test_p1* <- *test_p1.py*PASSED*"]
)
def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir):
a = testdir.mkpydir("a123")
a.join("test_hello123.py").write(
_pytest._code.Source(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
)
result = testdir.runpytest("-v")
assert result.ret == 0
result.stdout.fnmatch_lines(["*a123/test_hello123.py*PASS*"])
assert " <- " not in result.stdout.str()
def test_keyboard_interrupt(self, testdir, option):
testdir.makepyfile(
"""
def test_foobar():
assert 0
def test_spamegg():
import py; pytest.skip('skip me please!')
def test_interrupt_me():
raise KeyboardInterrupt # simulating the user
"""
)
result = testdir.runpytest(*option.args, no_reraise_ctrlc=True)
result.stdout.fnmatch_lines(
[
" def test_foobar():",
"> assert 0",
"E assert 0",
"*_keyboard_interrupt.py:6: KeyboardInterrupt*",
]
)
if option.fulltrace:
result.stdout.fnmatch_lines(
["*raise KeyboardInterrupt # simulating the user*"]
)
else:
result.stdout.fnmatch_lines(
["(to show a full traceback on KeyboardInterrupt use --fulltrace)"]
)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
def test_keyboard_in_sessionstart(self, testdir):
testdir.makeconftest(
"""
def pytest_sessionstart():
raise KeyboardInterrupt
"""
)
testdir.makepyfile(
"""
def test_foobar():
pass
"""
)
result = testdir.runpytest(no_reraise_ctrlc=True)
assert result.ret == 2
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
def test_collect_single_item(self, testdir):
"""Use singular 'item' when reporting a single test item"""
testdir.makepyfile(
"""
def test_foobar():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["collected 1 item"])
def test_rewrite(self, testdir, monkeypatch):
config = testdir.parseconfig()
f = py.io.TextIO()
monkeypatch.setattr(f, "isatty", lambda *args: True)
tr = TerminalReporter(config, f)
tr._tw.fullwidth = 10
tr.write("hello")
tr.rewrite("hey", erase=True)
assert f.getvalue() == "hello" + "\r" + "hey" + (6 * " ")
class TestCollectonly(object):
def test_collectonly_basic(self, testdir):
testdir.makepyfile(
"""
def test_func():
pass
"""
)
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(
["<Module 'test_collectonly_basic.py'>", " <Function 'test_func'>"]
)
def test_collectonly_skipped_module(self, testdir):
testdir.makepyfile(
"""
import pytest
pytest.skip("hello")
"""
)
result = testdir.runpytest("--collect-only", "-rs")
result.stdout.fnmatch_lines(["*ERROR collecting*"])
def test_collectonly_failed_module(self, testdir):
testdir.makepyfile("""raise ValueError(0)""")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*raise ValueError*", "*1 error*"])
def test_collectonly_fatal(self, testdir):
testdir.makeconftest(
"""
def pytest_collectstart(collector):
assert 0, "urgs"
"""
)
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*INTERNAL*args*"])
assert result.ret == 3
def test_collectonly_simple(self, testdir):
p = testdir.makepyfile(
"""
def test_func1():
pass
class TestClass(object):
def test_method(self):
pass
"""
)
result = testdir.runpytest("--collect-only", p)
# assert stderr.startswith("inserting into sys.path")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"*<Module '*.py'>",
"* <Function 'test_func1'*>",
"* <Class 'TestClass'>",
# "* <Instance '()'>",
"* <Function 'test_method'*>",
]
)
def test_collectonly_error(self, testdir):
p = testdir.makepyfile("import Errlkjqweqwe")
result = testdir.runpytest("--collect-only", p)
assert result.ret == 2
result.stdout.fnmatch_lines(
_pytest._code.Source(
"""
*ERROR*
*ImportError*
*No module named *Errlk*
*1 error*
"""
).strip()
)
def test_collectonly_missing_path(self, testdir):
"""this checks issue 115,
failure in parseargs will cause session
not to have the items attribute
"""
result = testdir.runpytest("--collect-only", "uhm_missing_path")
assert result.ret == 4
result.stderr.fnmatch_lines(["*ERROR: file not found*"])
def test_collectonly_quiet(self, testdir):
testdir.makepyfile("def test_foo(): pass")
result = testdir.runpytest("--collect-only", "-q")
result.stdout.fnmatch_lines(["*test_foo*"])
def test_collectonly_more_quiet(self, testdir):
testdir.makepyfile(test_fun="def test_foo(): pass")
result = testdir.runpytest("--collect-only", "-qq")
result.stdout.fnmatch_lines(["*test_fun.py: 1*"])
def test_repr_python_version(monkeypatch):
try:
monkeypatch.setattr(sys, "version_info", (2, 5, 1, "final", 0))
assert repr_pythonversion() == "2.5.1-final-0"
sys.version_info = x = (2, 3)
assert repr_pythonversion() == str(x)
finally:
monkeypatch.undo() # do this early as pytest can get confused
class TestFixtureReporting(object):
def test_setup_fixture_error(self, testdir):
testdir.makepyfile(
"""
def setup_function(function):
print ("setup func")
assert 0
def test_nada():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at setup of test_nada*",
"*setup_function(function):*",
"*setup func*",
"*assert 0*",
"*1 error*",
]
)
assert result.ret != 0
def test_teardown_fixture_error(self, testdir):
testdir.makepyfile(
"""
def test_nada():
pass
def teardown_function(function):
print ("teardown func")
assert 0
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at teardown*",
"*teardown_function(function):*",
"*assert 0*",
"*Captured stdout*",
"*teardown func*",
"*1 passed*1 error*",
]
)
def test_teardown_fixture_error_and_test_failure(self, testdir):
testdir.makepyfile(
"""
def test_fail():
assert 0, "failingfunc"
def teardown_function(function):
print ("teardown func")
assert False
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at teardown of test_fail*",
"*teardown_function(function):*",
"*assert False*",
"*Captured stdout*",
"*teardown func*",
"*test_fail*",
"*def test_fail():",
"*failingfunc*",
"*1 failed*1 error*",
]
)
def test_setup_teardown_output_and_test_failure(self, testdir):
""" Test for issue #442 """
testdir.makepyfile(
"""
def setup_function(function):
print ("setup func")
def test_fail():
assert 0, "failingfunc"
def teardown_function(function):
print ("teardown func")
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_fail*",
"*def test_fail():",
"*failingfunc*",
"*Captured stdout setup*",
"*setup func*",
"*Captured stdout teardown*",
"*teardown func*",
"*1 failed*",
]
)
class TestTerminalFunctional(object):
def test_deselected(self, testdir):
testpath = testdir.makepyfile(
"""
def test_one():
pass
def test_two():
pass
def test_three():
pass
"""
)
result = testdir.runpytest("-k", "test_two:", testpath)
result.stdout.fnmatch_lines(
["collected 3 items / 1 deselected", "*test_deselected.py ..*"]
)
assert result.ret == 0
def test_show_deselected_items_using_markexpr_before_test_execution(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.foo
def test_foobar():
pass
@pytest.mark.bar
def test_bar():
pass
def test_pass():
pass
"""
)
result = testdir.runpytest("-m", "not foo")
result.stdout.fnmatch_lines(
[
"collected 3 items / 1 deselected",
"*test_show_des*.py ..*",
"*= 2 passed, 1 deselected in * =*",
]
)
assert "= 1 deselected =" not in result.stdout.str()
assert result.ret == 0
def test_no_skip_summary_if_failure(self, testdir):
testdir.makepyfile(
"""
import pytest
def test_ok():
pass
def test_fail():
assert 0
def test_skip():
pytest.skip("dontshow")
"""
)
result = testdir.runpytest()
assert result.stdout.str().find("skip test summary") == -1
assert result.ret == 1
def test_passes(self, testdir):
p1 = testdir.makepyfile(
"""
def test_passes():
pass
class TestClass(object):
def test_method(self):
pass
"""
)
old = p1.dirpath().chdir()
try:
result = testdir.runpytest()
finally:
old.chdir()
result.stdout.fnmatch_lines(["test_passes.py ..*", "* 2 pass*"])
assert result.ret == 0
def test_header_trailer_info(self, testdir):
testdir.makepyfile(
"""
def test_passes():
pass
"""
)
result = testdir.runpytest()
verinfo = ".".join(map(str, sys.version_info[:3]))
result.stdout.fnmatch_lines(
[
"*===== test session starts ====*",
"platform %s -- Python %s*pytest-%s*py-%s*pluggy-%s"
% (
sys.platform,
verinfo,
pytest.__version__,
py.__version__,
pluggy.__version__,
),
"*test_header_trailer_info.py .*",
"=* 1 passed*in *.[0-9][0-9] seconds *=",
]
)
if pytest.config.pluginmanager.list_plugin_distinfo():
result.stdout.fnmatch_lines(["plugins: *"])
def test_showlocals(self, testdir):
p1 = testdir.makepyfile(
"""
def test_showlocals():
x = 3
y = "x" * 5000
assert 0
"""
)
result = testdir.runpytest(p1, "-l")
result.stdout.fnmatch_lines(
[
# "_ _ * Locals *",
"x* = 3",
"y* = 'xxxxxx*",
]
)
def test_verbose_reporting(self, testdir, pytestconfig):
p1 = testdir.makepyfile(
"""
import pytest
def test_fail():
raise ValueError()
def test_pass():
pass
class TestClass(object):
def test_skip(self):
pytest.skip("hello")
def test_gen():
def check(x):
assert x == 1
yield check, 0
"""
)
result = testdir.runpytest(p1, "-v")
result.stdout.fnmatch_lines(
[
"*test_verbose_reporting.py::test_fail *FAIL*",
"*test_verbose_reporting.py::test_pass *PASS*",
"*test_verbose_reporting.py::TestClass::test_skip *SKIP*",
"*test_verbose_reporting.py::test_gen*0* *FAIL*",
]
)
assert result.ret == 1
if not pytestconfig.pluginmanager.get_plugin("xdist"):
pytest.skip("xdist plugin not installed")
result = testdir.runpytest(p1, "-v", "-n 1")
result.stdout.fnmatch_lines(["*FAIL*test_verbose_reporting.py::test_fail*"])
assert result.ret == 1
def test_quiet_reporting(self, testdir):
p1 = testdir.makepyfile("def test_pass(): pass")
result = testdir.runpytest(p1, "-q")
s = result.stdout.str()
assert "test session starts" not in s
assert p1.basename not in s
assert "===" not in s
assert "passed" in s
def test_more_quiet_reporting(self, testdir):
p1 = testdir.makepyfile("def test_pass(): pass")
result = testdir.runpytest(p1, "-qq")
s = result.stdout.str()
assert "test session starts" not in s
assert p1.basename not in s
assert "===" not in s
assert "passed" not in s
def test_report_collectionfinish_hook(self, testdir):
testdir.makeconftest(
"""
def pytest_report_collectionfinish(config, startdir, items):
return ['hello from hook: {0} items'.format(len(items))]
"""
)
testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize('i', range(3))
def test(i):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["collected 3 items", "hello from hook: 3 items"])
def test_fail_extra_reporting(testdir):
testdir.makepyfile("def test_this(): assert 0")
result = testdir.runpytest()
assert "short test summary" not in result.stdout.str()
result = testdir.runpytest("-rf")
result.stdout.fnmatch_lines(["*test summary*", "FAIL*test_fail_extra_reporting*"])
def test_fail_reporting_on_pass(testdir):
testdir.makepyfile("def test_this(): assert 1")
result = testdir.runpytest("-rf")
assert "short test summary" not in result.stdout.str()
def test_pass_extra_reporting(testdir):
testdir.makepyfile("def test_this(): assert 1")
result = testdir.runpytest()
assert "short test summary" not in result.stdout.str()
result = testdir.runpytest("-rp")
result.stdout.fnmatch_lines(["*test summary*", "PASS*test_pass_extra_reporting*"])
def test_pass_reporting_on_fail(testdir):
testdir.makepyfile("def test_this(): assert 0")
result = testdir.runpytest("-rp")
assert "short test summary" not in result.stdout.str()
def test_pass_output_reporting(testdir):
testdir.makepyfile(
"""
def test_pass_output():
print("Four score and seven years ago...")
"""
)
result = testdir.runpytest()
assert "Four score and seven years ago..." not in result.stdout.str()
result = testdir.runpytest("-rP")
result.stdout.fnmatch_lines(["Four score and seven years ago..."])
def test_color_yes(testdir):
testdir.makepyfile("def test_this(): assert 1")
result = testdir.runpytest("--color=yes")
assert "test session starts" in result.stdout.str()
assert "\x1b[1m" in result.stdout.str()
def test_color_no(testdir):
testdir.makepyfile("def test_this(): assert 1")
result = testdir.runpytest("--color=no")
assert "test session starts" in result.stdout.str()
assert "\x1b[1m" not in result.stdout.str()
@pytest.mark.parametrize("verbose", [True, False])
def test_color_yes_collection_on_non_atty(testdir, verbose):
"""skip collect progress report when working on non-terminals.
#1397
"""
testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize('i', range(10))
def test_this(i):
assert 1
"""
)
args = ["--color=yes"]
if verbose:
args.append("-vv")
result = testdir.runpytest(*args)
assert "test session starts" in result.stdout.str()
assert "\x1b[1m" in result.stdout.str()
assert "collecting 10 items" not in result.stdout.str()
if verbose:
assert "collecting ..." in result.stdout.str()
assert "collected 10 items" in result.stdout.str()
def test_getreportopt():
class Config(object):
class Option(object):
reportchars = ""
disable_warnings = True
option = Option()
config = Config()
config.option.reportchars = "sf"
assert getreportopt(config) == "sf"
config.option.reportchars = "sfxw"
assert getreportopt(config) == "sfx"
config.option.reportchars = "sfx"
config.option.disable_warnings = False
assert getreportopt(config) == "sfxw"
config.option.reportchars = "sfxw"
config.option.disable_warnings = False
assert getreportopt(config) == "sfxw"
def test_terminalreporter_reportopt_addopts(testdir):
testdir.makeini("[pytest]\naddopts=-rs")
testdir.makepyfile(
"""
import pytest
@pytest.fixture
def tr(request):
tr = request.config.pluginmanager.getplugin("terminalreporter")
return tr
def test_opt(tr):
assert tr.hasopt('skipped')
assert not tr.hasopt('qwe')
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_tbstyle_short(testdir):
p = testdir.makepyfile(
"""
import pytest
@pytest.fixture
def arg(request):
return 42
def test_opt(arg):
x = 0
assert x
"""
)
result = testdir.runpytest("--tb=short")
s = result.stdout.str()
assert "arg = 42" not in s
assert "x = 0" not in s
result.stdout.fnmatch_lines(["*%s:8*" % p.basename, " assert x", "E assert*"])
result = testdir.runpytest()
s = result.stdout.str()
assert "x = 0" in s
assert "assert x" in s
def test_traceconfig(testdir, monkeypatch):
result = testdir.runpytest("--traceconfig")
result.stdout.fnmatch_lines(["*active plugins*"])
assert result.ret == EXIT_NOTESTSCOLLECTED
class TestGenericReporting(object):
""" this test class can be subclassed with a different option
provider to run e.g. distributed tests.
"""
def test_collect_fail(self, testdir, option):
testdir.makepyfile("import xyz\n")
result = testdir.runpytest(*option.args)
result.stdout.fnmatch_lines(
["ImportError while importing*", "*No module named *xyz*", "*1 error*"]
)
def test_maxfailures(self, testdir, option):
testdir.makepyfile(
"""
def test_1():
assert 0
def test_2():
assert 0
def test_3():
assert 0
"""
)
result = testdir.runpytest("--maxfail=2", *option.args)
result.stdout.fnmatch_lines(
["*def test_1():*", "*def test_2():*", "*2 failed*"]
)
def test_tb_option(self, testdir, option):
testdir.makepyfile(
"""
import pytest
def g():
raise IndexError
def test_func():
print (6*7)
g() # --calling--
"""
)
for tbopt in ["long", "short", "no"]:
print("testing --tb=%s..." % tbopt)
result = testdir.runpytest("--tb=%s" % tbopt)
s = result.stdout.str()
if tbopt == "long":
assert "print (6*7)" in s
else:
assert "print (6*7)" not in s
if tbopt != "no":
assert "--calling--" in s
assert "IndexError" in s
else:
assert "FAILURES" not in s
assert "--calling--" not in s
assert "IndexError" not in s
def test_tb_crashline(self, testdir, option):
p = testdir.makepyfile(
"""
import pytest
def g():
raise IndexError
def test_func1():
print (6*7)
g() # --calling--
def test_func2():
assert 0, "hello"
"""
)
result = testdir.runpytest("--tb=line")
bn = p.basename
result.stdout.fnmatch_lines(
["*%s:3: IndexError*" % bn, "*%s:8: AssertionError: hello*" % bn]
)
s = result.stdout.str()
assert "def test_func2" not in s
def test_pytest_report_header(self, testdir, option):
testdir.makeconftest(
"""
def pytest_sessionstart(session):
session.config._somevalue = 42
def pytest_report_header(config):
return "hello: %s" % config._somevalue
"""
)
testdir.mkdir("a").join("conftest.py").write(
"""
def pytest_report_header(config, startdir):
return ["line1", str(startdir)]
"""
)
result = testdir.runpytest("a")
result.stdout.fnmatch_lines(["*hello: 42*", "line1", str(testdir.tmpdir)])
def test_show_capture(self, testdir):
testdir.makepyfile(
"""
import sys
import logging
def test_one():
sys.stdout.write('!This is stdout!')
sys.stderr.write('!This is stderr!')
logging.warning('!This is a warning log msg!')
assert False, 'Something failed'
"""
)
result = testdir.runpytest("--tb=short")
result.stdout.fnmatch_lines(
[
"!This is stdout!",
"!This is stderr!",
"*WARNING*!This is a warning log msg!",
]
)
result = testdir.runpytest("--show-capture=all", "--tb=short")
result.stdout.fnmatch_lines(
[
"!This is stdout!",
"!This is stderr!",
"*WARNING*!This is a warning log msg!",
]
)
stdout = testdir.runpytest("--show-capture=stdout", "--tb=short").stdout.str()
assert "!This is stderr!" not in stdout
assert "!This is stdout!" in stdout
assert "!This is a warning log msg!" not in stdout
stdout = testdir.runpytest("--show-capture=stderr", "--tb=short").stdout.str()
assert "!This is stdout!" not in stdout
assert "!This is stderr!" in stdout
assert "!This is a warning log msg!" not in stdout
stdout = testdir.runpytest("--show-capture=log", "--tb=short").stdout.str()
assert "!This is stdout!" not in stdout
assert "!This is stderr!" not in stdout
assert "!This is a warning log msg!" in stdout
stdout = testdir.runpytest("--show-capture=no", "--tb=short").stdout.str()
assert "!This is stdout!" not in stdout
assert "!This is stderr!" not in stdout
assert "!This is a warning log msg!" not in stdout
@pytest.mark.xfail("not hasattr(os, 'dup')")
def test_fdopen_kept_alive_issue124(testdir):
testdir.makepyfile(
"""
import os, sys
k = []
def test_open_file_and_keep_alive(capfd):
stdout = os.fdopen(1, 'w', 1)
k.append(stdout)
def test_close_kept_alive_file():
stdout = k.pop()
stdout.close()
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*2 passed*"])
def test_tbstyle_native_setup_error(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture
def setup_error_fixture():
raise Exception("error in exception")
def test_error_fixture(setup_error_fixture):
pass
"""
)
result = testdir.runpytest("--tb=native")
result.stdout.fnmatch_lines(
['*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*']
)
def test_terminal_summary(testdir):
testdir.makeconftest(
"""
def pytest_terminal_summary(terminalreporter, exitstatus):
w = terminalreporter
w.section("hello")
w.line("world")
w.line("exitstatus: {0}".format(exitstatus))
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"""
*==== hello ====*
world
exitstatus: 5
"""
)
def test_terminal_summary_warnings_are_displayed(testdir):
"""Test that warnings emitted during pytest_terminal_summary are displayed.
(#1305).
"""
testdir.makeconftest(
"""
def pytest_terminal_summary(terminalreporter):
config = terminalreporter.config
config.warn('C1', 'internal warning')
"""
)
result = testdir.runpytest("-rw")
result.stdout.fnmatch_lines(
["<undetermined location>", "*internal warning", "*== 1 warnings in *"]
)
assert "None" not in result.stdout.str()
@pytest.mark.parametrize(
"exp_color, exp_line, stats_arg",
[
# The method under test only cares about the length of each
# dict value, not the actual contents, so tuples of anything
# suffice
# Important statuses -- the highest priority of these always wins
("red", "1 failed", {"failed": (1,)}),
("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}),
("red", "1 error", {"error": (1,)}),
("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}),
# (a status that's not known to the code)
("yellow", "1 weird", {"weird": (1,)}),
("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}),
("yellow", "1 warnings", {"warnings": (1,)}),
("yellow", "1 passed, 1 warnings", {"warnings": (1,), "passed": (1,)}),
("green", "5 passed", {"passed": (1, 2, 3, 4, 5)}),
# "Boring" statuses. These have no effect on the color of the summary
# line. Thus, if *every* test has a boring status, the summary line stays
# at its default color, i.e. yellow, to warn the user that the test run
# produced no useful information
("yellow", "1 skipped", {"skipped": (1,)}),
("green", "1 passed, 1 skipped", {"skipped": (1,), "passed": (1,)}),
("yellow", "1 deselected", {"deselected": (1,)}),
("green", "1 passed, 1 deselected", {"deselected": (1,), "passed": (1,)}),
("yellow", "1 xfailed", {"xfailed": (1,)}),
("green", "1 passed, 1 xfailed", {"xfailed": (1,), "passed": (1,)}),
("yellow", "1 xpassed", {"xpassed": (1,)}),
("green", "1 passed, 1 xpassed", {"xpassed": (1,), "passed": (1,)}),
# Likewise if no tests were found at all
("yellow", "no tests ran", {}),
# Test the empty-key special case
("yellow", "no tests ran", {"": (1,)}),
("green", "1 passed", {"": (1,), "passed": (1,)}),
# A couple more complex combinations
(
"red",
"1 failed, 2 passed, 3 xfailed",
{"passed": (1, 2), "failed": (1,), "xfailed": (1, 2, 3)},
),
(
"green",
"1 passed, 2 skipped, 3 deselected, 2 xfailed",
{
"passed": (1,),
"skipped": (1, 2),
"deselected": (1, 2, 3),
"xfailed": (1, 2),
},
),
],
)
def test_summary_stats(exp_line, exp_color, stats_arg):
print("Based on stats: %s" % stats_arg)
print('Expect summary: "%s"; with color "%s"' % (exp_line, exp_color))
(line, color) = build_summary_stats_line(stats_arg)
print('Actually got: "%s"; with color "%s"' % (line, color))
assert line == exp_line
assert color == exp_color
def test_no_trailing_whitespace_after_inifile_word(testdir):
result = testdir.runpytest("")
assert "inifile:\n" in result.stdout.str()
testdir.makeini("[pytest]")
result = testdir.runpytest("")
assert "inifile: tox.ini\n" in result.stdout.str()
class TestProgress(object):
@pytest.fixture
def many_tests_files(self, testdir):
testdir.makepyfile(
test_bar="""
import pytest
@pytest.mark.parametrize('i', range(10))
def test_bar(i): pass
""",
test_foo="""
import pytest
@pytest.mark.parametrize('i', range(5))
def test_foo(i): pass
""",
test_foobar="""
import pytest
@pytest.mark.parametrize('i', range(5))
def test_foobar(i): pass
""",
)
def test_zero_tests_collected(self, testdir):
"""Some plugins (testmon for example) might issue pytest_runtest_logreport without any tests being
actually collected (#2971)."""
testdir.makeconftest(
"""
def pytest_collection_modifyitems(items, config):
from _pytest.runner import CollectReport
for node_id in ('nodeid1', 'nodeid2'):
rep = CollectReport(node_id, 'passed', None, None)
rep.when = 'passed'
rep.duration = 0.1
config.hook.pytest_runtest_logreport(report=rep)
"""
)
output = testdir.runpytest()
assert "ZeroDivisionError" not in output.stdout.str()
output.stdout.fnmatch_lines(["=* 2 passed in *="])
def test_normal(self, many_tests_files, testdir):
output = testdir.runpytest()
output.stdout.re_match_lines(
[
r"test_bar.py \.{10} \s+ \[ 50%\]",
r"test_foo.py \.{5} \s+ \[ 75%\]",
r"test_foobar.py \.{5} \s+ \[100%\]",
]
)
def test_verbose(self, many_tests_files, testdir):
output = testdir.runpytest("-v")
output.stdout.re_match_lines(
[
r"test_bar.py::test_bar\[0\] PASSED \s+ \[ 5%\]",
r"test_foo.py::test_foo\[4\] PASSED \s+ \[ 75%\]",
r"test_foobar.py::test_foobar\[4\] PASSED \s+ \[100%\]",
]
)
def test_xdist_normal(self, many_tests_files, testdir):
pytest.importorskip("xdist")
output = testdir.runpytest("-n2")
output.stdout.re_match_lines([r"\.{20} \s+ \[100%\]"])
def test_xdist_verbose(self, many_tests_files, testdir):
pytest.importorskip("xdist")
output = testdir.runpytest("-n2", "-v")
output.stdout.re_match_lines_random(
[
r"\[gw\d\] \[\s*\d+%\] PASSED test_bar.py::test_bar\[1\]",
r"\[gw\d\] \[\s*\d+%\] PASSED test_foo.py::test_foo\[1\]",
r"\[gw\d\] \[\s*\d+%\] PASSED test_foobar.py::test_foobar\[1\]",
]
)
def test_capture_no(self, many_tests_files, testdir):
output = testdir.runpytest("-s")
output.stdout.re_match_lines(
[r"test_bar.py \.{10}", r"test_foo.py \.{5}", r"test_foobar.py \.{5}"]
)
output = testdir.runpytest("--capture=no")
assert "%]" not in output.stdout.str()
class TestProgressWithTeardown(object):
"""Ensure we show the correct percentages for tests that fail during teardown (#3088)"""
@pytest.fixture
    def conftest_with_teardown_fixture(self, testdir):
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def fail_teardown():
yield
assert False
"""
)
@pytest.fixture
    def many_files(self, testdir, conftest_with_teardown_fixture):
testdir.makepyfile(
test_bar="""
import pytest
@pytest.mark.parametrize('i', range(5))
def test_bar(fail_teardown, i):
pass
""",
test_foo="""
import pytest
@pytest.mark.parametrize('i', range(15))
def test_foo(fail_teardown, i):
pass
""",
)
    def test_teardown_simple(self, testdir, conftest_with_teardown_fixture):
testdir.makepyfile(
"""
def test_foo(fail_teardown):
pass
"""
)
output = testdir.runpytest()
output.stdout.re_match_lines([r"test_teardown_simple.py \.E\s+\[100%\]"])
def test_teardown_with_test_also_failing(
        self, testdir, conftest_with_teardown_fixture
):
testdir.makepyfile(
"""
def test_foo(fail_teardown):
assert False
"""
)
output = testdir.runpytest()
output.stdout.re_match_lines(
[r"test_teardown_with_test_also_failing.py FE\s+\[100%\]"]
)
def test_teardown_many(self, testdir, many_files):
output = testdir.runpytest()
output.stdout.re_match_lines(
[r"test_bar.py (\.E){5}\s+\[ 25%\]", r"test_foo.py (\.E){15}\s+\[100%\]"]
)
def test_teardown_many_verbose(self, testdir, many_files):
output = testdir.runpytest("-v")
output.stdout.re_match_lines(
[
r"test_bar.py::test_bar\[0\] PASSED\s+\[ 5%\]",
r"test_bar.py::test_bar\[0\] ERROR\s+\[ 5%\]",
r"test_bar.py::test_bar\[4\] PASSED\s+\[ 25%\]",
r"test_bar.py::test_bar\[4\] ERROR\s+\[ 25%\]",
]
)
def test_xdist_normal(self, many_files, testdir):
pytest.importorskip("xdist")
output = testdir.runpytest("-n2")
output.stdout.re_match_lines([r"[\.E]{40} \s+ \[100%\]"])
| mpl-2.0 |
dago/ansible-modules-extras | notification/nexmo.py | 153 | 3819 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Martz <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: nexmo
short_description: Send a SMS via nexmo
description:
- Send a SMS message via nexmo
version_added: 1.6
author: "Matt Martz (@sivel)"
options:
api_key:
description:
- Nexmo API Key
required: true
api_secret:
description:
- Nexmo API Secret
required: true
src:
description:
- Nexmo Number to send from
required: true
dest:
description:
- Phone number(s) to send SMS message to
required: true
msg:
description:
- Message to text to send. Messages longer than 160 characters will be
split into multiple messages
required: true
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices:
- 'yes'
- 'no'
"""
EXAMPLES = """
- name: Send notification message via Nexmo
local_action:
module: nexmo
api_key: 640c8a53
api_secret: 0ce239a6
src: 12345678901
dest:
- 10987654321
- 16789012345
msg: "{{ inventory_hostname }} completed"
"""
import urllib
NEXMO_API = 'https://rest.nexmo.com/sms/json'
def send_msg(module):
failed = list()
responses = dict()
msg = {
'api_key': module.params.get('api_key'),
'api_secret': module.params.get('api_secret'),
'from': module.params.get('src'),
'text': module.params.get('msg')
}
for number in module.params.get('dest'):
msg['to'] = number
url = "%s?%s" % (NEXMO_API, urllib.urlencode(msg))
headers = dict(Accept='application/json')
response, info = fetch_url(module, url, headers=headers)
if info['status'] != 200:
failed.append(number)
responses[number] = dict(failed=True)
try:
responses[number] = json.load(response)
except:
failed.append(number)
responses[number] = dict(failed=True)
else:
for message in responses[number]['messages']:
if int(message['status']) != 0:
failed.append(number)
responses[number] = dict(failed=True, **responses[number])
if failed:
        msg = 'One or more messages failed to send'
else:
msg = ''
module.exit_json(failed=bool(failed), msg=msg, changed=False,
responses=responses)
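# Illustrative response shape only (the number and values are hypothetical;
# the keys follow the code above): a fully successful send leaves
#   responses['10987654321'] == {'messages': [{'status': '0', ...}]}
# while any non-zero per-message status adds that number to the failed list.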
def main():
argument_spec = url_argument_spec()
argument_spec.update(
dict(
api_key=dict(required=True, no_log=True),
api_secret=dict(required=True, no_log=True),
src=dict(required=True, type='int'),
dest=dict(required=True, type='list'),
msg=dict(required=True),
),
)
module = AnsibleModule(
argument_spec=argument_spec
)
send_msg(module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
camilonova/sentry | src/sentry/web/forms/__init__.py | 15 | 3194 | """
sentry.web.forms
~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django import forms
from django.utils.translation import ugettext_lazy as _
from sentry.constants import HTTP_METHODS
from sentry.models import User, Activity
from sentry.web.forms.fields import RadioFieldRenderer, ReadOnlyTextField
class ReplayForm(forms.Form):
url = forms.URLField(widget=forms.TextInput(attrs={'class': 'span8'}))
method = forms.ChoiceField(choices=((k, k) for k in HTTP_METHODS))
data = forms.CharField(required=False, widget=forms.Textarea(attrs={'class': 'span8'}))
headers = forms.CharField(required=False, widget=forms.Textarea(attrs={'class': 'span8'}))
def clean_headers(self):
value = self.cleaned_data.get('headers')
if not value:
return
return dict(line.split(': ', 1) for line in value.splitlines())
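    # Illustrative sketch (input values are hypothetical): clean_headers turns
    # a pasted blob such as "Accept: application/json\nX-Foo: bar" into
    # {'Accept': 'application/json', 'X-Foo': 'bar'}.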
class BaseUserForm(forms.ModelForm):
email = forms.EmailField()
first_name = forms.CharField(required=True, label=_('Name'))
class NewUserForm(BaseUserForm):
create_project = forms.BooleanField(required=False,
help_text=_("Create a project for this user."))
send_welcome_mail = forms.BooleanField(required=False,
help_text=_("Send this user a welcome email which will contain their generated password."))
class Meta:
fields = ('first_name', 'username', 'email')
model = User
class ChangeUserForm(BaseUserForm):
is_staff = forms.BooleanField(required=False, label=_('Admin'),
help_text=_("Designates whether this user can perform administrative functions."))
is_superuser = forms.BooleanField(required=False, label=_('Superuser'),
help_text=_('Designates whether this user has all permissions without '
'explicitly assigning them.'))
class Meta:
fields = ('first_name', 'username', 'email', 'is_active', 'is_staff',
'is_superuser')
model = User
def __init__(self, *args, **kwargs):
super(ChangeUserForm, self).__init__(*args, **kwargs)
self.user = kwargs['instance']
if self.user.is_managed:
self.fields['username'] = ReadOnlyTextField(label="Username (managed)")
def clean_username(self):
if self.user.is_managed:
return self.user.username
return self.cleaned_data['username']
class RemoveUserForm(forms.Form):
removal_type = forms.ChoiceField(choices=(
('1', _('Disable the account.')),
('2', _('Permanently remove the user and their data.')),
), widget=forms.RadioSelect(renderer=RadioFieldRenderer))
class TestEmailForm(forms.Form):
pass
class NewNoteForm(forms.Form):
text = forms.CharField(widget=forms.Textarea(attrs={'class': 'span8'}))
def save(self, event, user):
activity = Activity.objects.create(
group=event.group, event=event, project=event.project,
type=Activity.NOTE, user=user,
data=self.cleaned_data
)
activity.send_notification()
return activity
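# Hypothetical usage sketch (view wiring and field values are illustrative):
#   form = NewNoteForm({'text': 'triaged; looks like a driver issue'})
#   if form.is_valid():
#       form.save(event, request.user)  # records a NOTE Activity and notifies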
| bsd-3-clause |
mrgloom/DeepCL | cog-batteries/cog_fluent.py | 6 | 4556 | # Copyright Hugh Perkins 2014,2015 hughperkins at gmail
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import cog
def go(classname, ints = [], floats = []):
cog.outl( '// generated, using cog:' )
for thisint in ints:
cog.outl('int ' + thisint + ' = 0;')
for thisfloat in floats:
cog.outl('float ' + thisfloat + ' = 0;')
for thisint in ints:
thisintTitlecase = thisint[0].upper() + thisint[1:]
cog.outl(classname + ' ' + thisintTitlecase + '( int ' + '_' + thisint + ' ) {')
cog.outl(' this->' + thisint + ' = _' + thisint + ';')
cog.outl(' return *this;')
cog.outl('}')
for thisfloat in floats:
thisfloatTitlecase = thisfloat[0].upper() + thisfloat[1:]
cog.outl(classname + ' ' + thisfloatTitlecase + '( float ' + '_' + thisfloat + ' ) {')
cog.outl(' this->' + thisfloat + ' = _' + thisfloat + ';')
cog.outl(' return *this;')
cog.outl('}')
def go1b(classname, ints = [], floats = []):
cog.outl( '// generated, using cog:' )
for thisint in ints:
cog.outl('int ' + thisint + ';')
for thisfloat in floats:
cog.outl('float ' + thisfloat + ';')
cog.outl(classname + '() {')
for thisint in ints:
cog.outl(' ' + thisint + ' = 0;')
for thisfloat in floats:
cog.outl(' ' + thisfloat + ' = 0;')
cog.outl('}')
for thisint in ints:
thisintTitlecase = thisint[0].upper() + thisint[1:]
cog.outl(classname + ' ' + thisintTitlecase + '( int ' + '_' + thisint + ' ) {')
cog.outl(' this->' + thisint + ' = _' + thisint + ';')
cog.outl(' return *this;')
cog.outl('}')
for thisfloat in floats:
thisfloatTitlecase = thisfloat[0].upper() + thisfloat[1:]
cog.outl(classname + ' ' + thisfloatTitlecase + '( float ' + '_' + thisfloat + ' ) {')
cog.outl(' this->' + thisfloat + ' = _' + thisfloat + ';')
cog.outl(' return *this;')
cog.outl('}')
def gov2(classname, ints = [], floats = []):
cog.outl( '// generated, using cog:' )
for thisint in ints:
cog.outl('int _' + thisint + ' = 0;')
for thisfloat in floats:
cog.outl('float _' + thisfloat + ' = 0;')
for thisint in ints:
thisintTitlecase = thisint[0].upper() + thisint[1:]
cog.outl(classname + ' ' + thisint + '( int ' + '_' + thisint + ' ) {')
cog.outl(' this->_' + thisint + ' = _' + thisint + ';')
cog.outl(' return *this;')
cog.outl('}')
for thisfloat in floats:
thisfloatTitlecase = thisfloat[0].upper() + thisfloat[1:]
cog.outl(classname + ' ' + thisfloat + '( float ' + '_' + thisfloat + ' ) {')
cog.outl(' this->_' + thisfloat + ' = _' + thisfloat + ';')
cog.outl(' return *this;')
cog.outl('}')
def gov3(classname, ints = [], floats = [], strings = []):
cog.outl( '// generated, using cog:' )
for thisint in ints:
cog.outl('int _' + thisint + ';')
for thisfloat in floats:
cog.outl('float _' + thisfloat + ';')
for thisstring in strings:
cog.outl('std::string _' + thisstring + ';')
cog.outl(classname + '() {')
for thisint in ints:
cog.outl(' _' + thisint + ' = 0;')
for thisfloat in floats:
cog.outl(' _' + thisfloat + ' = 0;')
for thisstring in strings:
cog.outl(' _' + thisstring + ' = "";')
cog.outl('}')
for thisint in ints:
thisintTitlecase = thisint[0].upper() + thisint[1:]
cog.outl(classname + ' ' + thisint + '( int ' + '_' + thisint + ' ) {')
cog.outl(' this->_' + thisint + ' = _' + thisint + ';')
cog.outl(' return *this;')
cog.outl('}')
for thisfloat in floats:
thisfloatTitlecase = thisfloat[0].upper() + thisfloat[1:]
cog.outl(classname + ' ' + thisfloat + '( float ' + '_' + thisfloat + ' ) {')
cog.outl(' this->_' + thisfloat + ' = _' + thisfloat + ';')
cog.outl(' return *this;')
cog.outl('}')
for thisstring in strings:
thisstringTitlecase = thisstring[0].upper() + thisstring[1:]
cog.outl(classname + ' ' + thisstring + '( std::string ' + '_' + thisstring + ' ) {')
cog.outl(' this->_' + thisstring + ' = _' + thisstring + ';')
cog.outl(' return *this;')
cog.outl('}')
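# Sketch of what go() emits (class and field names here are hypothetical):
# calling go('NetConfig', ints=['batchSize'], floats=['learningRate']) from a
# cog block writes C++ fluent setters along these lines:
#
# // generated, using cog:
# int batchSize = 0;
# float learningRate = 0;
# NetConfig BatchSize( int _batchSize ) {
#     this->batchSize = _batchSize;
#     return *this;
# }
# NetConfig LearningRate( float _learningRate ) {
#     this->learningRate = _learningRate;
#     return *this;
# }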
| mpl-2.0 |
gmalmquist/pants | contrib/cpp/src/python/pants/contrib/cpp/targets/cpp_binary.py | 14 | 1101 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.contrib.cpp.targets.cpp_target import CppTarget
class CppBinary(CppTarget):
"""A C++ binary."""
def __init__(self,
libraries=None,
*args,
**kwargs):
"""
:param libraries: Libraries that this target depends on that are not pants targets.
For example, 'm' or 'rt' that are expected to be installed on the local system.
:type libraries: List of libraries to link against.
"""
payload = Payload()
payload.add_fields({
'libraries': PrimitiveField(libraries)
})
super(CppBinary, self).__init__(payload=payload, **kwargs)
@property
def libraries(self):
return self.payload.get_field_value('libraries')
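# Hypothetical BUILD usage (target and source names are illustrative; 'm' and
# 'rt' are the kind of system libraries the docstring above refers to):
#
#   cpp_binary(
#     name='main',
#     sources=['main.cpp'],
#     libraries=['m', 'rt'],
#   )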
| apache-2.0 |
jhsenjaliya/incubator-airflow | airflow/api/common/experimental/trigger_dag.py | 9 | 1706 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
from airflow.exceptions import AirflowException
from airflow.models import DagRun, DagBag
from airflow.utils.state import State
def trigger_dag(dag_id, run_id=None, conf=None, execution_date=None):
dagbag = DagBag()
if dag_id not in dagbag.dags:
raise AirflowException("Dag id {} not found".format(dag_id))
dag = dagbag.get_dag(dag_id)
if not execution_date:
execution_date = datetime.datetime.utcnow()
assert isinstance(execution_date, datetime.datetime)
execution_date = execution_date.replace(microsecond=0)
if not run_id:
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
raise AirflowException("Run id {} already exists for dag id {}".format(
run_id,
dag_id
))
run_conf = None
if conf:
run_conf = json.loads(conf)
trigger = dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
return trigger
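# Minimal usage sketch (dag id and conf payload are illustrative; assumes the
# dag is loadable by DagBag):
#   trigger_dag('example_dag', conf='{"key": "value"}')
# An unknown dag id, or a run_id that already exists, raises AirflowException
# as implemented above.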
| apache-2.0 |
Pirata-Repository/Pirata | plugin.video.movie25/resources/libs/documentaries/documentary.py | 2 | 18086 | import urllib,urllib2,re,cookielib,urlresolver,sys,os
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
from resources.libs import main
#Mash Up - by Mash2k3 2012.
from t0mm0.common.addon import Addon
from resources.universal import playbackengine, watchhistory
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
addon = Addon('plugin.video.movie25', sys.argv)
art = main.art
wh = watchhistory.WatchHistory('plugin.video.movie25')
def LISTDOC(murl):
if murl=='doc1':
main.GA("Documantary","DhHome")
#main.addDir('[COLOR red]Search[/COLOR]','search',89,'')
main.addDir('[COLOR red]Popular[/COLOR]','http://documentaryheaven.com/popular/',89,'')
main.addDir('[COLOR red]Recent[/COLOR]','http://documentaryheaven.com/all/',87,'')
url='http://documentaryheaven.com/'
link=main.OPENURL(url)
match=re.compile('<li class=".+?"><a href="(.+?)" title=".+?">(.+?)</a> </li>').findall(link)
for url, name in match:
main.addDir(name,'http://documentaryheaven.com'+url,87,'')
elif murl=='doc2':
main.GA("Documantary","TDFHome")
main.addDir('[COLOR red]Recent[/COLOR]','http://topdocumentaryfilms.com/all/',87,'')
main.addDir('[COLOR red]Recommended[/COLOR]','rec',89,'')
url='http://topdocumentaryfilms.com/'
link=main.OPENURL(url)
match=re.compile('href="(.+?)" title=".+?">(.+?)</a>.+?</li>').findall(link)
for url, name in match:
main.addDir(name,url,87,'')
elif murl=='doc3':
main.GA("Documantary","DLHome")
main.addDir('[COLOR red]Latest[/COLOR]','http://www.documentary-log.com/',87,'')
main.addDir("[COLOR red]Editor's Picks[/COLOR]",'http://www.documentary-log.com/category/editors-picks/',87,'')
url='http://www.documentary-log.com/'
link=main.OPENURL(url)
match=re.compile('<li class="cat-item cat-item-.+?"><a href="(.+?)" title="(.+?)">(.+?)</a> ([^<]+)').findall(link)
for url, desc, name, leng in match:
main.addDirc(name+' '+leng,url,87,'',desc,'','','','')
def LISTDOC2(murl):
match=re.compile('documentaryheaven').findall(murl)
if (len(match)>0):
main.GA("DhHome","Dh-List")
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match= re.compile('href="([^<]+)" rel="bookmark" title=".+?" rel=".+?"><img class=".+?" src="(.+?)" alt="([^<]+)"/></a></div><div class=".+?">(.+?)</div>').findall(link)
if (len(match)==0):
match=re.compile('href="(.+?)" title="" rel=".+?"><img class=".+?" src="(.+?)" alt="(.+?)".+?</a>\n </div> \n <div id="postDis">\n \t(.+?)[...]').findall(link)
for url,thumb,name,desc in match:
main.addPlayMs(name,url,88,thumb,desc,'','','','')
paginate=re.compile('<a href="([^<]+)" >Next →</a>').findall(link)
if (len(paginate)>0):
main.addDir('[COLOR blue]Next Page[/COLOR]',paginate[0],87,"%s/art/next2.png"%selfAddon.getAddonInfo("path"))
match2=re.compile('topdocumentaryfilms').findall(murl)
if (len(match2)>0):
i=0
main.GA("TDFHome","TDF-List")
link=main.OPENURL(murl)
link=link.replace('\n','')
url=re.compile('href="([^<]+)">Watch now').findall(link)
match=re.compile('href=".+?".+?src="(.+?)".+?alt="(.+?)"').findall(link)
desc=re.compile('>([^<]+)</p><p><strong>').findall(link)
for thumb,name in match:
main.addPlayMs(name,url[i],88,thumb,desc[i],'','','','')
i=i+1
paginate=re.compile('</a>.+?href="([^<]+)">Next</a></div>').findall(link)
if (len(paginate)>0):
for purl in paginate:
main.addDir('[COLOR blue]Next[/COLOR]',purl,87,"%s/art/next2.png"%selfAddon.getAddonInfo("path"))
match3=re.compile('documentary-log').findall(murl)
if (len(match3)>0):
main.GA("DLHome","DL-List")
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('<div class="clear">.+?<a href="(.+?)" title=".+?"> <img src="(.+?)" alt="(.+?)" class=".+?" /> </a> <p>(.+?)<a').findall(link)
for url,thumb,name,desc in match:
main.addPlayMs(name,url,88,thumb,desc,'','','','')
paginate=re.compile("<a href='([^<]+)' class='nextpostslink'>").findall(link)
if (len(paginate)>0):
for purl in paginate:
main.addDir('[COLOR blue]Next[/COLOR]',purl,87,"%s/art/next2.png"%selfAddon.getAddonInfo("path"))
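# LISTDOC2 above dispatches on the hosting site found in the URL
# (documentaryheaven / topdocumentaryfilms / documentary-log) and scrapes each
# with site-specific regexes; LISTDOCPOP and LINKDOC below follow the same
# per-site pattern for the popular listings and the playable links.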
def LISTDOCPOP(murl):
if murl=='search':
keyb = xbmc.Keyboard('', 'Search Documentaries')
keyb.doModal()
if (keyb.isConfirmed()):
search = keyb.getText()
encode=urllib.quote(search)
surl='http://documentaryheaven.com/find/?q='+encode
link=main.OPENURL(surl)
match=re.compile('<a href="(.+?)" title="" rel=".+?"><img class=".+?" src="(.+?)" alt="(.+?)".+?</a>\n </div> \n <div id="postDis">\n \t(.+?)[...]').findall(link)
if (len(match)==0):
match=re.compile('href="(.+?)" title="" rel=".+?"><img class=".+?" src="(.+?)" alt="(.+?)".+?</a>\n </div> \n <div id="postDis">\n \t(.+?)[...]').findall(link)
for url,thumb,name,desc in match:
main.addPlayMs(name,url,88,thumb,desc,'','','','')
paginate=re.compile("<span class=\'page current\'>1</span></li><li><a href=\'http://documentaryheaven.com/page/2/.?s=.+?\'").findall(link)
if (len(paginate)>0):
main.addDir('[COLOR blue]Page 2[/COLOR]','http://documentaryheaven.com/page/2/?s='+encode,9,"%s/art/next2.png"%selfAddon.getAddonInfo("path"))
elif murl=='rec':
rurl='http://topdocumentaryfilms.com/'
link=main.OPENURL(rurl)
match=re.compile('href="([^<]+)">([^<]+)</a></li><li><a').findall(link)
for url,name in match:
main.addPlayMs(name,url,88,'','','','','','')
else:
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('<a href=\'(.+?)/\'><img src=\'(.+?)\'/></a></div><div class=".+?"><div class=".+?"><a href=\'.+?/\'>(.+?)</a></div><div class=".+?"><p>(.+?)</div>').findall(link)
for url,thumb,name,desc in match:
main.addPlayMs(name,url,88,thumb,desc,'','','','')
def LINKDOC(mname,murl,thumb):
ok=True
match=re.compile('documentaryheaven').findall(murl)
if (len(match)>0):
main.GA("DocumentaryHeaven","Watched")
ok=True
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
link=main.OPENURL(murl)
match=re.compile('<iframe frameborder=".+?" width=".+?" height=".+?" src="http:(.+?)">').findall(link)
if (len(match)==0):
match=re.compile('<iframe width=".+?" height=".+?" src="http:(.+?)" frameborder=".+?" allowfullscreen>').findall(link)
if (len(match)==0):
match=re.compile('<embe.+?src="http:([^<]+)".+?></embed>').findall(link)
for url in match:
url='http:'+url
match4=re.compile('vimeo').findall(url)
if (len(match4)>0):
url=url.replace('?title=0&byline=0&portrait=0','')
url=url.replace('http://player.vimeo.com/video','http://vimeo.com')
match5=re.compile('dailymotion').findall(url)
if (len(match5)>0):
url=url.replace('http://www.dailymotion.com/embed/video','http://www.dailymotion.com/video')
match8=re.compile('youtube').findall(url)
if (len(match8)>0):
match2=re.compile('http://www.youtube.com/embed/([^<]+)').findall(url)
url='http://www.youtube.com/watch?v='+match2[0]
if (len(match)==0):
match=re.compile('<iframe src="http:(.+?)" width=".+?" height=".+?" frameborder=".+?".+?</iframe>').findall(link)
for url in match:
url='http:'+url
match4=re.compile('vimeo').findall(url)
match6=re.compile('putlocker').findall(url)
if (len(match4)>0):
url=url.replace('?title=0&byline=0&portrait=0','')
url=url.replace('http://player.vimeo.com/video','http://vimeo.com')
elif (len(match6)>0):
url=url
else:
match2=re.compile('http://www.youtube.com/embed/([^<]+)').findall(url)
if (len(match2)==0):
match2=re.compile('http://www.youtube.com/p/([^<]+).?hl=.+?').findall(link)
url='http://www.youtube.com/watch?v='+match2[0]
listitem = xbmcgui.ListItem(mname)
try:
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,3000)")
stream_url = main.resolve_url(str(url))
if(stream_url == False):
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Cannot Be Resolved,5000)")
return
# play with bookmark
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='', title=mname,season='', episode='', year='',img=thumb,infolabels='', watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id='')
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
wh.add_item(mname+' '+'[COLOR green]Doc-Heaven[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
player.KeepAlive()
return ok
except:
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Not Playable,5000)")
return ok
match2=re.compile('topdocumentaryfilms').findall(murl)
if (len(match2)>0):
sources=[]
main.GA("TopDocumentaryFilms","Watched")
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
link=main.OPENURL(murl)
ok=True
link=link.replace('src="http://cdn.tdfimg.com/wp-content/uploads','')
match=re.compile('src="(.+?)"').findall(link)
for url in match:
match4=re.compile('vimeo').findall(url)
if (len(match4)>0):
url=url.replace('?title=0&byline=0&portrait=0','')
url=url.replace('http://player.vimeo.com/video','http://vimeo.com')
match5=re.compile('dailymotion').findall(url)
if (len(match5)>0):
url=url.replace('http://www.dailymotion.com/embed/video','http://www.dailymotion.com/video')
match7=re.compile('google').findall(url)
if (len(match7)>0):
xbmc.executebuiltin("XBMC.Notification(Sorry!,link down,3000)")
return
match6=re.compile('youtube').findall(url)
if (len(match6)>0):
match=re.compile('http://www.youtube.com/embed/n_(.+?).?rel=0&iv_load_policy=3').findall(url)
if (len(match)>0):
url='http://www.youtube.com/watch?feature=player_embedded&v=n_'+match[0]
else:
match=re.compile('http://www.youtube.com/embed/(.+?).?rel=0&iv_load_policy=3').findall(url)
if (len(match)>0):
url='http://www.youtube.com/watch?feature=player_embedded&v='+match[0]
match2=re.compile('videoseries').findall(url)
if (len(match2)>0):
link2=main.OPENURL(url)
match2=re.compile('href="/watch.?v=(.+?)"').findall(link2)
match3=re.compile("http://www.youtube.com/embed/videoseries.?list=(.+?)&.+?load_policy=.+?").findall(url)
print match3[0]
try:
url='http://www.youtube.com/watch?v='+match2[0]
except:
namelist=[]
urllist=[]
link=main.OPENURL('https://gdata.youtube.com/feeds/api/playlists/'+match3[0]+'?start-index=1&max-results=50')
match=re.compile("href='https://m.youtube.com/details.?v=(.+?)'/.+?<media\:descriptio[^>]+>([^<]+)</media\:description>.+?<media\:thumbnail url='([^']+)'.+?<media:title type='plain'>(.+?)/media:title>").findall(link)
for url,desc,thumb,name in match:
name=name.replace('<','')
namelist.append(name)
urllist.append(url)
dialog = xbmcgui.Dialog()
answer =dialog.select("Playlist", namelist)
url='http://www.youtube.com/watch?v='+urllist[int(answer)]
else:
url=url.replace('?rel=0','')
try:
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,3000)")
stream_url = main.resolve_url(str(url))
if(stream_url == False):
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Cannot Be Resolved,5000)")
return
# play with bookmark
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='', title=mname,season='', episode='', year='',img=thumb,infolabels='', watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id='')
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
wh.add_item(mname+' '+'[COLOR green]Top-Doc[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
player.KeepAlive()
return ok
except:
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Not Playable,5000)")
return ok
match3=re.compile('documentary-log.com').findall(murl)
if (len(match3)>0):
main.GA("Documentary-Log","Watched")
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
link=main.OPENURL(murl)
link=link.replace('src="http://cdn.tdfimg.com/wp-content/uploads','')
match=re.compile('src="(.+?)" .+?></iframe>').findall(link)
if (len(match)==0):
link=link.replace('src="http://www.documentary-log.com/wp-cont','')
match=re.compile('src="(.+?)" .+?/>').findall(link)
for url in match:
match4=re.compile('vimeo').findall(url)
if (len(match4)>0):
url=url.replace('?title=0&byline=0&portrait=0','')
url=url.replace('http://player.vimeo.com/video','http://vimeo.com')
match5=re.compile('dailymotion').findall(url)
if (len(match5)>0):
url=url.replace('http://www.dailymotion.com/embed/video','http://www.dailymotion.com/video')
match7=re.compile('google').findall(url)
if (len(match7)>0):
xbmc.executebuiltin("XBMC.Notification(Sorry!,link down,3000)")
return
match6=re.compile('youtube').findall(url)
if (len(match6)>0):
match=re.compile('http://www.youtube.com/embed/n_(.+?).?rel=0&iv_load_policy=3').findall(url)
if (len(match)>0):
url='http://www.youtube.com/watch?feature=player_embedded&v=n_'+match[0]
else:
match=re.compile('http://www.youtube.com/embed/(.+?).?rel=0&iv_load_policy=3').findall(url)
if (len(match)>0):
url='http://www.youtube.com/watch?feature=player_embedded&v='+match[0]
match2=re.compile('videoseries').findall(url)
if (len(match2)>0):
link2=main.OPENURL(url)
match2=re.compile('href="/watch.?v=(.+?)"').findall(link2)
match3=re.compile("http://www.youtube.com/embed/videoseries.?list=(.+?)&iv_load_policy=3").findall(url)
print match3
url='http://www.youtube.com/watch?v='+match2[0]
else:
url=url.replace('?rel=0','')
print "vlink " +str(url)
        stream_url = False
        try:
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,3000)")
stream_url = main.resolve_url(str(url))
if(stream_url == False):
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Cannot Be Resolved,5000)")
return
# play with bookmark
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='', title=mname,season='', episode='', year='',img=thumb,infolabels='', watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id='')
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
wh.add_item(mname+' '+'[COLOR green]Doc-Log[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
player.KeepAlive()
return ok
except Exception, e:
if stream_url != False:
main.ErrorReport(e)
else:
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Not Playable,5000)")
return ok
| gpl-2.0 |
Polyconseil/tornado | tornado/test/template_test.py | 63 | 18390 | from __future__ import absolute_import, division, print_function, with_statement
import os
import sys
import traceback
from tornado.escape import utf8, native_str, to_unicode
from tornado.template import Template, DictLoader, ParseError, Loader
from tornado.test.util import unittest
from tornado.util import u, ObjectDict, unicode_type
class TemplateTest(unittest.TestCase):
def test_simple(self):
template = Template("Hello {{ name }}!")
self.assertEqual(template.generate(name="Ben"),
b"Hello Ben!")
def test_bytes(self):
template = Template("Hello {{ name }}!")
self.assertEqual(template.generate(name=utf8("Ben")),
b"Hello Ben!")
def test_expressions(self):
template = Template("2 + 2 = {{ 2 + 2 }}")
self.assertEqual(template.generate(), b"2 + 2 = 4")
def test_comment(self):
template = Template("Hello{# TODO i18n #} {{ name }}!")
self.assertEqual(template.generate(name=utf8("Ben")),
b"Hello Ben!")
def test_include(self):
loader = DictLoader({
"index.html": '{% include "header.html" %}\nbody text',
"header.html": "header text",
})
self.assertEqual(loader.load("index.html").generate(),
b"header text\nbody text")
def test_extends(self):
loader = DictLoader({
"base.html": """\
<title>{% block title %}default title{% end %}</title>
<body>{% block body %}default body{% end %}</body>
""",
"page.html": """\
{% extends "base.html" %}
{% block title %}page title{% end %}
{% block body %}page body{% end %}
""",
})
self.assertEqual(loader.load("page.html").generate(),
b"<title>page title</title>\n<body>page body</body>\n")
def test_relative_load(self):
loader = DictLoader({
"a/1.html": "{% include '2.html' %}",
"a/2.html": "{% include '../b/3.html' %}",
"b/3.html": "ok",
})
self.assertEqual(loader.load("a/1.html").generate(),
b"ok")
def test_escaping(self):
self.assertRaises(ParseError, lambda: Template("{{"))
self.assertRaises(ParseError, lambda: Template("{%"))
self.assertEqual(Template("{{!").generate(), b"{{")
self.assertEqual(Template("{%!").generate(), b"{%")
self.assertEqual(Template("{{ 'expr' }} {{!jquery expr}}").generate(),
b"expr {{jquery expr}}")
def test_unicode_template(self):
template = Template(utf8(u("\u00e9")))
self.assertEqual(template.generate(), utf8(u("\u00e9")))
def test_unicode_literal_expression(self):
# Unicode literals should be usable in templates. Note that this
# test simulates unicode characters appearing directly in the
# template file (with utf8 encoding), i.e. \u escapes would not
# be used in the template file itself.
if str is unicode_type:
# python 3 needs a different version of this test since
# 2to3 doesn't run on template internals
template = Template(utf8(u('{{ "\u00e9" }}')))
else:
template = Template(utf8(u('{{ u"\u00e9" }}')))
self.assertEqual(template.generate(), utf8(u("\u00e9")))
def test_custom_namespace(self):
loader = DictLoader({"test.html": "{{ inc(5) }}"}, namespace={"inc": lambda x: x + 1})
self.assertEqual(loader.load("test.html").generate(), b"6")
def test_apply(self):
def upper(s):
return s.upper()
template = Template(utf8("{% apply upper %}foo{% end %}"))
self.assertEqual(template.generate(upper=upper), b"FOO")
def test_unicode_apply(self):
def upper(s):
return to_unicode(s).upper()
template = Template(utf8(u("{% apply upper %}foo \u00e9{% end %}")))
self.assertEqual(template.generate(upper=upper), utf8(u("FOO \u00c9")))
def test_bytes_apply(self):
def upper(s):
return utf8(to_unicode(s).upper())
template = Template(utf8(u("{% apply upper %}foo \u00e9{% end %}")))
self.assertEqual(template.generate(upper=upper), utf8(u("FOO \u00c9")))
def test_if(self):
template = Template(utf8("{% if x > 4 %}yes{% else %}no{% end %}"))
self.assertEqual(template.generate(x=5), b"yes")
self.assertEqual(template.generate(x=3), b"no")
def test_if_empty_body(self):
template = Template(utf8("{% if True %}{% else %}{% end %}"))
self.assertEqual(template.generate(), b"")
def test_try(self):
template = Template(utf8("""{% try %}
try{% set y = 1/x %}
{% except %}-except
{% else %}-else
{% finally %}-finally
{% end %}"""))
self.assertEqual(template.generate(x=1), b"\ntry\n-else\n-finally\n")
self.assertEqual(template.generate(x=0), b"\ntry-except\n-finally\n")
def test_comment_directive(self):
template = Template(utf8("{% comment blah blah %}foo"))
self.assertEqual(template.generate(), b"foo")
def test_break_continue(self):
template = Template(utf8("""\
{% for i in range(10) %}
{% if i == 2 %}
{% continue %}
{% end %}
{{ i }}
{% if i == 6 %}
{% break %}
{% end %}
{% end %}"""))
result = template.generate()
# remove extraneous whitespace
result = b''.join(result.split())
self.assertEqual(result, b"013456")
def test_break_outside_loop(self):
try:
Template(utf8("{% break %}"))
raise Exception("Did not get expected exception")
except ParseError:
pass
def test_break_in_apply(self):
# This test verifies current behavior, although of course it would
# be nice if apply didn't cause seemingly unrelated breakage
try:
Template(utf8("{% for i in [] %}{% apply foo %}{% break %}{% end %}{% end %}"))
raise Exception("Did not get expected exception")
except ParseError:
pass
@unittest.skipIf(sys.version_info >= division.getMandatoryRelease(),
'no testable future imports')
def test_no_inherit_future(self):
# This file has from __future__ import division...
self.assertEqual(1 / 2, 0.5)
# ...but the template doesn't
template = Template('{{ 1 / 2 }}')
self.assertEqual(template.generate(), '0')
class StackTraceTest(unittest.TestCase):
def test_error_line_number_expression(self):
loader = DictLoader({"test.html": """one
two{{1/0}}
three
"""})
try:
loader.load("test.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# test.html:2" in traceback.format_exc())
def test_error_line_number_directive(self):
loader = DictLoader({"test.html": """one
two{%if 1/0%}
three{%end%}
"""})
try:
loader.load("test.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# test.html:2" in traceback.format_exc())
def test_error_line_number_module(self):
loader = DictLoader({
"base.html": "{% module Template('sub.html') %}",
"sub.html": "{{1/0}}",
}, namespace={"_tt_modules": ObjectDict({"Template": lambda path, **kwargs: loader.load(path).generate(**kwargs)})})
try:
loader.load("base.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
exc_stack = traceback.format_exc()
self.assertTrue('# base.html:1' in exc_stack)
self.assertTrue('# sub.html:1' in exc_stack)
def test_error_line_number_include(self):
loader = DictLoader({
"base.html": "{% include 'sub.html' %}",
"sub.html": "{{1/0}}",
})
try:
loader.load("base.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# sub.html:1 (via base.html:1)" in
traceback.format_exc())
def test_error_line_number_extends_base_error(self):
loader = DictLoader({
"base.html": "{{1/0}}",
"sub.html": "{% extends 'base.html' %}",
})
try:
loader.load("sub.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
exc_stack = traceback.format_exc()
self.assertTrue("# base.html:1" in exc_stack)
def test_error_line_number_extends_sub_error(self):
loader = DictLoader({
"base.html": "{% block 'block' %}{% end %}",
"sub.html": """
{% extends 'base.html' %}
{% block 'block' %}
{{1/0}}
{% end %}
"""})
try:
loader.load("sub.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# sub.html:4 (via base.html:1)" in
traceback.format_exc())
def test_multi_includes(self):
loader = DictLoader({
"a.html": "{% include 'b.html' %}",
"b.html": "{% include 'c.html' %}",
"c.html": "{{1/0}}",
})
try:
loader.load("a.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# c.html:1 (via b.html:1, a.html:1)" in
traceback.format_exc())
class ParseErrorDetailTest(unittest.TestCase):
def test_details(self):
loader = DictLoader({
"foo.html": "\n\n{{",
})
with self.assertRaises(ParseError) as cm:
loader.load("foo.html")
self.assertEqual("Missing end expression }} at foo.html:3",
str(cm.exception))
self.assertEqual("foo.html", cm.exception.filename)
self.assertEqual(3, cm.exception.lineno)
class AutoEscapeTest(unittest.TestCase):
def setUp(self):
self.templates = {
"escaped.html": "{% autoescape xhtml_escape %}{{ name }}",
"unescaped.html": "{% autoescape None %}{{ name }}",
"default.html": "{{ name }}",
"include.html": """\
escaped: {% include 'escaped.html' %}
unescaped: {% include 'unescaped.html' %}
default: {% include 'default.html' %}
""",
"escaped_block.html": """\
{% autoescape xhtml_escape %}\
{% block name %}base: {{ name }}{% end %}""",
"unescaped_block.html": """\
{% autoescape None %}\
{% block name %}base: {{ name }}{% end %}""",
# Extend a base template with different autoescape policy,
# with and without overriding the base's blocks
"escaped_extends_unescaped.html": """\
{% autoescape xhtml_escape %}\
{% extends "unescaped_block.html" %}""",
"escaped_overrides_unescaped.html": """\
{% autoescape xhtml_escape %}\
{% extends "unescaped_block.html" %}\
{% block name %}extended: {{ name }}{% end %}""",
"unescaped_extends_escaped.html": """\
{% autoescape None %}\
{% extends "escaped_block.html" %}""",
"unescaped_overrides_escaped.html": """\
{% autoescape None %}\
{% extends "escaped_block.html" %}\
{% block name %}extended: {{ name }}{% end %}""",
"raw_expression.html": """\
{% autoescape xhtml_escape %}\
expr: {{ name }}
raw: {% raw name %}""",
}
def test_default_off(self):
loader = DictLoader(self.templates, autoescape=None)
name = "Bobby <table>s"
self.assertEqual(loader.load("escaped.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("unescaped.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("default.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("include.html").generate(name=name),
b"escaped: Bobby <table>s\n"
b"unescaped: Bobby <table>s\n"
b"default: Bobby <table>s\n")
def test_default_on(self):
loader = DictLoader(self.templates, autoescape="xhtml_escape")
name = "Bobby <table>s"
self.assertEqual(loader.load("escaped.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("unescaped.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("default.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("include.html").generate(name=name),
b"escaped: Bobby <table>s\n"
b"unescaped: Bobby <table>s\n"
b"default: Bobby <table>s\n")
def test_unextended_block(self):
loader = DictLoader(self.templates)
name = "<script>"
self.assertEqual(loader.load("escaped_block.html").generate(name=name),
b"base: <script>")
self.assertEqual(loader.load("unescaped_block.html").generate(name=name),
b"base: <script>")
def test_extended_block(self):
loader = DictLoader(self.templates)
def render(name):
return loader.load(name).generate(name="<script>")
self.assertEqual(render("escaped_extends_unescaped.html"),
b"base: <script>")
self.assertEqual(render("escaped_overrides_unescaped.html"),
b"extended: <script>")
self.assertEqual(render("unescaped_extends_escaped.html"),
b"base: <script>")
self.assertEqual(render("unescaped_overrides_escaped.html"),
b"extended: <script>")
def test_raw_expression(self):
loader = DictLoader(self.templates)
def render(name):
return loader.load(name).generate(name='<>&"')
self.assertEqual(render("raw_expression.html"),
b"expr: <>&"\n"
b"raw: <>&\"")
def test_custom_escape(self):
loader = DictLoader({"foo.py":
"{% autoescape py_escape %}s = {{ name }}\n"})
def py_escape(s):
self.assertEqual(type(s), bytes)
return repr(native_str(s))
def render(template, name):
return loader.load(template).generate(py_escape=py_escape,
name=name)
self.assertEqual(render("foo.py", "<html>"),
b"s = '<html>'\n")
self.assertEqual(render("foo.py", "';sys.exit()"),
b"""s = "';sys.exit()"\n""")
self.assertEqual(render("foo.py", ["not a string"]),
b"""s = "['not a string']"\n""")
def test_manual_minimize_whitespace(self):
# Whitespace including newlines is allowed within template tags
# and directives, and this is one way to avoid long lines while
# keeping extra whitespace out of the rendered output.
loader = DictLoader({'foo.txt': """\
{% for i in items
%}{% if i > 0 %}, {% end %}{#
#}{{i
}}{% end
%}""",
})
self.assertEqual(loader.load("foo.txt").generate(items=range(5)),
b"0, 1, 2, 3, 4")
def test_whitespace_by_filename(self):
# Default whitespace handling depends on the template filename.
loader = DictLoader({
"foo.html": " \n\t\n asdf\t ",
"bar.js": " \n\n\n\t qwer ",
"baz.txt": "\t zxcv\n\n",
"include.html": " {% include baz.txt %} \n ",
"include.txt": "\t\t{% include foo.html %} ",
})
# HTML and JS files have whitespace compressed by default.
self.assertEqual(loader.load("foo.html").generate(),
b"\nasdf ")
self.assertEqual(loader.load("bar.js").generate(),
b"\nqwer ")
# TXT files do not.
self.assertEqual(loader.load("baz.txt").generate(),
b"\t zxcv\n\n")
# Each file maintains its own status even when included in
# a file of the other type.
self.assertEqual(loader.load("include.html").generate(),
b" \t zxcv\n\n\n")
self.assertEqual(loader.load("include.txt").generate(),
b"\t\t\nasdf ")
def test_whitespace_by_loader(self):
templates = {
"foo.html": "\t\tfoo\n\n",
"bar.txt": "\t\tbar\n\n",
}
loader = DictLoader(templates, whitespace='all')
self.assertEqual(loader.load("foo.html").generate(), b"\t\tfoo\n\n")
self.assertEqual(loader.load("bar.txt").generate(), b"\t\tbar\n\n")
loader = DictLoader(templates, whitespace='single')
self.assertEqual(loader.load("foo.html").generate(), b" foo\n")
self.assertEqual(loader.load("bar.txt").generate(), b" bar\n")
loader = DictLoader(templates, whitespace='oneline')
self.assertEqual(loader.load("foo.html").generate(), b" foo ")
self.assertEqual(loader.load("bar.txt").generate(), b" bar ")
def test_whitespace_directive(self):
loader = DictLoader({
"foo.html": """\
{% whitespace oneline %}
{% for i in range(3) %}
{{ i }}
{% end %}
{% whitespace all %}
pre\tformatted
"""})
self.assertEqual(loader.load("foo.html").generate(),
b" 0 1 2 \n pre\tformatted\n")
class TemplateLoaderTest(unittest.TestCase):
def setUp(self):
self.loader = Loader(os.path.join(os.path.dirname(__file__), "templates"))
def test_utf8_in_file(self):
tmpl = self.loader.load("utf8.html")
result = tmpl.generate()
self.assertEqual(to_unicode(result).strip(), u("H\u00e9llo"))
| apache-2.0 |
fighterCui/L4ReFiascoOC | l4/pkg/python/contrib/Lib/test/test_startfile.py | 77 | 1201 | # Ridiculously simple test of the os.startfile function for Windows.
#
# empty.vbs is an empty file (except for a comment), which does
# nothing when run with cscript or wscript.
#
# A possible improvement would be to have empty.vbs do something that
# we can detect here, to make sure that not only the os.startfile()
# call succeeded, but also that the script actually has run.
import unittest
from test import test_support
# use this form so that the test is skipped when startfile is not available:
from os import startfile, path
class TestCase(unittest.TestCase):
def test_nonexisting(self):
self.assertRaises(OSError, startfile, "nonexisting.vbs")
def test_nonexisting_u(self):
self.assertRaises(OSError, startfile, u"nonexisting.vbs")
def test_empty(self):
empty = path.join(path.dirname(__file__), "empty.vbs")
startfile(empty)
startfile(empty, "open")
def test_empty_u(self):
empty = path.join(path.dirname(__file__), "empty.vbs")
startfile(unicode(empty, "mbcs"))
startfile(unicode(empty, "mbcs"), "open")
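    # Editorial sketch of the improvement suggested in the header comment:
    # have the script leave a marker file we can poll for. "touch_marker.vbs"
    # is a hypothetical helper (it would create "marker.txt" beside itself)
    # and does not ship with the test, so the sketch skips when it is absent.
    def test_script_ran_sketch(self):
        import os
        import time
        script = path.join(path.dirname(__file__), "touch_marker.vbs")
        if not path.exists(script):
            return  # hypothetical helper not present; see note above
        marker = path.join(path.dirname(__file__), "marker.txt")
        startfile(script)
        for _ in range(100):  # poll up to ~10 seconds for the marker file
            if path.exists(marker):
                break
            time.sleep(0.1)
        self.assertTrue(path.exists(marker))
        os.remove(marker)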
def test_main():
test_support.run_unittest(TestCase)
if __name__=="__main__":
test_main()
| gpl-2.0 |
eLBati/odoo | addons/sale_journal/__init__.py | 443 | 1067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_journal
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Plain-Andy-legacy/android_external_chromium_org | tools/telemetry/telemetry/web_components/web_components_project.py | 45 | 1359 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from trace_viewer import trace_viewer_project
def _FindAllFilesRecursive(source_paths, pred):
all_filenames = set()
for source_path in source_paths:
for dirpath, _, filenames in os.walk(source_path):
for f in filenames:
if f.startswith('.'):
continue
x = os.path.abspath(os.path.join(dirpath, f))
if pred(x):
all_filenames.add(x)
return all_filenames
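# Example use (editorial illustration; mirrors the call made in
# WebComponentsProject.__init__ below):
#   html_files = _FindAllFilesRecursive(['/some/dir'],
#                                       lambda x: x.endswith('.html'))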
class WebComponentsProject(trace_viewer_project.TraceViewerProject):
telemetry_path = os.path.abspath(util.GetTelemetryDir())
def __init__(self, *args, **kwargs):
super(WebComponentsProject, self).__init__(*args, **kwargs)
exclude_paths = [os.path.join(self.telemetry_path, 'docs'),
os.path.join(self.telemetry_path, 'unittest_data'),
os.path.join(self.telemetry_path, 'support')]
excluded_html_files = _FindAllFilesRecursive(
exclude_paths,
lambda x: x.endswith('.html'))
self.non_module_html_files.extend(excluded_html_files)
self.non_module_html_files.appendRel(self.telemetry_path, 'results.html')
self.source_paths.append(self.telemetry_path)
| bsd-3-clause |
songmonit/CTTMSONLINE | addons/account/wizard/account_change_currency.py | 385 | 3751 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_change_currency(osv.osv_memory):
_name = 'account.change.currency'
_description = 'Change Currency'
_columns = {
        'currency_id': fields.many2one('res.currency', 'Change to', required=True, help="Select a currency to apply to the invoice"),
}
    def view_init(self, cr, uid, fields_list, context=None):
obj_inv = self.pool.get('account.invoice')
if context is None:
context = {}
if context.get('active_id',False):
if obj_inv.browse(cr, uid, context['active_id']).state != 'draft':
                raise osv.except_osv(_('Error!'), _('You can only change the currency of a draft invoice.'))
pass
def change_currency(self, cr, uid, ids, context=None):
obj_inv = self.pool.get('account.invoice')
obj_inv_line = self.pool.get('account.invoice.line')
obj_currency = self.pool.get('res.currency')
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)[0]
new_currency = data.currency_id.id
invoice = obj_inv.browse(cr, uid, context['active_id'], context=context)
if invoice.currency_id.id == new_currency:
return {}
rate = obj_currency.browse(cr, uid, new_currency, context=context).rate
for line in invoice.invoice_line:
new_price = 0
if invoice.company_id.currency_id.id == invoice.currency_id.id:
new_price = line.price_unit * rate
if new_price <= 0:
raise osv.except_osv(_('Error!'), _('New currency is not configured properly.'))
if invoice.company_id.currency_id.id != invoice.currency_id.id and invoice.company_id.currency_id.id == new_currency:
old_rate = invoice.currency_id.rate
if old_rate <= 0:
raise osv.except_osv(_('Error!'), _('Current currency is not configured properly.'))
new_price = line.price_unit / old_rate
if invoice.company_id.currency_id.id != invoice.currency_id.id and invoice.company_id.currency_id.id != new_currency:
old_rate = invoice.currency_id.rate
if old_rate <= 0:
raise osv.except_osv(_('Error!'), _('Current currency is not configured properly.'))
                new_price = (line.price_unit / old_rate) * rate
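            # Worked example of the three branches above (illustrative rates
            # only): with the company currency as the base (rate 1.0), an
            # invoice already in the company currency converted to a currency
            # with rate 1.25 gives new_price = price_unit * 1.25; an invoice
            # in that currency converted back to the company currency gives
            # new_price = price_unit / 1.25; and converting it to a third
            # currency with rate 0.8 gives
            # new_price = (price_unit / 1.25) * 0.8.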
obj_inv_line.write(cr, uid, [line.id], {'price_unit': new_price})
obj_inv.write(cr, uid, [invoice.id], {'currency_id': new_currency}, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |