| patch (stringlengths 17 to 31.2k) | y (int64 1 to 1) | oldf (stringlengths 0 to 2.21M) | idx (int64 1 to 1) | id (int64 4.29k to 68.4k) | msg (stringlengths 8 to 843) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values) |
---|---|---|---|---|---|---|---|
@@ -34,7 +34,13 @@ type payoutInfo struct {
// in Ethereum address format
// required: true
// example: 0x000000000000000000000000000000000000000a
- EthAddress string `json:"ethAddress"`
+ EthAddress string `json:"ethAddress"`
+}
+
+// swagger:model ReferralInfoDTO
+type referralInfo struct {
+ // required: true
+ // example: ABC123
ReferralCode string `json:"referral_code"`
}
| 1 | /*
* Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package endpoints
import (
"encoding/json"
"net/http"
"github.com/julienschmidt/httprouter"
"github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/market/mysterium"
"github.com/mysteriumnetwork/node/tequilapi/utils"
"github.com/mysteriumnetwork/node/tequilapi/validation"
)
// swagger:model PayoutInfoDTO
type payoutInfo struct {
// in Ethereum address format
// required: true
// example: 0x000000000000000000000000000000000000000a
EthAddress string `json:"ethAddress"`
ReferralCode string `json:"referral_code"`
}
type payoutInfoResponse struct {
EthAddress string `json:"eth_address"`
ReferralCode string `json:"referral_code"`
}
// PayoutInfoRegistry allows to register payout info
type PayoutInfoRegistry interface {
GetPayoutInfo(id identity.Identity, signer identity.Signer) (*mysterium.PayoutInfoResponse, error)
UpdatePayoutInfo(id identity.Identity, ethAddress string, referralCode string, signer identity.Signer) error
}
type payoutEndpoint struct {
idm identity.Manager
signerFactory identity.SignerFactory
payoutInfoRegistry PayoutInfoRegistry
}
// NewPayoutEndpoint creates payout api endpoint
func NewPayoutEndpoint(idm identity.Manager, signerFactory identity.SignerFactory, payoutInfoRegistry PayoutInfoRegistry) *payoutEndpoint {
return &payoutEndpoint{idm, signerFactory, payoutInfoRegistry}
}
func (endpoint *payoutEndpoint) GetPayoutInfo(resp http.ResponseWriter, request *http.Request, params httprouter.Params) {
id := identity.FromAddress(params.ByName("id"))
payoutInfo, err := endpoint.payoutInfoRegistry.GetPayoutInfo(id, endpoint.signerFactory(id))
if err != nil {
utils.SendError(resp, err, http.StatusNotFound)
return
}
response := &payoutInfoResponse{
EthAddress: payoutInfo.EthAddress,
ReferralCode: payoutInfo.ReferralCode,
}
utils.WriteAsJSON(response, resp)
}
// swagger:operation PUT /identities/{id}/payout Identity updatePayoutInfo
// ---
// summary: Registers payout info
// description: Registers payout address for identity
// parameters:
// - name: id
// in: path
// description: Identity stored in keystore
// type: string
// required: true
// - in: body
// name: body
// description: Parameter in body (ethAddress) is required
// schema:
// $ref: "#/definitions/PayoutInfoDTO"
// responses:
// 200:
// description: Payout info registered
// 400:
// description: Bad request
// schema:
// "$ref": "#/definitions/ErrorMessageDTO"
// 422:
// description: Parameters validation error
// schema:
// "$ref": "#/definitions/ValidationErrorDTO"
// 500:
// description: Internal server error
// schema:
// "$ref": "#/definitions/ErrorMessageDTO"
func (endpoint *payoutEndpoint) UpdatePayoutInfo(resp http.ResponseWriter, request *http.Request, params httprouter.Params) {
id := identity.FromAddress(params.ByName("id"))
payoutInfoReq, err := toPayoutInfoRequest(request)
if err != nil {
utils.SendError(resp, err, http.StatusBadRequest)
return
}
errorMap := validatePayoutInfoRequest(payoutInfoReq)
if errorMap.HasErrors() {
utils.SendValidationErrorMessage(resp, errorMap)
return
}
err = endpoint.payoutInfoRegistry.UpdatePayoutInfo(
id,
payoutInfoReq.EthAddress,
payoutInfoReq.ReferralCode,
endpoint.signerFactory(id),
)
if err != nil {
utils.SendError(resp, err, http.StatusInternalServerError)
return
}
resp.WriteHeader(http.StatusOK)
}
func toPayoutInfoRequest(req *http.Request) (*payoutInfo, error) {
var payoutReq = &payoutInfo{}
err := json.NewDecoder(req.Body).Decode(&payoutReq)
return payoutReq, err
}
func validatePayoutInfoRequest(req *payoutInfo) (errors *validation.FieldErrorMap) {
errors = validation.NewErrorMap()
if req.EthAddress == "" {
errors.ForField("ethAddress").AddError("required", "Field is required")
}
// TODO: implement validation of eth address
return
}
// AddRoutesForPayout creates payout endpoint on tequilapi service
func AddRoutesForPayout(
router *httprouter.Router,
idm identity.Manager,
signerFactory identity.SignerFactory,
payoutInfoRegistry PayoutInfoRegistry,
) {
idmEnd := NewPayoutEndpoint(idm, signerFactory, payoutInfoRegistry)
router.GET("/identities/:id/payout", idmEnd.GetPayoutInfo)
router.PUT("/identities/:id/payout", idmEnd.UpdatePayoutInfo)
}
| 1 | 14,793 | We are using a different style for JSON tags. `ethAddress` - camelCase, `referral_code` - snake_case | mysteriumnetwork-node | go |
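To illustrate the reviewer's point: the patch above mixes camelCase (`ethAddress`) and snake_case (`referral_code`) in the JSON tags of the same DTO pair. Below is a minimal sketch of one consistent alternative; the choice of snake_case and the exact struct layout are assumptions for illustration, not the project's actual resolution.

```go
package endpoints

// A minimal sketch, assuming the project settles on snake_case for both DTOs.
// Struct and field names are taken from the patch above; the chosen tag
// convention is an assumption, not the actual fix.

// swagger:model PayoutInfoDTO
type payoutInfo struct {
	// in Ethereum address format
	// required: true
	// example: 0x000000000000000000000000000000000000000a
	EthAddress string `json:"eth_address"`
}

// swagger:model ReferralInfoDTO
type referralInfo struct {
	// required: true
	// example: ABC123
	ReferralCode string `json:"referral_code"`
}
```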
@@ -10,6 +10,7 @@ namespace Datadog.Trace.ClrProfiler.Integrations
public static class HttpContextIntegration
{
private const string IntegrationName = "HttpContext";
+ private const string DefaultHttpContextTypeName = "HttpContext";
private static readonly ILog Log = LogProvider.GetLogger(typeof(HttpContextIntegration));
/// <summary> | 1 | using System;
using Datadog.Trace.ClrProfiler.Emit;
using Datadog.Trace.Logging;
namespace Datadog.Trace.ClrProfiler.Integrations
{
/// <summary>
/// Tracer integration ambient base for web server integrations.
/// </summary>
public static class HttpContextIntegration
{
private const string IntegrationName = "HttpContext";
private static readonly ILog Log = LogProvider.GetLogger(typeof(HttpContextIntegration));
/// <summary>
/// Entry method for invoking the beginning of every web server request pipeline
/// </summary>
/// <param name="httpContext">Instance being instrumented.</param>
/// <param name="features">Initialize features.</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
// [InterceptMethod(
// TargetAssembly = "Microsoft.AspNetCore.Http.Abstractions",
// TargetType = "Microsoft.AspNetCore.Http.DefaultHttpContext",
// TargetSignatureTypes = new[] { ClrNames.Void, ClrNames.Ignore })]
// ***************************************************************
// DISABLED UNTIL WE FIX SCOPING ISSUES AT HTTP CONTEXT LEVEL
// ***************************************************************
public static void Initialize(object httpContext, object features, int opCode, int mdToken, long moduleVersionPtr)
{
var httpContextType = httpContext.GetType();
string methodDef = $"{httpContextType.FullName}.Initialize(IFeatureCollection features)";
Action<object, object> instrumentedMethod;
try
{
instrumentedMethod =
MethodBuilder<Action<object, object>>
.Start(moduleVersionPtr, mdToken, opCode, nameof(Initialize))
.WithConcreteType(httpContextType)
.WithParameters(features)
.Build();
}
catch (Exception ex)
{
Log.ErrorException($"Error retrieving {methodDef}", ex);
throw;
}
try
{
instrumentedMethod.Invoke(httpContext, features);
}
catch (Exception ex)
{
Log.ErrorException($"Error calling {methodDef}", ex);
throw;
}
if (Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationName))
{
AspNetAmbientContext.Initialize(httpContext);
}
}
}
}
| 1 | 15,611 | Should this be `"Microsoft.AspNetCore.Http.DefaultHttpContext"`? | DataDog-dd-trace-dotnet | .cs |
@@ -68,7 +68,6 @@ interface ArrayType<T> {
/** Repeatedly group an array into equal sized sub-trees */
default Object grouped(Object array, int groupSize) {
final int arrayLength = lengthOf(array);
- assert arrayLength > groupSize;
final Object results = obj().newInstance(1 + ((arrayLength - 1) / groupSize));
obj().setAt(results, 0, copyRange(array, 0, groupSize));
| 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-*\
G E N E R A T O R C R A F T E D
\*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-*/
import java.io.Serializable;
import java.util.Collection;
/**
* Helper to replace reflective array access.
*
* @author Pap Lőrinc
* @since 2.1.0
*/
interface ArrayType<T> {
@SuppressWarnings("unchecked")
static <T> ArrayType<T> obj() { return (ArrayType<T>) ObjectArrayType.INSTANCE; }
Class<T> type();
int lengthOf(Object array);
T getAt(Object array, int index);
Object empty();
void setAt(Object array, int index, T value) throws ClassCastException;
Object copy(Object array, int arraySize, int sourceFrom, int destinationFrom, int size);
@SuppressWarnings("unchecked")
static <T> ArrayType<T> of(Object array) { return of((Class<T>) array.getClass().getComponentType()); }
static <T> ArrayType<T> of(Class<T> type) { return !type.isPrimitive() ? obj() : ofPrimitive(type); }
@SuppressWarnings("unchecked")
static <T> ArrayType<T> ofPrimitive(Class<T> type) {
if (boolean.class == type) {
return (ArrayType<T>) BooleanArrayType.INSTANCE;
} else if (byte.class == type) {
return (ArrayType<T>) ByteArrayType.INSTANCE;
} else if (char.class == type) {
return (ArrayType<T>) CharArrayType.INSTANCE;
} else if (double.class == type) {
return (ArrayType<T>) DoubleArrayType.INSTANCE;
} else if (float.class == type) {
return (ArrayType<T>) FloatArrayType.INSTANCE;
} else if (int.class == type) {
return (ArrayType<T>) IntArrayType.INSTANCE;
} else if (long.class == type) {
return (ArrayType<T>) LongArrayType.INSTANCE;
} else if (short.class == type) {
return (ArrayType<T>) ShortArrayType.INSTANCE;
} else {
throw new IllegalArgumentException(String.valueOf(type));
}
}
default Object newInstance(int length) { return copy(empty(), length); }
/** System.arrayCopy with same source and destination */
default Object copyRange(Object array, int from, int to) {
final int length = to - from;
return copy(array, length, from, 0, length);
}
/** Repeatedly group an array into equal sized sub-trees */
default Object grouped(Object array, int groupSize) {
final int arrayLength = lengthOf(array);
assert arrayLength > groupSize;
final Object results = obj().newInstance(1 + ((arrayLength - 1) / groupSize));
obj().setAt(results, 0, copyRange(array, 0, groupSize));
for (int start = groupSize, i = 1; start < arrayLength; i++) {
final int nextLength = Math.min(groupSize, arrayLength - (i * groupSize));
obj().setAt(results, i, copyRange(array, start, start + nextLength));
start += nextLength;
}
return results;
}
/** clone the source and set the value at the given position */
default Object copyUpdate(Object array, int index, T element) {
final Object copy = copy(array, index + 1);
setAt(copy, index, element);
return copy;
}
default Object copy(Object array, int minLength) {
final int arrayLength = lengthOf(array);
final int length = Math.max(arrayLength, minLength);
return copy(array, length, 0, 0, arrayLength);
}
/** clone the source and keep everything after the index (pre-padding the values with null) */
default Object copyDrop(Object array, int index) {
final int length = lengthOf(array);
return copy(array, length, index, index, length - index);
}
/** clone the source and keep everything before and including the index */
default Object copyTake(Object array, int lastIndex) {
return copyRange(array, 0, lastIndex + 1);
}
/** Create a single element array */
default Object asArray(T element) {
final Object result = newInstance(1);
setAt(result, 0, element);
return result;
}
/** Store the content of an iterable in an array */
static Object[] asArray(java.util.Iterator<?> it, int length) {
final Object[] array = new Object[length];
for (int i = 0; i < length; i++) {
array[i] = it.next();
}
return array;
}
@SuppressWarnings("unchecked")
static <T> Object[] asArray(Iterable<? extends T> iterable) {
if (iterable instanceof Collection<?>) {
final Collection<? extends T> collection = (Collection<? extends T>) iterable;
return collection.toArray();
} else {
return Collections.withSize(iterable).toArray();
}
}
@SuppressWarnings("unchecked")
static <T> T asPrimitives(Class<?> primitiveClass, Iterable<?> values) {
final Object[] array = Array.ofAll(values).toJavaArray();
assert (array.length == 0) || !primitiveClass.isArray();
final ArrayType<T> type = of((Class<T>) primitiveClass);
final Object results = type.newInstance(array.length);
for (int i = 0; i < array.length; i++) {
type.setAt(results, i, (T) array[i]);
}
return (T) results;
}
final class BooleanArrayType implements ArrayType<Boolean>, Serializable {
private static final long serialVersionUID = 1L;
static final BooleanArrayType INSTANCE = new BooleanArrayType();
static final boolean[] EMPTY = new boolean[0];
private static boolean[] cast(Object array) { return (boolean[]) array; }
@Override
public Class<Boolean> type() { return boolean.class; }
@Override
public boolean[] empty() { return EMPTY; }
@Override
public int lengthOf(Object array) { return (array != null) ? cast(array).length : 0; }
@Override
public Boolean getAt(Object array, int index) { return cast(array)[index]; }
@Override
public void setAt(Object array, int index, Boolean value) throws ClassCastException {
if (value != null) {
cast(array)[index] = value;
} else {
throw new ClassCastException();
}
}
@Override
public Object copy(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
return (size > 0)
? copyNonEmpty(array, arraySize, sourceFrom, destinationFrom, size)
: new boolean[arraySize];
}
private static Object copyNonEmpty(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
final boolean[] result = new boolean[arraySize];
System.arraycopy(array, sourceFrom, result, destinationFrom, size); /* has to be near the object allocation to avoid zeroing out the array */
return result;
}
}
final class ByteArrayType implements ArrayType<Byte>, Serializable {
private static final long serialVersionUID = 1L;
static final ByteArrayType INSTANCE = new ByteArrayType();
static final byte[] EMPTY = new byte[0];
private static byte[] cast(Object array) { return (byte[]) array; }
@Override
public Class<Byte> type() { return byte.class; }
@Override
public byte[] empty() { return EMPTY; }
@Override
public int lengthOf(Object array) { return (array != null) ? cast(array).length : 0; }
@Override
public Byte getAt(Object array, int index) { return cast(array)[index]; }
@Override
public void setAt(Object array, int index, Byte value) throws ClassCastException {
if (value != null) {
cast(array)[index] = value;
} else {
throw new ClassCastException();
}
}
@Override
public Object copy(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
return (size > 0)
? copyNonEmpty(array, arraySize, sourceFrom, destinationFrom, size)
: new byte[arraySize];
}
private static Object copyNonEmpty(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
final byte[] result = new byte[arraySize];
System.arraycopy(array, sourceFrom, result, destinationFrom, size); /* has to be near the object allocation to avoid zeroing out the array */
return result;
}
}
final class CharArrayType implements ArrayType<Character>, Serializable {
private static final long serialVersionUID = 1L;
static final CharArrayType INSTANCE = new CharArrayType();
static final char[] EMPTY = new char[0];
private static char[] cast(Object array) { return (char[]) array; }
@Override
public Class<Character> type() { return char.class; }
@Override
public char[] empty() { return EMPTY; }
@Override
public int lengthOf(Object array) { return (array != null) ? cast(array).length : 0; }
@Override
public Character getAt(Object array, int index) { return cast(array)[index]; }
@Override
public void setAt(Object array, int index, Character value) throws ClassCastException {
if (value != null) {
cast(array)[index] = value;
} else {
throw new ClassCastException();
}
}
@Override
public Object copy(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
return (size > 0)
? copyNonEmpty(array, arraySize, sourceFrom, destinationFrom, size)
: new char[arraySize];
}
private static Object copyNonEmpty(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
final char[] result = new char[arraySize];
System.arraycopy(array, sourceFrom, result, destinationFrom, size); /* has to be near the object allocation to avoid zeroing out the array */
return result;
}
}
final class DoubleArrayType implements ArrayType<Double>, Serializable {
private static final long serialVersionUID = 1L;
static final DoubleArrayType INSTANCE = new DoubleArrayType();
static final double[] EMPTY = new double[0];
private static double[] cast(Object array) { return (double[]) array; }
@Override
public Class<Double> type() { return double.class; }
@Override
public double[] empty() { return EMPTY; }
@Override
public int lengthOf(Object array) { return (array != null) ? cast(array).length : 0; }
@Override
public Double getAt(Object array, int index) { return cast(array)[index]; }
@Override
public void setAt(Object array, int index, Double value) throws ClassCastException {
if (value != null) {
cast(array)[index] = value;
} else {
throw new ClassCastException();
}
}
@Override
public Object copy(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
return (size > 0)
? copyNonEmpty(array, arraySize, sourceFrom, destinationFrom, size)
: new double[arraySize];
}
private static Object copyNonEmpty(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
final double[] result = new double[arraySize];
System.arraycopy(array, sourceFrom, result, destinationFrom, size); /* has to be near the object allocation to avoid zeroing out the array */
return result;
}
}
final class FloatArrayType implements ArrayType<Float>, Serializable {
private static final long serialVersionUID = 1L;
static final FloatArrayType INSTANCE = new FloatArrayType();
static final float[] EMPTY = new float[0];
private static float[] cast(Object array) { return (float[]) array; }
@Override
public Class<Float> type() { return float.class; }
@Override
public float[] empty() { return EMPTY; }
@Override
public int lengthOf(Object array) { return (array != null) ? cast(array).length : 0; }
@Override
public Float getAt(Object array, int index) { return cast(array)[index]; }
@Override
public void setAt(Object array, int index, Float value) throws ClassCastException {
if (value != null) {
cast(array)[index] = value;
} else {
throw new ClassCastException();
}
}
@Override
public Object copy(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
return (size > 0)
? copyNonEmpty(array, arraySize, sourceFrom, destinationFrom, size)
: new float[arraySize];
}
private static Object copyNonEmpty(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
final float[] result = new float[arraySize];
System.arraycopy(array, sourceFrom, result, destinationFrom, size); /* has to be near the object allocation to avoid zeroing out the array */
return result;
}
}
final class IntArrayType implements ArrayType<Integer>, Serializable {
private static final long serialVersionUID = 1L;
static final IntArrayType INSTANCE = new IntArrayType();
static final int[] EMPTY = new int[0];
private static int[] cast(Object array) { return (int[]) array; }
@Override
public Class<Integer> type() { return int.class; }
@Override
public int[] empty() { return EMPTY; }
@Override
public int lengthOf(Object array) { return (array != null) ? cast(array).length : 0; }
@Override
public Integer getAt(Object array, int index) { return cast(array)[index]; }
@Override
public void setAt(Object array, int index, Integer value) throws ClassCastException {
if (value != null) {
cast(array)[index] = value;
} else {
throw new ClassCastException();
}
}
@Override
public Object copy(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
return (size > 0)
? copyNonEmpty(array, arraySize, sourceFrom, destinationFrom, size)
: new int[arraySize];
}
private static Object copyNonEmpty(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
final int[] result = new int[arraySize];
System.arraycopy(array, sourceFrom, result, destinationFrom, size); /* has to be near the object allocation to avoid zeroing out the array */
return result;
}
}
final class LongArrayType implements ArrayType<Long>, Serializable {
private static final long serialVersionUID = 1L;
static final LongArrayType INSTANCE = new LongArrayType();
static final long[] EMPTY = new long[0];
private static long[] cast(Object array) { return (long[]) array; }
@Override
public Class<Long> type() { return long.class; }
@Override
public long[] empty() { return EMPTY; }
@Override
public int lengthOf(Object array) { return (array != null) ? cast(array).length : 0; }
@Override
public Long getAt(Object array, int index) { return cast(array)[index]; }
@Override
public void setAt(Object array, int index, Long value) throws ClassCastException {
if (value != null) {
cast(array)[index] = value;
} else {
throw new ClassCastException();
}
}
@Override
public Object copy(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
return (size > 0)
? copyNonEmpty(array, arraySize, sourceFrom, destinationFrom, size)
: new long[arraySize];
}
private static Object copyNonEmpty(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
final long[] result = new long[arraySize];
System.arraycopy(array, sourceFrom, result, destinationFrom, size); /* has to be near the object allocation to avoid zeroing out the array */
return result;
}
}
final class ShortArrayType implements ArrayType<Short>, Serializable {
private static final long serialVersionUID = 1L;
static final ShortArrayType INSTANCE = new ShortArrayType();
static final short[] EMPTY = new short[0];
private static short[] cast(Object array) { return (short[]) array; }
@Override
public Class<Short> type() { return short.class; }
@Override
public short[] empty() { return EMPTY; }
@Override
public int lengthOf(Object array) { return (array != null) ? cast(array).length : 0; }
@Override
public Short getAt(Object array, int index) { return cast(array)[index]; }
@Override
public void setAt(Object array, int index, Short value) throws ClassCastException {
if (value != null) {
cast(array)[index] = value;
} else {
throw new ClassCastException();
}
}
@Override
public Object copy(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
return (size > 0)
? copyNonEmpty(array, arraySize, sourceFrom, destinationFrom, size)
: new short[arraySize];
}
private static Object copyNonEmpty(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
final short[] result = new short[arraySize];
System.arraycopy(array, sourceFrom, result, destinationFrom, size); /* has to be near the object allocation to avoid zeroing out the array */
return result;
}
}
final class ObjectArrayType implements ArrayType<Object>, Serializable {
private static final long serialVersionUID = 1L;
static final ObjectArrayType INSTANCE = new ObjectArrayType();
static final Object[] EMPTY = new Object[0];
private static Object[] cast(Object array) { return (Object[]) array; }
@Override
public Class<Object> type() { return Object.class; }
@Override
public Object[] empty() { return EMPTY; }
@Override
public int lengthOf(Object array) { return (array != null) ? cast(array).length : 0; }
@Override
public Object getAt(Object array, int index) { return cast(array)[index]; }
@Override
public void setAt(Object array, int index, Object value) {
cast(array)[index] = value;
}
@Override
public Object copy(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
return (size > 0)
? copyNonEmpty(array, arraySize, sourceFrom, destinationFrom, size)
: new Object[arraySize];
}
private static Object copyNonEmpty(Object array, int arraySize, int sourceFrom, int destinationFrom, int size) {
final Object[] result = new Object[arraySize];
System.arraycopy(array, sourceFrom, result, destinationFrom, size); /* has to be near the object allocation to avoid zeroing out the array */
return result;
}
}
} | 1 | 11,456 | removed asserts from `Vector` as it's stable enough and they may hinder inlining, even if turned off :/ | vavr-io-vavr | java |
@@ -47,8 +47,14 @@ class MessageDefinitionStore:
self._messages_definitions[message.msgid] = message
self._msgs_by_category[message.msgid[0]].append(message.msgid)
+ # We disable the message here because MessageDefinitionStore is only
+ # initialized once and due to the size of the class does not run the
+ # risk of creating a large memory leak.
+ # See discussion in: https://github.com/PyCQA/pylint/pull/5673
@functools.lru_cache()
- def get_message_definitions(self, msgid_or_symbol: str) -> List[MessageDefinition]:
+ def get_message_definitions( # pylint: disable=lru-cache-decorating-method
+ self, msgid_or_symbol: str
+ ) -> List[MessageDefinition]:
"""Returns the Message definition for either a numeric or symbolic id.
The cache has no limit as its size will likely stay minimal. For each message we store | 1 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
import collections
import functools
from typing import TYPE_CHECKING, Dict, List, Tuple, ValuesView
from pylint.exceptions import UnknownMessageError
from pylint.message.message_definition import MessageDefinition
from pylint.message.message_id_store import MessageIdStore
if TYPE_CHECKING:
from pylint.checkers import BaseChecker
class MessageDefinitionStore:
"""The messages store knows information about every possible message definition but has
no particular state during analysis.
"""
def __init__(self) -> None:
self.message_id_store: MessageIdStore = MessageIdStore()
# Primary registry for all active messages definitions.
# It contains the 1:1 mapping from msgid to MessageDefinition.
# Keys are msgid, values are MessageDefinition
self._messages_definitions: Dict[str, MessageDefinition] = {}
# MessageDefinition kept by category
self._msgs_by_category: Dict[str, List[str]] = collections.defaultdict(list)
@property
def messages(self) -> ValuesView[MessageDefinition]:
"""The list of all active messages."""
return self._messages_definitions.values()
def register_messages_from_checker(self, checker: "BaseChecker") -> None:
"""Register all messages definitions from a checker."""
checker.check_consistency()
for message in checker.messages:
self.register_message(message)
def register_message(self, message: MessageDefinition) -> None:
"""Register a MessageDefinition with consistency in mind."""
self.message_id_store.register_message_definition(
message.msgid, message.symbol, message.old_names
)
self._messages_definitions[message.msgid] = message
self._msgs_by_category[message.msgid[0]].append(message.msgid)
@functools.lru_cache()
def get_message_definitions(self, msgid_or_symbol: str) -> List[MessageDefinition]:
"""Returns the Message definition for either a numeric or symbolic id.
The cache has no limit as its size will likely stay minimal. For each message we store
about 1000 characters, so even if we would have 1000 messages the cache would only
take up ~= 1 Mb.
"""
return [
self._messages_definitions[m]
for m in self.message_id_store.get_active_msgids(msgid_or_symbol)
]
def get_msg_display_string(self, msgid_or_symbol: str) -> str:
"""Generates a user-consumable representation of a message."""
message_definitions = self.get_message_definitions(msgid_or_symbol)
if len(message_definitions) == 1:
return repr(message_definitions[0].symbol)
return repr([md.symbol for md in message_definitions])
def help_message(self, msgids_or_symbols: List[str]) -> None:
"""Display help messages for the given message identifiers"""
for msgids_or_symbol in msgids_or_symbols:
try:
for message_definition in self.get_message_definitions(
msgids_or_symbol
):
print(message_definition.format_help(checkerref=True))
print("")
except UnknownMessageError as ex:
print(ex)
print("")
continue
def list_messages(self) -> None:
"""Output full messages list documentation in ReST format."""
emittable, non_emittable = self.find_emittable_messages()
print("Emittable messages with current interpreter:")
for msg in emittable:
print(msg.format_help(checkerref=False))
print("\nNon-emittable messages with current interpreter:")
for msg in non_emittable:
print(msg.format_help(checkerref=False))
print("")
def find_emittable_messages(
self,
) -> Tuple[List[MessageDefinition], List[MessageDefinition]]:
"""Finds all emittable and non-emittable messages"""
messages = sorted(self._messages_definitions.values(), key=lambda m: m.msgid)
emittable = []
non_emittable = []
for message in messages:
if message.may_be_emitted():
emittable.append(message)
else:
non_emittable.append(message)
return emittable, non_emittable
| 1 | 20,320 | Do we want to raise on the function or on the decorator? | PyCQA-pylint | py |
@@ -38,7 +38,7 @@ class Listen(object):
user_name=j.get('user_name', ""),
timestamp=datetime.utcfromtimestamp(float(j['listened_at'])),
artist_msid=j['track_metadata']['additional_info'].get('artist_msid'),
- album_msid=j['track_metadata']['additional_info'].get('album_msid'),
+ album_msid=j['track_metadata']['additional_info'].get('release_msid'),
recording_msid=j.get('recording_msid'),
data=j.get('track_metadata')
) | 1 | # coding=utf-8
from __future__ import division, absolute_import, print_function, unicode_literals
import ujson
from datetime import datetime
import calendar
class Listen(object):
""" Represents a listen object """
def __init__(self, user_id=None, user_name=None, timestamp=None, artist_msid=None, album_msid=None,
recording_msid=None, data=None):
self.user_id = user_id
self.user_name = user_name
# determine the type of timestamp and do the right thing
if isinstance(timestamp, int) or isinstance(timestamp, float):
self.ts_since_epoch = int(timestamp)
self.timestamp = datetime.utcfromtimestamp(self.ts_since_epoch)
else:
if timestamp:
self.timestamp = timestamp
self.ts_since_epoch = calendar.timegm(self.timestamp.utctimetuple())
else:
self.timestamp = 0
self.ts_since_epoch = 0
self.artist_msid = artist_msid
self.album_msid = album_msid
self.recording_msid = recording_msid
if data is None:
self.data = {'additional_info': {}}
else:
self.data = data
@classmethod
def from_json(cls, j):
"""Factory to make Listen() objects from a dict"""
return cls(user_id=j['user_id'],
user_name=j.get('user_name', ""),
timestamp=datetime.utcfromtimestamp(float(j['listened_at'])),
artist_msid=j['track_metadata']['additional_info'].get('artist_msid'),
album_msid=j['track_metadata']['additional_info'].get('album_msid'),
recording_msid=j.get('recording_msid'),
data=j.get('track_metadata')
)
def to_json(self):
return {
'user_id': self.user_id,
'user_name': self.user_name,
'timestamp': self.timestamp,
'track_metadata': self.data,
'recording_msid': self.recording_msid
}
def validate(self):
return (self.user_id is not None and self.timestamp is not None and self.artist_msid is not None
and self.recording_msid is not None and self.data is not None)
@property
def date(self):
return self.timestamp
def __repr__(self):
return unicode(self).encode("utf-8")
def __unicode__(self):
return u"<Listen: user_name: %s, time: %s, artist_msid: %s, album_msid: %s, recording_msid: %s, artist_name: %s, track_name: %s>" % \
(self.user_name, self.ts_since_epoch, self.artist_msid, self.album_msid, self.recording_msid, self.data['artist_name'], self.data['track_name'])
| 1 | 14,140 | How complex is it to replace `album_*` with `release_*` everywhere in the `Listen` class and places that use it? | metabrainz-listenbrainz-server | py |
@@ -96,13 +96,13 @@ func (n *Namespaces) Remove(t NamespaceType) bool {
return true
}
-func (n *Namespaces) Add(t NamespaceType, path string) {
+func (n *Namespaces) Add(t NamespaceType, path string) bool {
i := n.index(t)
if i == -1 {
*n = append(*n, Namespace{Type: t, Path: path})
- return
+ return true
}
- (*n)[i].Path = path
+ return false
}
func (n *Namespaces) index(t NamespaceType) int { | 1 | // +build linux freebsd
package configs
import (
"fmt"
"os"
"sync"
)
const (
NEWNET NamespaceType = "NEWNET"
NEWPID NamespaceType = "NEWPID"
NEWNS NamespaceType = "NEWNS"
NEWUTS NamespaceType = "NEWUTS"
NEWIPC NamespaceType = "NEWIPC"
NEWUSER NamespaceType = "NEWUSER"
)
var (
nsLock sync.Mutex
supportedNamespaces = make(map[NamespaceType]bool)
)
// NsName converts the namespace type to its filename
func NsName(ns NamespaceType) string {
switch ns {
case NEWNET:
return "net"
case NEWNS:
return "mnt"
case NEWPID:
return "pid"
case NEWIPC:
return "ipc"
case NEWUSER:
return "user"
case NEWUTS:
return "uts"
}
return ""
}
// IsNamespaceSupported returns whether a namespace is available or
// not
func IsNamespaceSupported(ns NamespaceType) bool {
nsLock.Lock()
defer nsLock.Unlock()
supported, ok := supportedNamespaces[ns]
if ok {
return supported
}
nsFile := NsName(ns)
// if the namespace type is unknown, just return false
if nsFile == "" {
return false
}
_, err := os.Stat(fmt.Sprintf("/proc/self/ns/%s", nsFile))
// a namespace is supported if it exists and we have permissions to read it
supported = err == nil
supportedNamespaces[ns] = supported
return supported
}
func NamespaceTypes() []NamespaceType {
return []NamespaceType{
NEWNET,
NEWPID,
NEWNS,
NEWUTS,
NEWIPC,
NEWUSER,
}
}
// Namespace defines configuration for each namespace. It specifies an
// alternate path that is able to be joined via setns.
type Namespace struct {
Type NamespaceType `json:"type"`
Path string `json:"path"`
}
func (n *Namespace) GetPath(pid int) string {
if n.Path != "" {
return n.Path
}
return fmt.Sprintf("/proc/%d/ns/%s", pid, NsName(n.Type))
}
func (n *Namespaces) Remove(t NamespaceType) bool {
i := n.index(t)
if i == -1 {
return false
}
*n = append((*n)[:i], (*n)[i+1:]...)
return true
}
func (n *Namespaces) Add(t NamespaceType, path string) {
i := n.index(t)
if i == -1 {
*n = append(*n, Namespace{Type: t, Path: path})
return
}
(*n)[i].Path = path
}
func (n *Namespaces) index(t NamespaceType) int {
for i, ns := range *n {
if ns.Type == t {
return i
}
}
return -1
}
func (n *Namespaces) Contains(t NamespaceType) bool {
return n.index(t) != -1
}
func (n *Namespaces) PathOf(t NamespaceType) string {
i := n.index(t)
if i == -1 {
return ""
}
return (*n)[i].Path
}
| 1 | 13,369 | You haven't changed any of the callers of `.Add` to check the return value, so we're now ignoring duplicates. Please fix that. | opencontainers-runc | go |
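The follow-up the reviewer asks for would have call sites inspect the boolean that the patched `Add` now returns. Below is a hedged sketch of such a caller, written against the types shown in the file above; the helper itself is hypothetical and not part of the runc tree.

```go
package configs

import "fmt"

// Hypothetical helper, for illustration only: since the patched Add reports
// whether the namespace was actually appended, a caller can surface duplicate
// namespace types instead of silently ignoring them.
func (n *Namespaces) addStrict(t NamespaceType, path string) error {
	if !n.Add(t, path) {
		return fmt.Errorf("namespace %s is already configured", NsName(t))
	}
	return nil
}
```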
@@ -10,6 +10,16 @@
* @return {Boolean} the element's hidden status
*/
dom.isHiddenWithCSS = function isHiddenWithCSS(el, descendentVisibilityValue) {
+ const vNode = axe.utils.getNodeFromTree(el);
+
+ if (vNode._isHiddenWithCSS === void 0) {
+ vNode._isHiddenWithCSS = _isHiddenWithCSS(el, descendentVisibilityValue);
+ }
+
+ return vNode._isHiddenWithCSS;
+};
+
+function _isHiddenWithCSS(el, descendentVisibilityValue) {
if (el.nodeType === 9) {
// 9 === Node.DOCUMENT
return false; | 1 | /* global dom */
/**
* Determine whether an element is hidden based on css
* @method isHiddenWithCSS
* @memberof axe.commons.dom
* @instance
* @param {HTMLElement} el The HTML Element
* @param {Boolean} descendentVisibilityValue (Optional) immediate descendant visibility value used for recursive computation
* @return {Boolean} the element's hidden status
*/
dom.isHiddenWithCSS = function isHiddenWithCSS(el, descendentVisibilityValue) {
if (el.nodeType === 9) {
// 9 === Node.DOCUMENT
return false;
}
if (el.nodeType === 11) {
// 11 === Node.DOCUMENT_FRAGMENT_NODE
el = el.host; // swap to host node
}
if (['STYLE', 'SCRIPT'].includes(el.nodeName.toUpperCase())) {
return false;
}
const style = window.getComputedStyle(el, null);
if (!style) {
throw new Error('Style does not exist for the given element.');
}
const displayValue = style.getPropertyValue('display');
if (displayValue === 'none') {
return true;
}
const HIDDEN_VISIBILITY_VALUES = ['hidden', 'collapse'];
const visibilityValue = style.getPropertyValue('visibility');
if (
HIDDEN_VISIBILITY_VALUES.includes(visibilityValue) &&
!descendentVisibilityValue
) {
return true;
}
if (
HIDDEN_VISIBILITY_VALUES.includes(visibilityValue) &&
(descendentVisibilityValue &&
HIDDEN_VISIBILITY_VALUES.includes(descendentVisibilityValue))
) {
return true;
}
const parent = dom.getComposedParent(el);
if (parent && !HIDDEN_VISIBILITY_VALUES.includes(visibilityValue)) {
return dom.isHiddenWithCSS(parent, visibilityValue);
}
return false;
};
| 1 | 15,054 | @straker should this not push to `vnode._cache.isHiddenWithCSS`? | dequelabs-axe-core | js |
@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package driver provides the interface for providers of runtimevar. This serves as a contract
-// of how the runtimevar API uses a provider implementation.
+// Package driver defines an interface that the runtimevar package uses to
+// interact with the underlying runtime configuration services.
package driver // import "gocloud.dev/runtimevar/driver"
import ( | 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package driver provides the interface for providers of runtimevar. This serves as a contract
// of how the runtimevar API uses a provider implementation.
package driver // import "gocloud.dev/runtimevar/driver"
import (
"context"
"time"
"gocloud.dev/gcerrors"
)
// DefaultWaitDuration is the default value for WaitDuration.
const DefaultWaitDuration = 30 * time.Second
// WaitDuration returns DefaultWaitDuration if d is <= 0, otherwise it returns d.
func WaitDuration(d time.Duration) time.Duration {
if d <= 0 {
return DefaultWaitDuration
}
return d
}
// State represents the current state of a variable.
type State interface {
// Value returns the current variable value.
Value() (interface{}, error)
// UpdateTime returns the update time for the variable.
UpdateTime() time.Time
// As converts i to provider-specific types.
// See https://gocloud.dev/concepts/as/ for background information.
As(interface{}) bool
}
// Watcher watches for updates on a variable and returns an updated Variable object if
// there are changes. A Watcher object is associated with a variable upon construction.
//
// An application can have more than one Watcher, one for each variable. It is typical
// to only have one Watcher per variable.
//
// Many Watcher providers store their configuration data as raw bytes; such
// providers should include a runtimevar.Decoder in their constructor to allow
// users to decode the raw bytes into a particular format (e.g., parsing a
// JSON string).
//
// Providers that don't have raw bytes may dictate the type of the exposed
// Snapshot.Value, or expose custom decoding logic.
type Watcher interface {
// WatchVariable returns the current State of the variable.
// If the State has not changed, it returns nil.
//
// If WatchVariable returns a wait time > 0, the portable type uses
// it as a hint to not call WatchVariable again for the wait time.
//
// Implementations *may* block, but must return if ctx is Done. If the
// variable has changed, then implementations *must* eventually return
// it.
//
// A polling implementation should return (State, <poll interval>) for
// a new State, or (nil, <poll interval>) if State hasn't changed.
//
// An implementation that receives notifications from an external source
// about changes to the underlying variable should:
// 1. If prev != nil, subscribe to change notifications.
// 2. Fetch the current State.
// 3. If prev == nil or if the State has changed, return (State, 0).
// A non-zero wait should be returned if State holds an error, to avoid
// spinning.
// 4. Block until it detects a change or ctx is Done, then fetch and return
// (State, 0).
// Note that the subscription in 1 must occur before 2 to avoid race conditions.
WatchVariable(ctx context.Context, prev State) (state State, wait time.Duration)
// Close cleans up any resources used by the Watcher object.
Close() error
// ErrorAs allows providers to expose provider-specific types for returned
// errors; see State.As for more details.
ErrorAs(error, interface{}) bool
// ErrorCode should return a code that describes the error, which was returned by
// one of the other methods in this interface.
ErrorCode(error) gcerrors.ErrorCode
}
| 1 | 19,314 | "an interface" in the conceptual sense vs "interfaces" or "set of interfaces", referring to the Go interfaces - database/sql uses the latter; should we? | google-go-cloud | go |
@@ -335,8 +335,11 @@ public class DownloadService extends Service {
&& String.valueOf(HttpURLConnection.HTTP_GONE).equals(status.getReasonDetailed());
boolean httpBadReq = status.getReason() == DownloadError.ERROR_HTTP_DATA_ERROR
&& String.valueOf(HttpURLConnection.HTTP_BAD_REQUEST).equals(status.getReasonDetailed());
+ boolean ioError = status.getReason() == DownloadError.ERROR_IO_ERROR;
+ boolean connectError = status.getReason() == DownloadError.ERROR_CONNECTION_ERROR;
- if (httpNotFound || forbidden || notEnoughSpace || wrongFileType || httpGone || httpBadReq ) {
+ if (httpNotFound || forbidden || notEnoughSpace || wrongFileType
+ || httpGone || httpBadReq || ioError || connectError) {
try {
DBWriter.saveFeedItemAutoDownloadFailed(item).get();
} catch (ExecutionException | InterruptedException e) { | 1 | package de.danoeh.antennapod.core.service.download;
import android.app.Notification;
import android.app.NotificationManager;
import android.app.Service;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.os.Binder;
import android.os.Handler;
import android.os.IBinder;
import android.os.Looper;
import android.text.TextUtils;
import android.util.Log;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.VisibleForTesting;
import de.danoeh.antennapod.core.R;
import de.danoeh.antennapod.core.sync.SyncService;
import org.apache.commons.io.FileUtils;
import org.greenrobot.eventbus.EventBus;
import java.io.File;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import de.danoeh.antennapod.core.event.DownloadEvent;
import de.danoeh.antennapod.core.event.FeedItemEvent;
import de.danoeh.antennapod.core.feed.Feed;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.FeedMedia;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.download.handler.FailedDownloadHandler;
import de.danoeh.antennapod.core.service.download.handler.FeedSyncTask;
import de.danoeh.antennapod.core.service.download.handler.MediaDownloadedHandler;
import de.danoeh.antennapod.core.service.download.handler.PostDownloaderTask;
import de.danoeh.antennapod.core.storage.DBReader;
import de.danoeh.antennapod.core.storage.DBTasks;
import de.danoeh.antennapod.core.storage.DBWriter;
import de.danoeh.antennapod.core.storage.DownloadRequester;
import de.danoeh.antennapod.core.util.DownloadError;
/**
* Manages the download of feedfiles in the app. Downloads can be enqueued via the startService intent.
* The argument of the intent is an instance of DownloadRequest in the EXTRA_REQUESTS field of
* the intent.
* After the downloads have finished, the downloaded object will be passed on to a specific handler, depending on the
* type of the feedfile.
*/
public class DownloadService extends Service {
private static final String TAG = "DownloadService";
/**
* Cancels one download. The intent MUST have an EXTRA_DOWNLOAD_URL extra that contains the download URL of the
* object whose download should be cancelled.
*/
public static final String ACTION_CANCEL_DOWNLOAD = "action.de.danoeh.antennapod.core.service.cancelDownload";
/**
* Cancels all running downloads.
*/
public static final String ACTION_CANCEL_ALL_DOWNLOADS = "action.de.danoeh.antennapod.core.service.cancelAllDownloads";
/**
* Extra for ACTION_CANCEL_DOWNLOAD
*/
public static final String EXTRA_DOWNLOAD_URL = "downloadUrl";
/**
* Extra for ACTION_ENQUEUE_DOWNLOAD intent.
*/
public static final String EXTRA_REQUESTS = "downloadRequests";
public static final String EXTRA_CLEANUP_MEDIA = "cleanupMedia";
/**
* Contains all completed downloads that have not been included in the report yet.
*/
private final List<DownloadStatus> reportQueue;
private final ExecutorService syncExecutor;
private final CompletionService<Downloader> downloadExecutor;
private final DownloadRequester requester;
private DownloadServiceNotification notificationManager;
/**
* Currently running downloads.
*/
private final List<Downloader> downloads;
/**
* Number of running downloads.
*/
private AtomicInteger numberOfDownloads;
/**
* True if service is running.
*/
public static boolean isRunning = false;
private Handler handler;
private NotificationUpdater notificationUpdater;
private ScheduledFuture<?> notificationUpdaterFuture;
private ScheduledFuture<?> downloadPostFuture;
private static final int SCHED_EX_POOL_SIZE = 1;
private ScheduledThreadPoolExecutor schedExecutor;
private static DownloaderFactory downloaderFactory = new DefaultDownloaderFactory();
private final IBinder mBinder = new LocalBinder();
private class LocalBinder extends Binder {
public DownloadService getService() {
return DownloadService.this;
}
}
public DownloadService() {
reportQueue = Collections.synchronizedList(new ArrayList<>());
downloads = Collections.synchronizedList(new ArrayList<>());
numberOfDownloads = new AtomicInteger(0);
requester = DownloadRequester.getInstance();
syncExecutor = Executors.newSingleThreadExecutor(r -> {
Thread t = new Thread(r, "SyncThread");
t.setPriority(Thread.MIN_PRIORITY);
return t;
});
Log.d(TAG, "parallel downloads: " + UserPreferences.getParallelDownloads());
downloadExecutor = new ExecutorCompletionService<>(
Executors.newFixedThreadPool(UserPreferences.getParallelDownloads(),
r -> {
Thread t = new Thread(r, "DownloadThread");
t.setPriority(Thread.MIN_PRIORITY);
return t;
}
)
);
schedExecutor = new ScheduledThreadPoolExecutor(SCHED_EX_POOL_SIZE,
r -> {
Thread t = new Thread(r, "DownloadSchedExecutorThread");
t.setPriority(Thread.MIN_PRIORITY);
return t;
}, (r, executor) -> Log.w(TAG, "SchedEx rejected submission of new task")
);
}
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
if (intent != null && intent.getParcelableArrayListExtra(EXTRA_REQUESTS) != null) {
Notification notification = notificationManager.updateNotifications(
requester.getNumberOfDownloads(), downloads);
startForeground(R.id.notification_downloading, notification);
syncExecutor.execute(() -> onDownloadQueued(intent));
} else if (numberOfDownloads.get() == 0) {
stopForeground(true);
stopSelf();
} else {
Log.d(TAG, "onStartCommand: Unknown intent");
}
return Service.START_NOT_STICKY;
}
@Override
public void onCreate() {
Log.d(TAG, "Service started");
isRunning = true;
handler = new Handler(Looper.getMainLooper());
notificationManager = new DownloadServiceNotification(this);
IntentFilter cancelDownloadReceiverFilter = new IntentFilter();
cancelDownloadReceiverFilter.addAction(ACTION_CANCEL_ALL_DOWNLOADS);
cancelDownloadReceiverFilter.addAction(ACTION_CANCEL_DOWNLOAD);
registerReceiver(cancelDownloadReceiver, cancelDownloadReceiverFilter);
downloadCompletionThread.start();
Notification notification = notificationManager.updateNotifications(
requester.getNumberOfDownloads(), downloads);
startForeground(R.id.notification_downloading, notification);
}
@Override
public IBinder onBind(Intent intent) {
return mBinder;
}
@Override
public void onDestroy() {
Log.d(TAG, "Service shutting down");
isRunning = false;
boolean showAutoDownloadReport = UserPreferences.showAutoDownloadReport();
if (UserPreferences.showDownloadReport() || showAutoDownloadReport) {
notificationManager.updateReport(reportQueue, showAutoDownloadReport);
reportQueue.clear();
}
EventBus.getDefault().postSticky(DownloadEvent.refresh(Collections.emptyList()));
downloadCompletionThread.interrupt();
try {
downloadCompletionThread.join(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
cancelNotificationUpdater();
syncExecutor.shutdown();
schedExecutor.shutdown();
if (downloadPostFuture != null) {
downloadPostFuture.cancel(true);
}
unregisterReceiver(cancelDownloadReceiver);
stopForeground(true);
NotificationManager nm = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);
nm.cancel(R.id.notification_downloading);
// if this was the initial gpodder sync, i.e. we just synced the feeds successfully,
// it is now time to sync the episode actions
SyncService.sync(this);
// start auto download in case anything new has shown up
DBTasks.autodownloadUndownloadedItems(getApplicationContext());
}
private final Thread downloadCompletionThread = new Thread("DownloadCompletionThread") {
private static final String TAG = "downloadCompletionThd";
@Override
public void run() {
Log.d(TAG, "downloadCompletionThread was started");
while (!isInterrupted()) {
try {
Downloader downloader = downloadExecutor.take().get();
Log.d(TAG, "Received 'Download Complete' - message.");
if (downloader.getResult().isSuccessful()) {
syncExecutor.execute(() -> {
handleSuccessfulDownload(downloader);
removeDownload(downloader);
numberOfDownloads.decrementAndGet();
queryDownloadsAsync();
});
} else {
handleFailedDownload(downloader);
removeDownload(downloader);
numberOfDownloads.decrementAndGet();
queryDownloadsAsync();
}
} catch (InterruptedException e) {
Log.e(TAG, "DownloadCompletionThread was interrupted");
return;
} catch (ExecutionException e) {
Log.e(TAG, "ExecutionException in DownloadCompletionThread: " + e.getMessage());
return;
}
}
Log.d(TAG, "End of downloadCompletionThread");
}
};
private void handleSuccessfulDownload(Downloader downloader) {
DownloadRequest request = downloader.getDownloadRequest();
DownloadStatus status = downloader.getResult();
final int type = status.getFeedfileType();
if (type == Feed.FEEDFILETYPE_FEED) {
Log.d(TAG, "Handling completed Feed Download");
FeedSyncTask task = new FeedSyncTask(DownloadService.this, request);
boolean success = task.run();
if (success) {
// we create a 'successful' download log if the feed's last refresh failed
List<DownloadStatus> log = DBReader.getFeedDownloadLog(request.getFeedfileId());
if (log.size() > 0 && !log.get(0).isSuccessful()) {
saveDownloadStatus(task.getDownloadStatus());
}
} else {
DBWriter.setFeedLastUpdateFailed(request.getFeedfileId(), true);
saveDownloadStatus(task.getDownloadStatus());
}
} else if (type == FeedMedia.FEEDFILETYPE_FEEDMEDIA) {
Log.d(TAG, "Handling completed FeedMedia Download");
MediaDownloadedHandler handler = new MediaDownloadedHandler(DownloadService.this, status, request);
handler.run();
saveDownloadStatus(handler.getUpdatedStatus());
}
}
private void handleFailedDownload(Downloader downloader) {
DownloadStatus status = downloader.getResult();
final int type = status.getFeedfileType();
if (!status.isCancelled()) {
if (status.getReason() == DownloadError.ERROR_UNAUTHORIZED) {
notificationManager.postAuthenticationNotification(downloader.getDownloadRequest());
} else if (status.getReason() == DownloadError.ERROR_HTTP_DATA_ERROR
&& Integer.parseInt(status.getReasonDetailed()) == 416) {
Log.d(TAG, "Requested invalid range, restarting download from the beginning");
FileUtils.deleteQuietly(new File(downloader.getDownloadRequest().getDestination()));
DownloadRequester.getInstance().download(DownloadService.this, downloader.getDownloadRequest());
} else {
Log.e(TAG, "Download failed");
saveDownloadStatus(status);
syncExecutor.execute(new FailedDownloadHandler(downloader.getDownloadRequest()));
if (type == FeedMedia.FEEDFILETYPE_FEEDMEDIA) {
FeedItem item = getFeedItemFromId(status.getFeedfileId());
if (item == null) {
return;
}
boolean httpNotFound = status.getReason() == DownloadError.ERROR_HTTP_DATA_ERROR
&& String.valueOf(HttpURLConnection.HTTP_NOT_FOUND).equals(status.getReasonDetailed());
boolean forbidden = status.getReason() == DownloadError.ERROR_FORBIDDEN
&& String.valueOf(HttpURLConnection.HTTP_FORBIDDEN).equals(status.getReasonDetailed());
boolean notEnoughSpace = status.getReason() == DownloadError.ERROR_NOT_ENOUGH_SPACE;
boolean wrongFileType = status.getReason() == DownloadError.ERROR_FILE_TYPE;
boolean httpGone = status.getReason() == DownloadError.ERROR_HTTP_DATA_ERROR
&& String.valueOf(HttpURLConnection.HTTP_GONE).equals(status.getReasonDetailed());
boolean httpBadReq = status.getReason() == DownloadError.ERROR_HTTP_DATA_ERROR
&& String.valueOf(HttpURLConnection.HTTP_BAD_REQUEST).equals(status.getReasonDetailed());
if (httpNotFound || forbidden || notEnoughSpace || wrongFileType || httpGone || httpBadReq ) {
try {
DBWriter.saveFeedItemAutoDownloadFailed(item).get();
} catch (ExecutionException | InterruptedException e) {
Log.d(TAG, "Ignoring exception while setting item download status");
e.printStackTrace();
}
}
// to make lists reload the failed item, we fake an item update
EventBus.getDefault().post(FeedItemEvent.updated(item));
}
}
} else {
// if FeedMedia download has been canceled, fake FeedItem update
// so that lists reload that it
if (status.getFeedfileType() == FeedMedia.FEEDFILETYPE_FEEDMEDIA) {
FeedItem item = getFeedItemFromId(status.getFeedfileId());
if (item == null) {
return;
}
EventBus.getDefault().post(FeedItemEvent.updated(item));
}
}
}
private Downloader getDownloader(String downloadUrl) {
for (Downloader downloader : downloads) {
if (downloader.getDownloadRequest().getSource().equals(downloadUrl)) {
return downloader;
}
}
return null;
}
private final BroadcastReceiver cancelDownloadReceiver = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
if (TextUtils.equals(intent.getAction(), ACTION_CANCEL_DOWNLOAD)) {
String url = intent.getStringExtra(EXTRA_DOWNLOAD_URL);
if (url == null) {
throw new IllegalArgumentException("ACTION_CANCEL_DOWNLOAD intent needs download url extra");
}
Log.d(TAG, "Cancelling download with url " + url);
Downloader d = getDownloader(url);
if (d != null) {
d.cancel();
DownloadRequest request = d.getDownloadRequest();
DownloadRequester.getInstance().removeDownload(request);
FeedItem item = getFeedItemFromId(request.getFeedfileId());
if (item != null) {
// undo enqueue upon cancel
if (request.isMediaEnqueued()) {
Log.v(TAG, "Undoing enqueue upon cancelling download");
try {
DBWriter.removeQueueItem(getApplicationContext(), false, item).get();
} catch (Throwable t) {
Log.e(TAG, "Unexpected exception during undoing enqueue upon cancel", t);
}
}
EventBus.getDefault().post(FeedItemEvent.updated(item));
}
} else {
Log.e(TAG, "Could not cancel download with url " + url);
}
postDownloaders();
} else if (TextUtils.equals(intent.getAction(), ACTION_CANCEL_ALL_DOWNLOADS)) {
for (Downloader d : downloads) {
d.cancel();
Log.d(TAG, "Cancelled all downloads");
}
postDownloaders();
}
queryDownloads();
}
};
private void onDownloadQueued(Intent intent) {
List<DownloadRequest> requests = intent.getParcelableArrayListExtra(EXTRA_REQUESTS);
if (requests == null) {
throw new IllegalArgumentException(
"ACTION_ENQUEUE_DOWNLOAD intent needs request extra");
}
boolean cleanupMedia = intent.getBooleanExtra(EXTRA_CLEANUP_MEDIA, false);
Log.d(TAG, "Received enqueue request. #requests=" + requests.size()
+ ", cleanupMedia=" + cleanupMedia);
if (cleanupMedia) {
UserPreferences.getEpisodeCleanupAlgorithm()
.makeRoomForEpisodes(getApplicationContext(), requests.size());
}
// #2448: First, add to-download items to the queue before actual download
// so that the resulting queue order is the same as when download is clicked
List<? extends FeedItem> itemsEnqueued;
try {
itemsEnqueued = enqueueFeedItems(requests);
} catch (Exception e) {
Log.e(TAG, "Unexpected exception during enqueue before downloads. Abort download", e);
return;
}
for (DownloadRequest request : requests) {
onDownloadQueued(request, itemsEnqueued);
}
}
private List<? extends FeedItem> enqueueFeedItems(@NonNull List<? extends DownloadRequest> requests)
throws Exception {
List<FeedItem> feedItems = new ArrayList<>();
for (DownloadRequest request : requests) {
if (request.getFeedfileType() == FeedMedia.FEEDFILETYPE_FEEDMEDIA) {
long mediaId = request.getFeedfileId();
FeedMedia media = DBReader.getFeedMedia(mediaId);
if (media == null) {
Log.w(TAG, "enqueueFeedItems() : FeedFile Id " + mediaId + " is not found. ignore it.");
continue;
}
feedItems.add(media.getItem());
}
}
return DBTasks.enqueueFeedItemsToDownload(getApplicationContext(), feedItems);
}
private void onDownloadQueued(@NonNull DownloadRequest request,
@NonNull List<? extends FeedItem> itemsEnqueued) {
writeFileUrl(request);
Downloader downloader = downloaderFactory.create(request);
if (downloader != null) {
numberOfDownloads.incrementAndGet();
if (request.getFeedfileType() == FeedMedia.FEEDFILETYPE_FEEDMEDIA
&& isEnqueued(request, itemsEnqueued)) {
request.setMediaEnqueued(true);
}
handler.post(() -> {
downloads.add(downloader);
downloadExecutor.submit(downloader);
postDownloaders();
});
}
handler.post(this::queryDownloads);
}
private static boolean isEnqueued(@NonNull DownloadRequest request,
@NonNull List<? extends FeedItem> itemsEnqueued) {
if (request.getFeedfileType() == FeedMedia.FEEDFILETYPE_FEEDMEDIA) {
final long mediaId = request.getFeedfileId();
for (FeedItem item : itemsEnqueued) {
if (item.getMedia() != null && item.getMedia().getId() == mediaId) {
return true;
}
}
}
return false;
}
@VisibleForTesting
public static DownloaderFactory getDownloaderFactory() {
return downloaderFactory;
}
// public scope rather than package private,
    // because androidTest puts classes in the non-standard de.test.antennapod hierarchy
@VisibleForTesting
public static void setDownloaderFactory(DownloaderFactory downloaderFactory) {
DownloadService.downloaderFactory = downloaderFactory;
}
/**
* Remove download from the DownloadRequester list and from the
* DownloadService list.
*/
private void removeDownload(final Downloader d) {
handler.post(() -> {
Log.d(TAG, "Removing downloader: " + d.getDownloadRequest().getSource());
boolean rc = downloads.remove(d);
Log.d(TAG, "Result of downloads.remove: " + rc);
DownloadRequester.getInstance().removeDownload(d.getDownloadRequest());
postDownloaders();
});
}
/**
* Adds a new DownloadStatus object to the list of completed downloads and
* saves it in the database
*
* @param status the download that is going to be saved
*/
private void saveDownloadStatus(DownloadStatus status) {
reportQueue.add(status);
DBWriter.addDownloadStatus(status);
}
/**
     * Calls query downloads on the service's main thread. This method should be used instead of queryDownloads if it is
* used from a thread other than the main thread.
*/
private void queryDownloadsAsync() {
handler.post(DownloadService.this::queryDownloads);
}
/**
* Check if there's something else to download, otherwise stop.
*/
private void queryDownloads() {
Log.d(TAG, numberOfDownloads.get() + " downloads left");
if (numberOfDownloads.get() <= 0 && DownloadRequester.getInstance().hasNoDownloads()) {
Log.d(TAG, "Number of downloads is " + numberOfDownloads.get() + ", attempting shutdown");
stopForeground(true);
stopSelf();
if (notificationUpdater != null) {
notificationUpdater.run();
} else {
Log.d(TAG, "Skipping notification update");
}
} else {
setupNotificationUpdater();
Notification notification = notificationManager.updateNotifications(
requester.getNumberOfDownloads(), downloads);
startForeground(R.id.notification_downloading, notification);
}
}
@Nullable
private FeedItem getFeedItemFromId(long id) {
FeedMedia media = DBReader.getFeedMedia(id);
if (media != null) {
return media.getItem();
} else {
return null;
}
}
/**
* Creates the destination file and writes FeedMedia File_url directly after starting download
* to make it possible to resume download after the service was killed by the system.
*/
private void writeFileUrl(DownloadRequest request) {
if (request.getFeedfileType() != FeedMedia.FEEDFILETYPE_FEEDMEDIA) {
return;
}
File dest = new File(request.getDestination());
if (!dest.exists()) {
try {
dest.createNewFile();
} catch (IOException e) {
Log.e(TAG, "Unable to create file");
}
}
if (dest.exists()) {
Log.d(TAG, "Writing file url");
FeedMedia media = DBReader.getFeedMedia(request.getFeedfileId());
if (media == null) {
Log.d(TAG, "No media");
return;
}
media.setFile_url(request.getDestination());
try {
DBWriter.setFeedMedia(media).get();
} catch (InterruptedException e) {
Log.e(TAG, "writeFileUrl was interrupted");
} catch (ExecutionException e) {
Log.e(TAG, "ExecutionException in writeFileUrl: " + e.getMessage());
}
}
}
/**
* Schedules the notification updater task if it hasn't been scheduled yet.
*/
private void setupNotificationUpdater() {
if (notificationUpdater == null) {
Log.d(TAG, "Setting up notification updater");
notificationUpdater = new NotificationUpdater();
notificationUpdaterFuture = schedExecutor.scheduleAtFixedRate(notificationUpdater, 1, 1, TimeUnit.SECONDS);
}
}
private void cancelNotificationUpdater() {
boolean result = false;
if (notificationUpdaterFuture != null) {
result = notificationUpdaterFuture.cancel(true);
}
notificationUpdater = null;
notificationUpdaterFuture = null;
Log.d(TAG, "NotificationUpdater cancelled. Result: " + result);
}
private class NotificationUpdater implements Runnable {
public void run() {
Notification n = notificationManager.updateNotifications(requester.getNumberOfDownloads(), downloads);
if (n != null) {
NotificationManager nm = (NotificationManager) getSystemService(Context.NOTIFICATION_SERVICE);
nm.notify(R.id.notification_downloading, n);
Log.d(TAG, "Download progress notification was posted");
}
}
}
private void postDownloaders() {
new PostDownloaderTask(downloads).run();
if (downloadPostFuture == null) {
downloadPostFuture = schedExecutor.scheduleAtFixedRate(
new PostDownloaderTask(downloads), 1, 1, TimeUnit.SECONDS);
}
}
}
| 1 | 18,126 | This now checks nearly all errors that can occur. How about explicitly listing cases where we want it to retry? I think there are not many cases where we want that, as the many added conditions in the last years show :) | AntennaPod-AntennaPod | java |
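A rough illustration of the suggestion above (inverting the check so that only the retry-worthy failures are listed explicitly): the helper name below is made up, and the DownloadError constants treated as "transient" are placeholders that would need to match what the project actually wants to retry.

    private static boolean isRetryableFailure(DownloadStatus status) {
        // Only an explicit, small set of transient failures is retried;
        // every other failure marks the item as a failed auto-download.
        return status.getReason() == DownloadError.ERROR_CONNECTION_ERROR
                || status.getReason() == DownloadError.ERROR_IO_ERROR;
    }

    // The failure branch above would then collapse to roughly:
    //     if (!isRetryableFailure(status)) {
    //         DBWriter.saveFeedItemAutoDownloadFailed(item).get();
    //     }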
@@ -247,7 +247,8 @@ func (brq *blockRetrievalQueue) PutInCaches(ctx context.Context,
// checkCaches copies a block into `block` if it's in one of our caches.
func (brq *blockRetrievalQueue) checkCaches(ctx context.Context,
- kmd KeyMetadata, ptr BlockPointer, block Block) (PrefetchStatus, error) {
+ kmd KeyMetadata, ptr BlockPointer, block Block, doSync bool) (
+ PrefetchStatus, error) {
// Attempt to retrieve the block from the cache. This might be a specific
// type where the request blocks are CommonBlocks, but that direction can
// Set correctly. The cache will never have CommonBlocks. | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"container/heap"
"io"
"reflect"
"sync"
"github.com/keybase/client/go/logger"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
const (
defaultBlockRetrievalWorkerQueueSize int = 100
defaultPrefetchWorkerQueueSize int = 2
minimalBlockRetrievalWorkerQueueSize int = 2
minimalPrefetchWorkerQueueSize int = 1
testBlockRetrievalWorkerQueueSize int = 5
testPrefetchWorkerQueueSize int = 1
defaultOnDemandRequestPriority int = 1 << 30
// Channel buffer size can be big because we use the empty struct.
workerQueueSize int = 1<<31 - 1
)
type blockRetrievalPartialConfig interface {
dataVersioner
logMaker
blockCacher
diskBlockCacheGetter
syncedTlfGetterSetter
initModeGetter
}
type blockRetrievalConfig interface {
blockRetrievalPartialConfig
blockGetter() blockGetter
}
type realBlockRetrievalConfig struct {
blockRetrievalPartialConfig
bg blockGetter
}
func (c *realBlockRetrievalConfig) blockGetter() blockGetter {
return c.bg
}
// blockRetrievalRequest represents one consumer's request for a block.
type blockRetrievalRequest struct {
block Block
doneCh chan error
}
// blockRetrieval contains the metadata for a given block retrieval. May
// represent many requests, all of which will be handled at once.
type blockRetrieval struct {
//// Retrieval Metadata
// the block pointer to retrieve
blockPtr BlockPointer
// the key metadata for the request
kmd KeyMetadata
// the context encapsulating all request contexts
ctx *CoalescingContext
// cancel function for the context
cancelFunc context.CancelFunc
// protects requests, cacheLifetime, the prefetch channels, and action
reqMtx sync.RWMutex
// the individual requests for this block pointer: they must be notified
// once the block is returned
requests []*blockRetrievalRequest
// the cache lifetime for the retrieval
cacheLifetime BlockCacheLifetime
// the follow-on action to take once the block is fetched
action BlockRequestAction
//// Queueing Metadata
// the index of the retrieval in the heap
index int
// the priority of the retrieval: larger priorities are processed first
priority int
// state of global request counter when this retrieval was created;
// maintains FIFO
insertionOrder uint64
}
// blockPtrLookup is used to uniquely identify block retrieval requests. The
// reflect.Type is needed because sometimes a request is placed concurrently
// for a specific block type and a generic block type. The requests will both
// cause a retrieval, but branching on type allows us to avoid special casing
// the code.
type blockPtrLookup struct {
bp BlockPointer
t reflect.Type
}
// blockRetrievalQueue manages block retrieval requests. Higher priority
// requests are executed first. Requests are executed in FIFO order within a
// given priority level.
type blockRetrievalQueue struct {
config blockRetrievalConfig
log logger.Logger
// protects ptrs, insertionCount, and the heap
mtx sync.RWMutex
// queued or in progress retrievals
ptrs map[blockPtrLookup]*blockRetrieval
// global counter of insertions to queue
// capacity: ~584 years at 1 billion requests/sec
insertionCount uint64
heap *blockRetrievalHeap
// These are notification channels to maximize the time that each request
// is in the heap, allowing preemption as long as possible. This way, a
// request only exits the heap once a worker is ready.
workerCh chan<- struct{}
prefetchWorkerCh chan<- struct{}
// slices to store the workers so we can terminate them when we're done
workers []*blockRetrievalWorker
// channel to be closed when we're done accepting requests
doneCh chan struct{}
// protects prefetcher
prefetchMtx sync.RWMutex
// prefetcher for handling prefetching scenarios
prefetcher Prefetcher
}
var _ BlockRetriever = (*blockRetrievalQueue)(nil)
// newBlockRetrievalQueue creates a new block retrieval queue. The numWorkers
// parameter determines how many workers can concurrently call Work (more than
// numWorkers will block).
func newBlockRetrievalQueue(numWorkers int, numPrefetchWorkers int,
config blockRetrievalConfig) *blockRetrievalQueue {
workerCh := make(chan struct{}, workerQueueSize)
prefetchWorkerCh := make(chan struct{}, workerQueueSize)
q := &blockRetrievalQueue{
config: config,
log: config.MakeLogger(""),
ptrs: make(map[blockPtrLookup]*blockRetrieval),
heap: &blockRetrievalHeap{},
workerCh: workerCh,
prefetchWorkerCh: prefetchWorkerCh,
doneCh: make(chan struct{}),
workers: make([]*blockRetrievalWorker, 0,
numWorkers+numPrefetchWorkers),
}
q.prefetcher = newBlockPrefetcher(q, config, nil)
for i := 0; i < numWorkers; i++ {
q.workers = append(q.workers, newBlockRetrievalWorker(
config.blockGetter(), q, workerCh))
}
for i := 0; i < numPrefetchWorkers; i++ {
q.workers = append(q.workers, newBlockRetrievalWorker(
config.blockGetter(), q, prefetchWorkerCh))
}
return q
}
func (brq *blockRetrievalQueue) popIfNotEmpty() *blockRetrieval {
brq.mtx.Lock()
defer brq.mtx.Unlock()
if brq.heap.Len() > 0 {
return heap.Pop(brq.heap).(*blockRetrieval)
}
return nil
}
func (brq *blockRetrievalQueue) shutdownRetrieval() {
retrieval := brq.popIfNotEmpty()
if retrieval != nil {
brq.FinalizeRequest(retrieval, nil, io.EOF)
}
}
// notifyWorker notifies workers that there is a new request for processing.
func (brq *blockRetrievalQueue) notifyWorker(priority int) {
// On-demand workers and prefetch workers share the priority queue. This
// allows maximum time for requests to jump the queue, at least until the
// worker actually begins working on it.
//
// Note that the worker being notified won't necessarily work on the exact
// request that caused the notification. It's just a counter. That means
// that sometimes on-demand workers will work on prefetch requests, and
// vice versa. But the numbers should match.
//
// However, there are some pathological scenarios where if all the workers
// of one type are making progress but the other type are not (which is
// highly improbable), requests of one type could starve the other. By
// design, on-demand requests _should_ starve prefetch requests, so this is
// a problem only if prefetch requests can starve on-demand workers. But
// because there are far more on-demand workers than prefetch workers, this
// should never actually happen.
workerCh := brq.workerCh
if priority < defaultOnDemandRequestPriority {
workerCh = brq.prefetchWorkerCh
}
select {
case <-brq.doneCh:
brq.shutdownRetrieval()
// Notify the next queued worker.
case workerCh <- struct{}{}:
default:
panic("notifyWorker() would have blocked, which means we somehow " +
"have around MaxInt32 requests already waiting.")
}
}
// PutInCaches implements the BlockRetriever interface for
// BlockRetrievalQueue.
func (brq *blockRetrievalQueue) PutInCaches(ctx context.Context,
ptr BlockPointer, tlfID tlf.ID, block Block, lifetime BlockCacheLifetime,
prefetchStatus PrefetchStatus) (err error) {
err = brq.config.BlockCache().PutWithPrefetch(ptr, tlfID, block, lifetime,
prefetchStatus)
switch err.(type) {
case nil:
case cachePutCacheFullError:
// Ignore cache full errors and send to the disk cache anyway.
default:
return err
}
dbc := brq.config.DiskBlockCache()
if dbc == nil {
return nil
}
err = dbc.UpdateMetadata(ctx, ptr.ID, prefetchStatus)
switch err.(type) {
case nil:
case NoSuchBlockError:
// TODO: Add the block to the DBC. This is complicated because we
// need the serverHalf.
brq.log.CDebugf(ctx, "Block %s missing for disk block "+
"cache metadata update", ptr.ID)
default:
brq.log.CDebugf(ctx, "Error updating metadata: %+v", err)
}
// All disk cache errors are fatal
return err
}
// checkCaches copies a block into `block` if it's in one of our caches.
func (brq *blockRetrievalQueue) checkCaches(ctx context.Context,
kmd KeyMetadata, ptr BlockPointer, block Block) (PrefetchStatus, error) {
// Attempt to retrieve the block from the cache. This might be a specific
// type where the request blocks are CommonBlocks, but that direction can
// Set correctly. The cache will never have CommonBlocks.
cachedBlock, prefetchStatus, _, err :=
brq.config.BlockCache().GetWithPrefetch(ptr)
if err == nil && cachedBlock != nil {
block.Set(cachedBlock)
return prefetchStatus, nil
}
// Check the disk cache.
dbc := brq.config.DiskBlockCache()
if dbc == nil {
return NoPrefetch, NoSuchBlockError{ptr.ID}
}
blockBuf, serverHalf, prefetchStatus, err := dbc.Get(ctx, kmd.TlfID(),
ptr.ID)
if err != nil {
return NoPrefetch, err
}
if len(blockBuf) == 0 {
return NoPrefetch, NoSuchBlockError{ptr.ID}
}
// Assemble the block from the encrypted block buffer.
err = brq.config.blockGetter().assembleBlock(ctx, kmd, ptr, block, blockBuf,
serverHalf)
if err == nil {
// Cache the block in memory.
brq.config.BlockCache().PutWithPrefetch(ptr, kmd.TlfID(), block,
TransientEntry, prefetchStatus)
}
return prefetchStatus, err
}
// request retrieves blocks asynchronously.
func (brq *blockRetrievalQueue) request(ctx context.Context,
priority int, kmd KeyMetadata, ptr BlockPointer, block Block,
lifetime BlockCacheLifetime, action BlockRequestAction) <-chan error {
brq.log.CDebugf(ctx, "Request of %v, action=%s", ptr, action)
// Only continue if we haven't been shut down
ch := make(chan error, 1)
select {
case <-brq.doneCh:
ch <- io.EOF
if action.PrefetchTracked() {
brq.Prefetcher().CancelPrefetch(ptr.ID)
}
return ch
default:
}
if block == nil {
ch <- errors.New("nil block passed to blockRetrievalQueue.Request")
if action.PrefetchTracked() {
brq.Prefetcher().CancelPrefetch(ptr.ID)
}
return ch
}
// Check caches before locking the mutex.
prefetchStatus, err := brq.checkCaches(ctx, kmd, ptr, block)
if err == nil {
brq.log.CDebugf(ctx, "Found %v in caches: %s", ptr, prefetchStatus)
if action.PrefetchTracked() {
brq.Prefetcher().ProcessBlockForPrefetch(ctx, ptr, block, kmd,
priority, lifetime, prefetchStatus, action)
}
ch <- nil
return ch
}
err = checkDataVersion(brq.config, path{}, ptr)
if err != nil {
if action.PrefetchTracked() {
brq.Prefetcher().CancelPrefetch(ptr.ID)
}
ch <- err
return ch
}
bpLookup := blockPtrLookup{ptr, reflect.TypeOf(block)}
brq.mtx.Lock()
defer brq.mtx.Unlock()
// We might have to retry if the context has been canceled. This loop will
// iterate a maximum of 2 times. It either hits the `break` statement at
// the bottom on the first iteration, or the `continue` statement first
// which causes it to `break` on the next iteration.
var br *blockRetrieval
for {
exists := false
br, exists = brq.ptrs[bpLookup]
if !exists {
// Add to the heap
br = &blockRetrieval{
blockPtr: ptr,
kmd: kmd,
index: -1,
priority: priority,
insertionOrder: brq.insertionCount,
cacheLifetime: lifetime,
action: action,
}
br.ctx, br.cancelFunc = NewCoalescingContext(ctx)
brq.insertionCount++
brq.ptrs[bpLookup] = br
heap.Push(brq.heap, br)
brq.notifyWorker(priority)
} else {
err := br.ctx.AddContext(ctx)
if err == context.Canceled {
// We need to delete the request pointer, but we'll still let
// the existing request be processed by a worker.
delete(brq.ptrs, bpLookup)
continue
}
}
break
}
brq.log.CDebugf(ctx, "Scheduling request of %v", ptr)
br.reqMtx.Lock()
defer br.reqMtx.Unlock()
br.requests = append(br.requests, &blockRetrievalRequest{
block: block,
doneCh: ch,
})
if lifetime > br.cacheLifetime {
br.cacheLifetime = lifetime
}
oldPriority := br.priority
if priority > oldPriority {
br.priority = priority
// If the new request priority is higher, elevate the retrieval in the
// queue. Skip this if the request is no longer in the queue (which
// means it's actively being processed).
if br.index != -1 {
heap.Fix(brq.heap, br.index)
if oldPriority < defaultOnDemandRequestPriority &&
priority >= defaultOnDemandRequestPriority {
// We've crossed the priority threshold for prefetch workers,
// so we now need an on-demand worker to pick up the request.
// This means that we might have up to two workers "activated"
// per request. However, they won't leak because if a worker
// sees an empty queue, it continues merrily along.
brq.notifyWorker(priority)
}
}
}
// Update the action if needed.
brq.log.CDebugf(ctx, "Combining actions %d and %d", action, br.action)
br.action = action.Combine(br.action)
brq.log.CDebugf(ctx, "Got action %d", br.action)
return ch
}
// Request implements the BlockRetriever interface for blockRetrievalQueue.
func (brq *blockRetrievalQueue) Request(ctx context.Context,
priority int, kmd KeyMetadata, ptr BlockPointer, block Block,
lifetime BlockCacheLifetime, action BlockRequestAction) <-chan error {
if brq.config.IsSyncedTlf(kmd.TlfID()) {
action = action.AddSync()
}
return brq.request(ctx, priority, kmd, ptr, block, lifetime, action)
}
// FinalizeRequest is the last step of a retrieval request once a block has
// been obtained. It removes the request from the blockRetrievalQueue,
// preventing more requests from mutating the retrieval, then notifies all
// subscribed requests.
func (brq *blockRetrievalQueue) FinalizeRequest(
retrieval *blockRetrieval, block Block, err error) {
brq.mtx.Lock()
// This might have already been removed if the context has been canceled.
// That's okay, because this will then be a no-op.
bpLookup := blockPtrLookup{retrieval.blockPtr, reflect.TypeOf(block)}
delete(brq.ptrs, bpLookup)
brq.mtx.Unlock()
defer retrieval.cancelFunc()
// This is a lock that exists for the race detector, since there
// shouldn't be any other goroutines accessing the retrieval at this
// point. In `Request`, the requests slice can be modified while locked
// by `brq.mtx`. But once we delete `bpLookup` from `brq.ptrs` here
// (while locked by `brq.mtx`), there is no longer a way for anyone else
// to write `retrieval.requests`. However, the race detector still
// notices that we're reading `retrieval.requests` without a lock, where
// it was written by a different goroutine in `Request`. So, we lock it
// with its own mutex in both places.
retrieval.reqMtx.RLock()
defer retrieval.reqMtx.RUnlock()
// Cache the block and trigger prefetches if there is no error.
if retrieval.action.PrefetchTracked() {
if err == nil {
// We treat this request as not having been prefetched, because the
// only way to get here is if the request wasn't already cached.
// Need to call with context.Background() because the retrieval's
// context will be canceled as soon as this method returns.
brq.Prefetcher().ProcessBlockForPrefetch(context.Background(),
retrieval.blockPtr, block, retrieval.kmd, retrieval.priority,
retrieval.cacheLifetime, NoPrefetch, retrieval.action)
} else {
brq.Prefetcher().CancelPrefetch(retrieval.blockPtr.ID)
}
}
for _, r := range retrieval.requests {
req := r
if block != nil {
// Copy the decrypted block to the caller
req.block.Set(block)
}
// Since we created this channel with a buffer size of 1, this won't
// block.
req.doneCh <- err
}
// Clearing references to the requested blocks seems to plug a
// leak, but not sure why yet.
// TODO: strib fixed this earlier. Should be safe to remove here, but
// follow up in PR.
retrieval.requests = nil
}
// Shutdown is called when we are no longer accepting requests.
func (brq *blockRetrievalQueue) Shutdown() {
select {
case <-brq.doneCh:
default:
// We close `doneCh` first so that new requests coming in get
// finalized immediately rather than racing with dying workers.
close(brq.doneCh)
for _, w := range brq.workers {
w.Shutdown()
}
brq.prefetchMtx.Lock()
defer brq.prefetchMtx.Unlock()
brq.prefetcher.Shutdown()
}
}
// TogglePrefetcher allows upstream components to turn the prefetcher on or
// off. If an error is returned due to a context cancelation, the prefetcher is
// never re-enabled.
func (brq *blockRetrievalQueue) TogglePrefetcher(enable bool,
testSyncCh <-chan struct{}) <-chan struct{} {
// We must hold this lock for the whole function so that multiple calls to
	// this function don't leak prefetchers.
brq.prefetchMtx.Lock()
defer brq.prefetchMtx.Unlock()
// Allow the caller to block on the current shutdown.
ch := brq.prefetcher.Shutdown()
if enable {
brq.prefetcher = newBlockPrefetcher(brq, brq.config, testSyncCh)
}
return ch
}
// Prefetcher allows us to retrieve the prefetcher.
func (brq *blockRetrievalQueue) Prefetcher() Prefetcher {
brq.prefetchMtx.RLock()
defer brq.prefetchMtx.RUnlock()
return brq.prefetcher
}
| 1 | 20,702 | Can you make this accept an action instead? | keybase-kbfs | go |
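A minimal sketch of what the review comment above asks for — threading the request's action through checkCaches instead of a bare doSync flag. The signature mirrors the surrounding file; the accessor used on BlockRequestAction inside the body (a Sync()-style getter) is an assumption here, not a verified API, and the real lookup logic is elided.

    // checkCaches copies a block into `block` if it's in one of our caches.
    func (brq *blockRetrievalQueue) checkCaches(ctx context.Context,
    	kmd KeyMetadata, ptr BlockPointer, block Block,
    	action BlockRequestAction) (PrefetchStatus, error) {
    	// Existing memory-cache and disk-cache lookup logic goes here,
    	// consulting the action (e.g. action.Sync()) wherever the old code
    	// consulted the doSync bool.
    	return NoPrefetch, nil // placeholder; real body elided in this sketch
    }

    // and the call site in request() would become:
    //     prefetchStatus, err := brq.checkCaches(ctx, kmd, ptr, block, action)

This keeps callers from having to decompose the action into separate booleans as more follow-on behaviours are added.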
@@ -33,3 +33,16 @@ When /^I should see an image with name "([^"]*)"$/ do |image_name|
page.should have_selector("img", src: /#{image_name}/)
end
+When /^a product named "([^"]*)"$/ do |product_name|
+ create(:product, fulfillment_method: "fetch", name: product_name, product_type: "screencast")
+end
+
+When /^a video download product named "([^"]*)"$/ do |product_name|
+ product = create(:product, fulfillment_method: "fetch", name: product_name, product_type: "screencast")
+ create(:download, download_file_name: "test.txt", description: "test desc", product: product)
+ create(:video, product: product)
+end
+
+Given /^there is a github product named "([^"]*)"$/ do |product_name|
+ create(:product, fulfillment_method: "github", name: product_name, product_type: "book")
+end | 1 | When /^I add a download with file name "([^"]*)" and description "([^"]*)"$/ do |file_name, description|
click_link "Add a download"
path = File.join(Rails.root,"tmp/",file_name)
File.open(path, 'w+') do |f|
f.puts "Ths is a test file"
end
attach_file "Download", path
fill_in "Download Description", with: description
end
When /^I remove a download with file name "([^"]*)"$/ do |file_name|
click_link 'remove'
end
Then /^I should see "([^"]*)" in input field$/ do |text|
page.should have_css('input', :value => "#{text}")
end
Then /^I should not see "([^"]*)" in input field$/ do |text|
page.should_not have_css('input', :value => "#{text}")
end
When /^I attach an image name "([^"]*)" to the product$/ do |image_name|
path = File.join(Rails.root, "tmp/", image_name)
test_image_path = File.join(Rails.root,"features/support/files/test.jpg")
FileUtils.cp(test_image_path, path)
attach_file "Product image", path
end
When /^I should see an image with name "([^"]*)"$/ do |image_name|
page.should have_selector("img", src: /#{image_name}/)
end
| 1 | 6,396 | Tab inconsistency here (3 spaces instead of 2 spaces) | thoughtbot-upcase | rb |
@@ -56,14 +56,15 @@ func defaultConfig() Genesis {
EnableGravityChainVoting: true,
},
Rewarding: Rewarding{
- InitBalanceStr: unit.ConvertIotxToRau(1200000000).String(),
+ InitBalanceStr: unit.ConvertIotxToRau(200000000).String(),
BlockRewardStr: unit.ConvertIotxToRau(16).String(),
- EpochRewardStr: unit.ConvertIotxToRau(300000).String(),
+ EpochRewardStr: unit.ConvertIotxToRau(12500).String(),
+ AleutianEpochRewardStr: unit.ConvertIotxToRau(15000).String(),
NumDelegatesForEpochReward: 100,
ExemptAddrStrsFromEpochReward: []string{},
- FoundationBonusStr: unit.ConvertIotxToRau(2880).String(),
+ FoundationBonusStr: unit.ConvertIotxToRau(80).String(),
NumDelegatesForFoundationBonus: 36,
- FoundationBonusLastEpoch: 365,
+ FoundationBonusLastEpoch: 8760,
},
}
} | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package genesis
import (
"flag"
"math/big"
"sort"
"time"
"github.com/golang/protobuf/proto"
"github.com/pkg/errors"
"go.uber.org/config"
"go.uber.org/zap"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
)
// Default contains the default genesis config
var Default = defaultConfig()
var genesisPath string
func init() {
flag.StringVar(&genesisPath, "genesis-path", "", "Genesis path")
initTestDefaultConfig()
}
func defaultConfig() Genesis {
return Genesis{
Blockchain: Blockchain{
Timestamp: 1546329600,
BlockGasLimit: 20000000,
ActionGasLimit: 5000000,
BlockInterval: 10 * time.Second,
NumSubEpochs: 2,
NumDelegates: 24,
NumCandidateDelegates: 36,
TimeBasedRotation: false,
PacificBlockHeight: 432001,
AleutianBlockHeight: 695001,
},
Account: Account{
InitBalanceMap: make(map[string]string),
},
Poll: Poll{
EnableGravityChainVoting: true,
},
Rewarding: Rewarding{
InitBalanceStr: unit.ConvertIotxToRau(1200000000).String(),
BlockRewardStr: unit.ConvertIotxToRau(16).String(),
EpochRewardStr: unit.ConvertIotxToRau(300000).String(),
NumDelegatesForEpochReward: 100,
ExemptAddrStrsFromEpochReward: []string{},
FoundationBonusStr: unit.ConvertIotxToRau(2880).String(),
NumDelegatesForFoundationBonus: 36,
FoundationBonusLastEpoch: 365,
},
}
}
func initTestDefaultConfig() {
Default = defaultConfig()
Default.PacificBlockHeight = 0
for i := 0; i < identityset.Size(); i++ {
addr := identityset.Address(i).String()
value := unit.ConvertIotxToRau(100000000).String()
Default.InitBalanceMap[addr] = value
if uint64(i) < Default.NumDelegates {
Default.Delegates = append(Default.Delegates, Delegate{
OperatorAddrStr: addr,
RewardAddrStr: addr,
VotesStr: value,
})
}
}
}
type (
// Genesis is the root level of genesis config. Genesis config is the network-wide blockchain config. All the nodes
	// participating in the same network should use EXACTLY the SAME genesis config.
Genesis struct {
Blockchain `yaml:"blockchain"`
		Account    `yaml:"account"`
Poll `yaml:"poll"`
Rewarding `yaml:"rewarding"`
}
// Blockchain contains blockchain level configs
Blockchain struct {
// Timestamp is the timestamp of the genesis block
Timestamp int64
// BlockGasLimit is the total gas limit could be consumed in a block
BlockGasLimit uint64 `yaml:"blockGasLimit"`
// ActionGasLimit is the per action gas limit cap
ActionGasLimit uint64 `yaml:"actionGasLimit"`
// BlockInterval is the interval between two blocks
BlockInterval time.Duration `yaml:"blockInterval"`
// NumSubEpochs is the number of sub epochs in one epoch of block production
NumSubEpochs uint64 `yaml:"numSubEpochs"`
// NumDelegates is the number of delegates that participate into one epoch of block production
NumDelegates uint64 `yaml:"numDelegates"`
// NumCandidateDelegates is the number of candidate delegates, who may be selected as a delegate via roll dpos
NumCandidateDelegates uint64 `yaml:"numCandidateDelegates"`
// TimeBasedRotation is the flag to enable rotating delegates' time slots on a block height
TimeBasedRotation bool `yaml:"timeBasedRotation"`
// PacificBlockHeight is the start height of using the logic of Pacific version
// TODO: PacificBlockHeight is not added into protobuf definition for backward compatibility
PacificBlockHeight uint64 `yaml:"pacificHeight"`
// AleutianBlockHeight is the start height of adding bloom filter of all events into block header
AleutianBlockHeight uint64 `yaml:"aleutianHeight"`
}
// Account contains the configs for account protocol
Account struct {
// InitBalanceMap is the address and initial balance mapping before the first block.
InitBalanceMap map[string]string `yaml:"initBalances"`
}
// Poll contains the configs for poll protocol
Poll struct {
// EnableGravityChainVoting is a flag whether read voting from gravity chain
EnableGravityChainVoting bool `yaml:"enableGravityChainVoting"`
// GravityChainStartHeight is the height in gravity chain where the init poll result stored
GravityChainStartHeight uint64 `yaml:"gravityChainStartHeight"`
// GravityChainHeightInterval the height interval on gravity chain to pull delegate information
GravityChainHeightInterval uint64 `yaml:"gravityChainHeightInterval"`
// RegisterContractAddress is the address of register contract
RegisterContractAddress string `yaml:"registerContractAddress"`
// StakingContractAddress is the address of staking contract
StakingContractAddress string `yaml:"stakingContractAddress"`
// VoteThreshold is the vote threshold amount in decimal string format
VoteThreshold string `yaml:"voteThreshold"`
// ScoreThreshold is the score threshold amount in decimal string format
ScoreThreshold string `yaml:"scoreThreshold"`
// SelfStakingThreshold is self-staking vote threshold amount in decimal string format
SelfStakingThreshold string `yaml:"selfStakingThreshold"`
// Delegates is a list of delegates with votes
Delegates []Delegate `yaml:"delegates"`
}
// Delegate defines a delegate with address and votes
Delegate struct {
// OperatorAddrStr is the address who will operate the node
OperatorAddrStr string `yaml:"operatorAddr"`
// RewardAddrStr is the address who will get the reward when operator produces blocks
RewardAddrStr string `yaml:"rewardAddr"`
// VotesStr is the score for the operator to rank and weight for rewardee to split epoch reward
VotesStr string `yaml:"votes"`
}
// Rewarding contains the configs for rewarding protocol
Rewarding struct {
// InitBalanceStr is the initial balance of the rewarding protocol in decimal string format
InitBalanceStr string `yaml:"initBalance"`
// BlockReward is the block reward amount in decimal string format
BlockRewardStr string `yaml:"blockReward"`
// EpochReward is the epoch reward amount in decimal string format
EpochRewardStr string `yaml:"epochReward"`
		// NumDelegatesForEpochReward is the number of top candidates that will share an epoch reward
NumDelegatesForEpochReward uint64 `yaml:"numDelegatesForEpochReward"`
// ExemptAddrStrsFromEpochReward is the list of addresses in encoded string format that exempt from epoch reward
ExemptAddrStrsFromEpochReward []string `yaml:"exemptAddrsFromEpochReward"`
// FoundationBonusStr is the bootstrap bonus in decimal string format
FoundationBonusStr string `yaml:"foundationBonus"`
		// NumDelegatesForFoundationBonus is the number of top candidates that will get the bootstrap bonus
NumDelegatesForFoundationBonus uint64 `yaml:"numDelegatesForFoundationBonus"`
// FoundationBonusLastEpoch is the last epoch number that bootstrap bonus will be granted
FoundationBonusLastEpoch uint64 `yaml:"foundationBonusLastEpoch"`
// ProductivityThreshold is the percentage number that a delegate's productivity needs to reach to get the
// epoch reward
ProductivityThreshold uint64 `yaml:"productivityThreshold"`
}
)
// New constructs a genesis config. It loads the default values, and could be overwritten by values defined in the yaml
// config files
func New() (Genesis, error) {
def := defaultConfig()
opts := make([]config.YAMLOption, 0)
opts = append(opts, config.Static(def))
if genesisPath != "" {
opts = append(opts, config.File(genesisPath))
}
yaml, err := config.NewYAML(opts...)
if err != nil {
return Genesis{}, errors.Wrap(err, "error when constructing a genesis in yaml")
}
var genesis Genesis
if err := yaml.Get(config.Root).Populate(&genesis); err != nil {
return Genesis{}, errors.Wrap(err, "failed to unmarshal yaml genesis to struct")
}
return genesis, nil
}
// Hash is the hash of genesis config
func (g *Genesis) Hash() hash.Hash256 {
gbProto := iotextypes.GenesisBlockchain{
Timestamp: g.Timestamp,
BlockGasLimit: g.BlockGasLimit,
ActionGasLimit: g.ActionGasLimit,
BlockInterval: g.BlockInterval.Nanoseconds(),
NumSubEpochs: g.NumSubEpochs,
NumDelegates: g.NumDelegates,
NumCandidateDelegates: g.NumCandidateDelegates,
TimeBasedRotation: g.TimeBasedRotation,
}
initBalanceAddrs := make([]string, 0)
for initBalanceAddr := range g.InitBalanceMap {
initBalanceAddrs = append(initBalanceAddrs, initBalanceAddr)
}
sort.Strings(initBalanceAddrs)
initBalances := make([]string, 0)
for _, initBalanceAddr := range initBalanceAddrs {
initBalances = append(initBalances, g.InitBalanceMap[initBalanceAddr])
}
aProto := iotextypes.GenesisAccount{
InitBalanceAddrs: initBalanceAddrs,
InitBalances: initBalances,
}
dProtos := make([]*iotextypes.GenesisDelegate, 0)
for _, d := range g.Delegates {
dProto := iotextypes.GenesisDelegate{
OperatorAddr: d.OperatorAddrStr,
RewardAddr: d.RewardAddrStr,
Votes: d.VotesStr,
}
dProtos = append(dProtos, &dProto)
}
pProto := iotextypes.GenesisPoll{
EnableGravityChainVoting: g.EnableGravityChainVoting,
GravityChainStartHeight: g.GravityChainStartHeight,
RegisterContractAddress: g.RegisterContractAddress,
StakingContractAddress: g.StakingContractAddress,
VoteThreshold: g.VoteThreshold,
ScoreThreshold: g.ScoreThreshold,
SelfStakingThreshold: g.SelfStakingThreshold,
Delegates: dProtos,
}
rProto := iotextypes.GenesisRewarding{
InitBalance: g.InitBalanceStr,
BlockReward: g.BlockRewardStr,
EpochReward: g.EpochRewardStr,
NumDelegatesForEpochReward: g.NumDelegatesForEpochReward,
FoundationBonus: g.FoundationBonusStr,
NumDelegatesForFoundationBonus: g.NumDelegatesForFoundationBonus,
FoundationBonusLastEpoch: g.FoundationBonusLastEpoch,
ProductivityThreshold: g.ProductivityThreshold,
}
gProto := iotextypes.Genesis{
Blockchain: &gbProto,
Account: &aProto,
Poll: &pProto,
Rewarding: &rProto,
}
b, err := proto.Marshal(&gProto)
if err != nil {
log.L().Panic("Error when marshaling genesis proto", zap.Error(err))
}
return hash.Hash256b(b)
}
// InitBalances returns the addresses that have initial balances and the corresponding amounts. The i-th amount is the
// i-th address' balance.
func (a *Account) InitBalances() ([]address.Address, []*big.Int) {
// Make the list always be ordered
addrStrs := make([]string, 0)
for addrStr := range a.InitBalanceMap {
addrStrs = append(addrStrs, addrStr)
}
sort.Strings(addrStrs)
addrs := make([]address.Address, 0)
amounts := make([]*big.Int, 0)
for _, addrStr := range addrStrs {
addr, err := address.FromString(addrStr)
if err != nil {
log.L().Panic("Error when decoding the account protocol init balance address from string.", zap.Error(err))
}
addrs = append(addrs, addr)
amount, ok := big.NewInt(0).SetString(a.InitBalanceMap[addrStr], 10)
if !ok {
log.S().Panicf("Error when casting init balance string %s into big int", a.InitBalanceMap[addrStr])
}
amounts = append(amounts, amount)
}
return addrs, amounts
}
// OperatorAddr is the address of operator
func (d *Delegate) OperatorAddr() address.Address {
addr, err := address.FromString(d.OperatorAddrStr)
if err != nil {
log.L().Panic("Error when decoding the poll protocol operator address from string.", zap.Error(err))
}
return addr
}
// RewardAddr is the address of rewardee, which is allowed to be nil
func (d *Delegate) RewardAddr() address.Address {
if d.RewardAddrStr == "" {
return nil
}
addr, err := address.FromString(d.RewardAddrStr)
if err != nil {
log.L().Panic("Error when decoding the poll protocol rewardee address from string.", zap.Error(err))
}
return addr
}
// Votes returns the votes
func (d *Delegate) Votes() *big.Int {
val, ok := big.NewInt(0).SetString(d.VotesStr, 10)
if !ok {
log.S().Panicf("Error when casting votes string %s into big int", d.VotesStr)
}
return val
}
// InitBalance returns the init balance of the rewarding fund
func (r *Rewarding) InitBalance() *big.Int {
val, ok := big.NewInt(0).SetString(r.InitBalanceStr, 10)
if !ok {
log.S().Panicf("Error when casting init balance string %s into big int", r.InitBalanceStr)
}
return val
}
// BlockReward returns the block reward amount
func (r *Rewarding) BlockReward() *big.Int {
val, ok := big.NewInt(0).SetString(r.BlockRewardStr, 10)
if !ok {
log.S().Panicf("Error when casting block reward string %s into big int", r.BlockRewardStr)
}
return val
}
// EpochReward returns the epoch reward amount
func (r *Rewarding) EpochReward() *big.Int {
val, ok := big.NewInt(0).SetString(r.EpochRewardStr, 10)
if !ok {
log.S().Panicf("Error when casting epoch reward string %s into big int", r.EpochRewardStr)
}
return val
}
// ExemptAddrsFromEpochReward returns the list of addresses that exempt from epoch reward
func (r *Rewarding) ExemptAddrsFromEpochReward() []address.Address {
addrs := make([]address.Address, 0)
for _, addrStr := range r.ExemptAddrStrsFromEpochReward {
addr, err := address.FromString(addrStr)
if err != nil {
log.L().Panic("Error when decoding the rewarding protocol exempt address from string.", zap.Error(err))
}
addrs = append(addrs, addr)
}
return addrs
}
// FoundationBonus returns the bootstrap bonus amount rewarded per epoch
func (r *Rewarding) FoundationBonus() *big.Int {
val, ok := big.NewInt(0).SetString(r.FoundationBonusStr, 10)
if !ok {
log.S().Panicf("Error when casting bootstrap bonus string %s into big int", r.EpochRewardStr)
}
return val
}
| 1 | 18,204 | Does this change mean we are changing the epoch to 1 hour? | iotexproject-iotex-core | go
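For context on the question above, the new values are at least arithmetically consistent with a switch from 24-hour to 1-hour epochs (a back-of-the-envelope check only, not a confirmation of intent): the epoch reward drops from 300,000 to 12,500 IOTX, and 12,500 × 24 = 300,000, so the per-day total is unchanged; likewise FoundationBonusLastEpoch moves from 365 to 8,760 = 365 × 24, i.e. still roughly one year of epochs. The foundation bonus itself changes from 2,880 to 80 per epoch, and 80 × 24 = 1,920 ≠ 2,880, so that per-day amount does not stay constant under the same assumption.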
@@ -46,6 +46,11 @@ import core
import keyboardHandler
import characterProcessing
from . import guiHelper
+
+#: The size that settings panel text descriptions should be wrapped at.
+# Ensure self.scaleSize is used to adjust for OS scaling adjustments.
+PANEL_DESCRIPTION_WIDTH = 544
+
try:
import updateCheck
except RuntimeError: | 1 | # -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2006-2021 NV Access Limited, Peter Vágner, Aleksey Sadovoy,
# Rui Batista, Joseph Lee, Heiko Folkerts, Zahari Yurukov, Leonard de Ruijter,
# Derek Riemer, Babbage B.V., Davy Kager, Ethan Holliger, Bill Dengler, Thomas Stivers,
# Julien Cochuyt, Peter Vágner, Cyrille Bougot, Mesar Hameed, Łukasz Golonka, Aaron Cannon,
# Adriani90, André-Abush Clause, Dawid Pieper, Heiko Folkerts, Takuya Nishimoto, Thomas Stivers,
# jakubl7545, mltony
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
import logging
from abc import ABCMeta, abstractmethod
import copy
import os
from enum import IntEnum
import typing
import wx
from vision.providerBase import VisionEnhancementProviderSettings
from wx.lib.expando import ExpandoTextCtrl
import wx.lib.newevent
import winUser
import logHandler
import installer
from synthDriverHandler import changeVoice, getSynth, getSynthList, setSynth, SynthDriver
import config
import languageHandler
import speech
import gui
import gui.contextHelp
import globalVars
from logHandler import log
import nvwave
import audioDucking
import speechDictHandler
import queueHandler
import braille
import brailleTables
import brailleInput
import vision
import vision.providerInfo
import vision.providerBase
from typing import Callable, List, Optional, Any
import core
import keyboardHandler
import characterProcessing
from . import guiHelper
try:
import updateCheck
except RuntimeError:
updateCheck = None
from . import nvdaControls
from autoSettingsUtils.utils import UnsupportedConfigParameterError
from autoSettingsUtils.autoSettings import AutoSettings
from autoSettingsUtils.driverSetting import BooleanDriverSetting, NumericDriverSetting, DriverSetting
import touchHandler
import winVersion
import weakref
import time
import keyLabels
from .dpiScalingHelper import DpiScalingHelperMixinWithoutInit
class SettingsDialog(
DpiScalingHelperMixinWithoutInit,
gui.contextHelp.ContextHelpMixin,
wx.Dialog, # wxPython does not seem to call base class initializer, put last in MRO
metaclass=guiHelper.SIPABCMeta
):
"""A settings dialog.
A settings dialog consists of one or more settings controls and OK and Cancel buttons and an optional Apply button.
Action may be taken in response to the OK, Cancel or Apply buttons.
To use this dialog:
* Set L{title} to the title of the dialog.
* Override L{makeSettings} to populate a given sizer with the settings controls.
* Optionally, override L{postInit} to perform actions after the dialog is created, such as setting the focus. Be
aware that L{postInit} is also called by L{onApply}.
* Optionally, extend one or more of L{onOk}, L{onCancel} or L{onApply} to perform actions in response to the
OK, Cancel or Apply buttons, respectively.
@ivar title: The title of the dialog.
@type title: str
"""
class MultiInstanceError(RuntimeError): pass
class MultiInstanceErrorWithDialog(MultiInstanceError):
dialog: 'SettingsDialog'
def __init__(self, dialog: 'SettingsDialog', *args: object) -> None:
self.dialog = dialog
super().__init__(*args)
class DialogState(IntEnum):
CREATED = 0
DESTROYED = 1
# holds instances of SettingsDialogs as keys, and state as the value
_instances = weakref.WeakKeyDictionary()
title = ""
helpId = "NVDASettings"
shouldSuspendConfigProfileTriggers = True
def __new__(cls, *args, **kwargs):
# We are iterating over instanceItems only once, so it can safely be an iterator.
instanceItems = SettingsDialog._instances.items()
instancesOfSameClass = (
(dlg, state) for dlg, state in instanceItems if isinstance(dlg, cls)
)
firstMatchingInstance, state = next(instancesOfSameClass, (None, None))
multiInstanceAllowed = kwargs.get('multiInstanceAllowed', False)
if log.isEnabledFor(log.DEBUG):
instancesState = dict(SettingsDialog._instances)
log.debug(
"Creating new settings dialog (multiInstanceAllowed:{}). "
"State of _instances {!r}".format(multiInstanceAllowed, instancesState)
)
if state is cls.DialogState.CREATED and not multiInstanceAllowed:
raise SettingsDialog.MultiInstanceErrorWithDialog(
firstMatchingInstance,
"Only one instance of SettingsDialog can exist at a time",
)
if state is cls.DialogState.DESTROYED and not multiInstanceAllowed:
# the dialog has been destroyed by wx, but the instance is still available. This indicates there is something
# keeping it alive.
log.error("Opening new settings dialog while instance still exists: {!r}".format(firstMatchingInstance))
obj = super(SettingsDialog, cls).__new__(cls, *args, **kwargs)
SettingsDialog._instances[obj] = cls.DialogState.CREATED
return obj
def _setInstanceDestroyedState(self):
# prevent race condition with object deletion
# prevent deletion of the object while we work on it.
nonWeak: typing.Dict[SettingsDialog, SettingsDialog.DialogState] = dict(SettingsDialog._instances)
if (
self in SettingsDialog._instances
# Because destroy handlers are use evt.skip, _setInstanceDestroyedState may be called many times
# prevent noisy logging.
and self.DialogState.DESTROYED != SettingsDialog._instances[self]
):
if log.isEnabledFor(log.DEBUG):
instanceStatesGen = (
f"{instance.title} - {state.name}"
for instance, state in nonWeak.items()
)
instancesList = list(instanceStatesGen)
log.debug(
f"Setting state to destroyed for instance: {self.title} - {self.__class__.__qualname__} - {self}\n"
f"Current _instances {instancesList}"
)
SettingsDialog._instances[self] = self.DialogState.DESTROYED
def __init__(
self, parent,
resizeable=False,
hasApplyButton=False,
settingsSizerOrientation=wx.VERTICAL,
multiInstanceAllowed=False
):
"""
@param parent: The parent for this dialog; C{None} for no parent.
@type parent: wx.Window
@param resizeable: True if the settings dialog should be resizable by the user, only set this if
you have tested that the components resize correctly.
@type resizeable: bool
@param hasApplyButton: C{True} to add an apply button to the dialog; defaults to C{False} for backwards compatibility.
@type hasApplyButton: bool
@param settingsSizerOrientation: Either wx.VERTICAL or wx.HORIZONTAL. This controls the orientation of the
sizer that is passed into L{makeSettings}. The default is wx.VERTICAL.
@type settingsSizerOrientation: wx.Orientation
@param multiInstanceAllowed: Whether multiple instances of SettingsDialog may exist.
Note that still only one instance of a particular SettingsDialog subclass may exist at one time.
@type multiInstanceAllowed: bool
"""
if gui._isDebug():
startTime = time.time()
windowStyle = wx.DEFAULT_DIALOG_STYLE
if resizeable:
windowStyle |= wx.RESIZE_BORDER | wx.MAXIMIZE_BOX
super().__init__(parent, title=self.title, style=windowStyle)
self.hasApply = hasApplyButton
self.mainSizer=wx.BoxSizer(wx.VERTICAL)
self.settingsSizer=wx.BoxSizer(settingsSizerOrientation)
self.makeSettings(self.settingsSizer)
self.mainSizer.Add(self.settingsSizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL | wx.EXPAND, proportion=1)
buttons = wx.OK | wx.CANCEL
if hasApplyButton:
buttons |= wx.APPLY
self.mainSizer.Add(
self.CreateSeparatedButtonSizer(buttons),
border=guiHelper.BORDER_FOR_DIALOGS,
flag=wx.EXPAND | wx.BOTTOM | wx.LEFT | wx.RIGHT
)
self.mainSizer.Fit(self)
self.SetSizer(self.mainSizer)
self.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)
self.Bind(wx.EVT_BUTTON, self.onApply, id=wx.ID_APPLY)
self.Bind(wx.EVT_CHAR_HOOK, self._enterActivatesOk_ctrlSActivatesApply)
# Garbage collection normally handles removing the settings instance, however this may not happen immediately
# after a window is closed, or may be blocked by a circular reference. So instead, remove when the window is
# destroyed.
self.Bind(wx.EVT_WINDOW_DESTROY, self._onWindowDestroy)
self.postInit()
if resizeable:
self.SetMinSize(self.mainSizer.GetMinSize())
self.CentreOnScreen()
if gui._isDebug():
log.debug("Loading %s took %.2f seconds"%(self.__class__.__name__, time.time() - startTime))
def _enterActivatesOk_ctrlSActivatesApply(self, evt):
"""Listens for keyboard input and triggers ok button on enter and triggers apply button when control + S is
pressed. Cancel behavior is built into wx.
Pressing enter will also close the dialog when a list has focus
(e.g. the list of symbols in the symbol pronunciation dialog).
Without this custom handler, enter would propagate to the list control (wx ticket #3725).
"""
if evt.KeyCode in (wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER):
self.ProcessEvent(wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_OK))
elif self.hasApply and evt.UnicodeKey == ord(u'S') and evt.controlDown:
self.ProcessEvent(wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_APPLY))
else:
evt.Skip()
@abstractmethod
def makeSettings(self, sizer):
"""Populate the dialog with settings controls.
Subclasses must override this method.
@param sizer: The sizer to which to add the settings controls.
@type sizer: wx.Sizer
"""
raise NotImplementedError
def postInit(self):
"""Called after the dialog has been created.
For example, this might be used to set focus to the desired control.
Sub-classes may override this method.
"""
def onOk(self, evt):
"""Take action in response to the OK button being pressed.
Sub-classes may extend this method.
This base method should always be called to clean up the dialog.
"""
self.DestroyLater()
self.SetReturnCode(wx.ID_OK)
def onCancel(self, evt):
"""Take action in response to the Cancel button being pressed.
Sub-classes may extend this method.
This base method should always be called to clean up the dialog.
"""
self.DestroyLater()
self.SetReturnCode(wx.ID_CANCEL)
def onApply(self, evt):
"""Take action in response to the Apply button being pressed.
Sub-classes may extend or override this method.
This base method should be called to run the postInit method.
"""
self.postInit()
self.SetReturnCode(wx.ID_APPLY)
def _onWindowDestroy(self, evt):
evt.Skip()
self._setInstanceDestroyedState()
# An event and event binder that will notify the containers that they should
# redo the layout in whatever way makes sense for their particular content.
_RWLayoutNeededEvent, EVT_RW_LAYOUT_NEEDED = wx.lib.newevent.NewCommandEvent()
class SettingsPanel(
DpiScalingHelperMixinWithoutInit,
gui.contextHelp.ContextHelpMixin,
wx.Panel, # wxPython does not seem to call base class initializer, put last in MRO
metaclass=guiHelper.SIPABCMeta
):
"""A settings panel, to be used in a multi category settings dialog.
A settings panel consists of one or more settings controls.
Action may be taken in response to the parent dialog's OK or Cancel buttons.
To use this panel:
* Set L{title} to the title of the category.
* Override L{makeSettings} to populate a given sizer with the settings controls.
* Optionally, extend L{onPanelActivated} to perform actions after the category has been selected in the list of categories, such as synthesizer or braille display list population.
* Optionally, extend L{onPanelDeactivated} to perform actions after the category has been deselected (i.e. another category is selected) in the list of categories.
* Optionally, extend one or both of L{onSave} or L{onDiscard} to perform actions in response to the parent dialog's OK or Cancel buttons, respectively.
* Optionally, extend one or both of L{isValid} or L{postSave} to perform validation before or steps after saving, respectively.
@ivar title: The title of the settings panel, also listed in the list of settings categories.
@type title: str
"""
title=""
panelDescription=u""
def __init__(self, parent: wx.Window):
"""
@param parent: The parent for this panel; C{None} for no parent.
"""
if gui._isDebug():
startTime = time.time()
super().__init__(parent)
self._buildGui()
if gui._isDebug():
elapsedSeconds = time.time() - startTime
panelName = self.__class__.__qualname__
log.debug(f"Loading {panelName} took {elapsedSeconds:.2f} seconds")
def _buildGui(self):
self.mainSizer=wx.BoxSizer(wx.VERTICAL)
self.settingsSizer=wx.BoxSizer(wx.VERTICAL)
self.makeSettings(self.settingsSizer)
self.mainSizer.Add(self.settingsSizer, flag=wx.ALL | wx.EXPAND)
self.mainSizer.Fit(self)
self.SetSizer(self.mainSizer)
@abstractmethod
def makeSettings(self, sizer: wx.BoxSizer):
"""Populate the panel with settings controls.
Subclasses must override this method.
@param sizer: The sizer to which to add the settings controls.
"""
raise NotImplementedError
def onPanelActivated(self):
"""Called after the panel has been activated (i.e. de corresponding category is selected in the list of categories).
For example, this might be used for resource intensive tasks.
Sub-classes should extend this method.
"""
self.Show()
def onPanelDeactivated(self):
"""Called after the panel has been deactivated (i.e. another category has been selected in the list of categories).
		Sub-classes should extend this method.
"""
self.Hide()
@abstractmethod
def onSave(self):
"""Take action in response to the parent's dialog OK or apply button being pressed.
Sub-classes should override this method.
MultiCategorySettingsDialog is responsible for cleaning up the panel when OK is pressed.
"""
raise NotImplementedError
def isValid(self):
"""Evaluate whether the current circumstances of this panel are valid
and allow saving all the settings in a L{MultiCategorySettingsDialog}.
Sub-classes may extend this method.
@returns: C{True} if validation should continue,
C{False} otherwise.
@rtype: bool
"""
return True
def postSave(self):
"""Take action whenever saving settings for all panels in a L{MultiCategorySettingsDialog} succeeded.
Sub-classes may extend this method.
"""
def onDiscard(self):
"""Take action in response to the parent's dialog Cancel button being pressed.
Sub-classes may override this method.
MultiCategorySettingsDialog is responsible for cleaning up the panel when Cancel is pressed.
"""
def _sendLayoutUpdatedEvent(self):
"""Notify any wx parents that may be listening that they should redo their layout in whatever way
makes sense for them. It is expected that sub-classes call this method in response to changes in
the number of GUI items in their panel.
"""
event = _RWLayoutNeededEvent(self.GetId())
event.SetEventObject(self)
self.GetEventHandler().ProcessEvent(event)
class SettingsPanelAccessible(wx.Accessible):
"""
WX Accessible implementation to set the role of a settings panel to property page,
as well as to set the accessible description based on the panel's description.
"""
Window: SettingsPanel
def GetRole(self, childId):
return (wx.ACC_OK, wx.ROLE_SYSTEM_PROPERTYPAGE)
def GetDescription(self, childId):
return (wx.ACC_OK, self.Window.panelDescription)
class MultiCategorySettingsDialog(SettingsDialog):
"""A settings dialog with multiple settings categories.
A multi category settings dialog consists of a list view with settings categories on the left side,
and a settings panel on the right side of the dialog.
Furthermore, in addition to Ok and Cancel buttons, it has an Apply button by default,
which is different from the default behavior of L{SettingsDialog}.
To use this dialog: set title and populate L{categoryClasses} with subclasses of SettingsPanel.
Make sure that L{categoryClasses} only contains panels that are available on a particular system.
For example, if a certain category of settings is only supported on Windows 10 and higher,
that category should be left out of L{categoryClasses}
"""
title=""
categoryClasses: typing.List[typing.Type[SettingsPanel]] = []
class CategoryUnavailableError(RuntimeError): pass
def __init__(self, parent, initialCategory=None):
"""
@param parent: The parent for this dialog; C{None} for no parent.
@type parent: wx.Window
@param initialCategory: The initial category to select when opening this dialog
@type parent: SettingsPanel
"""
if initialCategory and not issubclass(initialCategory,SettingsPanel):
if gui._isDebug():
log.debug("Unable to open category: {}".format(initialCategory), stack_info=True)
raise TypeError("initialCategory should be an instance of SettingsPanel")
if initialCategory and initialCategory not in self.categoryClasses:
if gui._isDebug():
log.debug("Unable to open category: {}".format(initialCategory), stack_info=True)
raise MultiCategorySettingsDialog.CategoryUnavailableError(
"The provided initial category is not a part of this dialog"
)
self.initialCategory = initialCategory
self.currentCategory = None
self.setPostInitFocus = None
# dictionary key is index of category in self.catList, value is the instance.
# Partially filled, check for KeyError
self.catIdToInstanceMap: typing.Dict[int, SettingsPanel] = {}
super(MultiCategorySettingsDialog, self).__init__(
parent,
resizeable=True,
hasApplyButton=True,
settingsSizerOrientation=wx.HORIZONTAL
)
# setting the size must be done after the parent is constructed.
self.SetMinSize(self.scaleSize(self.MIN_SIZE))
self.SetSize(self.scaleSize(self.INITIAL_SIZE))
# the size has changed, so recenter on the screen
self.CentreOnScreen()
# Initial / min size for the dialog. This size was chosen as a medium fit, so the
# smaller settings panels are not surrounded by too much space but most of
# the panels fit. Vertical scrolling is acceptable. Horizontal scrolling less
# so, the width was chosen to eliminate horizontal scroll bars. If a panel
# exceeds the initial width, a debugWarning will be added to the log.
INITIAL_SIZE = (800, 480)
MIN_SIZE = (470, 240) # Min height required to show the OK, Cancel, Apply buttons
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for the list of categories in a multi category settings dialog.
categoriesLabelText=_("&Categories:")
categoriesLabel = wx.StaticText(self, label=categoriesLabelText)
# since the categories list and the container both expand in height, the y
# portion is essentially a "min" height.
# These sizes are set manually so that the initial proportions within the dialog look correct. If these sizes are
# not given, then I believe the proportion arguments (as given to the gridBagSizer.AddGrowableColumn) are used
# to set their relative sizes. We want the proportion argument to be used for resizing, but not the initial size.
catListDim = (150, 10)
catListDim = self.scaleSize(catListDim)
initialScaledWidth = self.scaleSize(self.INITIAL_SIZE[0])
spaceForBorderWidth = self.scaleSize(20)
catListWidth = catListDim[0]
containerDim = (initialScaledWidth - catListWidth - spaceForBorderWidth, self.scaleSize(10))
self.catListCtrl = nvdaControls.AutoWidthColumnListCtrl(
self,
autoSizeColumn=1,
size=catListDim,
style=wx.LC_REPORT|wx.LC_SINGLE_SEL|wx.LC_NO_HEADER
)
# This list consists of only one column.
# The provided column header is just a placeholder, as it is hidden due to the wx.LC_NO_HEADER style flag.
self.catListCtrl.InsertColumn(0,categoriesLabelText)
self.container = nvdaControls.TabbableScrolledPanel(
parent = self,
style = wx.TAB_TRAVERSAL | wx.BORDER_THEME,
size=containerDim
)
# The min size is reset so that these controls can be reduced to below their "size" constraint.
self.container.SetMinSize((1,1))
self.catListCtrl.SetMinSize((1,1))
self.containerSizer = wx.BoxSizer(wx.VERTICAL)
self.container.SetSizer(self.containerSizer)
for cls in self.categoryClasses:
if not issubclass(cls,SettingsPanel):
raise RuntimeError("Invalid category class %s provided in %s.categoryClasses"%(cls.__name__,self.__class__.__name__))
# It's important here that the listItems are added to catListCtrl in the same order that they exist in categoryClasses.
# The ListItem index / Id is used to index categoryClasses, and used as the key in catIdToInstanceMap.
self.catListCtrl.Append((cls.title,))
# populate the GUI with the initial category
initialCatIndex = 0 if not self.initialCategory else self.categoryClasses.index(self.initialCategory)
self._doCategoryChange(initialCatIndex)
self.catListCtrl.Select(initialCatIndex)
# we must focus the initial category in the category list.
self.catListCtrl.Focus(initialCatIndex)
self.setPostInitFocus = self.container.SetFocus if self.initialCategory else self.catListCtrl.SetFocus
self.gridBagSizer=gridBagSizer=wx.GridBagSizer(
hgap=guiHelper.SPACE_BETWEEN_BUTTONS_HORIZONTAL,
vgap=guiHelper.SPACE_BETWEEN_BUTTONS_VERTICAL
)
# add the label, the categories list, and the settings panel to a 2 by 2 grid.
# The label should span two columns, so that the start of the categories list
# and the start of the settings panel are at the same vertical position.
gridBagSizer.Add(categoriesLabel, pos=(0,0), span=(1,2))
gridBagSizer.Add(self.catListCtrl, pos=(1,0), flag=wx.EXPAND)
gridBagSizer.Add(self.container, pos=(1,1), flag=wx.EXPAND)
# Make the row with the listCtrl and settings panel grow vertically.
gridBagSizer.AddGrowableRow(1)
# Make the columns with the listCtrl and settings panel grow horizontally if the dialog is resized.
# They should grow 1:3, since the settings panel is much more important, and already wider
# than the listCtrl.
gridBagSizer.AddGrowableCol(0, proportion=1)
gridBagSizer.AddGrowableCol(1, proportion=3)
sHelper.sizer.Add(gridBagSizer, flag=wx.EXPAND, proportion=1)
self.container.Layout()
self.catListCtrl.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onCategoryChange)
self.Bind(wx.EVT_CHAR_HOOK, self.onCharHook)
self.Bind(EVT_RW_LAYOUT_NEEDED, self._onPanelLayoutChanged)
def _getCategoryPanel(self, catId):
panel = self.catIdToInstanceMap.get(catId, None)
if not panel:
try:
cls = self.categoryClasses[catId]
except IndexError:
raise ValueError("Unable to create panel for unknown category ID: {}".format(catId))
panel = cls(parent=self.container)
panel.Hide()
self.containerSizer.Add(
panel, flag=wx.ALL | wx.EXPAND,
border=guiHelper.SPACE_BETWEEN_ASSOCIATED_CONTROL_HORIZONTAL
)
self.catIdToInstanceMap[catId] = panel
panelWidth = panel.Size[0]
availableWidth = self.containerSizer.GetSize()[0]
if panelWidth > availableWidth and gui._isDebug():
log.debugWarning(
("Panel width ({1}) too large for: {0} Try to reduce the width of this panel, or increase width of " +
"MultiCategorySettingsDialog.MIN_SIZE"
).format(cls, panel.Size[0])
)
panel.SetLabel(panel.title)
panel.SetAccessible(SettingsPanelAccessible(panel))
return panel
def postInit(self):
# By default after the dialog is created, focus lands on the button group for wx.Dialogs. However this is not where
# we want focus. We only want to modify focus after creation (makeSettings), but postInit is also called after
# onApply, so we reset the setPostInitFocus function.
if self.setPostInitFocus:
self.setPostInitFocus()
self.setPostInitFocus = None
else:
# When postInit is called without a setPostInitFocus (i.e. because onApply was called),
# set the focus to the listCtrl. This is a good starting point for a "fresh state".
self.catListCtrl.SetFocus()
def onCharHook(self,evt):
"""Listens for keyboard input and switches panels for control+tab"""
if not self.catListCtrl:
# Dialog has not yet been constructed.
# Allow another handler to take the event, and return early.
evt.Skip()
return
key = evt.GetKeyCode()
listHadFocus = self.catListCtrl.HasFocus()
if evt.ControlDown() and key==wx.WXK_TAB:
# Focus the categories list. If we don't, the panel won't hide correctly
if not listHadFocus:
self.catListCtrl.SetFocus()
index = self.catListCtrl.GetFirstSelected()
newIndex=index-1 if evt.ShiftDown() else index+1
# Less than first wraps to the last index, greater than last wraps to first index.
newIndex=newIndex % self.catListCtrl.ItemCount
self.catListCtrl.Select(newIndex)
# we must focus the new selection in the category list to trigger the change of category.
self.catListCtrl.Focus(newIndex)
if not listHadFocus and self.currentCategory:
self.currentCategory.SetFocus()
else:
evt.Skip()
def _onPanelLayoutChanged(self,evt):
# call layout and SetupScrolling on the container so that the controls appear in their expected locations.
self.container.Layout()
self.container.SetupScrolling()
# when child elements get smaller the scrolledPanel does not
# erase the old contents and must be redrawn
self.container.Refresh()
def _doCategoryChange(self, newCatId):
oldCat = self.currentCategory
# Freeze and Thaw are called to stop visual artifacts while the GUI
# is being rebuilt. Without this, the controls can sometimes be seen being
# added.
self.container.Freeze()
try:
newCat = self._getCategoryPanel(newCatId)
except ValueError as e:
newCatTitle = self.catListCtrl.GetItemText(newCatId)
log.error("Unable to change to category: {}".format(newCatTitle), exc_info=e)
return
if oldCat:
oldCat.onPanelDeactivated()
self.currentCategory = newCat
newCat.onPanelActivated()
# call Layout and SetupScrolling on the container to make sure that the controls appear in their expected locations.
self.container.Layout()
self.container.SetupScrolling()
self.container.Thaw()
def onCategoryChange(self, evt):
currentCat = self.currentCategory
newIndex = evt.GetIndex()
if not currentCat or newIndex != self.categoryClasses.index(currentCat.__class__):
self._doCategoryChange(newIndex)
else:
evt.Skip()
def _validateAllPanels(self):
"""Check if all panels are valid, and can be saved
@note: raises ValueError if a panel is not valid. See c{SettingsPanel.isValid}
"""
for panel in self.catIdToInstanceMap.values():
if panel.isValid() is False:
raise ValueError("Validation for %s blocked saving settings" % panel.__class__.__name__)
def _saveAllPanels(self):
for panel in self.catIdToInstanceMap.values():
panel.onSave()
def _notifyAllPanelsSaveOccurred(self):
for panel in self.catIdToInstanceMap.values():
panel.postSave()
def _doSave(self):
try:
self._validateAllPanels()
self._saveAllPanels()
self._notifyAllPanelsSaveOccurred()
except ValueError:
log.debugWarning("Error while saving settings:", exc_info=True)
return
def onOk(self, evt):
self._doSave()
super(MultiCategorySettingsDialog,self).onOk(evt)
def onCancel(self,evt):
for panel in self.catIdToInstanceMap.values():
panel.onDiscard()
super(MultiCategorySettingsDialog,self).onCancel(evt)
def onApply(self,evt):
self._doSave()
super(MultiCategorySettingsDialog,self).onApply(evt)
class GeneralSettingsPanel(SettingsPanel):
# Translators: This is the label for the general settings panel.
title = _("General")
helpId = "GeneralSettings"
LOG_LEVELS = (
# Translators: One of the log levels of NVDA (the disabled mode turns off logging completely).
(log.OFF, _("disabled")),
# Translators: One of the log levels of NVDA (the info mode shows info as NVDA runs).
(log.INFO, _("info")),
# Translators: One of the log levels of NVDA (the debug warning shows debugging messages and warnings as NVDA runs).
(log.DEBUGWARNING, _("debug warning")),
# Translators: One of the log levels of NVDA (the input/output shows keyboard commands and/or braille commands as well as speech and/or braille output of NVDA).
(log.IO, _("input/output")),
# Translators: One of the log levels of NVDA (the debug mode shows debug messages as NVDA runs).
(log.DEBUG, _("debug"))
)
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
self.languageNames = languageHandler.getAvailableLanguages(presentational=True)
languageChoices = [x[1] for x in self.languageNames]
# Translators: The label for a setting in general settings to select NVDA's interface language
# (once selected, NVDA must be restarted; the option user default means the user's Windows language
# will be used).
languageLabelText = _("NVDA &Language (requires restart):")
self.languageList=settingsSizerHelper.addLabeledControl(languageLabelText, wx.Choice, choices=languageChoices)
self.bindHelpEvent("GeneralSettingsLanguage", self.languageList)
self.languageList.SetToolTip(wx.ToolTip("Choose the language NVDA's messages and user interface should be presented in."))
try:
self.oldLanguage=config.conf["general"]["language"]
index=[x[0] for x in self.languageNames].index(self.oldLanguage)
self.languageList.SetSelection(index)
except:
pass
if globalVars.appArgs.secure:
self.languageList.Disable()
# Translators: The label for a setting in general settings to save current configuration when NVDA
# exits (if it is not checked, user needs to save configuration before quitting NVDA).
self.saveOnExitCheckBox = wx.CheckBox(self, label=_("&Save configuration when exiting NVDA"))
self.bindHelpEvent("GeneralSettingsSaveConfig", self.saveOnExitCheckBox)
self.saveOnExitCheckBox.SetValue(config.conf["general"]["saveConfigurationOnExit"])
if globalVars.appArgs.secure:
self.saveOnExitCheckBox.Disable()
settingsSizerHelper.addItem(self.saveOnExitCheckBox)
# Translators: The label for a setting in general settings to ask before quitting NVDA (if not checked, NVDA will exit without asking the user for action).
self.askToExitCheckBox=wx.CheckBox(self,label=_("Sho&w exit options when exiting NVDA"))
self.askToExitCheckBox.SetValue(config.conf["general"]["askToExit"])
settingsSizerHelper.addItem(self.askToExitCheckBox)
self.bindHelpEvent("GeneralSettingsShowExitOptions", self.askToExitCheckBox)
# Translators: The label for a setting in general settings to play sounds when NVDA starts or exits.
self.playStartAndExitSoundsCheckBox=wx.CheckBox(self,label=_("&Play sounds when starting or exiting NVDA"))
self.bindHelpEvent("GeneralSettingsPlaySounds", self.playStartAndExitSoundsCheckBox)
self.playStartAndExitSoundsCheckBox.SetValue(config.conf["general"]["playStartAndExitSounds"])
settingsSizerHelper.addItem(self.playStartAndExitSoundsCheckBox)
# Translators: The label for a setting in general settings to select logging level of NVDA as it runs
# (available options and what they are logging are found under comments for the logging level messages
# themselves).
logLevelLabelText=_("L&ogging level:")
logLevelChoices = [name for level, name in self.LOG_LEVELS]
self.logLevelList = settingsSizerHelper.addLabeledControl(logLevelLabelText, wx.Choice, choices=logLevelChoices)
self.bindHelpEvent("GeneralSettingsLogLevel", self.logLevelList)
curLevel = log.getEffectiveLevel()
if logHandler.isLogLevelForced():
self.logLevelList.Disable()
for index, (level, name) in enumerate(self.LOG_LEVELS):
if level == curLevel:
self.logLevelList.SetSelection(index)
break
else:
log.debugWarning("Could not set log level list to current log level")
# Translators: The label for a setting in general settings to allow NVDA to start after logging onto
# Windows (if checked, NVDA will start automatically after logging into Windows; if not, user must
# start NVDA by pressing the shortcut key, CTRL+Alt+N by default).
self.startAfterLogonCheckBox = wx.CheckBox(self, label=_("St&art NVDA after I sign in"))
self.startAfterLogonCheckBox.SetValue(config.getStartAfterLogon())
if globalVars.appArgs.secure or not config.isInstalledCopy():
self.startAfterLogonCheckBox.Disable()
settingsSizerHelper.addItem(self.startAfterLogonCheckBox)
self.bindHelpEvent("GeneralSettingsStartAfterLogOn", self.startAfterLogonCheckBox)
self.startOnLogonScreenCheckBox = wx.CheckBox(
self,
# Translators: The label for a setting in general settings to
# allow NVDA to come up in Windows login screen (useful if user
# needs to enter passwords or if multiple user accounts are present
# to allow user to choose the correct account).
label=_("Use NVDA during sign-in (requires administrator privileges)")
)
self.bindHelpEvent("GeneralSettingsStartOnLogOnScreen", self.startOnLogonScreenCheckBox)
self.startOnLogonScreenCheckBox.SetValue(config.getStartOnLogonScreen())
if globalVars.appArgs.secure or not config.isInstalledCopy():
self.startOnLogonScreenCheckBox.Disable()
settingsSizerHelper.addItem(self.startOnLogonScreenCheckBox)
self.copySettingsButton = wx.Button(
self,
label=_(
# Translators: The label for a button in general settings to copy
# current user settings to system settings (to allow current
# settings to be used in secure screens such as User Account
# Control (UAC) dialog).
"Use currently saved settings during sign-in and on secure screens"
" (requires administrator privileges)"
)
)
self.bindHelpEvent("GeneralSettingsCopySettings", self.copySettingsButton)
self.copySettingsButton.Bind(wx.EVT_BUTTON,self.onCopySettings)
if globalVars.appArgs.secure or not config.isInstalledCopy():
self.copySettingsButton.Disable()
settingsSizerHelper.addItem(self.copySettingsButton)
if updateCheck:
# Translators: The label of a checkbox in general settings to toggle automatic checking for updated versions of NVDA (if not checked, user must check for updates manually).
item=self.autoCheckForUpdatesCheckBox=wx.CheckBox(self,label=_("Automatically check for &updates to NVDA"))
self.bindHelpEvent("GeneralSettingsCheckForUpdates", self.autoCheckForUpdatesCheckBox)
item.Value=config.conf["update"]["autoCheck"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
# Translators: The label of a checkbox in general settings to toggle startup notifications
# for a pending NVDA update.
item=self.notifyForPendingUpdateCheckBox=wx.CheckBox(self,label=_("Notify for &pending update on startup"))
self.bindHelpEvent("GeneralSettingsNotifyPendingUpdates", self.notifyForPendingUpdateCheckBox)
item.Value=config.conf["update"]["startupNotification"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
# Translators: The label of a checkbox in general settings to toggle allowing of usage stats gathering
item=self.allowUsageStatsCheckBox=wx.CheckBox(self,label=_("Allow the NVDA project to gather NVDA usage statistics"))
self.bindHelpEvent("GeneralSettingsGatherUsageStats", self.allowUsageStatsCheckBox)
item.Value=config.conf["update"]["allowUsageStats"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
def onCopySettings(self,evt):
addonsDirPath = os.path.join(globalVars.appArgs.configPath, 'addons')
if os.path.isdir(addonsDirPath) and 0 < len(os.listdir(addonsDirPath)):
message = _(
# Translators: A message to warn the user when attempting to copy current
# settings to system settings.
"Add-ons were detected in your user settings directory. "
"Copying these to the system profile could be a security risk. "
"Do you still wish to copy your settings?"
)
# Translators: The title of the warning dialog displayed when trying to
# copy settings for use in secure screens.
title = _("Warning")
style = wx.YES | wx.NO | wx.ICON_WARNING
if wx.NO == gui.messageBox(message, title, style, self):
return
progressDialog = gui.IndeterminateProgressDialog(
gui.mainFrame,
# Translators: The title of the dialog presented while settings are being copied
_("Copying Settings"),
# Translators: The message displayed while settings are being copied
# to the system configuration (for use on Windows logon etc)
_("Please wait while settings are copied to the system configuration.")
)
while True:
try:
gui.ExecAndPump(config.setSystemConfigToCurrentConfig)
res=True
break
except installer.RetriableFailure:
log.debugWarning("Error when copying settings to system config",exc_info=True)
# Translators: a message dialog asking to retry or cancel when copying settings fails
message=_("Unable to copy a file. Perhaps it is currently being used by another process or you have run out of disc space on the drive you are copying to.")
# Translators: the title of a retry cancel dialog when copying settings fails
title=_("Error Copying")
if winUser.MessageBox(None,message,title,winUser.MB_RETRYCANCEL)==winUser.IDRETRY:
continue
res=False
break
except:
log.debugWarning("Error when copying settings to system config",exc_info=True)
res=False
break
progressDialog.done()
del progressDialog
if not res:
# Translators: The message displayed when errors were found while trying to copy current configuration to system settings.
gui.messageBox(_("Error copying NVDA user settings"),_("Error"),wx.OK|wx.ICON_ERROR,self)
else:
# Translators: The message displayed when copying configuration to system settings was successful.
gui.messageBox(_("Successfully copied NVDA user settings"),_("Success"),wx.OK|wx.ICON_INFORMATION,self)
def onSave(self):
newLanguage=[x[0] for x in self.languageNames][self.languageList.GetSelection()]
config.conf["general"]["language"]=newLanguage
config.conf["general"]["saveConfigurationOnExit"]=self.saveOnExitCheckBox.IsChecked()
config.conf["general"]["askToExit"]=self.askToExitCheckBox.IsChecked()
config.conf["general"]["playStartAndExitSounds"]=self.playStartAndExitSoundsCheckBox.IsChecked()
logLevel=self.LOG_LEVELS[self.logLevelList.GetSelection()][0]
if not logHandler.isLogLevelForced():
config.conf["general"]["loggingLevel"] = logging.getLevelName(logLevel)
logHandler.setLogLevelFromConfig()
if self.startAfterLogonCheckBox.IsEnabled():
config.setStartAfterLogon(self.startAfterLogonCheckBox.GetValue())
if self.startOnLogonScreenCheckBox.IsEnabled():
try:
config.setStartOnLogonScreen(self.startOnLogonScreenCheckBox.GetValue())
except (WindowsError, RuntimeError):
gui.messageBox(_("This change requires administrator privileges."), _("Insufficient Privileges"), style=wx.OK | wx.ICON_ERROR, parent=self)
if updateCheck:
config.conf["update"]["autoCheck"]=self.autoCheckForUpdatesCheckBox.IsChecked()
config.conf["update"]["allowUsageStats"]=self.allowUsageStatsCheckBox.IsChecked()
config.conf["update"]["startupNotification"]=self.notifyForPendingUpdateCheckBox.IsChecked()
updateCheck.terminate()
updateCheck.initialize()
def postSave(self):
if self.oldLanguage != config.conf["general"]["language"]:
LanguageRestartDialog(self).ShowModal()
class LanguageRestartDialog(
gui.contextHelp.ContextHelpMixin,
wx.Dialog, # wxPython does not seem to call base class initializer, put last in MRO
):
helpId = "GeneralSettingsLanguage"
def __init__(self, parent):
# Translators: The title of the dialog which appears when the user changed NVDA's interface language.
super(LanguageRestartDialog, self).__init__(parent, title=_("Language Configuration Change"))
mainSizer = wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: The message displayed after NVDA interface language has been changed.
sHelper.addItem(wx.StaticText(self, label=_("NVDA must be restarted for the new language to take effect.")))
bHelper = sHelper.addDialogDismissButtons(guiHelper.ButtonHelper(wx.HORIZONTAL))
# Translators: The label for a button in the dialog which appears when the user changed NVDA's interface language.
restartNowButton = bHelper.addButton(self, label=_("Restart &now"))
restartNowButton.Bind(wx.EVT_BUTTON, self.onRestartNowButton)
restartNowButton.SetFocus()
# Translators: The label for a button in the dialog which appears when the user changed NVDA's interface language.
restartLaterButton = bHelper.addButton(self, wx.ID_CLOSE, label=_("Restart &later"))
restartLaterButton.Bind(wx.EVT_BUTTON, lambda evt: self.Close())
self.Bind(wx.EVT_CLOSE, lambda evt: self.Destroy())
self.EscapeId = wx.ID_CLOSE
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
self.Sizer = mainSizer
mainSizer.Fit(self)
self.CentreOnScreen()
def onRestartNowButton(self, evt):
self.Destroy()
config.conf.save()
queueHandler.queueFunction(queueHandler.eventQueue,core.restart)
class SpeechSettingsPanel(SettingsPanel):
# Translators: This is the label for the speech panel
title = _("Speech")
helpId = "SpeechSettings"
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: A label for the synthesizer on the speech panel.
synthLabel = _("&Synthesizer")
synthBoxSizer = wx.StaticBoxSizer(wx.HORIZONTAL, self, label=synthLabel)
synthBox = synthBoxSizer.GetStaticBox()
synthGroup = guiHelper.BoxSizerHelper(self, sizer=synthBoxSizer)
settingsSizerHelper.addItem(synthGroup)
# Use an ExpandoTextCtrl because even when readonly it accepts focus from keyboard, which
# standard readonly TextCtrl does not. ExpandoTextCtrl is a TE_MULTILINE control, however
# by default it renders as a single line. Standard TextCtrl with TE_MULTILINE has two lines,
# and a vertical scroll bar. This is not necessary for the single line of text we wish to
# display here.
synthDesc = getSynth().description
self.synthNameCtrl = ExpandoTextCtrl(
synthBox,
size=(self.scaleSize(250), -1),
value=synthDesc,
style=wx.TE_READONLY,
)
self.synthNameCtrl.Bind(wx.EVT_CHAR_HOOK, self._enterTriggersOnChangeSynth)
# Translators: This is the label for the button used to change synthesizer,
# it appears in the context of a synthesizer group on the speech settings panel.
changeSynthBtn = wx.Button(synthBox, label=_("C&hange..."))
self.bindHelpEvent("SpeechSettingsChange", self.synthNameCtrl)
self.bindHelpEvent("SpeechSettingsChange", changeSynthBtn)
synthGroup.addItem(
guiHelper.associateElements(
self.synthNameCtrl,
changeSynthBtn
)
)
changeSynthBtn.Bind(wx.EVT_BUTTON,self.onChangeSynth)
self.voicePanel = VoiceSettingsPanel(self)
settingsSizerHelper.addItem(self.voicePanel)
def _enterTriggersOnChangeSynth(self, evt):
if evt.KeyCode == wx.WXK_RETURN:
self.onChangeSynth(evt)
else:
evt.Skip()
def onChangeSynth(self, evt):
changeSynth = SynthesizerSelectionDialog(self, multiInstanceAllowed=True)
ret = changeSynth.ShowModal()
if ret == wx.ID_OK:
self.Freeze()
# trigger a refresh of the settings
self.onPanelActivated()
self._sendLayoutUpdatedEvent()
self.Thaw()
def updateCurrentSynth(self):
synthDesc = getSynth().description
self.synthNameCtrl.SetValue(synthDesc)
def onPanelActivated(self):
# Call super after all panel updates have been completed; we don't want the panel to show until this is complete.
self.voicePanel.onPanelActivated()
super(SpeechSettingsPanel,self).onPanelActivated()
def onPanelDeactivated(self):
self.voicePanel.onPanelDeactivated()
super(SpeechSettingsPanel,self).onPanelDeactivated()
def onDiscard(self):
self.voicePanel.onDiscard()
def onSave(self):
self.voicePanel.onSave()
class SynthesizerSelectionDialog(SettingsDialog):
# Translators: This is the label for the synthesizer selection dialog
title = _("Select Synthesizer")
helpId = "SynthesizerSelection"
synthNames = []
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is a label for the select
# synthesizer combobox in the synthesizer dialog.
synthListLabelText=_("&Synthesizer:")
self.synthList = settingsSizerHelper.addLabeledControl(synthListLabelText, wx.Choice, choices=[])
self.bindHelpEvent("SelectSynthesizerSynthesizer", self.synthList)
self.updateSynthesizerList()
# Translators: This is the label for the select output
# device combo in the synthesizer dialog. Examples of
# an output device are default soundcard, usb
# headphones, etc.
deviceListLabelText = _("Audio output &device:")
deviceNames=nvwave.getOutputDeviceNames()
# #11349: On Windows 10 20H1 and 20H2, Microsoft Sound Mapper returns an empty string.
if deviceNames[0] in ("", "Microsoft Sound Mapper"):
# Translators: name for default (Microsoft Sound Mapper) audio output device.
deviceNames[0] = _("Microsoft Sound Mapper")
self.deviceList = settingsSizerHelper.addLabeledControl(deviceListLabelText, wx.Choice, choices=deviceNames)
self.bindHelpEvent("SelectSynthesizerOutputDevice", self.deviceList)
try:
selection = deviceNames.index(config.conf["speech"]["outputDevice"])
except ValueError:
selection = 0
self.deviceList.SetSelection(selection)
# Translators: This is a label for the audio ducking combo box in the Synthesizer Settings dialog.
duckingListLabelText = _("Audio d&ucking mode:")
self.duckingList=settingsSizerHelper.addLabeledControl(duckingListLabelText, wx.Choice, choices=audioDucking.audioDuckingModes)
self.bindHelpEvent("SelectSynthesizerDuckingMode", self.duckingList)
index=config.conf['audio']['audioDuckingMode']
self.duckingList.SetSelection(index)
if not audioDucking.isAudioDuckingSupported():
self.duckingList.Disable()
def postInit(self):
# Finally, ensure that focus is on the synthlist
self.synthList.SetFocus()
def updateSynthesizerList(self):
driverList=getSynthList()
self.synthNames=[x[0] for x in driverList]
options=[x[1] for x in driverList]
self.synthList.Clear()
self.synthList.AppendItems(options)
try:
index=self.synthNames.index(getSynth().name)
self.synthList.SetSelection(index)
except:
pass
def onOk(self, evt):
if not self.synthNames:
# The list of synths has not been populated yet, so we didn't change anything in this panel
return
config.conf["speech"]["outputDevice"]=self.deviceList.GetStringSelection()
newSynth=self.synthNames[self.synthList.GetSelection()]
if not setSynth(newSynth):
# Translators: This message is presented when
# NVDA is unable to load the selected
# synthesizer.
gui.messageBox(_("Could not load the %s synthesizer.")%newSynth,_("Synthesizer Error"),wx.OK|wx.ICON_WARNING,self)
return
if audioDucking.isAudioDuckingSupported():
index=self.duckingList.GetSelection()
config.conf['audio']['audioDuckingMode']=index
audioDucking.setAudioDuckingMode(index)
# Reinitialize the tones module to update the audio device
import tones
tones.terminate()
tones.initialize()
if self.IsModal():
# Hack: we need to update the synth in our parent window before closing.
# Otherwise, NVDA will report the old synth even though the new synth is reflected visually.
self.Parent.updateCurrentSynth()
super(SynthesizerSelectionDialog, self).onOk(evt)
class DriverSettingChanger(object):
"""Functor which acts as callback for GUI events."""
def __init__(self,driver,setting):
self._driverRef=weakref.ref(driver)
self.setting=setting
@property
def driver(self):
return self._driverRef()
def __call__(self,evt):
evt.Skip() # allow other handlers to also process this event.
val=evt.GetSelection()
setattr(self.driver,self.setting.id,val)
class StringDriverSettingChanger(DriverSettingChanger):
"""Same as L{DriverSettingChanger} but handles combobox events."""
def __init__(self,driver,setting,container):
self.container=container
super(StringDriverSettingChanger,self).__init__(driver,setting)
def __call__(self,evt):
evt.Skip() # allow other handlers to also process this event.
# Quick workaround to deal with voice changes.
if self.setting.id == "voice":
# Cancel speech first so that the voice will change immediately instead of the change being queued.
speech.cancelSpeech()
changeVoice(
self.driver,
getattr(self.container,"_%ss"%self.setting.id)[evt.GetSelection()].id
)
self.container.updateDriverSettings(changedSetting=self.setting.id)
else:
setattr(
self.driver,
self.setting.id,
getattr(self.container,"_%ss"%self.setting.id)[evt.GetSelection()].id
)
class AutoSettingsMixin(metaclass=ABCMeta):
"""
Mixin class that provides support for driver/vision provider specific gui settings.
Derived classes should implement:
- L{getSettings}
- L{settingsSizer}
Derived classes likely need to inherit from L{SettingsPanel}, in particular
the following methods must be provided:
- makeSettings
- onPanelActivated
@note: This mixin uses self.lastControl and self.sizerDict to keep track of the
controls added, and to maintain their ordering.
If you plan to maintain other controls in the same panel, care will need to be taken.
"""
def __init__(self, *args, **kwargs):
"""
Mixin init, forwards args to other base class.
The other base class is likely L{gui.SettingsPanel}.
@param args: Positional args to be passed to the other base class.
@param kwargs: Keyword args to be passed to the other base class.
"""
self.sizerDict = {}
self.lastControl = None
super(AutoSettingsMixin, self).__init__(*args, **kwargs)
# because settings instances can be of type L{Driver} as well, we have to handle
# showing settings for non-instances. Because of this, we must reacquire a reference
# to the settings class whenever we wish to use it (via L{getSettings}) in case the instance changes.
# We also use the weakref to refresh the gui when an instance dies.
self._currentSettingsRef = weakref.ref(
self.getSettings(),
lambda ref: wx.CallAfter(self.refreshGui)
)
settingsSizer: wx.BoxSizer
@abstractmethod
def getSettings(self) -> AutoSettings:
...
@abstractmethod
def makeSettings(self, sizer: wx.BoxSizer):
"""Populate the panel with settings controls.
@note: Normally classes also inherit from settingsDialogs.SettingsPanel.
@param sizer: The sizer to which to add the settings controls.
"""
...
def _getSettingsStorage(self) -> Any:
""" Override to change storage object for setting values."""
return self.getSettings()
@property
def hasOptions(self) -> bool:
return bool(self.getSettings().supportedSettings)
@classmethod
def _setSliderStepSizes(cls, slider, setting):
slider.SetLineSize(setting.minStep)
slider.SetPageSize(setting.largeStep)
def _getSettingControlHelpId(self, controlId):
"""Define the helpId associated to this control.
"""
return self.helpId
def _makeSliderSettingControl(
self,
setting: NumericDriverSetting,
settingsStorage: Any
) -> wx.BoxSizer:
"""Constructs appropriate GUI controls for given L{DriverSetting} such as label and slider.
@param setting: Setting to construct controls for
@param settingsStorage: where to get initial values / set values.
This param must have an attribute with a name matching setting.id.
In most cases it will be of type L{AutoSettings}
@return: wx.BoxSizer containing newly created controls.
"""
labeledControl = guiHelper.LabeledControlHelper(
self,
f"{setting.displayNameWithAccelerator}:",
nvdaControls.EnhancedInputSlider,
minValue=setting.minVal,
maxValue=setting.maxVal
)
lSlider=labeledControl.control
setattr(self, f"{setting.id}Slider", lSlider)
lSlider.Bind(wx.EVT_SLIDER, DriverSettingChanger(
settingsStorage, setting
))
self.bindHelpEvent(
self._getSettingControlHelpId(setting.id),
lSlider
)
self._setSliderStepSizes(lSlider, setting)
lSlider.SetValue(getattr(settingsStorage, setting.id))
if self.lastControl:
lSlider.MoveAfterInTabOrder(self.lastControl)
self.lastControl=lSlider
return labeledControl.sizer
def _makeStringSettingControl(
self,
setting: DriverSetting,
settingsStorage: Any
):
"""
Same as L{_makeSliderSettingControl} but for string settings displayed in a wx.Choice control.
Options for the choice control come from the availableXstringvalues property
(Dict[id, StringParameterInfo]) on the instance returned by self.getSettings()
The id of the value is stored on settingsStorage.
Returns sizer with label and combobox.
"""
labelText = f"{setting.displayNameWithAccelerator}:"
stringSettingAttribName = f"_{setting.id}s"
setattr(
self,
stringSettingAttribName,
# Settings are stored as an ordered dict.
# Therefore wrap this inside a list call.
list(getattr(
self.getSettings(),
f"available{setting.id.capitalize()}s"
).values())
)
stringSettings = getattr(self, stringSettingAttribName)
labeledControl = guiHelper.LabeledControlHelper(
self,
labelText,
wx.Choice,
choices=[x.displayName for x in stringSettings]
)
lCombo = labeledControl.control
setattr(self, f"{setting.id}List", lCombo)
self.bindHelpEvent(
self._getSettingControlHelpId(setting.id),
lCombo
)
try:
cur = getattr(settingsStorage, setting.id)
selectionIndex = [
x.id for x in stringSettings
].index(cur)
lCombo.SetSelection(selectionIndex)
except ValueError:
pass
lCombo.Bind(
wx.EVT_CHOICE,
StringDriverSettingChanger(settingsStorage, setting, self)
)
if self.lastControl:
lCombo.MoveAfterInTabOrder(self.lastControl)
self.lastControl = lCombo
return labeledControl.sizer
def _makeBooleanSettingControl(
self,
setting: BooleanDriverSetting,
settingsStorage: Any
):
"""
Same as L{_makeSliderSettingControl} but for boolean settings. Returns checkbox.
"""
checkbox = wx.CheckBox(self, label=setting.displayNameWithAccelerator)
setattr(self, f"{setting.id}Checkbox", checkbox)
settingsStorageProxy = weakref.proxy(settingsStorage)
self.bindHelpEvent(self._getSettingControlHelpId(setting.id), checkbox)
def _onCheckChanged(evt: wx.CommandEvent):
evt.Skip() # allow other handlers to also process this event.
setattr(settingsStorageProxy, setting.id, evt.IsChecked())
checkbox.Bind(wx.EVT_CHECKBOX, _onCheckChanged)
checkbox.SetValue(getattr(
settingsStorage,
setting.id
))
if self.lastControl:
checkbox.MoveAfterInTabOrder(self.lastControl)
self.lastControl=checkbox
return checkbox
def updateDriverSettings(self, changedSetting=None):
"""
Creates, hides, or updates existing GUI controls for all supported settings.
"""
settingsInst = self.getSettings()
settingsStorage = self._getSettingsStorage()
# firstly check already created options
for name, sizer in self.sizerDict.items():
if name == changedSetting:
# Changing a setting shouldn't cause that setting itself to disappear.
continue
if not settingsInst.isSupported(name):
self.settingsSizer.Hide(sizer)
# Create new controls, update already existing ones
if gui._isDebug():
log.debug(f"Current sizerDict: {self.sizerDict!r}")
log.debug(f"Current supportedSettings: {self.getSettings().supportedSettings!r}")
for setting in settingsInst.supportedSettings:
if setting.id == changedSetting:
# Changing a setting shouldn't cause that setting's own values to change.
continue
if setting.id in self.sizerDict: # update a value
self._updateValueForControl(setting, settingsStorage)
else: # create a new control
self._createNewControl(setting, settingsStorage)
# Update graphical layout of the dialog
self.settingsSizer.Layout()
def _createNewControl(self, setting, settingsStorage):
settingMaker = self._getSettingMaker(setting)
try:
s = settingMaker(setting, settingsStorage)
except UnsupportedConfigParameterError:
log.debugWarning(f"Unsupported setting {setting.id}; ignoring", exc_info=True)
else:
self.sizerDict[setting.id] = s
self.settingsSizer.Insert(
len(self.sizerDict) - 1,
s,
border=10,
flag=wx.BOTTOM
)
def _getSettingMaker(self, setting):
if isinstance(setting, NumericDriverSetting):
settingMaker = self._makeSliderSettingControl
elif isinstance(setting, BooleanDriverSetting):
settingMaker = self._makeBooleanSettingControl
else:
settingMaker = self._makeStringSettingControl
return settingMaker
def _updateValueForControl(self, setting, settingsStorage):
self.settingsSizer.Show(self.sizerDict[setting.id])
if isinstance(setting, NumericDriverSetting):
getattr(self, f"{setting.id}Slider").SetValue(
getattr(settingsStorage, setting.id)
)
elif isinstance(setting, BooleanDriverSetting):
getattr(self, f"{setting.id}Checkbox").SetValue(
getattr(settingsStorage, setting.id)
)
else:
options = getattr(self, f"_{setting.id}s")
lCombo = getattr(self, f"{setting.id}List")
try:
cur = getattr(settingsStorage, setting.id)
indexOfItem = [x.id for x in options].index(cur)
lCombo.SetSelection(indexOfItem)
except ValueError:
pass
def onDiscard(self):
# unbind change events for string settings as wx closes combo boxes on cancel
settingsInst = self.getSettings()
for setting in settingsInst.supportedSettings:
if isinstance(setting, (NumericDriverSetting, BooleanDriverSetting)):
continue
getattr(self, f"{setting.id}List").Unbind(wx.EVT_CHOICE)
# restore settings
settingsInst.loadSettings()
def onSave(self):
self.getSettings().saveSettings()
def refreshGui(self):
if not self._currentSettingsRef():
if gui._isDebug():
log.debug("refreshing panel")
self.sizerDict.clear()
self.settingsSizer.Clear(delete_windows=True)
self._currentSettingsRef = weakref.ref(
self.getSettings(),
lambda ref: wx.CallAfter(self.refreshGui)
)
self.makeSettings(self.settingsSizer)
def onPanelActivated(self):
"""Called after the panel has been activated
@note: Normally classes also inherit from settingsDialogs.SettingsPanel.
"""
self.refreshGui()
super().onPanelActivated()
class VoiceSettingsPanel(AutoSettingsMixin, SettingsPanel):
# Translators: This is the label for the voice settings panel.
title = _("Voice")
helpId = "SpeechSettings"
@property
def driver(self):
synth: SynthDriver = getSynth()
return synth
def getSettings(self) -> AutoSettings:
return self.driver
def _getSettingControlHelpId(self, controlId):
standardSettings = ['voice', 'variant', 'rate', 'rateBoost', 'pitch', 'inflection', 'volume']
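# For example, the control for the "rate" setting maps to the "SpeechSettingsRate" help anchor,
# given this panel's helpId of "SpeechSettings"; non-standard driver settings fall back to the
# panel-level helpId.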
if controlId in standardSettings:
capitalizedId = controlId[0].upper() + controlId[1:]
return f"{self.helpId}{capitalizedId}"
else:
return self.helpId
def makeSettings(self, settingsSizer):
# Construct synthesizer settings
self.updateDriverSettings()
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, text will be read using the voice for the language of the text).
autoLanguageSwitchingText = _("Automatic language switching (when supported)")
self.autoLanguageSwitchingCheckbox = settingsSizerHelper.addItem(
wx.CheckBox(
self,
label=autoLanguageSwitchingText
))
self.bindHelpEvent("SpeechSettingsLanguageSwitching", self.autoLanguageSwitchingCheckbox)
self.autoLanguageSwitchingCheckbox.SetValue(
config.conf["speech"]["autoLanguageSwitching"]
)
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, different voices for dialects will be used to
# read text in that dialect).
autoDialectSwitchingText = _("Automatic dialect switching (when supported)")
self.autoDialectSwitchingCheckbox = settingsSizerHelper.addItem(
wx.CheckBox(self, label=autoDialectSwitchingText)
)
self.bindHelpEvent("SpeechSettingsDialectSwitching", self.autoDialectSwitchingCheckbox)
self.autoDialectSwitchingCheckbox.SetValue(
config.conf["speech"]["autoDialectSwitching"]
)
# Translators: This is the label for a combobox in the
# voice settings panel (possible choices are none, some, most and all).
punctuationLabelText = _("Punctuation/symbol &level:")
symbolLevelLabels = characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS
symbolLevelChoices = [
symbolLevelLabels[level] for level in characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS
]
self.symbolLevelList = settingsSizerHelper.addLabeledControl(
punctuationLabelText, wx.Choice, choices=symbolLevelChoices
)
self.bindHelpEvent("SpeechSettingsSymbolLevel", self.symbolLevelList)
curLevel = config.conf["speech"]["symbolLevel"]
self.symbolLevelList.SetSelection(
characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS.index(curLevel)
)
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, the language of the current voice is trusted when processing characters and symbols).
trustVoiceLanguageText = _("Trust voice's language when processing characters and symbols")
self.trustVoiceLanguageCheckbox = settingsSizerHelper.addItem(
wx.CheckBox(self, label=trustVoiceLanguageText)
)
self.bindHelpEvent("SpeechSettingsTrust", self.trustVoiceLanguageCheckbox)
self.trustVoiceLanguageCheckbox.SetValue(config.conf["speech"]["trustVoiceLanguage"])
includeCLDRText = _(
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, data from the unicode CLDR will be used
# to speak emoji descriptions).
"Include Unicode Consortium data (including emoji) when processing characters and symbols"
)
self.includeCLDRCheckbox = settingsSizerHelper.addItem(
wx.CheckBox(self, label=includeCLDRText)
)
self.bindHelpEvent(
"SpeechSettingsCLDR",
self.includeCLDRCheckbox
)
self.includeCLDRCheckbox.SetValue(config.conf["speech"]["includeCLDR"])
minPitchChange = int(config.conf.getConfigValidation(
("speech", self.driver.name, "capPitchChange")
).kwargs["min"])
maxPitchChange = int(config.conf.getConfigValidation(
("speech", self.driver.name, "capPitchChange")
).kwargs["max"])
# Translators: This is a label for a setting in voice settings (an edit box to change
# voice pitch for capital letters; the higher the value, the higher the pitch).
capPitchChangeLabelText = _("Capital pitch change percentage")
self.capPitchChangeEdit = settingsSizerHelper.addLabeledControl(
capPitchChangeLabelText,
nvdaControls.SelectOnFocusSpinCtrl,
min=minPitchChange,
max=maxPitchChange,
initial=config.conf["speech"][self.driver.name]["capPitchChange"])
self.bindHelpEvent(
"SpeechSettingsCapPitchChange",
self.capPitchChangeEdit
)
# Translators: This is the label for a checkbox in the
# voice settings panel.
sayCapForCapsText = _("Say &cap before capitals")
self.sayCapForCapsCheckBox = settingsSizerHelper.addItem(
wx.CheckBox(self, label=sayCapForCapsText)
)
self.bindHelpEvent("SpeechSettingsSayCapBefore", self.sayCapForCapsCheckBox)
self.sayCapForCapsCheckBox.SetValue(
config.conf["speech"][self.driver.name]["sayCapForCapitals"]
)
# Translators: This is the label for a checkbox in the
# voice settings panel.
beepForCapsText =_("&Beep for capitals")
self.beepForCapsCheckBox = settingsSizerHelper.addItem(
wx.CheckBox(self, label=beepForCapsText)
)
self.bindHelpEvent(
"SpeechSettingsBeepForCaps",
self.beepForCapsCheckBox
)
self.beepForCapsCheckBox.SetValue(
config.conf["speech"][self.driver.name]["beepForCapitals"]
)
# Translators: This is the label for a checkbox in the
# voice settings panel.
useSpellingFunctionalityText = _("Use &spelling functionality if supported")
self.useSpellingFunctionalityCheckBox = settingsSizerHelper.addItem(
wx.CheckBox(self, label=useSpellingFunctionalityText)
)
self.bindHelpEvent("SpeechSettingsUseSpelling", self.useSpellingFunctionalityCheckBox)
self.useSpellingFunctionalityCheckBox.SetValue(
config.conf["speech"][self.driver.name]["useSpellingFunctionality"]
)
def onSave(self):
AutoSettingsMixin.onSave(self)
config.conf["speech"]["autoLanguageSwitching"] = self.autoLanguageSwitchingCheckbox.IsChecked()
config.conf["speech"]["autoDialectSwitching"] = self.autoDialectSwitchingCheckbox.IsChecked()
config.conf["speech"]["symbolLevel"] = characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS[
self.symbolLevelList.GetSelection()
].value
config.conf["speech"]["trustVoiceLanguage"] = self.trustVoiceLanguageCheckbox.IsChecked()
currentIncludeCLDR = config.conf["speech"]["includeCLDR"]
config.conf["speech"]["includeCLDR"] = newIncludeCldr = self.includeCLDRCheckbox.IsChecked()
if currentIncludeCLDR is not newIncludeCldr:
# Either included or excluded CLDR data, so clear the cache.
characterProcessing.clearSpeechSymbols()
config.conf["speech"][self.driver.name]["capPitchChange"]=self.capPitchChangeEdit.Value
config.conf["speech"][self.driver.name]["sayCapForCapitals"]=self.sayCapForCapsCheckBox.IsChecked()
config.conf["speech"][self.driver.name]["beepForCapitals"]=self.beepForCapsCheckBox.IsChecked()
config.conf["speech"][self.driver.name]["useSpellingFunctionality"]=self.useSpellingFunctionalityCheckBox.IsChecked()
class KeyboardSettingsPanel(SettingsPanel):
# Translators: This is the label for the keyboard settings panel.
title = _("Keyboard")
helpId = "KeyboardSettings"
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a combobox in the
# keyboard settings panel.
kbdLabelText = _("&Keyboard layout:")
layouts=keyboardHandler.KeyboardInputGesture.LAYOUTS
self.kbdNames=sorted(layouts)
kbdChoices = [layouts[layout] for layout in self.kbdNames]
self.kbdList=sHelper.addLabeledControl(kbdLabelText, wx.Choice, choices=kbdChoices)
self.bindHelpEvent("KeyboardSettingsLayout", self.kbdList)
try:
index=self.kbdNames.index(config.conf['keyboard']['keyboardLayout'])
self.kbdList.SetSelection(index)
except:
log.debugWarning("Could not set Keyboard layout list to current layout",exc_info=True)
# Translators: This is the label for a list of checkboxes
# controlling which keys are NVDA modifier keys.
modifierBoxLabel = _("&Select NVDA Modifier Keys")
self.modifierChoices = [keyLabels.localizedKeyLabels[key] for key in keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS]
self.modifierList=sHelper.addLabeledControl(modifierBoxLabel, nvdaControls.CustomCheckListBox, choices=self.modifierChoices)
checkedItems = []
if config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"]:
checkedItems.append(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("numpadinsert"))
if config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"]:
checkedItems.append(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("insert"))
if config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"]:
checkedItems.append(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("capslock"))
self.modifierList.CheckedItems = checkedItems
self.modifierList.Select(0)
self.bindHelpEvent("KeyboardSettingsModifiers", self.modifierList)
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
charsText = _("Speak typed &characters")
self.charsCheckBox=sHelper.addItem(wx.CheckBox(self,label=charsText))
self.bindHelpEvent(
"KeyboardSettingsSpeakTypedCharacters",
self.charsCheckBox
)
self.charsCheckBox.SetValue(config.conf["keyboard"]["speakTypedCharacters"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speakTypedWordsText = _("Speak typed &words")
self.wordsCheckBox=sHelper.addItem(wx.CheckBox(self,label=speakTypedWordsText))
self.bindHelpEvent("KeyboardSettingsSpeakTypedWords", self.wordsCheckBox)
self.wordsCheckBox.SetValue(config.conf["keyboard"]["speakTypedWords"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speechInterruptForCharText = _("Speech &interrupt for typed characters")
self.speechInterruptForCharsCheckBox=sHelper.addItem(wx.CheckBox(self,label=speechInterruptForCharText))
self.bindHelpEvent("KeyboardSettingsSpeechInteruptForCharacters", self.speechInterruptForCharsCheckBox)
self.speechInterruptForCharsCheckBox.SetValue(config.conf["keyboard"]["speechInterruptForCharacters"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speechInterruptForEnterText = _("Speech i&nterrupt for Enter key")
self.speechInterruptForEnterCheckBox=sHelper.addItem(wx.CheckBox(self,label=speechInterruptForEnterText))
self.speechInterruptForEnterCheckBox.SetValue(config.conf["keyboard"]["speechInterruptForEnter"])
self.bindHelpEvent("KeyboardSettingsSpeechInteruptForEnter", self.speechInterruptForEnterCheckBox)
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
allowSkimReadingInSayAllText = _("Allow skim &reading in Say All")
self.skimReadingInSayAllCheckBox=sHelper.addItem(wx.CheckBox(self,label=allowSkimReadingInSayAllText))
self.bindHelpEvent("KeyboardSettingsSkimReading", self.skimReadingInSayAllCheckBox)
self.skimReadingInSayAllCheckBox.SetValue(config.conf["keyboard"]["allowSkimReadingInSayAll"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
beepForLowercaseWithCapsLockText = _("&Beep if typing lowercase letters when caps lock is on")
self.beepLowercaseCheckBox=sHelper.addItem(wx.CheckBox(self,label=beepForLowercaseWithCapsLockText))
self.bindHelpEvent("KeyboardSettingsBeepLowercase", self.beepLowercaseCheckBox)
self.beepLowercaseCheckBox.SetValue(config.conf["keyboard"]["beepForLowercaseWithCapslock"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
commandKeysText = _("Speak c&ommand keys")
self.commandKeysCheckBox=sHelper.addItem(wx.CheckBox(self,label=commandKeysText))
self.bindHelpEvent("KeyboardSettingsSpeakCommandKeys", self.commandKeysCheckBox)
self.commandKeysCheckBox.SetValue(config.conf["keyboard"]["speakCommandKeys"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
alertForSpellingErrorsText = _("Play sound for &spelling errors while typing")
self.alertForSpellingErrorsCheckBox=sHelper.addItem(wx.CheckBox(self,label=alertForSpellingErrorsText))
self.bindHelpEvent("KeyboardSettingsAlertForSpellingErrors", self.alertForSpellingErrorsCheckBox)
self.alertForSpellingErrorsCheckBox.SetValue(config.conf["keyboard"]["alertForSpellingErrors"])
if not config.conf["documentFormatting"]["reportSpellingErrors"]:
self.alertForSpellingErrorsCheckBox.Disable()
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
handleInjectedKeysText = _("Handle keys from other &applications")
self.handleInjectedKeysCheckBox=sHelper.addItem(wx.CheckBox(self,label=handleInjectedKeysText))
self.bindHelpEvent("KeyboardSettingsHandleKeys", self.handleInjectedKeysCheckBox)
self.handleInjectedKeysCheckBox.SetValue(config.conf["keyboard"]["handleInjectedKeys"])
def isValid(self):
# #2871: check whether at least one key is the nvda key.
if not self.modifierList.CheckedItems:
log.debugWarning("No NVDA key set")
gui.messageBox(
# Translators: Message to report wrong configuration of the NVDA key
_("At least one key must be used as the NVDA key."),
# Translators: The title of the message box
_("Error"), wx.OK|wx.ICON_ERROR,self)
return False
return super(KeyboardSettingsPanel, self).isValid()
def onSave(self):
layout=self.kbdNames[self.kbdList.GetSelection()]
config.conf['keyboard']['keyboardLayout']=layout
config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"]= self.modifierList.IsChecked(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("numpadinsert"))
config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"] = self.modifierList.IsChecked(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("insert"))
config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"] = self.modifierList.IsChecked(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("capslock"))
config.conf["keyboard"]["speakTypedCharacters"]=self.charsCheckBox.IsChecked()
config.conf["keyboard"]["speakTypedWords"]=self.wordsCheckBox.IsChecked()
config.conf["keyboard"]["speechInterruptForCharacters"]=self.speechInterruptForCharsCheckBox.IsChecked()
config.conf["keyboard"]["speechInterruptForEnter"]=self.speechInterruptForEnterCheckBox.IsChecked()
config.conf["keyboard"]["allowSkimReadingInSayAll"]=self.skimReadingInSayAllCheckBox.IsChecked()
config.conf["keyboard"]["beepForLowercaseWithCapslock"]=self.beepLowercaseCheckBox.IsChecked()
config.conf["keyboard"]["speakCommandKeys"]=self.commandKeysCheckBox.IsChecked()
config.conf["keyboard"]["alertForSpellingErrors"]=self.alertForSpellingErrorsCheckBox.IsChecked()
config.conf["keyboard"]["handleInjectedKeys"]=self.handleInjectedKeysCheckBox.IsChecked()
class MouseSettingsPanel(SettingsPanel):
# Translators: This is the label for the mouse settings panel.
title = _("Mouse")
helpId = "MouseSettings"
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# mouse settings panel.
shapeChangesText = _("Report mouse &shape changes")
self.shapeCheckBox=sHelper.addItem(wx.CheckBox(self,label=shapeChangesText))
self.bindHelpEvent("MouseSettingsShape", self.shapeCheckBox)
self.shapeCheckBox.SetValue(config.conf["mouse"]["reportMouseShapeChanges"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
mouseTrackingText=_("Enable mouse &tracking")
self.mouseTrackingCheckBox=sHelper.addItem(wx.CheckBox(self,label=mouseTrackingText))
self.bindHelpEvent("MouseSettingsTracking", self.mouseTrackingCheckBox)
self.mouseTrackingCheckBox.SetValue(config.conf["mouse"]["enableMouseTracking"])
# Translators: This is the label for a combobox in the
# mouse settings panel.
textUnitLabelText=_("Text &unit resolution:")
import textInfos
self.textUnits=textInfos.MOUSE_TEXT_RESOLUTION_UNITS
textUnitsChoices = [textInfos.unitLabels[x] for x in self.textUnits]
self.textUnitComboBox=sHelper.addLabeledControl(textUnitLabelText, wx.Choice, choices=textUnitsChoices)
self.bindHelpEvent("MouseSettingsTextUnit", self.textUnitComboBox)
try:
index=self.textUnits.index(config.conf["mouse"]["mouseTextUnit"])
except:
index=0
self.textUnitComboBox.SetSelection(index)
# Translators: This is the label for a checkbox in the
# mouse settings panel.
reportObjectRoleText = _("Report &role when mouse enters object")
self.reportObjectRoleCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportObjectRoleText))
self.bindHelpEvent("MouseSettingsRole", self.reportObjectRoleCheckBox)
self.reportObjectRoleCheckBox.SetValue(config.conf["mouse"]["reportObjectRoleOnMouseEnter"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
audioText = _("&Play audio coordinates when mouse moves")
self.audioCheckBox=sHelper.addItem(wx.CheckBox(self,label=audioText))
self.bindHelpEvent("MouseSettingsAudio", self.audioCheckBox)
self.audioCheckBox.SetValue(config.conf["mouse"]["audioCoordinatesOnMouseMove"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
audioDetectBrightnessText = _("&Brightness controls audio coordinates volume")
self.audioDetectBrightnessCheckBox=sHelper.addItem(wx.CheckBox(self,label=audioDetectBrightnessText))
self.bindHelpEvent("MouseSettingsBrightness", self.audioDetectBrightnessCheckBox)
self.audioDetectBrightnessCheckBox.SetValue(config.conf["mouse"]["audioCoordinates_detectBrightness"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
ignoreInjectedMouseInputText = _("Ignore mouse input from other &applications")
self.ignoreInjectedMouseInputCheckBox=sHelper.addItem(wx.CheckBox(self,label=ignoreInjectedMouseInputText))
self.bindHelpEvent("MouseSettingsHandleMouseControl", self.ignoreInjectedMouseInputCheckBox)
self.ignoreInjectedMouseInputCheckBox.SetValue(config.conf["mouse"]["ignoreInjectedMouseInput"])
def onSave(self):
config.conf["mouse"]["reportMouseShapeChanges"]=self.shapeCheckBox.IsChecked()
config.conf["mouse"]["enableMouseTracking"]=self.mouseTrackingCheckBox.IsChecked()
config.conf["mouse"]["mouseTextUnit"]=self.textUnits[self.textUnitComboBox.GetSelection()]
config.conf["mouse"]["reportObjectRoleOnMouseEnter"]=self.reportObjectRoleCheckBox.IsChecked()
config.conf["mouse"]["audioCoordinatesOnMouseMove"]=self.audioCheckBox.IsChecked()
config.conf["mouse"]["audioCoordinates_detectBrightness"]=self.audioDetectBrightnessCheckBox.IsChecked()
config.conf["mouse"]["ignoreInjectedMouseInput"]=self.ignoreInjectedMouseInputCheckBox.IsChecked()
class ReviewCursorPanel(SettingsPanel):
# Translators: This is the label for the review cursor settings panel.
title = _("Review Cursor")
helpId = "ReviewCursorSettings"
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followFocusCheckBox = wx.CheckBox(self, label=_("Follow system &focus"))
self.bindHelpEvent("ReviewCursorFollowFocus", self.followFocusCheckBox)
self.followFocusCheckBox.SetValue(config.conf["reviewCursor"]["followFocus"])
settingsSizer.Add(self.followFocusCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followCaretCheckBox = wx.CheckBox(self, label=_("Follow System &Caret"))
self.bindHelpEvent("ReviewCursorFollowCaret", self.followCaretCheckBox)
self.followCaretCheckBox.SetValue(config.conf["reviewCursor"]["followCaret"])
settingsSizer.Add(self.followCaretCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followMouseCheckBox = wx.CheckBox(self, label=_("Follow &mouse cursor"))
self.bindHelpEvent("ReviewCursorFollowMouse", self.followMouseCheckBox)
self.followMouseCheckBox.SetValue(config.conf["reviewCursor"]["followMouse"])
settingsSizer.Add(self.followMouseCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.simpleReviewModeCheckBox = wx.CheckBox(self, label=_("&Simple review mode"))
self.bindHelpEvent("ReviewCursorSimple", self.simpleReviewModeCheckBox)
self.simpleReviewModeCheckBox.SetValue(config.conf["reviewCursor"]["simpleReviewMode"])
settingsSizer.Add(self.simpleReviewModeCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["reviewCursor"]["followFocus"]=self.followFocusCheckBox.IsChecked()
config.conf["reviewCursor"]["followCaret"]=self.followCaretCheckBox.IsChecked()
config.conf["reviewCursor"]["followMouse"]=self.followMouseCheckBox.IsChecked()
config.conf["reviewCursor"]["simpleReviewMode"]=self.simpleReviewModeCheckBox.IsChecked()
class InputCompositionPanel(SettingsPanel):
# Translators: This is the label for the Input Composition settings panel.
title = _("Input Composition")
helpId = "InputCompositionSettings"
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.autoReportAllCandidatesCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Automatically report all available &candidates"))
self.bindHelpEvent("InputCompositionReportAllCandidates", self.autoReportAllCandidatesCheckBox)
self.autoReportAllCandidatesCheckBox.SetValue(config.conf["inputComposition"]["autoReportAllCandidates"])
settingsSizer.Add(self.autoReportAllCandidatesCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.announceSelectedCandidateCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Announce &selected candidate"))
self.bindHelpEvent("InputCompositionAnnounceSelectedCandidate", self.announceSelectedCandidateCheckBox)
self.announceSelectedCandidateCheckBox.SetValue(config.conf["inputComposition"]["announceSelectedCandidate"])
settingsSizer.Add(self.announceSelectedCandidateCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.candidateIncludesShortCharacterDescriptionCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Always include short character &description when announcing candidates"))
self.bindHelpEvent(
"InputCompositionCandidateIncludesShortCharacterDescription",
self.candidateIncludesShortCharacterDescriptionCheckBox
)
self.candidateIncludesShortCharacterDescriptionCheckBox.SetValue(config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"])
settingsSizer.Add(self.candidateIncludesShortCharacterDescriptionCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.reportReadingStringChangesCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Report changes to the &reading string"))
self.bindHelpEvent(
"InputCompositionReadingStringChanges",
self.reportReadingStringChangesCheckBox
)
self.reportReadingStringChangesCheckBox.SetValue(config.conf["inputComposition"]["reportReadingStringChanges"])
settingsSizer.Add(self.reportReadingStringChangesCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.reportCompositionStringChangesCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Report changes to the &composition string"))
self.bindHelpEvent(
"InputCompositionCompositionStringChanges",
self.reportCompositionStringChangesCheckBox
)
self.reportCompositionStringChangesCheckBox.SetValue(config.conf["inputComposition"]["reportCompositionStringChanges"])
settingsSizer.Add(self.reportCompositionStringChangesCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["inputComposition"]["autoReportAllCandidates"]=self.autoReportAllCandidatesCheckBox.IsChecked()
config.conf["inputComposition"]["announceSelectedCandidate"]=self.announceSelectedCandidateCheckBox.IsChecked()
config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"]=self.candidateIncludesShortCharacterDescriptionCheckBox.IsChecked()
config.conf["inputComposition"]["reportReadingStringChanges"]=self.reportReadingStringChangesCheckBox.IsChecked()
config.conf["inputComposition"]["reportCompositionStringChanges"]=self.reportCompositionStringChangesCheckBox.IsChecked()
class ObjectPresentationPanel(SettingsPanel):
# Translators: This is the label for the object presentation panel.
title = _("Object Presentation")
helpId = "ObjectPresentationSettings"
progressLabels = (
# Translators: An option for progress bar output in the Object Presentation dialog
# which disables reporting of progress bars.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("off", _("off")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by speaking.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("speak", _("Speak")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by beeping.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("beep", _("Beep")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by both speaking and beeping.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("both", _("Speak and beep")),
)
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
reportToolTipsText = _("Report &tooltips")
self.tooltipCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportToolTipsText))
self.bindHelpEvent("ObjectPresentationReportToolTips", self.tooltipCheckBox)
self.tooltipCheckBox.SetValue(config.conf["presentation"]["reportTooltips"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
balloonText = _("Report ¬ifications")
self.balloonCheckBox=sHelper.addItem(wx.CheckBox(self,label=balloonText))
self.bindHelpEvent("ObjectPresentationReportNotifications", self.balloonCheckBox)
self.balloonCheckBox.SetValue(config.conf["presentation"]["reportHelpBalloons"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
shortcutText = _("Report object shortcut &keys")
self.shortcutCheckBox=sHelper.addItem(wx.CheckBox(self,label=shortcutText))
self.bindHelpEvent("ObjectPresentationShortcutKeys", self.shortcutCheckBox)
self.shortcutCheckBox.SetValue(config.conf["presentation"]["reportKeyboardShortcuts"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
positionInfoText = _("Report object &position information")
self.positionInfoCheckBox=sHelper.addItem(wx.CheckBox(self,label=positionInfoText))
self.bindHelpEvent("ObjectPresentationPositionInfo", self.positionInfoCheckBox)
self.positionInfoCheckBox.SetValue(config.conf["presentation"]["reportObjectPositionInformation"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
guessPositionInfoText = _("&Guess object position information when unavailable")
self.guessPositionInfoCheckBox=sHelper.addItem(wx.CheckBox(self,label=guessPositionInfoText))
self.bindHelpEvent("ObjectPresentationGuessPositionInfo", self.guessPositionInfoCheckBox)
self.guessPositionInfoCheckBox.SetValue(config.conf["presentation"]["guessObjectPositionInformationWhenUnavailable"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
descriptionText = _("Report object &descriptions")
self.descriptionCheckBox=sHelper.addItem(wx.CheckBox(self,label=descriptionText))
self.bindHelpEvent("ObjectPresentationReportDescriptions", self.descriptionCheckBox)
self.descriptionCheckBox.SetValue(config.conf["presentation"]["reportObjectDescriptions"])
# Translators: This is the label for a combobox in the
# object presentation settings panel.
progressLabelText = _("Progress &bar output:")
progressChoices = [name for setting, name in self.progressLabels]
self.progressList=sHelper.addLabeledControl(progressLabelText, wx.Choice, choices=progressChoices)
self.bindHelpEvent("ObjectPresentationProgressBarOutput", self.progressList)
for index, (setting, name) in enumerate(self.progressLabels):
if setting == config.conf["presentation"]["progressBarUpdates"]["progressBarOutputMode"]:
self.progressList.SetSelection(index)
break
else:
log.debugWarning("Could not set progress list to current report progress bar updates setting")
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
reportBackgroundProgressBarsText = _("Report backg&round progress bars")
self.reportBackgroundProgressBarsCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportBackgroundProgressBarsText))
self.bindHelpEvent(
"ObjectPresentationReportBackgroundProgressBars",
self.reportBackgroundProgressBarsCheckBox
)
self.reportBackgroundProgressBarsCheckBox.SetValue(config.conf["presentation"]["progressBarUpdates"]["reportBackgroundProgressBars"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
dynamicContentText = _("Report dynamic &content changes")
self.dynamicContentCheckBox=sHelper.addItem(wx.CheckBox(self,label=dynamicContentText))
self.bindHelpEvent(
"ObjectPresentationReportDynamicContent",
self.dynamicContentCheckBox
)
self.dynamicContentCheckBox.SetValue(config.conf["presentation"]["reportDynamicContentChanges"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
autoSuggestionsLabelText = _("Play a sound when &auto-suggestions appear")
self.autoSuggestionSoundsCheckBox=sHelper.addItem(wx.CheckBox(self,label=autoSuggestionsLabelText))
self.bindHelpEvent(
"ObjectPresentationSuggestionSounds",
self.autoSuggestionSoundsCheckBox
)
self.autoSuggestionSoundsCheckBox.SetValue(config.conf["presentation"]["reportAutoSuggestionsWithSound"])
def onSave(self):
config.conf["presentation"]["reportTooltips"]=self.tooltipCheckBox.IsChecked()
config.conf["presentation"]["reportHelpBalloons"]=self.balloonCheckBox.IsChecked()
config.conf["presentation"]["reportKeyboardShortcuts"]=self.shortcutCheckBox.IsChecked()
config.conf["presentation"]["reportObjectPositionInformation"]=self.positionInfoCheckBox.IsChecked()
config.conf["presentation"]["guessObjectPositionInformationWhenUnavailable"]=self.guessPositionInfoCheckBox.IsChecked()
config.conf["presentation"]["reportObjectDescriptions"]=self.descriptionCheckBox.IsChecked()
config.conf["presentation"]["progressBarUpdates"]["progressBarOutputMode"]=self.progressLabels[self.progressList.GetSelection()][0]
config.conf["presentation"]["progressBarUpdates"]["reportBackgroundProgressBars"]=self.reportBackgroundProgressBarsCheckBox.IsChecked()
config.conf["presentation"]["reportDynamicContentChanges"]=self.dynamicContentCheckBox.IsChecked()
config.conf["presentation"]["reportAutoSuggestionsWithSound"]=self.autoSuggestionSoundsCheckBox.IsChecked()
class BrowseModePanel(SettingsPanel):
# Translators: This is the label for the browse mode settings panel.
title = _("Browse Mode")
helpId = "BrowseModeSettings"
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a textfield in the
# browse mode settings panel.
maxLengthLabelText = _("&Maximum number of characters on one line")
self.maxLengthEdit = sHelper.addLabeledControl(maxLengthLabelText, nvdaControls.SelectOnFocusSpinCtrl,
# min and max are not enforced in the config for virtualBuffers.maxLineLength
min=10, max=250,
initial=config.conf["virtualBuffers"]["maxLineLength"])
self.bindHelpEvent("BrowseModeSettingsMaxLength", self.maxLengthEdit)
# Translators: This is the label for a textfield in the
# browse mode settings panel.
pageLinesLabelText = _("&Number of lines per page")
self.pageLinesEdit = sHelper.addLabeledControl(pageLinesLabelText, nvdaControls.SelectOnFocusSpinCtrl,
# min and max are not enforced in the config for virtualBuffers.linesPerPage
min=5, max=150,
initial=config.conf["virtualBuffers"]["linesPerPage"])
self.bindHelpEvent("BrowseModeSettingsPageLines", self.pageLinesEdit)
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
useScreenLayoutText = _("Use &screen layout (when supported)")
self.useScreenLayoutCheckBox = sHelper.addItem(wx.CheckBox(self, label=useScreenLayoutText))
self.bindHelpEvent("BrowseModeSettingsScreenLayout", self.useScreenLayoutCheckBox)
self.useScreenLayoutCheckBox.SetValue(config.conf["virtualBuffers"]["useScreenLayout"])
# Translators: The label for a checkbox in browse mode settings to
# enable browse mode on page load.
enableOnPageLoadText = _("&Enable browse mode on page load")
self.enableOnPageLoadCheckBox = sHelper.addItem(wx.CheckBox(self, label=enableOnPageLoadText))
self.bindHelpEvent("BrowseModeSettingsEnableOnPageLoad", self.enableOnPageLoadCheckBox)
self.enableOnPageLoadCheckBox.SetValue(config.conf["virtualBuffers"]["enableOnPageLoad"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
autoSayAllText = _("Automatic &Say All on page load")
self.autoSayAllCheckBox = sHelper.addItem(wx.CheckBox(self, label=autoSayAllText))
self.bindHelpEvent("BrowseModeSettingsAutoSayAll", self.autoSayAllCheckBox)
self.autoSayAllCheckBox.SetValue(config.conf["virtualBuffers"]["autoSayAllOnPageLoad"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
layoutTablesText = _("Include l&ayout tables")
self.layoutTablesCheckBox = sHelper.addItem(wx.CheckBox(self, label =layoutTablesText))
self.bindHelpEvent("BrowseModeSettingsIncludeLayoutTables", self.layoutTablesCheckBox)
self.layoutTablesCheckBox.SetValue(config.conf["documentFormatting"]["includeLayoutTables"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
autoPassThroughOnFocusChangeText = _("Automatic focus mode for focus changes")
self.autoPassThroughOnFocusChangeCheckBox = sHelper.addItem(wx.CheckBox(self, label=autoPassThroughOnFocusChangeText))
self.bindHelpEvent(
"BrowseModeSettingsAutoPassThroughOnFocusChange",
self.autoPassThroughOnFocusChangeCheckBox
)
self.autoPassThroughOnFocusChangeCheckBox.SetValue(config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
autoPassThroughOnCaretMoveText = _("Automatic focus mode for caret movement")
self.autoPassThroughOnCaretMoveCheckBox = sHelper.addItem(wx.CheckBox(self, label=autoPassThroughOnCaretMoveText))
self.bindHelpEvent(
"BrowseModeSettingsAutoPassThroughOnCaretMove",
self.autoPassThroughOnCaretMoveCheckBox
)
self.autoPassThroughOnCaretMoveCheckBox.SetValue(config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
passThroughAudioIndicationText = _("Audio indication of focus and browse modes")
self.passThroughAudioIndicationCheckBox = sHelper.addItem(wx.CheckBox(self, label=passThroughAudioIndicationText))
self.bindHelpEvent(
"BrowseModeSettingsPassThroughAudioIndication",
self.passThroughAudioIndicationCheckBox
)
self.passThroughAudioIndicationCheckBox.SetValue(config.conf["virtualBuffers"]["passThroughAudioIndication"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
trapNonCommandGesturesText = _("&Trap all non-command gestures from reaching the document")
self.trapNonCommandGesturesCheckBox = sHelper.addItem(wx.CheckBox(self, label=trapNonCommandGesturesText))
self.bindHelpEvent(
"BrowseModeSettingsTrapNonCommandGestures",
self.trapNonCommandGesturesCheckBox
)
self.trapNonCommandGesturesCheckBox.SetValue(config.conf["virtualBuffers"]["trapNonCommandGestures"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
autoFocusFocusableElementsText = _("Automatically set system &focus to focusable elements")
self.autoFocusFocusableElementsCheckBox = sHelper.addItem(
wx.CheckBox(self, label=autoFocusFocusableElementsText)
)
self.bindHelpEvent(
"BrowseModeSettingsAutoFocusFocusableElements",
self.autoFocusFocusableElementsCheckBox
)
self.autoFocusFocusableElementsCheckBox.SetValue(
config.conf["virtualBuffers"]["autoFocusFocusableElements"]
)
def onSave(self):
config.conf["virtualBuffers"]["maxLineLength"]=self.maxLengthEdit.GetValue()
config.conf["virtualBuffers"]["linesPerPage"]=self.pageLinesEdit.GetValue()
config.conf["virtualBuffers"]["useScreenLayout"]=self.useScreenLayoutCheckBox.IsChecked()
config.conf["virtualBuffers"]["enableOnPageLoad"] = self.enableOnPageLoadCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoSayAllOnPageLoad"]=self.autoSayAllCheckBox.IsChecked()
config.conf["documentFormatting"]["includeLayoutTables"]=self.layoutTablesCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"]=self.autoPassThroughOnFocusChangeCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]=self.autoPassThroughOnCaretMoveCheckBox.IsChecked()
config.conf["virtualBuffers"]["passThroughAudioIndication"]=self.passThroughAudioIndicationCheckBox.IsChecked()
config.conf["virtualBuffers"]["trapNonCommandGestures"]=self.trapNonCommandGesturesCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoFocusFocusableElements"] = (
self.autoFocusFocusableElementsCheckBox.IsChecked()
)
class DocumentFormattingPanel(SettingsPanel):
# Translators: This is the label for the document formatting panel.
title = _("Document Formatting")
helpId = "DocumentFormattingSettings"
# Translators: This is a label appearing on the document formatting settings panel.
panelDescription = _("The following options control the types of document formatting reported by NVDA.")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
sHelper.addItem(wx.StaticText(self, label=self.panelDescription))
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
fontGroupText = _("Font")
fontGroupSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=fontGroupText)
fontGroupBox = fontGroupSizer.GetStaticBox()
fontGroup = guiHelper.BoxSizerHelper(self, sizer=fontGroupSizer)
sHelper.addItem(fontGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontNameText = _("&Font name")
self.fontNameCheckBox = fontGroup.addItem(wx.CheckBox(fontGroupBox, label=fontNameText))
self.fontNameCheckBox.SetValue(config.conf["documentFormatting"]["reportFontName"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontSizeText = _("Font &size")
self.fontSizeCheckBox = fontGroup.addItem(wx.CheckBox(fontGroupBox, label=fontSizeText))
self.fontSizeCheckBox.SetValue(config.conf["documentFormatting"]["reportFontSize"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontAttributesText = _("Font attrib&utes")
self.fontAttrsCheckBox = fontGroup.addItem(wx.CheckBox(fontGroupBox, label=fontAttributesText))
self.fontAttrsCheckBox.SetValue(config.conf["documentFormatting"]["reportFontAttributes"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
superscriptsAndSubscriptsText = _("Su&perscripts and subscripts")
self.superscriptsAndSubscriptsCheckBox = fontGroup.addItem(
wx.CheckBox(fontGroupBox, label=superscriptsAndSubscriptsText)
)
self.superscriptsAndSubscriptsCheckBox.SetValue(
config.conf["documentFormatting"]["reportSuperscriptsAndSubscripts"]
)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
emphasisText=_("E&mphasis")
self.emphasisCheckBox = fontGroup.addItem(wx.CheckBox(fontGroupBox, label=emphasisText))
self.emphasisCheckBox.SetValue(config.conf["documentFormatting"]["reportEmphasis"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
highlightText = _("Highlighted (mar&ked) text")
self.highlightCheckBox = fontGroup.addItem(
wx.CheckBox(fontGroupBox, label=highlightText)
)
self.highlightCheckBox.SetValue(
config.conf["documentFormatting"]["reportHighlight"]
)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
styleText =_("St&yle")
self.styleCheckBox = fontGroup.addItem(wx.CheckBox(fontGroupBox, label=styleText))
self.styleCheckBox.SetValue(config.conf["documentFormatting"]["reportStyle"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
colorsText = _("&Colors")
self.colorCheckBox = fontGroup.addItem(wx.CheckBox(fontGroupBox, label=colorsText))
self.colorCheckBox.SetValue(config.conf["documentFormatting"]["reportColor"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
documentInfoGroupText = _("Document information")
docInfoSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=documentInfoGroupText)
docInfoBox = docInfoSizer.GetStaticBox()
docInfoGroup = guiHelper.BoxSizerHelper(self, sizer=docInfoSizer)
sHelper.addItem(docInfoGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
commentsText = _("No&tes and comments")
self.commentsCheckBox = docInfoGroup.addItem(wx.CheckBox(docInfoBox, label=commentsText))
self.commentsCheckBox.SetValue(config.conf["documentFormatting"]["reportComments"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
revisionsText = _("&Editor revisions")
self.revisionsCheckBox = docInfoGroup.addItem(wx.CheckBox(docInfoBox, label=revisionsText))
self.revisionsCheckBox.SetValue(config.conf["documentFormatting"]["reportRevisions"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
spellingErrorText = _("Spelling e&rrors")
self.spellingErrorsCheckBox = docInfoGroup.addItem(wx.CheckBox(docInfoBox, label=spellingErrorText))
self.spellingErrorsCheckBox.SetValue(config.conf["documentFormatting"]["reportSpellingErrors"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
pageAndSpaceGroupText = _("Pages and spacing")
pageAndSpaceSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=pageAndSpaceGroupText)
pageAndSpaceBox = pageAndSpaceSizer.GetStaticBox()
pageAndSpaceGroup = guiHelper.BoxSizerHelper(self, sizer=pageAndSpaceSizer)
sHelper.addItem(pageAndSpaceGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
pageText = _("&Pages")
self.pageCheckBox = pageAndSpaceGroup.addItem(wx.CheckBox(pageAndSpaceBox, label=pageText))
self.pageCheckBox.SetValue(config.conf["documentFormatting"]["reportPage"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
lineText = _("Line &numbers")
self.lineNumberCheckBox = pageAndSpaceGroup.addItem(wx.CheckBox(pageAndSpaceBox, label=lineText))
self.lineNumberCheckBox.SetValue(config.conf["documentFormatting"]["reportLineNumber"])
# Translators: This is the label for a combobox controlling the reporting of line indentation in the
# Document Formatting dialog (possible choices are Off, Speech, Tones, or Both).
lineIndentationText = _("Line &indentation reporting:")
indentChoices=[
#Translators: A choice in a combo box in the document formatting dialog to report No line Indentation.
_("Off"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with Speech.
pgettext('line indentation setting', "Speech"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with tones.
_("Tones"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with both Speech and tones.
_("Both Speech and Tones")
]
self.lineIndentationCombo = pageAndSpaceGroup.addLabeledControl(lineIndentationText, wx.Choice, choices=indentChoices)
self.bindHelpEvent(
"DocumentFormattingSettingsLineIndentation",
self.lineIndentationCombo
)
#We use bitwise operations because it saves us a four way if statement.
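# The combo box order is: 0 = Off, 1 = Speech, 2 = Tones, 3 = Both; shifting the tones flag
# left one bit and OR-ing in the speech flag yields exactly that index.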
curChoice = config.conf["documentFormatting"]["reportLineIndentationWithTones"] << 1 | config.conf["documentFormatting"]["reportLineIndentation"]
self.lineIndentationCombo.SetSelection(curChoice)
# Translators: This message is presented in the document formatting settings panel.
# If this option is selected, NVDA will report paragraph indentation if available.
paragraphIndentationText = _("&Paragraph indentation")
_paragraphIndentationCheckBox = wx.CheckBox(pageAndSpaceBox, label=paragraphIndentationText)
self.paragraphIndentationCheckBox = pageAndSpaceGroup.addItem(_paragraphIndentationCheckBox)
self.paragraphIndentationCheckBox.SetValue(config.conf["documentFormatting"]["reportParagraphIndentation"])
# Translators: This message is presented in the document formatting settings panel.
# If this option is selected, NVDA will report line spacing if available.
lineSpacingText=_("&Line spacing")
_lineSpacingCheckBox = wx.CheckBox(pageAndSpaceBox, label=lineSpacingText)
self.lineSpacingCheckBox = pageAndSpaceGroup.addItem(_lineSpacingCheckBox)
self.lineSpacingCheckBox.SetValue(config.conf["documentFormatting"]["reportLineSpacing"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
alignmentText = _("&Alignment")
self.alignmentCheckBox = pageAndSpaceGroup.addItem(wx.CheckBox(pageAndSpaceBox, label=alignmentText))
self.alignmentCheckBox.SetValue(config.conf["documentFormatting"]["reportAlignment"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
tablesGroupText = _("Table information")
tablesGroupSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=tablesGroupText)
tablesGroupBox = tablesGroupSizer.GetStaticBox()
tablesGroup = guiHelper.BoxSizerHelper(self, sizer=tablesGroupSizer)
sHelper.addItem(tablesGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tablesCheckBox = tablesGroup.addItem(wx.CheckBox(tablesGroupBox, label=_("&Tables")))
self.tablesCheckBox.SetValue(config.conf["documentFormatting"]["reportTables"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
_tableHeadersCheckBox = wx.CheckBox(tablesGroupBox, label=_("Row/column h&eaders"))
self.tableHeadersCheckBox = tablesGroup.addItem(_tableHeadersCheckBox)
self.tableHeadersCheckBox.SetValue(config.conf["documentFormatting"]["reportTableHeaders"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
_tableCellCoordsCheckBox = wx.CheckBox(tablesGroupBox, label=_("Cell c&oordinates"))
self.tableCellCoordsCheckBox = tablesGroup.addItem(_tableCellCoordsCheckBox)
self.tableCellCoordsCheckBox.SetValue(config.conf["documentFormatting"]["reportTableCellCoords"])
borderChoices=[
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Off"),
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Styles"),
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Both Colors and Styles"),
]
self.borderComboBox = tablesGroup.addLabeledControl(
# Translators: This is the label for a combobox in the
# document formatting settings panel.
_("Cell &borders:"),
wx.Choice,
choices=borderChoices
)
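# Map the current border reporting configuration onto the combo box:
# 0 = Off, 1 = Styles only, 2 = Both Colors and Styles.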
curChoice = 0
if config.conf["documentFormatting"]["reportBorderStyle"]:
if config.conf["documentFormatting"]["reportBorderColor"]:
curChoice = 2
else:
curChoice = 1
self.borderComboBox.SetSelection(curChoice)
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
elementsGroupText = _("Elements")
elementsGroupSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=elementsGroupText)
elementsGroupBox = elementsGroupSizer.GetStaticBox()
elementsGroup = guiHelper.BoxSizerHelper(self, sizer=elementsGroupSizer)
sHelper.addItem(elementsGroup, flag=wx.EXPAND, proportion=1)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.headingsCheckBox = elementsGroup.addItem(wx.CheckBox(elementsGroupBox, label=_("&Headings")))
self.headingsCheckBox.SetValue(config.conf["documentFormatting"]["reportHeadings"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.linksCheckBox = elementsGroup.addItem(wx.CheckBox(elementsGroupBox, label=_("Lin&ks")))
self.linksCheckBox.SetValue(config.conf["documentFormatting"]["reportLinks"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.graphicsCheckBox = elementsGroup.addItem(wx.CheckBox(elementsGroupBox, label=_("&Graphics")))
self.graphicsCheckBox.SetValue(config.conf["documentFormatting"]["reportGraphics"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.listsCheckBox = elementsGroup.addItem(wx.CheckBox(elementsGroupBox, label=_("&Lists")))
self.listsCheckBox.SetValue(config.conf["documentFormatting"]["reportLists"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
_blockQuotesCheckBox = wx.CheckBox(elementsGroupBox, label=_("Block "es"))
self.blockQuotesCheckBox = elementsGroup.addItem(_blockQuotesCheckBox)
self.blockQuotesCheckBox.SetValue(config.conf["documentFormatting"]["reportBlockQuotes"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
groupingsText = _("&Groupings")
self.groupingsCheckBox = elementsGroup.addItem(wx.CheckBox(elementsGroupBox, label=groupingsText))
self.groupingsCheckBox.SetValue(config.conf["documentFormatting"]["reportGroupings"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
landmarksText = _("Lan&dmarks and regions")
self.landmarksCheckBox = elementsGroup.addItem(wx.CheckBox(elementsGroupBox, label=landmarksText))
self.landmarksCheckBox.SetValue(config.conf["documentFormatting"]["reportLandmarks"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.articlesCheckBox = elementsGroup.addItem(wx.CheckBox(elementsGroupBox, label=_("Arti&cles")))
self.articlesCheckBox.SetValue(config.conf["documentFormatting"]["reportArticles"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.framesCheckBox = elementsGroup.addItem(wx.CheckBox(elementsGroupBox, label=_("Fra&mes")))
self.framesCheckBox.Value=config.conf["documentFormatting"]["reportFrames"]
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.clickableCheckBox = elementsGroup.addItem(wx.CheckBox(elementsGroupBox, label=_("&Clickable")))
self.clickableCheckBox.Value=config.conf["documentFormatting"]["reportClickable"]
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
detectFormatAfterCursorText = _("Report formatting chan&ges after the cursor (can cause a lag)")
self.detectFormatAfterCursorCheckBox = wx.CheckBox(self, label=detectFormatAfterCursorText)
self.bindHelpEvent(
"DocumentFormattingDetectFormatAfterCursor",
self.detectFormatAfterCursorCheckBox
)
self.detectFormatAfterCursorCheckBox.SetValue(config.conf["documentFormatting"]["detectFormatAfterCursor"])
sHelper.addItem(self.detectFormatAfterCursorCheckBox)
def onSave(self):
config.conf["documentFormatting"]["detectFormatAfterCursor"]=self.detectFormatAfterCursorCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontName"]=self.fontNameCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontSize"]=self.fontSizeCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontAttributes"]=self.fontAttrsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportSuperscriptsAndSubscripts"] = (
self.superscriptsAndSubscriptsCheckBox.IsChecked()
)
config.conf["documentFormatting"]["reportColor"]=self.colorCheckBox.IsChecked()
config.conf["documentFormatting"]["reportComments"]=self.commentsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportRevisions"]=self.revisionsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportEmphasis"]=self.emphasisCheckBox.IsChecked()
config.conf["documentFormatting"]["reportHighlight"] = self.highlightCheckBox.IsChecked()
config.conf["documentFormatting"]["reportAlignment"]=self.alignmentCheckBox.IsChecked()
config.conf["documentFormatting"]["reportStyle"]=self.styleCheckBox.IsChecked()
config.conf["documentFormatting"]["reportSpellingErrors"]=self.spellingErrorsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportPage"]=self.pageCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLineNumber"]=self.lineNumberCheckBox.IsChecked()
choice = self.lineIndentationCombo.GetSelection()
config.conf["documentFormatting"]["reportLineIndentation"] = choice in (1, 3)
config.conf["documentFormatting"]["reportLineIndentationWithTones"] = choice in (2, 3)
config.conf["documentFormatting"]["reportParagraphIndentation"]=self.paragraphIndentationCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLineSpacing"]=self.lineSpacingCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTables"]=self.tablesCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTableHeaders"]=self.tableHeadersCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTableCellCoords"]=self.tableCellCoordsCheckBox.IsChecked()
choice = self.borderComboBox.GetSelection()
config.conf["documentFormatting"]["reportBorderStyle"] = choice in (1,2)
config.conf["documentFormatting"]["reportBorderColor"] = (choice == 2)
config.conf["documentFormatting"]["reportLinks"]=self.linksCheckBox.IsChecked()
config.conf["documentFormatting"]["reportGraphics"] = self.graphicsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportHeadings"]=self.headingsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLists"]=self.listsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportBlockQuotes"]=self.blockQuotesCheckBox.IsChecked()
config.conf["documentFormatting"]["reportGroupings"] = self.groupingsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLandmarks"]=self.landmarksCheckBox.IsChecked()
config.conf["documentFormatting"]["reportArticles"] = self.articlesCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFrames"]=self.framesCheckBox.Value
config.conf["documentFormatting"]["reportClickable"]=self.clickableCheckBox.Value
class TouchInteractionPanel(SettingsPanel):
# Translators: This is the label for the touch interaction settings panel.
title = _("Touch Interaction")
helpId = "TouchInteraction"
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# touch interaction settings panel.
touchSupportEnableLabel = _("Enable touch interaction support")
self.enableTouchSupportCheckBox = sHelper.addItem(wx.CheckBox(self, label=touchSupportEnableLabel))
self.bindHelpEvent("TouchSupportEnable", self.enableTouchSupportCheckBox)
self.enableTouchSupportCheckBox.SetValue(config.conf["touch"]["enabled"])
# Translators: This is the label for a checkbox in the
# touch interaction settings panel.
self.touchTypingCheckBox = sHelper.addItem(wx.CheckBox(self, label=_("&Touch typing mode")))
self.bindHelpEvent("TouchTypingMode", self.touchTypingCheckBox)
self.touchTypingCheckBox.SetValue(config.conf["touch"]["touchTyping"])
def onSave(self):
config.conf["touch"]["enabled"] = self.enableTouchSupportCheckBox.IsChecked()
config.conf["touch"]["touchTyping"] = self.touchTypingCheckBox.IsChecked()
touchHandler.setTouchSupport(config.conf["touch"]["enabled"])
class UwpOcrPanel(SettingsPanel):
# Translators: The title of the Windows OCR panel.
title = _("Windows OCR")
helpId = "Win10OcrSettings"
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Lazily import this.
from contentRecog import uwpOcr
self.languageCodes = uwpOcr.getLanguages()
languageChoices = [
languageHandler.getLanguageDescription(languageHandler.normalizeLanguage(lang))
for lang in self.languageCodes]
# Translators: Label for an option in the Windows OCR dialog.
languageLabel = _("Recognition &language:")
self.languageChoice = sHelper.addLabeledControl(languageLabel, wx.Choice, choices=languageChoices)
self.bindHelpEvent("Win10OcrSettingsRecognitionLanguage", self.languageChoice)
try:
langIndex = self.languageCodes.index(config.conf["uwpOcr"]["language"])
self.languageChoice.Selection = langIndex
except ValueError:
self.languageChoice.Selection = 0
def onSave(self):
lang = self.languageCodes[self.languageChoice.Selection]
config.conf["uwpOcr"]["language"] = lang
class AdvancedPanelControls(
gui.contextHelp.ContextHelpMixin,
wx.Panel, # wxPython does not seem to call base class initializer, put last in MRO
):
"""Holds the actual controls for the Advanced Settings panel, this allows the state of the controls to
be more easily managed.
"""
helpId = "AdvancedSettings"
def __init__(self, parent):
super().__init__(parent)
self._defaultsRestored = False
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
self.SetSizer(sHelper.sizer)
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
groupText = _("NVDA Development")
devGroupSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=groupText)
devGroupBox = devGroupSizer.GetStaticBox()
devGroup = guiHelper.BoxSizerHelper(self, sizer=devGroupSizer)
sHelper.addItem(devGroup)
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Enable loading custom code from Developer Scratchpad directory")
self.scratchpadCheckBox = devGroup.addItem(wx.CheckBox(devGroupBox, label=label))
self.bindHelpEvent("AdvancedSettingsEnableScratchpad", self.scratchpadCheckBox)
self.scratchpadCheckBox.SetValue(config.conf["development"]["enableScratchpadDir"])
self.scratchpadCheckBox.defaultValue = self._getDefaultValue(["development", "enableScratchpadDir"])
self.scratchpadCheckBox.Bind(
wx.EVT_CHECKBOX,
lambda evt: self.openScratchpadButton.Enable(evt.IsChecked())
)
if config.isAppX:
self.scratchpadCheckBox.Disable()
# Translators: the label for a button in the Advanced settings category
label=_("Open developer scratchpad directory")
self.openScratchpadButton = devGroup.addItem(wx.Button(devGroupBox, label=label))
self.bindHelpEvent("AdvancedSettingsOpenScratchpadDir", self.openScratchpadButton)
self.openScratchpadButton.Enable(config.conf["development"]["enableScratchpadDir"])
self.openScratchpadButton.Bind(wx.EVT_BUTTON,self.onOpenScratchpadDir)
if config.isAppX:
self.openScratchpadButton.Disable()
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Microsoft UI Automation")
UIASizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=label)
UIABox = UIASizer.GetStaticBox()
UIAGroup = guiHelper.BoxSizerHelper(self, sizer=UIASizer)
sHelper.addItem(UIAGroup)
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Enable &selective registration for UI Automation events and property changes")
self.selectiveUIAEventRegistrationCheckBox = UIAGroup.addItem(wx.CheckBox(UIABox, label=label))
self.bindHelpEvent(
"AdvancedSettingsSelectiveUIAEventRegistration",
self.selectiveUIAEventRegistrationCheckBox
)
self.selectiveUIAEventRegistrationCheckBox.SetValue(config.conf["UIA"]["selectiveEventRegistration"])
self.selectiveUIAEventRegistrationCheckBox.defaultValue = (
self._getDefaultValue(["UIA", "selectiveEventRegistration"])
)
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Always use UI Automation to access Microsoft &Word document controls when available")
self.UIAInMSWordCheckBox = UIAGroup.addItem(wx.CheckBox(UIABox, label=label))
self.bindHelpEvent("AdvancedSettingsUseUiaForWord", self.UIAInMSWordCheckBox)
self.UIAInMSWordCheckBox.SetValue(config.conf["UIA"]["useInMSWordWhenAvailable"])
self.UIAInMSWordCheckBox.defaultValue = self._getDefaultValue(["UIA", "useInMSWordWhenAvailable"])
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Use UI Automation to access Microsoft &Excel spreadsheet controls when available")
self.UIAInMSExcelCheckBox = UIAGroup.addItem(wx.CheckBox(UIABox, label=label))
self.bindHelpEvent("UseUiaForExcel", self.UIAInMSExcelCheckBox)
self.UIAInMSExcelCheckBox.SetValue(config.conf["UIA"]["useInMSExcelWhenAvailable"])
self.UIAInMSExcelCheckBox.defaultValue = self._getDefaultValue(["UIA", "useInMSExcelWhenAvailable"])
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Use UI Automation to access the Windows C&onsole when available")
consoleUIADevMap = config.conf['UIA']['winConsoleImplementation'] == 'UIA'
self.ConsoleUIACheckBox = UIAGroup.addItem(wx.CheckBox(UIABox, label=label))
self.bindHelpEvent("AdvancedSettingsConsoleUIA", self.ConsoleUIACheckBox)
self.ConsoleUIACheckBox.SetValue(consoleUIADevMap)
self.ConsoleUIACheckBox.defaultValue = self._getDefaultValue(["UIA", "winConsoleImplementation"])
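# Note: defaultValue holds the raw config string ("auto" or "UIA") rather than a boolean,
# so it is compared against "UIA" when checking or restoring defaults.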
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Speak &passwords in UIA consoles (may improve performance)")
self.winConsoleSpeakPasswordsCheckBox = UIAGroup.addItem(wx.CheckBox(UIABox, label=label))
self.bindHelpEvent("AdvancedSettingsWinConsoleSpeakPasswords", self.winConsoleSpeakPasswordsCheckBox)
self.winConsoleSpeakPasswordsCheckBox.SetValue(config.conf["terminals"]["speakPasswords"])
self.winConsoleSpeakPasswordsCheckBox.defaultValue = self._getDefaultValue(["terminals", "speakPasswords"])
label = pgettext(
"advanced.uiaWithChromium",
# Translators: Label for the Use UIA with Chromium combobox, in the Advanced settings panel.
# Note the '\n' is used to split this long label approximately in half.
"Use UIA with Microsoft Edge and other \n&Chromium based browsers when available:"
)
chromiumChoices = (
# Translators: Label for the default value of the Use UIA with Chromium combobox,
# in the Advanced settings panel.
pgettext("advanced.uiaWithChromium", "Default (Only when necessary)"),
# Translators: Label for a value in the Use UIA with Chromium combobox, in the Advanced settings panel.
pgettext("advanced.uiaWithChromium", "Only when necessary"),
# Translators: Label for a value in the Use UIA with Chromium combobox, in the Advanced settings panel.
pgettext("advanced.uiaWithChromium", "Yes"),
# Translators: Label for a value in the Use UIA with Chromium combobox, in the Advanced settings panel.
pgettext("advanced.uiaWithChromium", "No"),
)
self.UIAInChromiumCombo = UIAGroup.addLabeledControl(label, wx.Choice, choices=chromiumChoices)
self.bindHelpEvent("ChromiumUIA", self.UIAInChromiumCombo)
self.UIAInChromiumCombo.SetSelection(config.conf["UIA"]["allowInChromium"])
self.UIAInChromiumCombo.defaultValue = self._getDefaultValue(["UIA", "allowInChromium"])
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Annotations")
AnnotationsSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=label)
AnnotationsBox = AnnotationsSizer.GetStaticBox()
AnnotationsGroup = guiHelper.BoxSizerHelper(self, sizer=AnnotationsSizer)
self.bindHelpEvent("Annotations", AnnotationsBox)
sHelper.addItem(AnnotationsGroup)
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Report details in browse mode")
self.annotationsDetailsCheckBox = AnnotationsGroup.addItem(wx.CheckBox(AnnotationsBox, label=label))
self.annotationsDetailsCheckBox.SetValue(config.conf["annotations"]["reportDetails"])
self.annotationsDetailsCheckBox.defaultValue = self._getDefaultValue(["annotations", "reportDetails"])
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Report aria-description always")
self.ariaDescCheckBox: wx.CheckBox = AnnotationsGroup.addItem(
wx.CheckBox(AnnotationsBox, label=label)
)
self.ariaDescCheckBox.SetValue(config.conf["annotations"]["reportAriaDescription"])
self.ariaDescCheckBox.defaultValue = self._getDefaultValue(["annotations", "reportAriaDescription"])
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Terminal programs")
terminalsSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=label)
terminalsBox = terminalsSizer.GetStaticBox()
terminalsGroup = guiHelper.BoxSizerHelper(self, sizer=terminalsSizer)
sHelper.addItem(terminalsGroup)
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Use the new t&yped character support in Windows Console when available")
self.keyboardSupportInLegacyCheckBox = terminalsGroup.addItem(wx.CheckBox(terminalsBox, label=label))
self.bindHelpEvent("AdvancedSettingsKeyboardSupportInLegacy", self.keyboardSupportInLegacyCheckBox)
self.keyboardSupportInLegacyCheckBox.SetValue(config.conf["terminals"]["keyboardSupportInLegacy"])
self.keyboardSupportInLegacyCheckBox.defaultValue = self._getDefaultValue(["terminals", "keyboardSupportInLegacy"])
self.keyboardSupportInLegacyCheckBox.Enable(winVersion.getWinVer() >= winVersion.WIN10_1607)
# Translators: This is the label for a combo box for selecting a
# method of detecting changed content in terminals in the advanced
# settings panel.
# Choices are automatic, allow Diff Match Patch, and force Difflib.
diffAlgoComboText = _("&Diff algorithm:")
diffAlgoChoices = [
# Translators: A choice in a combo box in the advanced settings
# panel to have NVDA determine the method of detecting changed
# content in terminals automatically.
_("Automatic (Diff Match Patch)"),
# Translators: A choice in a combo box in the advanced settings
# panel to have NVDA detect changes in terminals
# by character when supported, using the diff match patch algorithm.
_("allow Diff Match Patch"),
# Translators: A choice in a combo box in the advanced settings
# panel to have NVDA detect changes in terminals
# by line, using the difflib algorithm.
_("force Difflib")
]
#: The possible diffAlgo config values, in the order they appear
#: in the combo box.
self.diffAlgoVals = (
"auto",
"dmp",
"difflib"
)
self.diffAlgoCombo = terminalsGroup.addLabeledControl(diffAlgoComboText, wx.Choice, choices=diffAlgoChoices)
self.bindHelpEvent("DiffAlgo", self.diffAlgoCombo)
curChoice = self.diffAlgoVals.index(
config.conf['terminals']['diffAlgo']
)
self.diffAlgoCombo.SetSelection(curChoice)
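# defaultValue is stored as an index into diffAlgoVals so it can be compared directly
# with the combo box selection.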
self.diffAlgoCombo.defaultValue = self.diffAlgoVals.index(
self._getDefaultValue(["terminals", "diffAlgo"])
)
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Speech")
speechSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=label)
speechGroup = guiHelper.BoxSizerHelper(self, sizer=speechSizer)
sHelper.addItem(speechGroup)
expiredFocusSpeechChoices = [
# Translators: Label for the 'Cancel speech for expired &focus events' combobox
# in the Advanced settings panel.
_("Default (Yes)"),
# Translators: Label for the 'Cancel speech for expired &focus events' combobox
# in the Advanced settings panel.
_("Yes"),
# Translators: Label for the 'Cancel speech for expired &focus events' combobox
# in the Advanced settings panel.
_("No"),
]
# Translators: This is the label for combobox in the Advanced settings panel.
cancelExpiredFocusSpeechText = _("Attempt to cancel speech for expired focus events:")
self.cancelExpiredFocusSpeechCombo: wx.Choice = speechGroup.addLabeledControl(
cancelExpiredFocusSpeechText,
wx.Choice,
choices=expiredFocusSpeechChoices
)
self.bindHelpEvent("CancelExpiredFocusSpeech", self.cancelExpiredFocusSpeechCombo)
self.cancelExpiredFocusSpeechCombo.SetSelection(
config.conf["featureFlag"]["cancelExpiredFocusSpeech"]
)
self.cancelExpiredFocusSpeechCombo.defaultValue = self._getDefaultValue(
["featureFlag", "cancelExpiredFocusSpeech"]
)
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Editable Text")
editableSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=label)
editableTextGroup = guiHelper.BoxSizerHelper(self, sizer=editableSizer)
sHelper.addItem(editableTextGroup)
# Translators: This is the label for a numeric control in the
# Advanced settings panel.
label = _("Caret movement timeout (in ms)")
self.caretMoveTimeoutSpinControl=editableTextGroup.addLabeledControl(
label,
nvdaControls.SelectOnFocusSpinCtrl,
min=0,
max=2000,
initial=config.conf["editableText"]["caretMoveTimeoutMs"]
)
self.bindHelpEvent("AdvancedSettingsCaretMoveTimeout", self.caretMoveTimeoutSpinControl)
self.caretMoveTimeoutSpinControl.defaultValue = self._getDefaultValue(["editableText", "caretMoveTimeoutMs"])
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Document Formatting")
docFormatting = wx.StaticBoxSizer(wx.VERTICAL, self, label=label)
docFormattingBox = docFormatting.GetStaticBox()
docFormattingGroup = guiHelper.BoxSizerHelper(self, sizer=docFormatting)
sHelper.addItem(docFormattingGroup)
# Translators: This is the label for a checkbox control in the
# Advanced settings panel.
label = _("Report transparent color values")
self.reportTransparentColorCheckBox: wx.CheckBox = docFormattingGroup.addItem(
wx.CheckBox(docFormattingBox, label=label)
)
self.bindHelpEvent("ReportTransparentColors", self.reportTransparentColorCheckBox)
self.reportTransparentColorCheckBox.SetValue(
config.conf["documentFormatting"]["reportTransparentColor"]
)
self.reportTransparentColorCheckBox.defaultValue = self._getDefaultValue(
["documentFormatting", "reportTransparentColor"])
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Debug logging")
debugLogSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=label)
debugLogGroup = guiHelper.BoxSizerHelper(self, sizer=debugLogSizer)
sHelper.addItem(debugLogGroup)
self.logCategories=[
"hwIo",
"MSAA",
"UIA",
"audioDucking",
"gui",
"louis",
"timeSinceInput",
"vision",
"speech",
"speechManager",
"synthDriver",
"nvwave",
]
# Translators: This is the label for a list in the
# Advanced settings panel
logCategoriesLabel=_("Enabled logging categories")
self.logCategoriesList=debugLogGroup.addLabeledControl(
logCategoriesLabel,
nvdaControls.CustomCheckListBox,
choices=self.logCategories
)
self.bindHelpEvent("AdvancedSettingsDebugLoggingCategories", self.logCategoriesList)
self.logCategoriesList.CheckedItems = [
index for index, x in enumerate(self.logCategories) if config.conf['debugLog'][x]
]
self.logCategoriesList.Select(0)
self.logCategoriesList.defaultCheckedItems = [
index for index, x in enumerate(self.logCategories) if bool(
self._getDefaultValue(['debugLog', x])
)
]
# Translators: Label for the Play a sound for logged errors combobox, in the Advanced settings panel.
label = _("Play a sound for logged e&rrors:")
playErrorSoundChoices = (
# Translators: Label for a value in the Play a sound for logged errors combobox, in the Advanced settings.
pgettext("advanced.playErrorSound", "Only in NVDA test versions"),
# Translators: Label for a value in the Play a sound for logged errors combobox, in the Advanced settings.
pgettext("advanced.playErrorSound", "Yes"),
)
self.playErrorSoundCombo = debugLogGroup.addLabeledControl(label, wx.Choice, choices=playErrorSoundChoices)
self.bindHelpEvent("PlayErrorSound", self.playErrorSoundCombo)
self.playErrorSoundCombo.SetSelection(config.conf["featureFlag"]["playErrorSound"])
self.playErrorSoundCombo.defaultValue = self._getDefaultValue(["featureFlag", "playErrorSound"])
self.Layout()
def onOpenScratchpadDir(self,evt):
path=config.getScratchpadDir(ensureExists=True)
os.startfile(path)
def _getDefaultValue(self, configPath):
return config.conf.getConfigValidation(configPath).default
def haveConfigDefaultsBeenRestored(self):
return (
self._defaultsRestored
and self.scratchpadCheckBox.IsChecked() == self.scratchpadCheckBox.defaultValue
and (
self.selectiveUIAEventRegistrationCheckBox.IsChecked()
== self.selectiveUIAEventRegistrationCheckBox.defaultValue
)
and self.UIAInMSWordCheckBox.IsChecked() == self.UIAInMSWordCheckBox.defaultValue
and self.UIAInMSExcelCheckBox.IsChecked() == self.UIAInMSExcelCheckBox.defaultValue
and self.ConsoleUIACheckBox.IsChecked() == (self.ConsoleUIACheckBox.defaultValue == 'UIA')
and self.winConsoleSpeakPasswordsCheckBox.IsChecked() == self.winConsoleSpeakPasswordsCheckBox.defaultValue
and self.cancelExpiredFocusSpeechCombo.GetSelection() == self.cancelExpiredFocusSpeechCombo.defaultValue
and self.UIAInChromiumCombo.GetSelection() == self.UIAInChromiumCombo.defaultValue
and self.keyboardSupportInLegacyCheckBox.IsChecked() == self.keyboardSupportInLegacyCheckBox.defaultValue
and self.diffAlgoCombo.GetSelection() == self.diffAlgoCombo.defaultValue
and self.caretMoveTimeoutSpinControl.GetValue() == self.caretMoveTimeoutSpinControl.defaultValue
and self.reportTransparentColorCheckBox.GetValue() == self.reportTransparentColorCheckBox.defaultValue
and set(self.logCategoriesList.CheckedItems) == set(self.logCategoriesList.defaultCheckedItems)
and self.annotationsDetailsCheckBox.IsChecked() == self.annotationsDetailsCheckBox.defaultValue
and self.ariaDescCheckBox.IsChecked() == self.ariaDescCheckBox.defaultValue
and True # reduce noise in diff when the list is extended.
)
def restoreToDefaults(self):
self.scratchpadCheckBox.SetValue(self.scratchpadCheckBox.defaultValue)
self.selectiveUIAEventRegistrationCheckBox.SetValue(self.selectiveUIAEventRegistrationCheckBox.defaultValue)
self.UIAInMSWordCheckBox.SetValue(self.UIAInMSWordCheckBox.defaultValue)
self.UIAInMSExcelCheckBox.SetValue(self.UIAInMSExcelCheckBox.defaultValue)
self.ConsoleUIACheckBox.SetValue(self.ConsoleUIACheckBox.defaultValue == 'UIA')
self.UIAInChromiumCombo.SetSelection(self.UIAInChromiumCombo.defaultValue)
self.winConsoleSpeakPasswordsCheckBox.SetValue(self.winConsoleSpeakPasswordsCheckBox.defaultValue)
self.cancelExpiredFocusSpeechCombo.SetSelection(self.cancelExpiredFocusSpeechCombo.defaultValue)
self.keyboardSupportInLegacyCheckBox.SetValue(self.keyboardSupportInLegacyCheckBox.defaultValue)
self.diffAlgoCombo.SetSelection(self.diffAlgoCombo.defaultValue)
self.caretMoveTimeoutSpinControl.SetValue(self.caretMoveTimeoutSpinControl.defaultValue)
self.annotationsDetailsCheckBox.SetValue(self.annotationsDetailsCheckBox.defaultValue)
self.ariaDescCheckBox.SetValue(self.ariaDescCheckBox.defaultValue)
self.reportTransparentColorCheckBox.SetValue(self.reportTransparentColorCheckBox.defaultValue)
		self.logCategoriesList.CheckedItems = self.logCategoriesList.defaultCheckedItems
		self.playErrorSoundCombo.SetSelection(self.playErrorSoundCombo.defaultValue)
self._defaultsRestored = True
def onSave(self):
log.debug("Saving advanced config")
config.conf["development"]["enableScratchpadDir"]=self.scratchpadCheckBox.IsChecked()
config.conf["UIA"]["selectiveEventRegistration"] = self.selectiveUIAEventRegistrationCheckBox.IsChecked()
config.conf["UIA"]["useInMSWordWhenAvailable"]=self.UIAInMSWordCheckBox.IsChecked()
config.conf["UIA"]["useInMSExcelWhenAvailable"] = self.UIAInMSExcelCheckBox.IsChecked()
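		# The console checkbox maps onto the winConsoleImplementation string setting:
		# checked forces the UIA implementation, unchecked lets NVDA choose automatically.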
if self.ConsoleUIACheckBox.IsChecked():
config.conf['UIA']['winConsoleImplementation'] = "UIA"
else:
config.conf['UIA']['winConsoleImplementation'] = "auto"
config.conf["terminals"]["speakPasswords"] = self.winConsoleSpeakPasswordsCheckBox.IsChecked()
config.conf["featureFlag"]["cancelExpiredFocusSpeech"] = self.cancelExpiredFocusSpeechCombo.GetSelection()
config.conf["UIA"]["allowInChromium"] = self.UIAInChromiumCombo.GetSelection()
config.conf["terminals"]["keyboardSupportInLegacy"]=self.keyboardSupportInLegacyCheckBox.IsChecked()
diffAlgoChoice = self.diffAlgoCombo.GetSelection()
config.conf['terminals']['diffAlgo'] = (
self.diffAlgoVals[diffAlgoChoice]
)
config.conf["editableText"]["caretMoveTimeoutMs"]=self.caretMoveTimeoutSpinControl.GetValue()
config.conf["documentFormatting"]["reportTransparentColor"] = (
self.reportTransparentColorCheckBox.IsChecked()
)
config.conf["annotations"]["reportDetails"] = self.annotationsDetailsCheckBox.IsChecked()
config.conf["annotations"]["reportAriaDescription"] = self.ariaDescCheckBox.IsChecked()
for index,key in enumerate(self.logCategories):
config.conf['debugLog'][key]=self.logCategoriesList.IsChecked(index)
config.conf["featureFlag"]["playErrorSound"] = self.playErrorSoundCombo.GetSelection()
class AdvancedPanel(SettingsPanel):
enableControlsCheckBox = None # type: wx.CheckBox
# Translators: This is the label for the Advanced settings panel.
title = _("Advanced")
helpId = "AdvancedSettings"
# Translators: This is the label to warn users about the Advanced options in the
# Advanced settings panel
warningHeader = _("Warning!")
warningExplanation = _(
# Translators: This is a label appearing on the Advanced settings panel.
"The following settings are for advanced users. "
"Changing them may cause NVDA to function incorrectly. "
"Please only change these if you know what you are doing or "
"have been specifically instructed by NVDA developers."
)
panelDescription = u"{}\n{}".format(warningHeader, warningExplanation)
def makeSettings(self, settingsSizer):
"""
:type settingsSizer: wx.BoxSizer
"""
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
warningSizer = wx.StaticBoxSizer(wx.VERTICAL, self)
warningGroup = guiHelper.BoxSizerHelper(self, sizer=warningSizer)
warningBox = warningGroup.sizer.GetStaticBox() # type: wx.StaticBox
sHelper.addItem(warningGroup)
warningText = wx.StaticText(warningBox, label=self.warningHeader)
warningText.SetFont(wx.Font(18, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.BOLD))
warningGroup.addItem(warningText)
self.windowText = warningGroup.addItem(wx.StaticText(warningBox, label=self.warningExplanation))
self.windowText.Wrap(self.scaleSize(544))
enableAdvancedControlslabel = _(
# Translators: This is the label for a checkbox in the Advanced settings panel.
"I understand that changing these settings may cause NVDA to function incorrectly."
)
self.enableControlsCheckBox = warningGroup.addItem(
wx.CheckBox(parent=warningBox, label=enableAdvancedControlslabel, id=wx.NewIdRef())
)
boldedFont = self.enableControlsCheckBox.GetFont().Bold()
self.enableControlsCheckBox.SetFont(boldedFont)
self.bindHelpEvent("AdvancedSettingsMakingChanges", self.enableControlsCheckBox)
restoreDefaultsButton = warningGroup.addItem(
# Translators: This is the label for a button in the Advanced settings panel
wx.Button(warningBox, label=_("Restore defaults"))
)
self.bindHelpEvent("AdvancedSettingsRestoringDefaults", restoreDefaultsButton)
restoreDefaultsButton.Bind(wx.EVT_BUTTON, lambda evt: self.advancedControls.restoreToDefaults())
self.advancedControls = AdvancedPanelControls(self)
sHelper.sizer.Add(self.advancedControls, flag=wx.EXPAND)
self.enableControlsCheckBox.Bind(
wx.EVT_CHECKBOX,
self.onEnableControlsCheckBox
)
self.advancedControls.Enable(self.enableControlsCheckBox.IsChecked())
def onSave(self):
if (
self.enableControlsCheckBox.IsChecked() or
self.advancedControls.haveConfigDefaultsBeenRestored()
):
self.advancedControls.onSave()
def onEnableControlsCheckBox(self, evt):
		# Due to a not very well understood mis-ordering of event processing, we force NVDA to
		# process pending events. This fixes an issue where the checkbox state was being reported
		# incorrectly. This checkbox is slightly different from most, in that it enables far more
		# controls than is typical. This may introduce enough of a delay that there is a mismatch
		# between the state of the checkbox and when the events are processed by NVDA.
from api import processPendingEvents
processPendingEvents()
self.advancedControls.Enable(evt.IsChecked())
class DictionaryEntryDialog(
gui.contextHelp.ContextHelpMixin,
wx.Dialog, # wxPython does not seem to call base class initializer, put last in MRO
):
helpId = "SpeechDictionaries"
TYPE_LABELS = {
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_ANYWHERE: _("&Anywhere"),
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_WORD: _("Whole &word"),
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_REGEXP: _("Regular &expression")
}
TYPE_LABELS_ORDERING = (speechDictHandler.ENTRY_TYPE_ANYWHERE, speechDictHandler.ENTRY_TYPE_WORD, speechDictHandler.ENTRY_TYPE_REGEXP)
# Translators: This is the label for the edit dictionary entry dialog.
def __init__(self, parent, title=_("Edit Dictionary Entry")):
super(DictionaryEntryDialog,self).__init__(parent,title=title)
mainSizer=wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: This is a label for an edit field in add dictionary entry dialog.
patternLabelText = _("&Pattern")
self.patternTextCtrl=sHelper.addLabeledControl(patternLabelText, wx.TextCtrl)
# Translators: This is a label for an edit field in add dictionary entry dialog and in punctuation/symbol pronunciation dialog.
replacementLabelText = _("&Replacement")
self.replacementTextCtrl=sHelper.addLabeledControl(replacementLabelText, wx.TextCtrl)
# Translators: This is a label for an edit field in add dictionary entry dialog.
commentLabelText = _("&Comment")
self.commentTextCtrl=sHelper.addLabeledControl(commentLabelText, wx.TextCtrl)
# Translators: This is a label for a checkbox in add dictionary entry dialog.
caseSensitiveText = _("Case &sensitive")
self.caseSensitiveCheckBox=sHelper.addItem(wx.CheckBox(self,label=caseSensitiveText))
# Translators: This is a label for a set of radio buttons in add dictionary entry dialog.
typeText = _("&Type")
typeChoices = [DictionaryEntryDialog.TYPE_LABELS[i] for i in DictionaryEntryDialog.TYPE_LABELS_ORDERING]
self.typeRadioBox=sHelper.addItem(wx.RadioBox(self,label=typeText, choices=typeChoices))
sHelper.addDialogDismissButtons(wx.OK | wx.CANCEL, separated=True)
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.setType(speechDictHandler.ENTRY_TYPE_ANYWHERE)
self.patternTextCtrl.SetFocus()
self.Bind(wx.EVT_BUTTON,self.onOk,id=wx.ID_OK)
def getType(self):
typeRadioValue = self.typeRadioBox.GetSelection()
if typeRadioValue == wx.NOT_FOUND:
return speechDictHandler.ENTRY_TYPE_ANYWHERE
return DictionaryEntryDialog.TYPE_LABELS_ORDERING[typeRadioValue]
def onOk(self,evt):
if not self.patternTextCtrl.GetValue():
# Translators: This is an error message to let the user know that the pattern field in the dictionary entry is not valid.
gui.messageBox(_("A pattern is required."), _("Dictionary Entry Error"), wx.OK|wx.ICON_WARNING, self)
self.patternTextCtrl.SetFocus()
return
try:
dictEntry = self.dictEntry = speechDictHandler.SpeechDictEntry(
self.patternTextCtrl.GetValue(),
self.replacementTextCtrl.GetValue(),
self.commentTextCtrl.GetValue(),
bool(self.caseSensitiveCheckBox.GetValue()),
self.getType()
)
			dictEntry.sub("test")  # Ensure there are no grouping errors (#11407)
except Exception as e:
			log.debugWarning("Could not add dictionary entry due to regex error: %s" % e)
# Translators: This is an error message to let the user know that the dictionary entry is not valid.
gui.messageBox(_("Regular Expression error: \"%s\".")%e, _("Dictionary Entry Error"), wx.OK|wx.ICON_WARNING, self)
return
evt.Skip()
def setType(self, type):
self.typeRadioBox.SetSelection(DictionaryEntryDialog.TYPE_LABELS_ORDERING.index(type))
class DictionaryDialog(SettingsDialog):
TYPE_LABELS = {t: l.replace("&", "") for t, l in DictionaryEntryDialog.TYPE_LABELS.items()}
helpId = "SpeechDictionaries"
def __init__(self,parent,title,speechDict):
self.title = title
self.speechDict = speechDict
self.tempSpeechDict=speechDictHandler.SpeechDict()
self.tempSpeechDict.extend(self.speechDict)
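		# Edit a temporary copy so that Cancel can discard changes;
		# speech dictionary processing is paused while the dialog is open and re-enabled in onOk/onCancel.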
globalVars.speechDictionaryProcessing=False
super().__init__(parent, resizeable=True)
# Historical initial size, result of L{self.dictList} being (550,350) as of #6287.
# Setting an initial size on L{self.dictList} by passing a L{size} argument when
# creating the control would also set its minimum size and thus block the dialog from being shrunk.
self.SetSize(576, 502)
self.CentreOnScreen()
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for the list box of dictionary entries in speech dictionary dialog.
entriesLabelText=_("&Dictionary entries")
self.dictList = sHelper.addLabeledControl(
entriesLabelText,
wx.ListCtrl, style=wx.LC_REPORT | wx.LC_SINGLE_SEL
)
# Translators: The label for a column in dictionary entries list used to identify comments for the entry.
self.dictList.InsertColumn(0,_("Comment"),width=150)
# Translators: The label for a column in dictionary entries list used to identify pattern (original word or a pattern).
self.dictList.InsertColumn(1,_("Pattern"),width=150)
# Translators: The label for a column in dictionary entries list and in a list of symbols from symbol pronunciation dialog used to identify replacement for a pattern or a symbol
self.dictList.InsertColumn(2,_("Replacement"),width=150)
# Translators: The label for a column in dictionary entries list used to identify whether the entry is case sensitive or not.
self.dictList.InsertColumn(3,_("case"),width=50)
# Translators: The label for a column in dictionary entries list used to identify whether the entry is a regular expression, matches whole words, or matches anywhere.
self.dictList.InsertColumn(4,_("Type"),width=50)
self.offOn = (_("off"),_("on"))
for entry in self.tempSpeechDict:
self.dictList.Append((entry.comment,entry.pattern,entry.replacement,self.offOn[int(entry.caseSensitive)],DictionaryDialog.TYPE_LABELS[entry.type]))
self.editingIndex=-1
bHelper = guiHelper.ButtonHelper(orientation=wx.HORIZONTAL)
bHelper.addButton(
parent=self,
# Translators: The label for a button in speech dictionaries dialog to add new entries.
label=_("&Add")
).Bind(wx.EVT_BUTTON, self.OnAddClick)
bHelper.addButton(
parent=self,
# Translators: The label for a button in speech dictionaries dialog to edit existing entries.
label=_("&Edit")
).Bind(wx.EVT_BUTTON, self.OnEditClick)
bHelper.addButton(
parent=self,
# Translators: The label for a button in speech dictionaries dialog to remove existing entries.
label=_("&Remove")
).Bind(wx.EVT_BUTTON, self.OnRemoveClick)
sHelper.addItem(bHelper)
def postInit(self):
self.dictList.SetFocus()
def onCancel(self,evt):
globalVars.speechDictionaryProcessing=True
super(DictionaryDialog, self).onCancel(evt)
def onOk(self,evt):
globalVars.speechDictionaryProcessing=True
if self.tempSpeechDict!=self.speechDict:
del self.speechDict[:]
self.speechDict.extend(self.tempSpeechDict)
self.speechDict.save()
super(DictionaryDialog, self).onOk(evt)
def OnAddClick(self,evt):
# Translators: This is the label for the add dictionary entry dialog.
entryDialog=DictionaryEntryDialog(self,title=_("Add Dictionary Entry"))
if entryDialog.ShowModal()==wx.ID_OK:
self.tempSpeechDict.append(entryDialog.dictEntry)
self.dictList.Append((entryDialog.commentTextCtrl.GetValue(),entryDialog.patternTextCtrl.GetValue(),entryDialog.replacementTextCtrl.GetValue(),self.offOn[int(entryDialog.caseSensitiveCheckBox.GetValue())],DictionaryDialog.TYPE_LABELS[entryDialog.getType()]))
index=self.dictList.GetFirstSelected()
while index>=0:
self.dictList.Select(index,on=0)
index=self.dictList.GetNextSelected(index)
addedIndex=self.dictList.GetItemCount()-1
self.dictList.Select(addedIndex)
self.dictList.Focus(addedIndex)
self.dictList.SetFocus()
entryDialog.Destroy()
def OnEditClick(self,evt):
if self.dictList.GetSelectedItemCount()!=1:
return
editIndex=self.dictList.GetFirstSelected()
if editIndex<0:
return
entryDialog=DictionaryEntryDialog(self)
entryDialog.patternTextCtrl.SetValue(self.tempSpeechDict[editIndex].pattern)
entryDialog.replacementTextCtrl.SetValue(self.tempSpeechDict[editIndex].replacement)
entryDialog.commentTextCtrl.SetValue(self.tempSpeechDict[editIndex].comment)
entryDialog.caseSensitiveCheckBox.SetValue(self.tempSpeechDict[editIndex].caseSensitive)
entryDialog.setType(self.tempSpeechDict[editIndex].type)
if entryDialog.ShowModal()==wx.ID_OK:
self.tempSpeechDict[editIndex]=entryDialog.dictEntry
self.dictList.SetItem(editIndex,0,entryDialog.commentTextCtrl.GetValue())
self.dictList.SetItem(editIndex,1,entryDialog.patternTextCtrl.GetValue())
self.dictList.SetItem(editIndex,2,entryDialog.replacementTextCtrl.GetValue())
self.dictList.SetItem(editIndex,3,self.offOn[int(entryDialog.caseSensitiveCheckBox.GetValue())])
self.dictList.SetItem(editIndex,4,DictionaryDialog.TYPE_LABELS[entryDialog.getType()])
self.dictList.SetFocus()
entryDialog.Destroy()
def OnRemoveClick(self,evt):
index=self.dictList.GetFirstSelected()
while index>=0:
self.dictList.DeleteItem(index)
del self.tempSpeechDict[index]
index=self.dictList.GetNextSelected(index)
self.dictList.SetFocus()
class BrailleSettingsPanel(SettingsPanel):
# Translators: This is the label for the braille panel
title = _("Braille")
helpId = "BrailleSettings"
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: A label for the braille display on the braille panel.
displayLabel = _("Braille &display")
displaySizer = wx.StaticBoxSizer(wx.HORIZONTAL, self, label=displayLabel)
displayBox = displaySizer.GetStaticBox()
displayGroup = guiHelper.BoxSizerHelper(self, sizer=displaySizer)
settingsSizerHelper.addItem(displayGroup)
self.displayNameCtrl = ExpandoTextCtrl(
displayBox,
size=(self.scaleSize(250), -1),
style=wx.TE_READONLY
)
self.bindHelpEvent("BrailleSettingsChange", self.displayNameCtrl)
self.updateCurrentDisplay()
# Translators: This is the label for the button used to change braille display,
# it appears in the context of a braille display group on the braille settings panel.
changeDisplayBtn = wx.Button(displayBox, label=_("C&hange..."))
self.bindHelpEvent("BrailleSettingsChange", changeDisplayBtn)
displayGroup.addItem(
guiHelper.associateElements(
self.displayNameCtrl,
changeDisplayBtn
)
)
self.displayNameCtrl.Bind(wx.EVT_CHAR_HOOK, self._enterTriggersOnChangeDisplay)
changeDisplayBtn.Bind(wx.EVT_BUTTON,self.onChangeDisplay)
self.brailleSubPanel = BrailleSettingsSubPanel(self)
settingsSizerHelper.addItem(self.brailleSubPanel)
def _enterTriggersOnChangeDisplay(self, evt):
if evt.KeyCode == wx.WXK_RETURN:
self.onChangeDisplay(evt)
else:
evt.Skip()
def onChangeDisplay(self, evt):
changeDisplay = BrailleDisplaySelectionDialog(self, multiInstanceAllowed=True)
ret = changeDisplay.ShowModal()
if ret == wx.ID_OK:
self.Freeze()
# trigger a refresh of the settings
self.onPanelActivated()
self._sendLayoutUpdatedEvent()
self.Thaw()
def updateCurrentDisplay(self):
if config.conf["braille"]["display"] == braille.AUTO_DISPLAY_NAME:
displayDesc = BrailleDisplaySelectionDialog.getCurrentAutoDisplayDescription()
else:
displayDesc = braille.handler.display.description
self.displayNameCtrl.SetValue(displayDesc)
def onPanelActivated(self):
self.brailleSubPanel.onPanelActivated()
super(BrailleSettingsPanel,self).onPanelActivated()
def onPanelDeactivated(self):
self.brailleSubPanel.onPanelDeactivated()
super(BrailleSettingsPanel,self).onPanelDeactivated()
def onDiscard(self):
self.brailleSubPanel.onDiscard()
def onSave(self):
self.brailleSubPanel.onSave()
class BrailleDisplaySelectionDialog(SettingsDialog):
# Translators: This is the label for the braille display selection dialog.
title = _("Select Braille Display")
helpId = "SelectBrailleDisplay"
displayNames = []
possiblePorts = []
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for a setting in braille settings to choose a braille display.
displayLabelText = _("Braille &display:")
self.displayList = sHelper.addLabeledControl(displayLabelText, wx.Choice, choices=[])
self.bindHelpEvent("SelectBrailleDisplayDisplay", self.displayList)
self.Bind(wx.EVT_CHOICE, self.onDisplayNameChanged, self.displayList)
# Translators: The label for a setting in braille settings to choose the connection port (if the selected braille display supports port selection).
portsLabelText = _("&Port:")
self.portsList = sHelper.addLabeledControl(portsLabelText, wx.Choice, choices=[])
self.bindHelpEvent("SelectBrailleDisplayPort", self.portsList)
self.updateBrailleDisplayLists()
def postInit(self):
# Finally, ensure that focus is on the list of displays.
self.displayList.SetFocus()
@staticmethod
def getCurrentAutoDisplayDescription():
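		# Describe the "Automatic" option, appending the display actually in use when one is connected.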
description = braille.AUTOMATIC_PORT[1]
if (
config.conf["braille"]["display"] == braille.AUTO_DISPLAY_NAME
and braille.handler.display.name != "noBraille"
):
description = "%s (%s)" % (description, braille.handler.display.description)
return description
def updateBrailleDisplayLists(self):
driverList = [(braille.AUTO_DISPLAY_NAME, self.getCurrentAutoDisplayDescription())]
driverList.extend(braille.getDisplayList())
self.displayNames = [driver[0] for driver in driverList]
displayChoices = [driver[1] for driver in driverList]
self.displayList.Clear()
self.displayList.AppendItems(displayChoices)
try:
if config.conf["braille"]["display"] == braille.AUTO_DISPLAY_NAME:
selection = 0
else:
selection = self.displayNames.index(braille.handler.display.name)
self.displayList.SetSelection(selection)
except:
pass
self.updatePossiblePorts()
def updatePossiblePorts(self):
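		# Rebuild the list of selectable ports for the currently selected display driver.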
displayName = self.displayNames[self.displayList.GetSelection()]
self.possiblePorts = []
if displayName != "auto":
displayCls = braille._getDisplayDriver(displayName)
try:
self.possiblePorts.extend(displayCls.getPossiblePorts().items())
except NotImplementedError:
pass
if self.possiblePorts:
self.portsList.SetItems([p[1] for p in self.possiblePorts])
try:
selectedPort = config.conf["braille"][displayName].get("port")
portNames = [p[0] for p in self.possiblePorts]
selection = portNames.index(selectedPort)
except (KeyError, ValueError):
# Display name not in config or port not valid
selection = 0
self.portsList.SetSelection(selection)
# If no port selection is possible or only automatic selection is available, disable the port selection control
enable = len(self.possiblePorts) > 0 and not (len(self.possiblePorts) == 1 and self.possiblePorts[0][0] == "auto")
self.portsList.Enable(enable)
def onDisplayNameChanged(self, evt):
self.updatePossiblePorts()
def onOk(self, evt):
if not self.displayNames:
# The list of displays has not been populated yet, so we didn't change anything in this panel
return
display = self.displayNames[self.displayList.GetSelection()]
if display not in config.conf["braille"]:
config.conf["braille"][display] = {}
if self.possiblePorts:
port = self.possiblePorts[self.portsList.GetSelection()][0]
config.conf["braille"][display]["port"] = port
if not braille.handler.setDisplayByName(display):
gui.messageBox(
# Translators: The message in a dialog presented when NVDA is unable to load the selected
# braille display.
message=_("Could not load the {display} display.").format(display=display),
# Translators: The title in a dialog presented when NVDA is unable to load the selected
# braille display.
caption=_("Braille Display Error"),
style=wx.OK | wx.ICON_WARNING,
parent=self
)
return
if self.IsModal():
# Hack: we need to update the display in our parent window before closing.
# Otherwise, NVDA will report the old display even though the new display is reflected visually.
self.Parent.updateCurrentDisplay()
super(BrailleDisplaySelectionDialog, self).onOk(evt)
class BrailleSettingsSubPanel(AutoSettingsMixin, SettingsPanel):
helpId = "BrailleSettings"
@property
def driver(self):
return braille.handler.display
def getSettings(self) -> AutoSettings:
return self.driver
def makeSettings(self, settingsSizer):
shouldDebugGui = gui._isDebug()
startTime = 0 if not shouldDebugGui else time.time()
# Construct braille display specific settings
self.updateDriverSettings()
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
tables = brailleTables.listTables()
# Translators: The label for a setting in braille settings to select the output table (the braille table used to read braille text on the braille display).
outputsLabelText = _("&Output table:")
outTables = [table for table in tables if table.output]
self.outTableNames = [table.fileName for table in outTables]
outTableChoices = [table.displayName for table in outTables]
self.outTableList = sHelper.addLabeledControl(outputsLabelText, wx.Choice, choices=outTableChoices)
self.bindHelpEvent("BrailleSettingsOutputTable", self.outTableList)
try:
selection = self.outTableNames.index(config.conf["braille"]["translationTable"])
self.outTableList.SetSelection(selection)
except:
pass
if shouldDebugGui:
timePassed = time.time() - startTime
log.debug(
f"Loading output tables completed, now at {timePassed:.2f} seconds from start"
)
# Translators: The label for a setting in braille settings to select the input table (the braille table used to type braille characters on a braille keyboard).
inputLabelText = _("&Input table:")
self.inTables = [table for table in tables if table.input]
inTableChoices = [table.displayName for table in self.inTables]
self.inTableList = sHelper.addLabeledControl(inputLabelText, wx.Choice, choices=inTableChoices)
self.bindHelpEvent("BrailleSettingsInputTable", self.inTableList)
try:
selection = self.inTables.index(brailleInput.handler.table)
self.inTableList.SetSelection(selection)
except:
pass
if shouldDebugGui:
timePassed = time.time() - startTime
log.debug(
f"Loading input tables completed, now at {timePassed:.2f} seconds from start"
)
# Translators: The label for a setting in braille settings to expand the current word under cursor to computer braille.
expandAtCursorText = _("E&xpand to computer braille for the word at the cursor")
self.expandAtCursorCheckBox = sHelper.addItem(
wx.CheckBox(self, wx.ID_ANY, label=expandAtCursorText)
)
self.bindHelpEvent("BrailleSettingsExpandToComputerBraille", self.expandAtCursorCheckBox)
self.expandAtCursorCheckBox.SetValue(config.conf["braille"]["expandAtCursor"])
# Translators: The label for a setting in braille settings to show the cursor.
showCursorLabelText = _("&Show cursor")
self.showCursorCheckBox = sHelper.addItem(wx.CheckBox(self, label=showCursorLabelText))
self.bindHelpEvent("BrailleSettingsShowCursor", self.showCursorCheckBox)
self.showCursorCheckBox.Bind(wx.EVT_CHECKBOX, self.onShowCursorChange)
self.showCursorCheckBox.SetValue(config.conf["braille"]["showCursor"])
# Translators: The label for a setting in braille settings to enable cursor blinking.
cursorBlinkLabelText = _("Blink cursor")
self.cursorBlinkCheckBox = sHelper.addItem(
wx.CheckBox(self, label=cursorBlinkLabelText)
)
self.bindHelpEvent("BrailleSettingsBlinkCursor", self.cursorBlinkCheckBox)
self.cursorBlinkCheckBox.Bind(wx.EVT_CHECKBOX, self.onBlinkCursorChange)
self.cursorBlinkCheckBox.SetValue(config.conf["braille"]["cursorBlink"])
if not self.showCursorCheckBox.GetValue():
self.cursorBlinkCheckBox.Disable()
# Translators: The label for a setting in braille settings to change cursor blink rate in milliseconds (1 second is 1000 milliseconds).
cursorBlinkRateLabelText = _("Cursor blink rate (ms)")
minBlinkRate = int(config.conf.getConfigValidation(
("braille", "cursorBlinkRate")
).kwargs["min"])
maxBlinkRate = int(config.conf.getConfigValidation(("braille", "cursorBlinkRate")).kwargs["max"])
self.cursorBlinkRateEdit = sHelper.addLabeledControl(
cursorBlinkRateLabelText,
nvdaControls.SelectOnFocusSpinCtrl,
min=minBlinkRate,
max=maxBlinkRate,
initial=config.conf["braille"]["cursorBlinkRate"]
)
self.bindHelpEvent("BrailleSettingsBlinkRate", self.cursorBlinkRateEdit)
		if not self.showCursorCheckBox.GetValue() or not self.cursorBlinkCheckBox.GetValue():
self.cursorBlinkRateEdit.Disable()
self.cursorShapes = [s[0] for s in braille.CURSOR_SHAPES]
cursorShapeChoices = [s[1] for s in braille.CURSOR_SHAPES]
# Translators: The label for a setting in braille settings to select the cursor shape when tethered to focus.
cursorShapeFocusLabelText = _("Cursor shape for &focus:")
self.cursorShapeFocusList = sHelper.addLabeledControl(cursorShapeFocusLabelText, wx.Choice, choices=cursorShapeChoices)
self.bindHelpEvent("BrailleSettingsCursorShapeForFocus", self.cursorShapeFocusList)
try:
selection = self.cursorShapes.index(config.conf["braille"]["cursorShapeFocus"])
self.cursorShapeFocusList.SetSelection(selection)
except:
pass
if not self.showCursorCheckBox.GetValue():
self.cursorShapeFocusList.Disable()
# Translators: The label for a setting in braille settings to select the cursor shape when tethered to review.
cursorShapeReviewLabelText = _("Cursor shape for &review:")
self.cursorShapeReviewList = sHelper.addLabeledControl(cursorShapeReviewLabelText, wx.Choice, choices=cursorShapeChoices)
self.bindHelpEvent("BrailleSettingsCursorShapeForReview", self.cursorShapeReviewList)
try:
selection = self.cursorShapes.index(config.conf["braille"]["cursorShapeReview"])
self.cursorShapeReviewList.SetSelection(selection)
except:
pass
if not self.showCursorCheckBox.GetValue():
self.cursorShapeReviewList.Disable()
if gui._isDebug():
log.debug("Loading cursor settings completed, now at %.2f seconds from start"%(time.time() - startTime))
SHOW_MESSAGES_LABELS = [
# Translators: One of the show states of braille messages
# (the disabled mode turns off showing of braille messages completely).
_("Disabled"),
# Translators: One of the show states of braille messages
# (the timeout mode shows messages for the specific time).
_("Use timeout"),
# Translators: One of the show states of braille messages
# (the indefinitely mode prevents braille messages from disappearing automatically).
_("Show indefinitely"),
]
		# Translators: The label for a combo box in braille settings that lets the user decide
		# whether braille messages should be shown and whether they disappear automatically from the braille display.
showMessagesText = _("Show messages")
self.showMessagesList = sHelper.addLabeledControl(
showMessagesText,
wx.Choice,
choices=SHOW_MESSAGES_LABELS
)
self.bindHelpEvent("BrailleSettingsShowMessages", self.showMessagesList)
self.showMessagesList.Bind(wx.EVT_CHOICE, self.onShowMessagesChange)
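		# Map the two config keys onto the three combo box states:
		# a messageTimeout of 0 disables messages entirely, otherwise noMessageTimeout
		# decides between a timed display (False) and showing messages indefinitely (True).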
if config.conf["braille"]["messageTimeout"] == 0:
self.showMessagesList.SetSelection(0)
elif config.conf["braille"]["noMessageTimeout"] == 0:
self.showMessagesList.SetSelection(1)
else:
self.showMessagesList.SetSelection(2)
# Minimal timeout value possible here is 1, because 0 disables showing of braille messages
# and is set using showMessagesList
minTimeout = 1
maxTimeOut = int(config.conf.getConfigValidation(
("braille", "messageTimeout")
).kwargs["max"])
# Translators: The label for a setting in braille settings to change how long a message stays on the braille display (in seconds).
messageTimeoutText = _("Message &timeout (sec)")
self.messageTimeoutEdit = sHelper.addLabeledControl(
messageTimeoutText,
nvdaControls.SelectOnFocusSpinCtrl,
min=minTimeout,
max=maxTimeOut,
initial=config.conf["braille"]["messageTimeout"]
)
self.bindHelpEvent("BrailleSettingsMessageTimeout", self.messageTimeoutEdit)
if self.showMessagesList.GetSelection() != 1:
self.messageTimeoutEdit.Disable()
if gui._isDebug():
log.debug("Loading timeout settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to set whether braille should be tethered to focus or review cursor.
tetherListText = _("Tether B&raille:")
# Translators: The value for a setting in the braille settings, to set whether braille should be tethered to focus or review cursor.
tetherChoices = [x[1] for x in braille.handler.tetherValues]
self.tetherList = sHelper.addLabeledControl(tetherListText, wx.Choice, choices=tetherChoices)
self.bindHelpEvent("BrailleTether", self.tetherList)
tetherChoice=braille.handler.TETHER_AUTO if config.conf["braille"]["autoTether"] else config.conf["braille"]["tetherTo"]
selection = next((x for x,y in enumerate(braille.handler.tetherValues) if y[0]==tetherChoice))
try:
self.tetherList.SetSelection(selection)
except:
pass
if gui._isDebug():
log.debug("Loading tether settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to read by paragraph (if it is checked, the commands to move the display by lines moves the display by paragraphs instead).
		readByParagraphText = _("Read by &paragraph")
self.readByParagraphCheckBox = sHelper.addItem(wx.CheckBox(self, label=readByParagraphText))
self.bindHelpEvent("BrailleSettingsReadByParagraph", self.readByParagraphCheckBox)
self.readByParagraphCheckBox.Value = config.conf["braille"]["readByParagraph"]
		# Translators: The label for a setting in braille settings to enable word wrap (try to avoid splitting words at the end of the braille display).
wordWrapText = _("Avoid splitting &words when possible")
self.wordWrapCheckBox = sHelper.addItem(wx.CheckBox(self, label=wordWrapText))
self.bindHelpEvent("BrailleSettingsWordWrap", self.wordWrapCheckBox)
self.wordWrapCheckBox.Value = config.conf["braille"]["wordWrap"]
# Translators: The label for a setting in braille settings to select how the context for the focus object should be presented on a braille display.
focusContextPresentationLabelText = _("Focus context presentation:")
self.focusContextPresentationValues = [x[0] for x in braille.focusContextPresentations]
focusContextPresentationChoices = [x[1] for x in braille.focusContextPresentations]
self.focusContextPresentationList = sHelper.addLabeledControl(focusContextPresentationLabelText, wx.Choice, choices=focusContextPresentationChoices)
self.bindHelpEvent("BrailleSettingsFocusContextPresentation", self.focusContextPresentationList)
try:
index=self.focusContextPresentationValues.index(config.conf["braille"]["focusContextPresentation"])
except:
index=0
self.focusContextPresentationList.SetSelection(index)
if gui._isDebug():
log.debug("Finished making settings, now at %.2f seconds from start"%(time.time() - startTime))
def onSave(self):
AutoSettingsMixin.onSave(self)
config.conf["braille"]["translationTable"] = self.outTableNames[self.outTableList.GetSelection()]
brailleInput.handler.table = self.inTables[self.inTableList.GetSelection()]
config.conf["braille"]["expandAtCursor"] = self.expandAtCursorCheckBox.GetValue()
config.conf["braille"]["showCursor"] = self.showCursorCheckBox.GetValue()
config.conf["braille"]["cursorBlink"] = self.cursorBlinkCheckBox.GetValue()
config.conf["braille"]["cursorBlinkRate"] = self.cursorBlinkRateEdit.GetValue()
config.conf["braille"]["cursorShapeFocus"] = self.cursorShapes[self.cursorShapeFocusList.GetSelection()]
config.conf["braille"]["cursorShapeReview"] = self.cursorShapes[self.cursorShapeReviewList.GetSelection()]
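		# Persist the three-state "Show messages" choice back into the two config keys:
		# selection 2 shows messages indefinitely, selection 0 disables them via a zero timeout.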
config.conf["braille"]["noMessageTimeout"] = self.showMessagesList.GetSelection() == 2
if self.showMessagesList.GetSelection() == 0:
config.conf["braille"]["messageTimeout"] = 0
else:
config.conf["braille"]["messageTimeout"] = self.messageTimeoutEdit.GetValue()
tetherChoice = braille.handler.tetherValues[self.tetherList.GetSelection()][0]
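		# "Automatic" stores autoTether=True and resets the explicit tether target to focus;
		# any other choice disables auto tethering and applies the chosen target immediately.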
if tetherChoice==braille.handler.TETHER_AUTO:
config.conf["braille"]["autoTether"] = True
config.conf["braille"]["tetherTo"] = braille.handler.TETHER_FOCUS
else:
config.conf["braille"]["autoTether"] = False
braille.handler.setTether(tetherChoice, auto=False)
config.conf["braille"]["readByParagraph"] = self.readByParagraphCheckBox.Value
config.conf["braille"]["wordWrap"] = self.wordWrapCheckBox.Value
config.conf["braille"]["focusContextPresentation"] = self.focusContextPresentationValues[self.focusContextPresentationList.GetSelection()]
def onShowCursorChange(self, evt):
self.cursorBlinkCheckBox.Enable(evt.IsChecked())
self.cursorBlinkRateEdit.Enable(evt.IsChecked() and self.cursorBlinkCheckBox.GetValue())
self.cursorShapeFocusList.Enable(evt.IsChecked())
self.cursorShapeReviewList.Enable(evt.IsChecked())
def onBlinkCursorChange(self, evt):
self.cursorBlinkRateEdit.Enable(evt.IsChecked())
def onShowMessagesChange(self, evt):
self.messageTimeoutEdit.Enable(evt.GetSelection() == 1)
def showStartErrorForProviders(
parent: wx.Window,
providers: List[vision.providerInfo.ProviderInfo],
) -> None:
if not providers:
return
if len(providers) == 1:
providerName = providers[0].displayName
# Translators: This message is presented when
# NVDA is unable to load a single vision enhancement provider.
message = _("Could not load the {providerName} vision enhancement provider").format(
providerName=providerName
)
else:
providerNames = ", ".join(provider.displayName for provider in providers)
# Translators: This message is presented when NVDA is unable to
# load multiple vision enhancement providers.
message = _("Could not load the following vision enhancement providers:\n{providerNames}").format(
providerNames=providerNames
)
gui.messageBox(
message,
# Translators: The title of the vision enhancement provider error message box.
_("Vision Enhancement Provider Error"),
wx.OK | wx.ICON_WARNING,
parent,
)
def showTerminationErrorForProviders(
parent: wx.Window,
providers: List[vision.providerInfo.ProviderInfo],
) -> None:
if not providers:
return
if len(providers) == 1:
providerName = providers[0].displayName
# Translators: This message is presented when
# NVDA is unable to gracefully terminate a single vision enhancement provider.
message = _("Could not gracefully terminate the {providerName} vision enhancement provider").format(
providerName=providerName
)
else:
providerNames = ", ".join(provider.displayName for provider in providers)
message = _(
# Translators: This message is presented when
# NVDA is unable to terminate multiple vision enhancement providers.
"Could not gracefully terminate the following vision enhancement providers:\n"
"{providerNames}"
).format(providerNames=providerNames)
gui.messageBox(
message,
# Translators: The title of the vision enhancement provider error message box.
_("Vision Enhancement Provider Error"),
wx.OK | wx.ICON_WARNING,
parent,
)
class VisionProviderStateControl(vision.providerBase.VisionProviderStateControl):
"""
Gives settings panels for vision enhancement providers a way to control a
single vision enhancement provider, handling any error conditions in
a UX friendly way.
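
	Illustrative usage (a sketch; the parent window and ProviderInfo come from the settings panel):
		control = VisionProviderStateControl(parent=panel, providerInfo=providerInfo)
		if control.startProvider():
			instance = control.getProviderInstance()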
"""
def __init__(
self,
parent: wx.Window,
providerInfo: vision.providerInfo.ProviderInfo
):
self._providerInfo = providerInfo
self._parent = weakref.ref(parent) # don't keep parent dialog alive with a circular reference.
def getProviderInfo(self) -> vision.providerInfo.ProviderInfo:
return self._providerInfo
def getProviderInstance(self) -> Optional[vision.providerBase.VisionEnhancementProvider]:
return vision.handler.getProviderInstance(self._providerInfo)
def startProvider(
self,
shouldPromptOnError: bool = True
) -> bool:
"""Initializes the provider, prompting user with the error if necessary.
@param shouldPromptOnError: True if the user should be presented with any errors that may occur.
@return: True on success
"""
success = self._doStartProvider()
if not success and shouldPromptOnError:
showStartErrorForProviders(self._parent(), [self._providerInfo, ])
return success
def terminateProvider(
self,
shouldPromptOnError: bool = True
) -> bool:
"""Terminate the provider, prompting user with the error if necessary.
@param shouldPromptOnError: True if the user should be presented with any errors that may occur.
@return: True on success
"""
success = self._doTerminate()
if not success and shouldPromptOnError:
showTerminationErrorForProviders(self._parent(), [self._providerInfo, ])
return success
def _doStartProvider(self) -> bool:
"""Attempt to start the provider, catching any errors.
		@return True on successful initialization.
"""
try:
vision.handler.initializeProvider(self._providerInfo)
return True
except Exception:
log.error(
f"Could not initialize the {self._providerInfo.providerId} vision enhancement provider",
exc_info=True
)
return False
def _doTerminate(self) -> bool:
"""Attempt to terminate the provider, catching any errors.
@return True on successful termination.
"""
try:
# Terminating a provider from the gui should never save the settings.
# This is because termination happens on the fly when unchecking check boxes.
# Saving settings would be harmful if a user opens the vision panel,
# then changes some settings and disables the provider.
vision.handler.terminateProvider(self._providerInfo, saveSettings=False)
return True
except Exception:
log.error(
f"Could not terminate the {self._providerInfo.providerId} vision enhancement provider",
exc_info=True
)
return False
class VisionSettingsPanel(SettingsPanel):
settingsSizerHelper: guiHelper.BoxSizerHelper
providerPanelInstances: List[SettingsPanel]
initialProviders: List[vision.providerInfo.ProviderInfo]
# Translators: This is the label for the vision panel
title = _("Vision")
helpId = "VisionSettings"
# Translators: This is a label appearing on the vision settings panel.
panelDescription = _("Configure visual aids.")
def _createProviderSettingsPanel(
self,
providerInfo: vision.providerInfo.ProviderInfo
) -> Optional[SettingsPanel]:
settingsPanelCls = providerInfo.providerClass.getSettingsPanelClass()
if not settingsPanelCls:
if gui._isDebug():
log.debug(f"Using default panel for providerId: {providerInfo.providerId}")
settingsPanelCls = VisionProviderSubPanel_Wrapper
else:
if gui._isDebug():
log.debug(f"Using custom panel for providerId: {providerInfo.providerId}")
providerControl = VisionProviderStateControl(parent=self, providerInfo=providerInfo)
try:
return settingsPanelCls(
parent=self,
providerControl=providerControl
)
# Broad except used since we can not know what exceptions a provider might throw.
# We should be able to continue despite a buggy provider.
except Exception:
log.debug(f"Error creating providerPanel: {settingsPanelCls!r}", exc_info=True)
return None
def makeSettings(self, settingsSizer: wx.BoxSizer):
self.initialProviders = vision.handler.getActiveProviderInfos()
self.providerPanelInstances = []
self.settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
self.settingsSizerHelper.addItem(wx.StaticText(self, label=self.panelDescription))
for providerInfo in vision.handler.getProviderList(reloadFromSystem=True):
providerSizer = self.settingsSizerHelper.addItem(
wx.StaticBoxSizer(wx.VERTICAL, self, label=providerInfo.displayName),
flag=wx.EXPAND
)
if len(self.providerPanelInstances) > 0:
settingsSizer.AddSpacer(guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
settingsPanel = self._createProviderSettingsPanel(providerInfo)
if not settingsPanel:
continue
providerSizer.Add(settingsPanel, flag=wx.EXPAND)
self.providerPanelInstances.append(settingsPanel)
def safeInitProviders(
self,
providers: List[vision.providerInfo.ProviderInfo]
) -> None:
"""Initializes one or more providers in a way that is gui friendly,
showing an error if appropriate.
"""
errorProviders: List[vision.providerInfo.ProviderInfo] = []
for provider in providers:
success = VisionProviderStateControl(self, provider).startProvider(shouldPromptOnError=False)
if not success:
errorProviders.append(provider)
showStartErrorForProviders(self, errorProviders)
def safeTerminateProviders(
self,
providers: List[vision.providerInfo.ProviderInfo],
verbose: bool = False
) -> None:
"""Terminates one or more providers in a way that is gui friendly,
		showing an error if appropriate.
		@param verbose: Whether to show an error when termination of one or more providers fails.
"""
errorProviders: List[vision.providerInfo.ProviderInfo] = []
for provider in providers:
success = VisionProviderStateControl(self, provider).terminateProvider(shouldPromptOnError=False)
if not success:
errorProviders.append(provider)
if verbose:
showTerminationErrorForProviders(self, errorProviders)
def refreshPanel(self):
self.Freeze()
# trigger a refresh of the settings
self.onPanelActivated()
self._sendLayoutUpdatedEvent()
self.Thaw()
def onPanelActivated(self):
super().onPanelActivated()
def onDiscard(self):
for panel in self.providerPanelInstances:
try:
panel.onDiscard()
# Broad except used since we can not know what exceptions a provider might throw.
# We should be able to continue despite a buggy provider.
except Exception:
log.debug(f"Error discarding providerPanel: {panel.__class__!r}", exc_info=True)
providersToInitialize = [
provider for provider in self.initialProviders
if not bool(vision.handler.getProviderInstance(provider))
]
self.safeInitProviders(providersToInitialize)
initialProviderIds = [
providerInfo.providerId for providerInfo in self.initialProviders
]
providersToTerminate = [
provider for provider in vision.handler.getActiveProviderInfos()
if provider.providerId not in initialProviderIds
]
self.safeTerminateProviders(providersToTerminate)
def onSave(self):
for panel in self.providerPanelInstances:
try:
panel.onSave()
# Broad except used since we can not know what exceptions a provider might throw.
# We should be able to continue despite a buggy provider.
except Exception:
log.debug(f"Error saving providerPanel: {panel.__class__!r}", exc_info=True)
self.initialProviders = vision.handler.getActiveProviderInfos()
class VisionProviderSubPanel_Settings(
AutoSettingsMixin,
SettingsPanel
):
helpId = "VisionSettings"
_settingsCallable: Callable[[], VisionEnhancementProviderSettings]
def __init__(
self,
parent: wx.Window,
*, # Make next argument keyword only
settingsCallable: Callable[[], vision.providerBase.VisionEnhancementProviderSettings]
):
"""
@param settingsCallable: A callable that returns an instance to a VisionEnhancementProviderSettings.
This will usually be a weakref, but could be any callable taking no arguments.
"""
self._settingsCallable = settingsCallable
super().__init__(parent=parent)
def getSettings(self) -> AutoSettings:
settings = self._settingsCallable()
return settings
def makeSettings(self, settingsSizer):
# Construct vision enhancement provider settings
self.updateDriverSettings()
class VisionProviderSubPanel_Wrapper(
SettingsPanel
):
_checkBox: wx.CheckBox
def __init__(
self,
parent: wx.Window,
providerControl: VisionProviderStateControl
):
self._providerControl = providerControl
self._providerSettings: Optional[VisionProviderSubPanel_Settings] = None
self._providerSettingsSizer = wx.BoxSizer(orient=wx.VERTICAL)
super().__init__(parent=parent)
def makeSettings(self, settingsSizer):
self._checkBox = wx.CheckBox(
self,
# Translators: Enable checkbox on a vision enhancement provider on the vision settings category panel
label=_("Enable")
)
settingsSizer.Add(self._checkBox)
self.bindHelpEvent("VisionSettings", self._checkBox)
self._optionsSizer = wx.BoxSizer(orient=wx.VERTICAL)
self._optionsSizer.AddSpacer(size=self.scaleSize(10))
# Translators: Options label on a vision enhancement provider on the vision settings category panel
self._optionsText = wx.StaticText(self, label=_("Options:"))
self._optionsSizer.Add(self._optionsText)
self._optionsSizer.Add(
self._providerSettingsSizer,
border=self.scaleSize(15),
flag=wx.LEFT | wx.EXPAND,
proportion=1.0
)
settingsSizer.Add(
self._optionsSizer,
flag=wx.EXPAND,
proportion=1.0
)
self._checkBox.SetValue(bool(self._providerControl.getProviderInstance()))
if self._createProviderSettings():
self._checkBox.Bind(wx.EVT_CHECKBOX, self._enableToggle)
else:
self._checkBox.Bind(wx.EVT_CHECKBOX, self._nonEnableableGUI)
self._updateOptionsVisibility()
def _updateOptionsVisibility(self):
hasProviderOptions = bool(self._providerSettings) and self._providerSettings.hasOptions
if hasProviderOptions:
self.settingsSizer.Show(self._optionsSizer, recursive=True)
else:
self.settingsSizer.Hide(self._optionsSizer, recursive=True)
self._sendLayoutUpdatedEvent()
def _createProviderSettings(self):
try:
getSettingsCallable = self._providerControl.getProviderInfo().providerClass.getSettings
self._providerSettings = VisionProviderSubPanel_Settings(
self,
settingsCallable=getSettingsCallable
)
self._providerSettingsSizer.Add(self._providerSettings, flag=wx.EXPAND, proportion=1.0)
# Broad except used since we can not know what exceptions a provider might throw.
# We should be able to continue despite a buggy provider.
except Exception:
log.error("unable to create provider settings", exc_info=True)
return False
return True
def _nonEnableableGUI(self, evt):
gui.messageBox(
# Translators: Shown when there is an error showing the GUI for a vision enhancement provider
_("Unable to configure user interface for Vision Enhancement Provider, it can not be enabled."),
# Translators: The title of the error dialog displayed when there is an error showing the GUI
# for a vision enhancement provider
_("Error"),
parent=self,
)
self._checkBox.SetValue(False)
def _enableToggle(self, evt):
shouldBeRunning = evt.IsChecked()
if shouldBeRunning and not self._providerControl.startProvider():
self._checkBox.SetValue(False)
self._updateOptionsVisibility()
return
elif not shouldBeRunning and not self._providerControl.terminateProvider():
# When there is an error on termination, don't leave the checkbox checked.
# The provider should not be left configured to startup.
self._checkBox.SetValue(False)
self._updateOptionsVisibility()
return
# Able to successfully start / terminate:
self._providerSettings.updateDriverSettings()
self._providerSettings.refreshGui()
self._updateOptionsVisibility()
def onDiscard(self):
if self._providerSettings:
self._providerSettings.onDiscard()
def onSave(self):
		log.debug("Saving VisionProviderSubPanel_Wrapper")
if self._providerSettings:
self._providerSettings.onSave()
""" The name of the config profile currently being edited, if any.
This is set when the currently edited configuration profile is determined and reset to None when the dialog is destroyed.
This can be used by an AppModule for NVDA to identify and announce
changes in the name of the edited configuration profile when categories are changed."""
NvdaSettingsDialogActiveConfigProfile = None
NvdaSettingsDialogWindowHandle = None
class NVDASettingsDialog(MultiCategorySettingsDialog):
# Translators: This is the label for the NVDA settings dialog.
title = _("NVDA Settings")
categoryClasses=[
GeneralSettingsPanel,
SpeechSettingsPanel,
BrailleSettingsPanel,
VisionSettingsPanel,
KeyboardSettingsPanel,
MouseSettingsPanel,
ReviewCursorPanel,
InputCompositionPanel,
ObjectPresentationPanel,
BrowseModePanel,
DocumentFormattingPanel,
]
if touchHandler.touchSupported():
categoryClasses.append(TouchInteractionPanel)
if winVersion.isUwpOcrAvailable():
categoryClasses.append(UwpOcrPanel)
# And finally the Advanced panel which should always be last.
if not globalVars.appArgs.secure:
categoryClasses.append(AdvancedPanel)
def makeSettings(self, settingsSizer):
# Ensure that after the settings dialog is created the name is set correctly
super(NVDASettingsDialog, self).makeSettings(settingsSizer)
self._doOnCategoryChange()
global NvdaSettingsDialogWindowHandle
NvdaSettingsDialogWindowHandle = self.GetHandle()
def _doOnCategoryChange(self):
global NvdaSettingsDialogActiveConfigProfile
NvdaSettingsDialogActiveConfigProfile = config.conf.profiles[-1].name
if not NvdaSettingsDialogActiveConfigProfile or isinstance(self.currentCategory, GeneralSettingsPanel):
# Translators: The profile name for normal configuration
NvdaSettingsDialogActiveConfigProfile = _("normal configuration")
self.SetTitle(self._getDialogTitle())
self.bindHelpEvent(
self.currentCategory.helpId,
self.catListCtrl
)
def _getDialogTitle(self):
return u"{dialogTitle}: {panelTitle} ({configProfile})".format(
dialogTitle=self.title,
panelTitle=self.currentCategory.title,
configProfile=NvdaSettingsDialogActiveConfigProfile
)
def onCategoryChange(self,evt):
super(NVDASettingsDialog,self).onCategoryChange(evt)
if evt.Skipped:
return
self._doOnCategoryChange()
def Destroy(self):
global NvdaSettingsDialogActiveConfigProfile, NvdaSettingsDialogWindowHandle
NvdaSettingsDialogActiveConfigProfile = None
NvdaSettingsDialogWindowHandle = None
super(NVDASettingsDialog, self).Destroy()
class AddSymbolDialog(
gui.contextHelp.ContextHelpMixin,
wx.Dialog # wxPython does not seem to call base class initializer, put last in MRO
):
helpId = "SymbolPronunciation"
def __init__(self, parent):
# Translators: This is the label for the add symbol dialog.
super().__init__(parent, title=_("Add Symbol"))
mainSizer=wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: This is the label for the edit field in the add symbol dialog.
symbolText = _("&Symbol:")
self.identifierTextCtrl = sHelper.addLabeledControl(symbolText, wx.TextCtrl)
sHelper.addDialogDismissButtons(self.CreateButtonSizer(wx.OK | wx.CANCEL))
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.identifierTextCtrl.SetFocus()
self.CentreOnScreen()
class SpeechSymbolsDialog(SettingsDialog):
helpId = "SymbolPronunciation"
def __init__(self,parent):
try:
symbolProcessor = characterProcessing._localeSpeechSymbolProcessors.fetchLocaleData(speech.getCurrentLanguage())
except LookupError:
symbolProcessor = characterProcessing._localeSpeechSymbolProcessors.fetchLocaleData("en")
self.symbolProcessor = symbolProcessor
# Translators: This is the label for the symbol pronunciation dialog.
# %s is replaced by the language for which symbol pronunciation is being edited.
self.title = _("Symbol Pronunciation (%s)")%languageHandler.getLanguageDescription(self.symbolProcessor.locale)
super(SpeechSymbolsDialog, self).__init__(
parent,
resizeable=True,
)
def makeSettings(self, settingsSizer):
self.filteredSymbols = self.symbols = [
copy.copy(symbol) for symbol in self.symbolProcessor.computedSymbols.values()
]
self.pendingRemovals = {}
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label of a text field to search for symbols in the speech symbols dialog.
filterText = pgettext("speechSymbols", "&Filter by:")
self.filterEdit = sHelper.addLabeledControl(
			labelText=filterText,
wxCtrlClass=wx.TextCtrl,
size=(self.scaleSize(310), -1),
)
self.filterEdit.Bind(wx.EVT_TEXT, self.onFilterEditTextChange)
# Translators: The label for symbols list in symbol pronunciation dialog.
symbolsText = _("&Symbols")
self.symbolsList = sHelper.addLabeledControl(
symbolsText,
nvdaControls.AutoWidthColumnListCtrl,
autoSizeColumn=2, # The replacement column is likely to need the most space
itemTextCallable=self.getItemTextForList,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VIRTUAL
)
# Translators: The label for a column in symbols list used to identify a symbol.
self.symbolsList.InsertColumn(0, _("Symbol"), width=self.scaleSize(150))
# Translators: The label for a column in symbols list used to identify a replacement.
self.symbolsList.InsertColumn(1, _("Replacement"))
# Translators: The label for a column in symbols list used to identify a symbol's speech level (either none, some, most, all or character).
self.symbolsList.InsertColumn(2, _("Level"))
# Translators: The label for a column in symbols list which specifies when the actual symbol will be sent to the synthesizer (preserved).
# See the "Punctuation/Symbol Pronunciation" section of the User Guide for details.
self.symbolsList.InsertColumn(3, _("Preserve"))
self.symbolsList.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onListItemFocused)
# Translators: The label for the group of controls in symbol pronunciation dialog to change the pronunciation of a symbol.
changeSymbolText = _("Change selected symbol")
changeSymbolSizer = wx.StaticBoxSizer(wx.VERTICAL, self, label=changeSymbolText)
changeSymbolGroup = guiHelper.BoxSizerHelper(self, sizer=changeSymbolSizer)
changeSymbolHelper = sHelper.addItem(changeSymbolGroup)
# Used to ensure that event handlers call Skip(). Not calling skip can cause focus problems for controls. More
# generally the advice on the wx documentation is: "In general, it is recommended to skip all non-command events
# to allow the default handling to take place. The command events are, however, normally not skipped as usually
# a single command such as a button click or menu item selection must only be processed by one handler."
def skipEventAndCall(handler):
def wrapWithEventSkip(event):
if event:
event.Skip()
return handler()
return wrapWithEventSkip
# Translators: The label for the edit field in symbol pronunciation dialog to change the replacement text of a symbol.
replacementText = _("&Replacement")
self.replacementEdit = changeSymbolHelper.addLabeledControl(
labelText=replacementText,
wxCtrlClass=wx.TextCtrl,
size=(self.scaleSize(300), -1),
)
self.replacementEdit.Bind(wx.EVT_TEXT, skipEventAndCall(self.onSymbolEdited))
# Translators: The label for the combo box in symbol pronunciation dialog to change the speech level of a symbol.
levelText = _("&Level")
symbolLevelLabels = characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS
levelChoices = [symbolLevelLabels[level] for level in characterProcessing.SPEECH_SYMBOL_LEVELS]
self.levelList = changeSymbolHelper.addLabeledControl(levelText, wx.Choice, choices=levelChoices)
self.levelList.Bind(wx.EVT_CHOICE, skipEventAndCall(self.onSymbolEdited))
# Translators: The label for the combo box in symbol pronunciation dialog to change when a symbol is sent to the synthesizer.
preserveText = _("&Send actual symbol to synthesizer")
symbolPreserveLabels = characterProcessing.SPEECH_SYMBOL_PRESERVE_LABELS
preserveChoices = [symbolPreserveLabels[mode] for mode in characterProcessing.SPEECH_SYMBOL_PRESERVES]
self.preserveList = changeSymbolHelper.addLabeledControl(preserveText, wx.Choice, choices=preserveChoices)
self.preserveList.Bind(wx.EVT_CHOICE, skipEventAndCall(self.onSymbolEdited))
bHelper = sHelper.addItem(guiHelper.ButtonHelper(orientation=wx.HORIZONTAL))
# Translators: The label for a button in the Symbol Pronunciation dialog to add a new symbol.
addButton = bHelper.addButton(self, label=_("&Add"))
# Translators: The label for a button in the Symbol Pronunciation dialog to remove a symbol.
self.removeButton = bHelper.addButton(self, label=_("Re&move"))
self.removeButton.Disable()
addButton.Bind(wx.EVT_BUTTON, self.OnAddClick)
self.removeButton.Bind(wx.EVT_BUTTON, self.OnRemoveClick)
# Populate the unfiltered list with symbols.
self.filter()
def postInit(self):
self.symbolsList.SetFocus()
def filter(self, filterText=''):
NONE_SELECTED = -1
previousSelectionValue = None
previousIndex = self.symbolsList.GetFirstSelected() # may return NONE_SELECTED
if previousIndex != NONE_SELECTED:
previousSelectionValue = self.filteredSymbols[previousIndex]
if not filterText:
self.filteredSymbols = self.symbols
else:
			# Do case-insensitive matching by lowering both filterText and each symbol's text.
filterText = filterText.lower()
self.filteredSymbols = [
symbol for symbol in self.symbols
if filterText in symbol.displayName.lower()
or filterText in symbol.replacement.lower()
]
self.symbolsList.ItemCount = len(self.filteredSymbols)
# sometimes filtering may result in an empty list.
if not self.symbolsList.ItemCount:
self.editingItem = None
# disable the "change symbol" controls, since there are no items in the list.
self.replacementEdit.Disable()
self.levelList.Disable()
self.preserveList.Disable()
self.removeButton.Disable()
return # exit early, no need to select an item.
# If there was a selection before filtering, try to preserve it
newIndex = 0 # select first item by default.
if previousSelectionValue:
try:
newIndex = self.filteredSymbols.index(previousSelectionValue)
except ValueError:
pass
# Change the selection
self.symbolsList.Select(newIndex)
self.symbolsList.Focus(newIndex)
# We don't get a new focus event with the new index.
self.symbolsList.sendListItemFocusedEvent(newIndex)
def getItemTextForList(self, item, column):
symbol = self.filteredSymbols[item]
if column == 0:
return symbol.displayName
elif column == 1:
return symbol.replacement
elif column == 2:
return characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS[symbol.level]
elif column == 3:
return characterProcessing.SPEECH_SYMBOL_PRESERVE_LABELS[symbol.preserve]
else:
raise ValueError("Unknown column: %d" % column)
def onSymbolEdited(self):
if self.editingItem is not None:
# Update the symbol the user was just editing.
item = self.editingItem
symbol = self.filteredSymbols[item]
symbol.replacement = self.replacementEdit.Value
symbol.level = characterProcessing.SPEECH_SYMBOL_LEVELS[self.levelList.Selection]
symbol.preserve = characterProcessing.SPEECH_SYMBOL_PRESERVES[self.preserveList.Selection]
def onListItemFocused(self, evt):
# Update the editing controls to reflect the newly selected symbol.
item = evt.GetIndex()
symbol = self.filteredSymbols[item]
self.editingItem = item
# ChangeValue and the Selection property are used because they do not fire change events (EVT_TEXT / EVT_CHOICE).
self.replacementEdit.ChangeValue(symbol.replacement)
self.levelList.Selection = characterProcessing.SPEECH_SYMBOL_LEVELS.index(symbol.level)
self.preserveList.Selection = characterProcessing.SPEECH_SYMBOL_PRESERVES.index(symbol.preserve)
self.removeButton.Enabled = not self.symbolProcessor.isBuiltin(symbol.identifier)
self.replacementEdit.Enable()
self.levelList.Enable()
self.preserveList.Enable()
evt.Skip()
def OnAddClick(self, evt):
with AddSymbolDialog(self) as entryDialog:
if entryDialog.ShowModal() != wx.ID_OK:
return
identifier = entryDialog.identifierTextCtrl.GetValue()
if not identifier:
return
# Clean the filter, so we can select the new entry.
self.filterEdit.Value=""
self.filter()
for index, symbol in enumerate(self.symbols):
if identifier == symbol.identifier:
# Translators: An error reported in the Symbol Pronunciation dialog when adding a symbol that is already present.
gui.messageBox(_('Symbol "%s" is already present.') % identifier,
_("Error"), wx.OK | wx.ICON_ERROR)
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
self.symbolsList.SetFocus()
return
addedSymbol = characterProcessing.SpeechSymbol(identifier)
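# Re-adding a symbol that was queued for removal earlier in this session cancels the pending removal.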
try:
del self.pendingRemovals[identifier]
except KeyError:
pass
addedSymbol.displayName = identifier
addedSymbol.replacement = ""
addedSymbol.level = characterProcessing.SymbolLevel.ALL
addedSymbol.preserve = characterProcessing.SYMPRES_NEVER
self.symbols.append(addedSymbol)
self.symbolsList.ItemCount = len(self.symbols)
index = self.symbolsList.ItemCount - 1
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
# We don't get a new focus event with the new index.
self.symbolsList.sendListItemFocusedEvent(index)
self.symbolsList.SetFocus()
def OnRemoveClick(self, evt):
index = self.symbolsList.GetFirstSelected()
symbol = self.filteredSymbols[index]
self.pendingRemovals[symbol.identifier] = symbol
del self.filteredSymbols[index]
if self.filteredSymbols is not self.symbols:
self.symbols.remove(symbol)
self.symbolsList.ItemCount = len(self.filteredSymbols)
# sometimes removing may result in an empty list.
if not self.symbolsList.ItemCount:
self.editingItem = None
# disable the "change symbol" controls, since there are no items in the list.
self.replacementEdit.Disable()
self.levelList.Disable()
self.preserveList.Disable()
self.removeButton.Disable()
else:
index = min(index, self.symbolsList.ItemCount - 1)
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
# We don't get a new focus event with the new index.
self.symbolsList.sendListItemFocusedEvent(index)
self.symbolsList.SetFocus()
def onOk(self, evt):
self.onSymbolEdited()
self.editingItem = None
for symbol in self.pendingRemovals.values():
self.symbolProcessor.deleteSymbol(symbol)
for symbol in self.symbols:
if not symbol.replacement:
continue
self.symbolProcessor.updateSymbol(symbol)
try:
self.symbolProcessor.userSymbols.save()
except IOError as e:
log.error("Error saving user symbols info: %s" % e)
characterProcessing._localeSpeechSymbolProcessors.invalidateLocaleData(self.symbolProcessor.locale)
super(SpeechSymbolsDialog, self).onOk(evt)
def _refreshVisibleItems(self):
count = self.symbolsList.GetCountPerPage()
first = self.symbolsList.GetTopItem()
self.symbolsList.RefreshItems(first, first+count)
def onFilterEditTextChange(self, evt):
self.filter(self.filterEdit.Value)
self._refreshVisibleItems()
evt.Skip()
| 1 | 34,098 | Why this is defined in the middle of imports? | nvaccess-nvda | py |
@@ -14,8 +14,10 @@
# limitations under the License.
#
+import importlib
from distutils.version import LooseVersion
+_backends = {}
import matplotlib
import numpy as np
import pandas as pd | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import matplotlib
import numpy as np
import pandas as pd
from matplotlib.axes._base import _process_plot_format
from pandas.core.dtypes.inference import is_integer, is_list_like
from pandas.io.formats.printing import pprint_thing
from pandas.core.base import PandasObject
from pyspark.ml.feature import Bucketizer
from pyspark.mllib.stat import KernelDensity
from pyspark.sql import functions as F
from databricks.koalas.missing import unsupported_function
from databricks.koalas.config import get_option
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
from pandas.plotting._core import (
_all_kinds,
BarPlot,
BoxPlot,
HistPlot,
MPLPlot,
PiePlot,
AreaPlot,
LinePlot,
BarhPlot,
ScatterPlot,
KdePlot,
)
else:
from pandas.plotting._core import PlotAccessor
from pandas.plotting._matplotlib import (
BarPlot,
BoxPlot,
HistPlot,
PiePlot,
AreaPlot,
LinePlot,
BarhPlot,
ScatterPlot,
KdePlot,
)
from pandas.plotting._matplotlib.core import MPLPlot
_all_kinds = PlotAccessor._all_kinds
class TopNPlot:
def get_top_n(self, data):
from databricks.koalas import DataFrame, Series
max_rows = get_option("plotting.max_rows")
# Simply use the first 1k elements and make it into a pandas dataframe
# For categorical variables, it is likely called from df.x.value_counts().plot.xxx().
if isinstance(data, (Series, DataFrame)):
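# One extra row (max_rows + 1) is fetched so the check below can tell whether the data was truncated.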
data = data.head(max_rows + 1).to_pandas()
else:
raise ValueError("Only DataFrame and Series are supported for plotting.")
self.partial = False
if len(data) > max_rows:
self.partial = True
data = data.iloc[:max_rows]
return data
def set_result_text(self, ax):
max_rows = get_option("plotting.max_rows")
assert hasattr(self, "partial")
if self.partial:
ax.text(
1,
1,
"showing top {} elements only".format(max_rows),
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
class SampledPlot:
def get_sampled(self, data):
from databricks.koalas import DataFrame, Series
fraction = get_option("plotting.sample_ratio")
if fraction is None:
fraction = 1 / (len(data) / get_option("plotting.max_rows"))
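# For example, with plotting.max_rows=1000 and 100,000 rows, fraction becomes 0.01 (roughly 1,000 sampled rows).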
fraction = min(1.0, fraction)
self.fraction = fraction
if isinstance(data, (DataFrame, Series)):
if isinstance(data, Series):
data = data.to_frame()
sampled = data._internal.resolved_copy.spark_frame.sample(fraction=self.fraction)
return DataFrame(data._internal.with_new_sdf(sampled)).to_pandas()
else:
raise ValueError("Only DataFrame and Series are supported for plotting.")
def set_result_text(self, ax):
assert hasattr(self, "fraction")
if self.fraction < 1:
ax.text(
1,
1,
"showing the sampled result by fraction %s" % self.fraction,
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
class KoalasBarPlot(BarPlot, TopNPlot):
def __init__(self, data, **kwargs):
super(KoalasBarPlot, self).__init__(self.get_top_n(data), **kwargs)
def _plot(self, ax, x, y, w, start=0, log=False, **kwds):
self.set_result_text(ax)
return ax.bar(x, y, w, bottom=start, log=log, **kwds)
class KoalasBoxPlot(BoxPlot):
def boxplot(
self,
ax,
bxpstats,
notch=None,
sym=None,
vert=None,
whis=None,
positions=None,
widths=None,
patch_artist=None,
bootstrap=None,
usermedians=None,
conf_intervals=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
boxprops=None,
labels=None,
flierprops=None,
medianprops=None,
meanprops=None,
capprops=None,
whiskerprops=None,
manage_xticks=True,
autorange=False,
zorder=None,
precision=None,
):
def update_dict(dictionary, rc_name, properties):
""" Loads properties in the dictionary from rc file if not already
in the dictionary"""
rc_str = "boxplot.{0}.{1}"
if dictionary is None:
dictionary = dict()
for prop_dict in properties:
dictionary.setdefault(
prop_dict, matplotlib.rcParams[rc_str.format(rc_name, prop_dict)]
)
return dictionary
# Common property dictionaries loading from rc
flier_props = [
"color",
"marker",
"markerfacecolor",
"markeredgecolor",
"markersize",
"linestyle",
"linewidth",
]
default_props = ["color", "linewidth", "linestyle"]
boxprops = update_dict(boxprops, "boxprops", default_props)
whiskerprops = update_dict(whiskerprops, "whiskerprops", default_props)
capprops = update_dict(capprops, "capprops", default_props)
medianprops = update_dict(medianprops, "medianprops", default_props)
meanprops = update_dict(meanprops, "meanprops", default_props)
flierprops = update_dict(flierprops, "flierprops", flier_props)
if patch_artist:
boxprops["linestyle"] = "solid"
boxprops["edgecolor"] = boxprops.pop("color")
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of final_flierprops
# handle all of the `sym` related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
# no-flier case, which should really be done with
# 'showfliers=False' but none-the-less deal with it to keep back
# compatibility
if sym == "":
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle="none", marker="", color="none")
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops["marker"] = marker
# if we have a color, use it
if color is not None:
# assume that if color is passed in the user wants a
# filled symbol; if the user wants more control, use
# flierprops
flierprops["color"] = color
flierprops["markerfacecolor"] = color
flierprops["markeredgecolor"] = color
# replace medians if necessary:
if usermedians is not None:
if len(np.ravel(usermedians)) != len(bxpstats) or np.shape(usermedians)[0] != len(
bxpstats
):
raise ValueError("usermedians length not compatible with x")
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats["med"] = med
if conf_intervals is not None:
if np.shape(conf_intervals)[0] != len(bxpstats):
err_mess = "conf_intervals length not compatible with x"
raise ValueError(err_mess)
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError("each confidence interval must " "have two values")
else:
if ci[0] is not None:
stats["cilo"] = ci[0]
if ci[1] is not None:
stats["cihi"] = ci[1]
artists = ax.bxp(
bxpstats,
positions=positions,
widths=widths,
vert=vert,
patch_artist=patch_artist,
shownotches=notch,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
meanprops=meanprops,
meanline=meanline,
showfliers=showfliers,
capprops=capprops,
whiskerprops=whiskerprops,
manage_xticks=manage_xticks,
zorder=zorder,
)
return artists
def _plot(self, ax, bxpstats, column_num=None, return_type="axes", **kwds):
bp = self.boxplot(ax, bxpstats, **kwds)
if return_type == "dict":
return bp, bp
elif return_type == "both":
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _compute_plot_data(self):
colname = self.data.name
data = self.data
# Updates all props with the rc defaults from matplotlib
self.kwds.update(KoalasBoxPlot.rc_defaults(**self.kwds))
# Gets some important kwds
showfliers = self.kwds.get("showfliers", False)
whis = self.kwds.get("whis", 1.5)
labels = self.kwds.get("labels", [colname])
# This one is Koalas specific to control precision for approx_percentile
precision = self.kwds.get("precision", 0.01)
# # Computes mean, median, Q1 and Q3 with approx_percentile and precision
col_stats, col_fences = KoalasBoxPlot._compute_stats(data, colname, whis, precision)
# # Creates a column to flag rows as outliers or not
outliers = KoalasBoxPlot._outliers(data, colname, *col_fences)
# # Computes min and max values of non-outliers - the whiskers
whiskers = KoalasBoxPlot._calc_whiskers(colname, outliers)
if showfliers:
fliers = KoalasBoxPlot._get_fliers(colname, outliers)
else:
fliers = []
# Builds bxpstats dict
stats = []
item = {
"mean": col_stats["mean"],
"med": col_stats["med"],
"q1": col_stats["q1"],
"q3": col_stats["q3"],
"whislo": whiskers[0],
"whishi": whiskers[1],
"fliers": fliers,
"label": labels[0],
}
stats.append(item)
self.data = {labels[0]: stats}
def _make_plot(self):
bxpstats = list(self.data.values())[0]
ax = self._get_ax(0)
kwds = self.kwds.copy()
for stats in bxpstats:
if len(stats["fliers"]) > 1000:
stats["fliers"] = stats["fliers"][:1000]
ax.text(
1,
1,
"showing top 1,000 fliers only",
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
ret, bp = self._plot(ax, bxpstats, column_num=0, return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, _ in self.data.items()]
labels = [pprint_thing(l) for l in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
@staticmethod
def rc_defaults(
notch=None,
vert=None,
whis=None,
patch_artist=None,
bootstrap=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
**kwargs
):
# Missing arguments default to rcParams.
if whis is None:
whis = matplotlib.rcParams["boxplot.whiskers"]
if bootstrap is None:
bootstrap = matplotlib.rcParams["boxplot.bootstrap"]
if notch is None:
notch = matplotlib.rcParams["boxplot.notch"]
if vert is None:
vert = matplotlib.rcParams["boxplot.vertical"]
if patch_artist is None:
patch_artist = matplotlib.rcParams["boxplot.patchartist"]
if meanline is None:
meanline = matplotlib.rcParams["boxplot.meanline"]
if showmeans is None:
showmeans = matplotlib.rcParams["boxplot.showmeans"]
if showcaps is None:
showcaps = matplotlib.rcParams["boxplot.showcaps"]
if showbox is None:
showbox = matplotlib.rcParams["boxplot.showbox"]
if showfliers is None:
showfliers = matplotlib.rcParams["boxplot.showfliers"]
return dict(
whis=whis,
bootstrap=bootstrap,
notch=notch,
vert=vert,
patch_artist=patch_artist,
meanline=meanline,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
showfliers=showfliers,
)
@staticmethod
def _compute_stats(data, colname, whis, precision):
# Computes mean, median, Q1 and Q3 with approx_percentile and precision
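# e.g. the default precision of 0.01 becomes an accuracy argument of int(1.0 / 0.01) = 100, producing calls such
# as approx_percentile(col, 0.25, 100); smaller precision values yield more precise but slower percentile estimates.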
pdf = data._kdf._internal.resolved_copy.spark_frame.agg(
*[
F.expr(
"approx_percentile({}, {}, {})".format(colname, q, int(1.0 / precision))
).alias("{}_{}%".format(colname, int(q * 100)))
for q in [0.25, 0.50, 0.75]
],
F.mean(colname).alias("{}_mean".format(colname))
).toPandas()
# Computes IQR and Tukey's fences
iqr = "{}_iqr".format(colname)
p75 = "{}_75%".format(colname)
p25 = "{}_25%".format(colname)
pdf.loc[:, iqr] = pdf.loc[:, p75] - pdf.loc[:, p25]
pdf.loc[:, "{}_lfence".format(colname)] = pdf.loc[:, p25] - whis * pdf.loc[:, iqr]
pdf.loc[:, "{}_ufence".format(colname)] = pdf.loc[:, p75] + whis * pdf.loc[:, iqr]
qnames = ["25%", "50%", "75%", "mean", "lfence", "ufence"]
col_summ = pdf[["{}_{}".format(colname, q) for q in qnames]]
col_summ.columns = qnames
lfence, ufence = col_summ["lfence"], col_summ["ufence"]
stats = {
"mean": col_summ["mean"].values[0],
"med": col_summ["50%"].values[0],
"q1": col_summ["25%"].values[0],
"q3": col_summ["75%"].values[0],
}
return stats, (lfence.values[0], ufence.values[0])
@staticmethod
def _outliers(data, colname, lfence, ufence):
# Builds expression to identify outliers
expression = F.col(colname).between(lfence, ufence)
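# between() is inclusive, so values lying exactly on a fence count as non-outliers; the flag column below stores the negation.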
# Creates a column to flag rows as outliers or not
return data._kdf._internal.resolved_copy.spark_frame.withColumn(
"__{}_outlier".format(colname), ~expression
)
@staticmethod
def _calc_whiskers(colname, outliers):
# Computes min and max values of non-outliers - the whiskers
minmax = (
outliers.filter("not __{}_outlier".format(colname))
.agg(F.min(colname).alias("min"), F.max(colname).alias("max"))
.toPandas()
)
return minmax.iloc[0][["min", "max"]].values
@staticmethod
def _get_fliers(colname, outliers):
# Filters only the outliers, should "showfliers" be True
fliers_df = outliers.filter("__{}_outlier".format(colname))
# If shows fliers, takes the top 1k with highest absolute values
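# limit(1001) fetches one extra row so _make_plot can detect that more than 1,000 fliers exist and show its truncation note.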
fliers = (
fliers_df.select(F.abs(F.col("`{}`".format(colname))).alias(colname))
.orderBy(F.desc("`{}`".format(colname)))
.limit(1001)
.toPandas()[colname]
.values
)
return fliers
class KoalasHistPlot(HistPlot):
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _compute_plot_data(self):
# TODO: this logic is same with KdePlot. Might have to deduplicate it.
from databricks.koalas.series import Series
data = self.data
if isinstance(data, Series):
data = data.to_frame()
numeric_data = data.select_dtypes(
include=["byte", "decimal", "integer", "float", "long", "double", np.datetime64]
)
# no empty frames or series allowed
if len(numeric_data.columns) == 0:
raise TypeError(
"Empty {0!r}: no numeric data to " "plot".format(numeric_data.__class__.__name__)
)
if is_integer(self.bins):
# computes boundaries for the column
self.bins = self._get_bins(data.to_spark(), self.bins)
self.data = numeric_data
def _make_plot(self):
# TODO: this logic is similar with KdePlot. Might have to deduplicate it.
# 'num_colors' requires to calculate `shape` which has to count all.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
sdf = self.data._internal.spark_frame
for i, label in enumerate(self.data._internal.column_labels):
# 'y' is a Spark DataFrame that selects one column.
y = sdf.select(self.data._internal.spark_column_for(label))
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
# 'y' is a Spark DataFrame that selects one column.
# here, we manually calculates the weights separately via Spark
# and assign it directly to histogram plot.
y = KoalasHistPlot._compute_hist(y, self.bins) # now y is a pandas Series.
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
@classmethod
def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, stacking_id=None, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
base = np.zeros(len(bins) - 1)
bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"])
# Since the counts were computed already, we use them as weights and just generate
# one entry for each bin
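# Each bin's left edge is passed as the single sample for that bin and the precomputed count as its weight,
# so the resulting bar heights equal the counts.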
n, bins, patches = ax.hist(bins[:-1], bins=bins, bottom=bottom, weights=y, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
@staticmethod
def _get_bins(sdf, bins):
# 'data' is a Spark DataFrame that selects all columns.
if len(sdf.columns) > 1:
min_col = F.least(*map(F.min, sdf))
max_col = F.greatest(*map(F.max, sdf))
else:
min_col = F.min(sdf.columns[-1])
max_col = F.max(sdf.columns[-1])
boundaries = sdf.select(min_col, max_col).first()
# divides the boundaries into bins
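# e.g. boundaries of (0, 6) with bins=3 give the edges [0., 2., 4., 6.]; when min == max the range is padded
# by 0.5 on each side so the edges remain distinct.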
if boundaries[0] == boundaries[1]:
boundaries = (boundaries[0] - 0.5, boundaries[1] + 0.5)
return np.linspace(boundaries[0], boundaries[1], bins + 1)
@staticmethod
def _compute_hist(sdf, bins):
# 'data' is a Spark DataFrame that selects one column.
assert isinstance(bins, (np.ndarray, np.generic))
colname = sdf.columns[-1]
bucket_name = "__{}_bucket".format(colname)
# creates a Bucketizer to get corresponding bin of each value
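# e.g. with splits [0, 2, 4, 6] a value of 3.5 is assigned bucket 1.0; handleInvalid='skip' drops rows that
# cannot be bucketed (such as NaN) instead of raising an error.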
bucketizer = Bucketizer(
splits=bins, inputCol=colname, outputCol=bucket_name, handleInvalid="skip"
)
# after bucketing values, groups and counts them
result = (
bucketizer.transform(sdf)
.select(bucket_name)
.groupby(bucket_name)
.agg(F.count("*").alias("count"))
.toPandas()
.sort_values(by=bucket_name)
)
# generates a pandas DF with one row for each bin
# we need this as some of the bins may be empty
indexes = pd.DataFrame({bucket_name: np.arange(0, len(bins) - 1), "bucket": bins[:-1]})
# merges the bins with counts on it and fills remaining ones with zeros
pdf = indexes.merge(result, how="left", on=[bucket_name]).fillna(0)[["count"]]
pdf.columns = [bucket_name]
return pdf[bucket_name]
class KoalasPiePlot(PiePlot, TopNPlot):
def __init__(self, data, **kwargs):
super(KoalasPiePlot, self).__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasPiePlot, self)._make_plot()
class KoalasAreaPlot(AreaPlot, SampledPlot):
def __init__(self, data, **kwargs):
super(KoalasAreaPlot, self).__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasAreaPlot, self)._make_plot()
class KoalasLinePlot(LinePlot, SampledPlot):
def __init__(self, data, **kwargs):
super(KoalasLinePlot, self).__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasLinePlot, self)._make_plot()
class KoalasBarhPlot(BarhPlot, TopNPlot):
def __init__(self, data, **kwargs):
super(KoalasBarhPlot, self).__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasBarhPlot, self)._make_plot()
class KoalasScatterPlot(ScatterPlot, TopNPlot):
def __init__(self, data, x, y, **kwargs):
super().__init__(self.get_top_n(data), x, y, **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasScatterPlot, self)._make_plot()
class KoalasKdePlot(KdePlot):
def _compute_plot_data(self):
from databricks.koalas.series import Series
data = self.data
if isinstance(data, Series):
data = data.to_frame()
numeric_data = data.select_dtypes(
include=["byte", "decimal", "integer", "float", "long", "double", np.datetime64]
)
# no empty frames or series allowed
if len(numeric_data.columns) == 0:
raise TypeError(
"Empty {0!r}: no numeric data to " "plot".format(numeric_data.__class__.__name__)
)
self.data = numeric_data
def _make_plot(self):
# 'num_colors' requires to calculate `shape` which has to count all.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
sdf = self.data._internal.spark_frame
for i, label in enumerate(self.data._internal.column_labels):
# 'y' is a Spark DataFrame that selects one column.
y = sdf.select(self.data._internal.spark_column_for(label))
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _get_ind(self, y):
# 'y' is a Spark DataFrame that selects one column.
if self.ind is None:
min_val, max_val = y.select(F.min(y.columns[-1]), F.max(y.columns[-1])).first()
sample_range = max_val - min_val
ind = np.linspace(min_val - 0.5 * sample_range, max_val + 0.5 * sample_range, 1000,)
elif is_integer(self.ind):
min_val, max_val = y.select(F.min(y.columns[-1]), F.max(y.columns[-1])).first()
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(min_val - 0.5 * sample_range, max_val + 0.5 * sample_range, self.ind,)
else:
ind = self.ind
return ind
@classmethod
def _plot(
cls, ax, y, style=None, bw_method=None, ind=None, column_num=None, stacking_id=None, **kwds
):
# 'y' is a Spark DataFrame that selects one column.
# Using RDD is slow so we might have to change it to Dataset based implementation
# once Spark has that implementation.
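# KernelDensity.estimate returns one density value per evaluation point in `ind`; the (ind, density) pairs are
# then drawn as an ordinary matplotlib line below.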
sample = y.rdd.map(lambda x: float(x[0]))
kd = KernelDensity()
kd.setSample(sample)
assert isinstance(bw_method, (int, float)), "'bw_method' must be set as a scalar number."
if bw_method is not None:
# Match the bandwidth with Spark.
kd.setBandwidth(float(bw_method))
y = kd.estimate(list(map(float, ind)))
lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
_klasses = [
KoalasHistPlot,
KoalasBarPlot,
KoalasBoxPlot,
KoalasPiePlot,
KoalasAreaPlot,
KoalasLinePlot,
KoalasBarhPlot,
KoalasScatterPlot,
KoalasKdePlot,
]
_plot_klass = {getattr(klass, "_kind"): klass for klass in _klasses}
def plot_series(
data,
kind="line",
ax=None, # Series unique
figsize=None,
use_index=True,
title=None,
grid=None,
legend=False,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
label=None,
secondary_y=False, # Series unique
**kwds
):
"""
Make plots of Series using matplotlib / pylab.
Each plot kind has a corresponding method on the
``Series.plot`` accessor:
``s.plot(kind='line')`` is equivalent to
``s.plot.line()``.
Parameters
----------
data : Series
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
ax : matplotlib axes object
If not passed, uses gca()
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
# function copied from pandas.plotting._core
# so it calls modified _plot below
import matplotlib.pyplot as plt
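# If no axis was passed but a matplotlib figure is already open, reuse that figure's current axes (gca)
# so the plot is drawn onto it.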
if ax is None and len(plt.get_fignums()) > 0:
ax = None
with plt.rc_context():
ax = plt.gca()
ax = MPLPlot._get_ax_layer(ax)
return _plot(
data,
kind=kind,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
label=label,
secondary_y=secondary_y,
**kwds
)
def plot_frame(
data,
x=None,
y=None,
kind="line",
ax=None,
subplots=None,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwds
):
"""
Make plots of DataFrames using matplotlib / pylab.
Each plot kind has a corresponding method on the
``DataFrame.plot`` accessor:
``kdf.plot(kind='line')`` is equivalent to
``kdf.plot.line()``.
Parameters
----------
data : DataFrame
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot
ax : matplotlib axes object
If not passed, uses gca()
x : label or position, default None
y : label, position or list of label, positions, default None
Allows plotting of one column versus another.
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
sharex: bool or None, default is None
Whether to share x axis or not.
sharey: bool, default is False
Whether to share y axis or not.
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
sort_columns: bool, default is False
When True, will sort values on plots.
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
return _plot(
data,
kind=kind,
x=x,
y=y,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
subplots=subplots,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
sharex=sharex,
sharey=sharey,
secondary_y=secondary_y,
layout=layout,
sort_columns=sort_columns,
**kwds
)
def _plot(data, x=None, y=None, subplots=False, ax=None, kind="line", **kwds):
from databricks.koalas import DataFrame
# function copied from pandas.plotting._core
# and adapted to handle Koalas DataFrame and Series
kind = kind.lower().strip()
kind = {"density": "kde"}.get(kind, kind)
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
# scatter and hexbin are inherited from PlanePlot which require x and y
if kind in ("scatter", "hexbin"):
plot_obj = klass(data, x, y, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
# check data type and do preprocess before applying plot
if isinstance(data, DataFrame):
if x is not None:
data = data.set_index(x)
# TODO: check if value of y is plottable
if y is not None:
data = data[y]
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
class KoalasSeriesPlotMethods(PandasObject):
"""
Series plotting accessor and method.
Plotting methods can also be accessed by calling the accessor as a method
with the ``kind`` argument:
``s.plot(kind='hist')`` is equivalent to ``s.plot.hist()``
"""
def __init__(self, data):
self.data = data
def __call__(
self,
kind="line",
ax=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=False,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
label=None,
secondary_y=False,
**kwds
):
return plot_series(
self.data,
kind=kind,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
label=label,
secondary_y=secondary_y,
**kwds
)
__call__.__doc__ = plot_series.__doc__
def line(self, x=None, y=None, **kwargs):
"""
Plot Series as lines.
This function is useful to plot lines using Series's values
as coordinates.
Parameters
----------
x : int or str, optional
Columns to use for the horizontal axis.
Either the location or the label of the columns to be used.
By default, it will use the DataFrame indices.
y : int, str, or list of them, optional
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
**kwds
Keyword arguments to pass on to :meth:`Series.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`
Return an ndarray when ``subplots=True``.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> s = ks.Series([1, 3, 2])
>>> ax = s.plot.line()
"""
return self(kind="line", x=x, y=y, **kwargs)
def bar(self, **kwds):
"""
Vertical bar plot.
Parameters
----------
**kwds : optional
Additional keyword arguments are documented in
:meth:`Koalas.Series.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> s = ks.Series([1, 3, 2])
>>> ax = s.plot.bar()
"""
return self(kind="bar", **kwds)
def barh(self, **kwds):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwds
Keyword arguments to pass on to :meth:`databricks.koalas.DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> plot = df.val.plot.barh()
"""
return self(kind="barh", **kwds)
def box(self, **kwds):
"""
Make a box plot of the DataFrame columns.
Parameters
----------
**kwds : optional
Additional keyword arguments are documented in
:meth:`Koalas.Series.plot`.
precision: scalar, default = 0.01
This argument is used by Koalas to compute approximate statistics
for building a boxplot. Use *smaller* values to get more precise
statistics.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
There are behavior differences between Koalas and pandas.
* Koalas computes approximate statistics - expect differences between
pandas and Koalas boxplots, especially regarding 1st and 3rd quartiles.
* The `whis` argument is only supported as a single number.
* Koalas doesn't support the following argument(s).
* `bootstrap` argument is not supported
* `autorange` argument is not supported
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = ks.DataFrame(data, columns=list('ABCD'))
>>> ax = df['A'].plot.box()
"""
return self(kind="box", **kwds)
def hist(self, bins=10, **kwds):
"""
Draw one histogram of the Series' values.
Parameters
----------
bins : integer, default 10
Number of histogram bins to be used
**kwds : optional
Additional keyword arguments are documented in
:meth:`Koalas.Series.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> s = ks.Series([1, 3, 2])
>>> ax = s.plot.hist()
"""
return self(kind="hist", bins=bins, **kwds)
def kde(self, bw_method=None, ind=None, **kwargs):
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
Parameters
----------
bw_method : scalar
The method used to calculate the estimator bandwidth.
See KernelDensity in PySpark for more information.
ind : NumPy array or integer, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs : optional
Keyword arguments to pass on to :meth:`Koalas.Series.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
Examples
--------
A scalar bandwidth should be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> s = ks.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> ax = s.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=3)
The `ind` parameter determines the evaluation points for the
plot of the estimated KDF:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5], bw_method=0.3)
"""
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
def area(self, **kwds):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
Area plots are stacked by default. Set to False to create an
unstacked plot.
**kwds : optional
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
Examples
--------
.. plot::
:context: close-figs
>>> df = ks.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> plot = df.sales.plot.area()
"""
return self(kind="area", **kwds)
def pie(self, **kwds):
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`matplotlib.pyplot.pie` for the
specified column. If no column reference is passed and
``subplots=True`` a pie plot is drawn for each numerical column
independently.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
**kwds
Keyword arguments to pass on to :meth:`Koalas.Series.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
A NumPy array is returned when `subplots` is True.
Examples
--------
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'mass': [0.330, 4.87, 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> plot = df.mass.plot.pie(figsize=(5, 5))
.. plot::
:context: close-figs
>>> plot = df.mass.plot.pie(subplots=True, figsize=(6, 3))
"""
return self(kind="pie", **kwds)
class KoalasFramePlotMethods(PandasObject):
# TODO: not sure if Koalas wants to combine plot method for Series and DataFrame
"""
DataFrame plotting accessor and method.
Plotting methods can also be accessed by calling the accessor as a method
with the ``kind`` argument:
``df.plot(kind='hist')`` is equivalent to ``df.plot.hist()``
"""
def __init__(self, data):
self.data = data
def __call__(
self,
x=None,
y=None,
kind="line",
ax=None,
subplots=None,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwds
):
return plot_frame(
self.data,
x=x,
y=y,
kind=kind,
ax=ax,
subplots=subplots,
sharex=sharex,
sharey=sharey,
layout=layout,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
secondary_y=secondary_y,
sort_columns=sort_columns,
**kwds
)
def line(self, x=None, y=None, **kwargs):
"""
Plot DataFrame as lines.
Parameters
----------
x: int or str, optional
Columns to use for the horizontal axis.
y : int, str, or list of them, optional
The values to be plotted.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`
Return an ndarray when ``subplots=True``.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = ks.DataFrame({'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]},
... index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x='pig', y='horse')
"""
return self(kind="line", x=x, y=y, **kwargs)
def kde(self, bw_method=None, ind=None, **kwargs):
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
Parameters
----------
bw_method : scalar
The method used to calculate the estimator bandwidth.
See KernelDensity in PySpark for more information.
ind : NumPy array or integer, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs : optional
Keyword arguments to pass on to :meth:`Koalas.DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
Examples
--------
For DataFrame, it works in the same way as Series:
.. plot::
:context: close-figs
>>> df = ks.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> ax = df.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6], bw_method=0.3)
"""
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
def pie(self, y=None, **kwds):
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`matplotlib.pyplot.pie` for the
specified column. If no column reference is passed and
``subplots=True`` a pie plot is drawn for each numerical column
independently.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
**kwds
Keyword arguments to pass on to :meth:`Koalas.DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
A NumPy array is returned when `subplots` is True.
Examples
--------
In the example below we have a DataFrame with the information about
planets' mass and radius. We pass the 'mass' column to the
pie function to get a pie plot.
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'mass': [0.330, 4.87, 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> plot = df.plot.pie(y='mass', figsize=(5, 5))
.. plot::
:context: close-figs
>>> plot = df.plot.pie(subplots=True, figsize=(6, 3))
"""
from databricks.koalas import DataFrame
# pandas will raise an error if y is None and subplots is not True
if isinstance(self.data, DataFrame) and y is None and not kwds.get("subplots", False):
raise ValueError("pie requires either y column or 'subplots=True'")
return self(kind="pie", y=y, **kwds)
def area(self, x=None, y=None, stacked=True, **kwds):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
Area plots are stacked by default. Set to False to create an
unstacked plot.
**kwds : optional
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
Examples
--------
.. plot::
:context: close-figs
>>> df = ks.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> plot = df.plot.area()
"""
return self(kind="area", x=x, y=y, stacked=stacked, **kwds)
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another.
If not specified, the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another.
If not specified, all numerical columns are used.
**kwds : optional
Additional keyword arguments are documented in
:meth:`Koalas.DataFrame.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ks.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
return self(kind="bar", x=x, y=y, **kwds)
def barh(self, x=None, y=None, **kwargs):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with rectangular
bars with lengths proportional to the values that they represent. A bar plot shows
comparisons among discrete categories. One axis of the plot shows the specific
categories being compared, and the other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwds:
Keyword arguments to pass on to :meth:`databricks.koalas.DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ks.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ks.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ks.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
"""
return self(kind="barh", x=x, y=y, **kwargs)
def hexbin(self, **kwds):
return unsupported_function(class_name="pd.DataFrame", method_name="hexbin")()
def box(self, **kwds):
return unsupported_function(class_name="pd.DataFrame", method_name="box")()
def hist(self, bins=10, **kwds):
"""
Make a histogram of the DataFrame's columns.
A `histogram`_ is a representation of the distribution of data.
This function calls :meth:`matplotlib.pyplot.hist`, on each series in
the DataFrame, resulting in one histogram per column.
.. _histogram: https://en.wikipedia.org/wiki/Histogram
Parameters
----------
bins : integer or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
**kwds
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
Returns
-------
matplotlib.AxesSubplot or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.hist : Plot a histogram using matplotlib.
Examples
--------
When we roll a die 6000 times, we expect to get each value around 1000
times. But when we roll two dice and sum the result, the distribution
is going to be quite different. A histogram illustrates those
distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns=['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> df = ks.from_pandas(df)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind="hist", bins=bins, **kwds)
def scatter(self, x, y, s=None, c=None, **kwds):
"""
Create a scatter plot with varying marker point size and color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
s : scalar or array_like, optional
c : str, int or array_like, optional
**kwds: Optional
Keyword arguments to pass on to :meth:`databricks.koalas.DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.scatter : Scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plot::
:context: close-figs
>>> df = ks.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> ax1 = df.plot.scatter(x='length',
... y='width',
... c='DarkBlue')
And now with the color determined by a column as well.
.. plot::
:context: close-figs
>>> ax2 = df.plot.scatter(x='length',
... y='width',
... c='species',
... colormap='viridis')
"""
return self(kind="scatter", x=x, y=y, s=s, c=c, **kwds)
| 1 | 15,741 | Could you move this to the original position? | databricks-koalas | py |
@@ -160,7 +160,7 @@ func (s *Source) Owner() (string, error) {
if err != nil {
return "", err
}
- return oAndR.repo, nil
+ return oAndR.owner, nil
}
// PipelineStage represents configuration for each deployment stage | 1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package deploy holds the structures to deploy applications and environments.
package deploy
import (
"errors"
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/manifest"
)
// CreateEnvironmentInput represents the fields required to deploy an environment.
type CreateEnvironmentInput struct {
Project string // Name of the project this environment belongs to.
Name string // Name of the environment, must be unique within a project.
Prod bool // Whether or not this environment is a production environment.
PublicLoadBalancer bool // Whether or not this environment should contain a shared public load balancer between applications.
ToolsAccountPrincipalARN string // The Principal ARN of the tools account.
}
const (
GithubProviderName = "GitHub"
GithubSecretIdKeyName = "githubPersonalAccessTokenSecretId"
)
// CreateEnvironmentResponse holds the created environment on successful deployment.
// Otherwise, the environment is set to nil and a descriptive error is returned.
type CreateEnvironmentResponse struct {
Env *archer.Environment
Err error
}
// CreatePipelineInput represents the fields required to deploy a pipeline.
type CreatePipelineInput struct {
// Name of the project this pipeline belongs to
ProjectName string
// Name of the pipeline
Name string
// The source code provider for this pipeline
Source *Source
// The stages of the pipeline. The order of stages in this list
// will be the order we deploy to
Stages []PipelineStage
// A list of artifact buckets and corresponding KMS keys that will
// be used in this pipeline.
ArtifactBuckets []ArtifactBucket
}
// ArtifactBucket represents an S3 bucket used by the CodePipeline to store
// intermediate artifacts produced by the pipeline.
type ArtifactBucket struct {
// The ARN of the S3 bucket.
BucketArn string
// The ARN of the KMS key used to en/decrypt artifacts stored in this bucket.
KeyArn string
}
// Region parses out the region from the ARN of the KMS key associated with
// the artifact bucket.
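// For example, an (illustrative) key ARN such as
// "arn:aws:kms:us-west-2:111122223333:key/1234abcd" yields "us-west-2".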
func (a *ArtifactBucket) Region() (string, error) {
// We assume the bucket and the key are in the same AWS region.
parsedArn, err := arn.Parse(a.KeyArn)
if err != nil {
return "", fmt.Errorf("failed to parse region out of key ARN: %s, error: %w",
a.BucketArn, err)
}
return parsedArn.Region, nil
}
// BucketName parses out the name of the bucket from its ARN.
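// For example, an (illustrative) bucket ARN such as "arn:aws:s3:::my-artifact-bucket"
// yields "my-artifact-bucket", since the resource section of an S3 ARN is the bucket name.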
func (a *ArtifactBucket) BucketName() (string, error) {
parsedArn, err := arn.Parse(a.BucketArn)
if err != nil {
return "", fmt.Errorf("failed to parse the name of the bucket out of bucket ARN: %s, error: %w",
a.BucketArn, err)
}
return parsedArn.Resource, nil
}
// Source defines the source of the artifacts to be built and deployed.
type Source struct {
// The name of the source code provider. For example, "GitHub"
ProviderName string
// Contains provider-specific configurations, such as:
// "repository": "aws/amazon-ecs-cli-v2"
// "githubPersonalAccessTokenSecretId": "heyyo"
Properties map[string]interface{}
}
// GitHubPersonalAccessTokenSecretID returns the ID of the secret in the Secrets manager,
// which stores the GitHub OAuth token if the provider is "GitHub". Otherwise,
// it returns an error.
func (s *Source) GitHubPersonalAccessTokenSecretID() (string, error) {
secretID, exists := s.Properties[GithubSecretIdKeyName]
if !exists {
return "", errors.New("the GitHub token secretID is not configured")
}
id, ok := secretID.(string)
if !ok {
return "", fmt.Errorf("unable to locate the GitHub token secretID from %v", secretID)
}
if s.ProviderName != GithubProviderName {
return "", fmt.Errorf("failed attempt to retrieve GitHub token from a non-GitHub provider")
}
return id, nil
}
type ownerAndRepo struct {
owner string
repo string
}
func (s *Source) parseOwnerAndRepo() (*ownerAndRepo, error) {
if s.ProviderName != GithubProviderName {
return nil, fmt.Errorf("invalid provider: %s", s.ProviderName)
}
ownerAndRepoI, exists := s.Properties["repository"]
if !exists {
return nil, fmt.Errorf("unable to locate the repository from the properties: %+v", s.Properties)
}
ownerAndRepoStr, ok := ownerAndRepoI.(string)
if !ok {
return nil, fmt.Errorf("unable to locate the repository from the properties: %+v", ownerAndRepoI)
}
result := strings.Split(ownerAndRepoStr, "/")
if len(result) != 2 {
return nil, fmt.Errorf("unable to locate the repository from the properties: %s", ownerAndRepoStr)
}
return &ownerAndRepo{
owner: result[0],
repo: result[1],
}, nil
}
// Repository returns the repository portion. For example,
// given "aws/amazon-ecs-cli-v2", this function returns "amazon-ecs-cli-v2"
func (s *Source) Repository() (string, error) {
oAndR, err := s.parseOwnerAndRepo()
if err != nil {
return "", err
}
return oAndR.repo, nil
}
// Owner returns the repository owner portion. For example,
// given "aws/amazon-ecs-cli-v2", this function returns "aws"
func (s *Source) Owner() (string, error) {
oAndR, err := s.parseOwnerAndRepo()
if err != nil {
return "", err
}
return oAndR.repo, nil
}
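// A minimal regression-test sketch for the owner/repo parsing above; it is an
// illustration only and would live in a separate types_test.go in this package
// (with its own "testing" import), which is why it is left commented out here.
// Against the current Owner() implementation, which returns the repo portion,
// the first assertion would fail and catch the swapped return value.
//
//	func TestSourceOwnerAndRepository(t *testing.T) {
//		src := &Source{
//			ProviderName: GithubProviderName,
//			Properties:   map[string]interface{}{"repository": "aws/amazon-ecs-cli-v2"},
//		}
//		owner, err := src.Owner()
//		if err != nil || owner != "aws" {
//			t.Errorf("Owner() = %q, %v; want \"aws\", nil", owner, err)
//		}
//		repo, err := src.Repository()
//		if err != nil || repo != "amazon-ecs-cli-v2" {
//			t.Errorf("Repository() = %q, %v; want \"amazon-ecs-cli-v2\", nil", repo, err)
//		}
//	}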
// PipelineStage represents configuration for each deployment stage
// of a workspace. A stage consists of the Archer Environment the pipeline
// is deploying to and the containerized applications that will be deployed.
type PipelineStage struct {
*AssociatedEnvironment
LocalApplications []string
}
// AssociatedEnvironment defines the necessary information a pipeline stage
// needs for an Archer Environment.
type AssociatedEnvironment struct {
// Name of the environment, must be unique within a project.
// This is also the name of the pipeline stage.
Name string
// The region this environment is stored in.
Region string
// AccountID of the account this environment is stored in.
AccountID string
// Whether or not this environment is a production environment.
Prod bool
}
// CreateLBFargateAppInput holds the fields required to deploy a load-balanced AWS Fargate application.
type CreateLBFargateAppInput struct {
App *manifest.LBFargateManifest
Env *archer.Environment
ImageTag string
}
// Resource represents an AWS resource.
type Resource struct {
LogicalName string
Type string
}
// ResourceEvent represents a status update for an AWS resource during a deployment.
type ResourceEvent struct {
Resource
Status string
StatusReason string
}
| 1 | 10,987 | Is there a test that'd have caught this? | aws-copilot-cli | go |
@@ -419,7 +419,7 @@ func (s *VisibilityPersistenceSuite) TestFilteringByType() {
// List open with filtering
resp, err2 := s.VisibilityMgr.ListOpenWorkflowExecutionsByType(&visibility.ListWorkflowExecutionsByTypeRequest{
- ListWorkflowExecutionsRequest: visibility.ListWorkflowExecutionsRequest{
+ ListWorkflowExecutionsRequest: &visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 2,
EarliestStartTime: startTime, | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package persistencetests
import (
"time"
"github.com/pborman/uuid"
"github.com/stretchr/testify/require"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
workflowpb "go.temporal.io/api/workflow/v1"
"go.temporal.io/server/common/persistence/cassandra"
"go.temporal.io/server/common/persistence/visibility"
"go.temporal.io/server/common/payload"
p "go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/primitives/timestamp"
"go.temporal.io/server/common/searchattribute"
)
type (
// VisibilityPersistenceSuite tests visibility persistence
VisibilityPersistenceSuite struct {
TestBase
// override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test,
// not merely log an error
*require.Assertions
}
)
// SetupSuite implementation
func (s *VisibilityPersistenceSuite) SetupSuite() {
}
// SetupTest implementation
func (s *VisibilityPersistenceSuite) SetupTest() {
// Have to define our overridden assertions in the test setup. If we did it earlier, s.T() will return nil
s.Assertions = require.New(s.T())
}
// TearDownSuite implementation
func (s *VisibilityPersistenceSuite) TearDownSuite() {
s.TearDownWorkflowStore()
}
// TestBasicVisibility test
func (s *VisibilityPersistenceSuite) TestBasicVisibility() {
testNamespaceUUID := uuid.New()
workflowExecution := commonpb.WorkflowExecution{
WorkflowId: "visibility-workflow-test",
RunId: "fb15e4b5-356f-466d-8c6d-a29223e5c536",
}
startTime := time.Now().UTC().Add(time.Second * -5)
startReq := &visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
}
err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(startReq)
s.Nil(err0)
resp, err1 := s.VisibilityMgr.ListOpenWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime,
LatestStartTime: startTime,
})
s.Nil(err1)
s.Equal(1, len(resp.Executions))
s.assertOpenExecutionEquals(startReq, resp.Executions[0])
closeReq := &visibility.RecordWorkflowExecutionClosedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
CloseTime: time.Now(),
HistoryLength: 5,
}
err2 := s.VisibilityMgr.RecordWorkflowExecutionClosed(closeReq)
s.Nil(err2)
resp, err3 := s.VisibilityMgr.ListOpenWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime,
LatestStartTime: startTime,
})
s.Nil(err3)
s.Equal(0, len(resp.Executions))
resp, err4 := s.VisibilityMgr.ListClosedWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime,
LatestStartTime: time.Now(),
})
s.Nil(err4)
s.Equal(1, len(resp.Executions))
s.assertClosedExecutionEquals(closeReq, resp.Executions[0])
}
// TestBasicVisibilityTimeSkew test
func (s *VisibilityPersistenceSuite) TestBasicVisibilityTimeSkew() {
testNamespaceUUID := uuid.New()
workflowExecution := commonpb.WorkflowExecution{
WorkflowId: "visibility-workflow-test-time-skew",
RunId: "fb15e4b5-356f-466d-8c6d-a29223e5c536",
}
startTime := time.Now().UTC()
err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
})
s.NoError(err0)
resp, err1 := s.VisibilityMgr.ListOpenWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime,
LatestStartTime: startTime,
})
s.NoError(err1)
s.Equal(1, len(resp.Executions))
s.Equal(workflowExecution.WorkflowId, resp.Executions[0].Execution.WorkflowId)
err2 := s.VisibilityMgr.RecordWorkflowExecutionClosed(&visibility.RecordWorkflowExecutionClosedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
CloseTime: startTime.Add(-10 * time.Millisecond),
})
s.NoError(err2)
resp, err3 := s.VisibilityMgr.ListOpenWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime,
LatestStartTime: startTime,
})
s.NoError(err3)
s.Equal(0, len(resp.Executions))
resp, err4 := s.VisibilityMgr.ListClosedWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime.Add(-10 * time.Millisecond), // This is actually close_time
LatestStartTime: startTime.Add(-10 * time.Millisecond),
})
s.NoError(err4)
s.Equal(1, len(resp.Executions))
}
func (s *VisibilityPersistenceSuite) TestBasicVisibilityShortWorkflow() {
testNamespaceUUID := uuid.New()
workflowExecution := commonpb.WorkflowExecution{
WorkflowId: "visibility-workflow-test-short-workflow",
RunId: "3c095198-0c33-4136-939a-c29fbbb6a80b",
}
startTime := time.Now().UTC()
err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
})
s.NoError(err0)
err2 := s.VisibilityMgr.RecordWorkflowExecutionClosed(&visibility.RecordWorkflowExecutionClosedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
CloseTime: startTime.Add(10 * time.Millisecond),
})
s.NoError(err2)
resp, err3 := s.VisibilityMgr.ListOpenWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime,
LatestStartTime: startTime,
})
s.NoError(err3)
s.Equal(0, len(resp.Executions))
resp, err4 := s.VisibilityMgr.ListClosedWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime.Add(10 * time.Millisecond), // This is actually close_time
LatestStartTime: startTime.Add(10 * time.Millisecond),
})
s.NoError(err4)
s.Equal(1, len(resp.Executions))
}
func (s *VisibilityPersistenceSuite) TestVisibilityRetention() {
if _, ok := s.VisibilityTestCluster.(*cassandra.TestCluster); !ok {
return
}
testNamespaceUUID := uuid.New()
workflowExecution := commonpb.WorkflowExecution{
WorkflowId: "visibility-workflow-test-visibility-retention",
RunId: "3c095198-0c33-4136-939a-c29fbbb6a802",
}
startTime := time.Now().UTC().Add(-1 * time.Hour)
err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
})
s.NoError(err0)
retention := 1 * time.Second
err2 := s.VisibilityMgr.RecordWorkflowExecutionClosed(&visibility.RecordWorkflowExecutionClosedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
CloseTime: startTime.Add(1 * time.Minute),
Retention: &retention,
})
s.NoError(err2)
resp, err3 := s.VisibilityMgr.ListOpenWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime,
LatestStartTime: startTime,
})
s.NoError(err3)
s.Equal(0, len(resp.Executions))
resp, err4 := s.VisibilityMgr.ListClosedWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime.Add(1 * time.Minute), // This is actually close_time
LatestStartTime: startTime.Add(1 * time.Minute),
})
s.NoError(err4)
s.Equal(1, len(resp.Executions))
// Sleep for retention to fire.
time.Sleep(retention)
resp2, err5 := s.VisibilityMgr.ListClosedWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime.Add(1 * time.Minute), // This is actually close_time
LatestStartTime: startTime.Add(1 * time.Minute),
})
s.NoError(err5)
s.Equal(0, len(resp2.Executions))
}
// TestVisibilityPagination test
func (s *VisibilityPersistenceSuite) TestVisibilityPagination() {
testNamespaceUUID := uuid.New()
// Create 2 executions
startTime1 := time.Now().UTC()
workflowExecution1 := commonpb.WorkflowExecution{
WorkflowId: "visibility-pagination-test1",
RunId: "fb15e4b5-356f-466d-8c6d-a29223e5c536",
}
startReq1 := &visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution1,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime1,
},
}
err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(startReq1)
s.Nil(err0)
startTime2 := startTime1.Add(time.Second)
workflowExecution2 := commonpb.WorkflowExecution{
WorkflowId: "visibility-pagination-test2",
RunId: "843f6fc7-102a-4c63-a2d4-7c653b01bf52",
}
startReq2 := &visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution2,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime2,
},
}
err1 := s.VisibilityMgr.RecordWorkflowExecutionStarted(startReq2)
s.Nil(err1)
// Get the first one
resp, err2 := s.VisibilityMgr.ListOpenWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime1,
LatestStartTime: startTime2,
})
s.Nil(err2)
s.Equal(1, len(resp.Executions))
s.assertOpenExecutionEquals(startReq2, resp.Executions[0])
// Use token to get the second one
resp, err3 := s.VisibilityMgr.ListOpenWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime1,
LatestStartTime: startTime2,
NextPageToken: resp.NextPageToken,
})
s.Nil(err3)
s.Equal(1, len(resp.Executions))
s.assertOpenExecutionEquals(startReq1, resp.Executions[0])
// It is possible to not return non empty token which is going to return empty result
if len(resp.NextPageToken) != 0 {
// Now should get empty result by using token
resp, err4 := s.VisibilityMgr.ListOpenWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 1,
EarliestStartTime: startTime1,
LatestStartTime: startTime2,
NextPageToken: resp.NextPageToken,
})
s.Nil(err4)
s.Equal(0, len(resp.Executions))
}
}
// TestFilteringByType test
func (s *VisibilityPersistenceSuite) TestFilteringByType() {
testNamespaceUUID := uuid.New()
startTime := time.Now()
// Create 2 executions
workflowExecution1 := commonpb.WorkflowExecution{
WorkflowId: "visibility-filtering-test1",
RunId: "fb15e4b5-356f-466d-8c6d-a29223e5c536",
}
err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution1,
WorkflowTypeName: "visibility-workflow-1",
StartTime: startTime,
},
})
s.Nil(err0)
workflowExecution2 := commonpb.WorkflowExecution{
WorkflowId: "visibility-filtering-test2",
RunId: "843f6fc7-102a-4c63-a2d4-7c653b01bf52",
}
err1 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution2,
WorkflowTypeName: "visibility-workflow-2",
StartTime: startTime,
},
})
s.Nil(err1)
// List open with filtering
resp, err2 := s.VisibilityMgr.ListOpenWorkflowExecutionsByType(&visibility.ListWorkflowExecutionsByTypeRequest{
ListWorkflowExecutionsRequest: visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 2,
EarliestStartTime: startTime,
LatestStartTime: startTime,
},
WorkflowTypeName: "visibility-workflow-1",
})
s.Nil(err2)
s.Equal(1, len(resp.Executions))
s.Equal(workflowExecution1.WorkflowId, resp.Executions[0].Execution.WorkflowId)
// Close both executions
err3 := s.VisibilityMgr.RecordWorkflowExecutionClosed(&visibility.RecordWorkflowExecutionClosedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution1,
WorkflowTypeName: "visibility-workflow-1",
StartTime: startTime,
},
CloseTime: time.Now(),
})
s.Nil(err3)
closeReq := &visibility.RecordWorkflowExecutionClosedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution2,
WorkflowTypeName: "visibility-workflow-2",
StartTime: startTime,
},
CloseTime: time.Now(),
HistoryLength: 3,
}
err4 := s.VisibilityMgr.RecordWorkflowExecutionClosed(closeReq)
s.Nil(err4)
// List closed with filtering
resp, err5 := s.VisibilityMgr.ListClosedWorkflowExecutionsByType(&visibility.ListWorkflowExecutionsByTypeRequest{
ListWorkflowExecutionsRequest: visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 2,
EarliestStartTime: startTime,
LatestStartTime: time.Now(),
},
WorkflowTypeName: "visibility-workflow-2",
})
s.Nil(err5)
s.Equal(1, len(resp.Executions))
s.assertClosedExecutionEquals(closeReq, resp.Executions[0])
}
// TestFilteringByWorkflowID test
func (s *VisibilityPersistenceSuite) TestFilteringByWorkflowID() {
testNamespaceUUID := uuid.New()
startTime := time.Now()
// Create 2 executions
workflowExecution1 := commonpb.WorkflowExecution{
WorkflowId: "visibility-filtering-test1",
RunId: "fb15e4b5-356f-466d-8c6d-a29223e5c536",
}
err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution1,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
})
s.Nil(err0)
workflowExecution2 := commonpb.WorkflowExecution{
WorkflowId: "visibility-filtering-test2",
RunId: "843f6fc7-102a-4c63-a2d4-7c653b01bf52",
}
err1 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution2,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
})
s.Nil(err1)
// List open with filtering
resp, err2 := s.VisibilityMgr.ListOpenWorkflowExecutionsByWorkflowID(&visibility.ListWorkflowExecutionsByWorkflowIDRequest{
ListWorkflowExecutionsRequest: visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 2,
EarliestStartTime: startTime,
LatestStartTime: startTime,
},
WorkflowID: "visibility-filtering-test1",
})
s.Nil(err2)
s.Equal(1, len(resp.Executions))
s.Equal(workflowExecution1.WorkflowId, resp.Executions[0].Execution.WorkflowId)
// Close both executions
err3 := s.VisibilityMgr.RecordWorkflowExecutionClosed(&visibility.RecordWorkflowExecutionClosedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution1,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
CloseTime: time.Now(),
})
s.Nil(err3)
closeReq := &visibility.RecordWorkflowExecutionClosedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution2,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
CloseTime: time.Now(),
HistoryLength: 3,
}
err4 := s.VisibilityMgr.RecordWorkflowExecutionClosed(closeReq)
s.Nil(err4)
// List closed with filtering
resp, err5 := s.VisibilityMgr.ListClosedWorkflowExecutionsByWorkflowID(&visibility.ListWorkflowExecutionsByWorkflowIDRequest{
ListWorkflowExecutionsRequest: visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 2,
EarliestStartTime: startTime,
LatestStartTime: time.Now(),
},
WorkflowID: "visibility-filtering-test2",
})
s.Nil(err5)
s.Equal(1, len(resp.Executions))
s.assertClosedExecutionEquals(closeReq, resp.Executions[0])
}
// TestFilteringByStatus test
func (s *VisibilityPersistenceSuite) TestFilteringByStatus() {
testNamespaceUUID := uuid.New()
startTime := time.Now()
// Create 2 executions
workflowExecution1 := commonpb.WorkflowExecution{
WorkflowId: "visibility-filtering-test1",
RunId: "fb15e4b5-356f-466d-8c6d-a29223e5c536",
}
err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution1,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
})
s.Nil(err0)
workflowExecution2 := commonpb.WorkflowExecution{
WorkflowId: "visibility-filtering-test2",
RunId: "843f6fc7-102a-4c63-a2d4-7c653b01bf52",
}
err1 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution2,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
})
s.Nil(err1)
// Close both executions with different status
err2 := s.VisibilityMgr.RecordWorkflowExecutionClosed(&visibility.RecordWorkflowExecutionClosedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution1,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED,
},
CloseTime: time.Now(),
})
s.Nil(err2)
closeReq := &visibility.RecordWorkflowExecutionClosedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution2,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED,
},
CloseTime: time.Now(),
HistoryLength: 3,
}
err3 := s.VisibilityMgr.RecordWorkflowExecutionClosed(closeReq)
s.Nil(err3)
// List closed with filtering
resp, err4 := s.VisibilityMgr.ListClosedWorkflowExecutionsByStatus(&visibility.ListClosedWorkflowExecutionsByStatusRequest{
ListWorkflowExecutionsRequest: visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
PageSize: 2,
EarliestStartTime: startTime,
LatestStartTime: time.Now(),
},
Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED,
})
s.Nil(err4)
s.Equal(1, len(resp.Executions))
s.assertClosedExecutionEquals(closeReq, resp.Executions[0])
}
// TestDelete test
func (s *VisibilityPersistenceSuite) TestDelete() {
if s.VisibilityMgr.GetName() == "cassandra" {
s.T().Skip("this test is not applicable for cassandra")
}
nRows := 5
testNamespaceUUID := uuid.New()
startTime := time.Now().UTC().Add(time.Second * -5)
for i := 0; i < nRows; i++ {
workflowExecution := commonpb.WorkflowExecution{
WorkflowId: uuid.New(),
RunId: uuid.New(),
}
err0 := s.VisibilityMgr.RecordWorkflowExecutionStarted(&visibility.RecordWorkflowExecutionStartedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
},
})
s.Nil(err0)
closeReq := &visibility.RecordWorkflowExecutionClosedRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: testNamespaceUUID,
Execution: workflowExecution,
WorkflowTypeName: "visibility-workflow",
StartTime: startTime,
Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED,
},
CloseTime: time.Now(),
HistoryLength: 3,
}
err1 := s.VisibilityMgr.RecordWorkflowExecutionClosed(closeReq)
s.Nil(err1)
}
resp, err3 := s.VisibilityMgr.ListClosedWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
EarliestStartTime: startTime,
LatestStartTime: time.Now(),
PageSize: 10,
})
s.Nil(err3)
s.Equal(nRows, len(resp.Executions))
remaining := nRows
for _, row := range resp.Executions {
err4 := s.VisibilityMgr.DeleteWorkflowExecution(&visibility.VisibilityDeleteWorkflowExecutionRequest{
NamespaceID: testNamespaceUUID,
RunID: row.GetExecution().GetRunId(),
})
s.Nil(err4)
remaining--
resp, err5 := s.VisibilityMgr.ListClosedWorkflowExecutions(&visibility.ListWorkflowExecutionsRequest{
NamespaceID: testNamespaceUUID,
EarliestStartTime: startTime,
LatestStartTime: time.Now(),
PageSize: 10,
})
s.Nil(err5)
s.Equal(remaining, len(resp.Executions))
}
}
// TestUpsertWorkflowExecution test
func (s *VisibilityPersistenceSuite) TestUpsertWorkflowExecution() {
tests := []struct {
request *visibility.UpsertWorkflowExecutionRequest
expected error
}{
{
request: &visibility.UpsertWorkflowExecutionRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: "",
Namespace: "",
Execution: commonpb.WorkflowExecution{},
WorkflowTypeName: "",
StartTime: time.Time{},
ExecutionTime: time.Time{},
TaskID: 0,
Memo: nil,
SearchAttributes: &commonpb.SearchAttributes{
IndexedFields: map[string]*commonpb.Payload{
searchattribute.TemporalChangeVersion: payload.EncodeBytes([]byte("dummy")),
},
},
Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING,
},
},
expected: nil,
},
{
request: &visibility.UpsertWorkflowExecutionRequest{
VisibilityRequestBase: &visibility.VisibilityRequestBase{
NamespaceID: "",
Namespace: "",
Execution: commonpb.WorkflowExecution{},
WorkflowTypeName: "",
StartTime: time.Time{},
ExecutionTime: time.Time{},
TaskID: 0,
Memo: nil,
SearchAttributes: nil,
Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING,
},
},
// To avoid blocking the task queue processors on non-ElasticSearch visibility stores
// we simply treat any attempts to perform Upserts as "no-ops"
// Attempts to Scan, Count or List will still fail for non-ES stores.
expected: nil,
},
}
for _, test := range tests {
s.Equal(test.expected, s.VisibilityMgr.UpsertWorkflowExecution(test.request))
}
}
func (s *VisibilityPersistenceSuite) assertClosedExecutionEquals(
req *visibility.RecordWorkflowExecutionClosedRequest, resp *workflowpb.WorkflowExecutionInfo) {
s.Equal(req.Execution.RunId, resp.Execution.RunId)
s.Equal(req.Execution.WorkflowId, resp.Execution.WorkflowId)
s.Equal(req.WorkflowTypeName, resp.GetType().GetName())
s.Equal(p.UnixMilliseconds(req.StartTime), p.UnixMilliseconds(timestamp.TimeValue(resp.GetStartTime())))
s.Equal(p.UnixMilliseconds(req.CloseTime), p.UnixMilliseconds(timestamp.TimeValue(resp.GetCloseTime())))
s.Equal(req.Status, resp.GetStatus())
s.Equal(req.HistoryLength, resp.HistoryLength)
}
func (s *VisibilityPersistenceSuite) assertOpenExecutionEquals(
req *visibility.RecordWorkflowExecutionStartedRequest, resp *workflowpb.WorkflowExecutionInfo) {
s.Equal(req.Execution.GetRunId(), resp.Execution.GetRunId())
s.Equal(req.Execution.WorkflowId, resp.Execution.WorkflowId)
s.Equal(req.WorkflowTypeName, resp.GetType().GetName())
s.Equal(p.UnixMilliseconds(req.StartTime), p.UnixMilliseconds(timestamp.TimeValue(resp.GetStartTime())))
s.Nil(resp.CloseTime)
s.Equal(resp.Status, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING)
s.Zero(resp.HistoryLength)
}
| 1 | 12,464 | ListWorkflowExecutionsRequest <- this can be nil? | temporalio-temporal | go |
@@ -809,10 +809,14 @@ module Beaker
# This wraps the method `stub_hosts_on` and makes the stub specific to
# the forge alias.
#
+ # forge api v1 canonical source is forge.puppetlabs.com
+ # forge api v3 canonical source is forgeapi.puppetlabs.com
+ #
# @param machine [String] the host to perform the stub on
def stub_forge_on(machine)
@forge_ip ||= Resolv.getaddress(forge)
stub_hosts_on(machine, 'forge.puppetlabs.com' => @forge_ip)
+ stub_hosts_on(machine, 'forgeapi.puppetlabs.com' => @forge_ip)
end
# This wraps the method `stub_hosts` and makes the stub specific to | 1 | require 'resolv'
require 'inifile'
require 'timeout'
require 'beaker/dsl/outcomes'
module Beaker
module DSL
# This is the heart of the Puppet Acceptance DSL. Here you find a helper
# to proxy commands to hosts, more commands to move files between hosts
# and execute remote scripts, confine test cases to certain hosts and
# prepare the state of a test case.
#
# To mix this is into a class you need the following:
# * a method *hosts* that yields any hosts implementing
# {Beaker::Host}'s interface to act upon.
# * a method *logger* that yields a logger implementing
# {Beaker::Logger}'s interface.
# * the module {Beaker::DSL::Roles} that provides access to the various hosts implementing
# {Beaker::Host}'s interface to act upon
    # * the module {Beaker::DSL::Wrappers} that provides convenience methods for {Beaker::DSL::Command} creation
#
#
# @api dsl
module Helpers
# @!macro common_opts
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Boolean] :silent (false) Do not produce log output
# @option opts [Array<Fixnum>] :acceptable_exit_codes ([0]) An array
# (or range) of integer exit codes that should be considered
# acceptable. An error will be thrown if the exit code does not
# match one of the values in this list.
# @option opts [Hash{String=>String}] :environment ({}) These will be
# treated as extra environment variables that should be set before
# running the command.
#
# The primary method for executing commands *on* some set of hosts.
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param [String, Command] command The command to execute on *host*.
# @param [Proc] block Additional actions or assertions.
# @!macro common_opts
#
# @example Most basic usage
# on hosts, 'ls /tmp'
#
# @example Allowing additional exit codes to pass
# on agents, 'puppet agent -t', :acceptable_exit_codes => [0,2]
#
# @example Using the returned result for any kind of checking
# if on(host, 'ls -la ~').stdout =~ /\.bin/
# ...do some action...
# end
#
# @example Using TestCase helpers from within a test.
# agents.each do |agent|
# on agent, 'cat /etc/puppet/puppet.conf' do
# assert_match stdout, /server = #{master}/, 'WTF Mate'
# end
# end
#
# @example Using a role (defined in a String) to identify the host
# on "master", "echo hello"
#
# @example Using a role (defined in a Symbol) to identify the host
# on :dashboard, "echo hello"
#
# @return [Result] An object representing the outcome of *command*.
# @raise [FailTest] Raises an exception if *command* obviously fails.
def on(host, command, opts = {}, &block)
unless command.is_a? Command
cmd_opts = opts[:environment] ? { 'ENV' => opts.delete(:environment) } : Hash.new
command = Command.new(command.to_s, [], cmd_opts)
end
if host.is_a? String or host.is_a? Symbol
host = hosts_as(host) #check by role
end
if host.is_a? Array
host.map { |h| on h, command, opts, &block }
else
@result = host.exec(command, opts)
# Also, let additional checking be performed by the caller.
yield self if block_given?
return @result
end
end
# The method for executing commands on the default host
#
# @param [String, Command] command The command to execute on *host*.
# @param [Proc] block Additional actions or assertions.
# @!macro common_opts
#
# @example Most basic usage
# shell 'ls /tmp'
#
# @example Allowing additional exit codes to pass
# shell 'puppet agent -t', :acceptable_exit_codes => [0,2]
#
# @example Using the returned result for any kind of checking
# if shell('ls -la ~').stdout =~ /\.bin/
# ...do some action...
# end
#
# @example Using TestCase helpers from within a test.
# agents.each do |agent|
# shell('cat /etc/puppet/puppet.conf') do |result|
# assert_match result.stdout, /server = #{master}/, 'WTF Mate'
# end
# end
#
# @return [Result] An object representing the outcome of *command*.
# @raise [FailTest] Raises an exception if *command* obviously fails.
def shell(command, opts = {}, &block)
on(default, command, opts, &block)
end
# @deprecated
      # A proxy for the last {Beaker::Result#stdout} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def stdout
return nil if @result.nil?
@result.stdout
end
# @deprecated
      # A proxy for the last {Beaker::Result#stderr} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def stderr
return nil if @result.nil?
@result.stderr
end
# @deprecated
      # A proxy for the last {Beaker::Result#exit_code} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def exit_code
return nil if @result.nil?
@result.exit_code
end
# Move a file from a remote to a local path
# @note If using {Beaker::Host} for the hosts *scp* is not
# required on the system as it uses Ruby's net/scp library. The
# net-scp gem however is required (and specified in the gemspec).
#
# @param [Host, #do_scp_from] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] from_path A remote path to a file.
# @param [String] to_path A local path to copy *from_path* to.
# @!macro common_opts
#
# @return [Result] Returns the result of the SCP operation
def scp_from host, from_path, to_path, opts = {}
if host.is_a? Array
host.each { |h| scp_from h, from_path, to_path, opts }
else
@result = host.do_scp_from(from_path, to_path, opts)
@result.log logger
end
end
# Move a local file to a remote host
# @note If using {Beaker::Host} for the hosts *scp* is not
# required on the system as it uses Ruby's net/scp library. The
      #   net-scp gem however is required (and specified in the gemspec).
#
# @param [Host, #do_scp_to] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_to}.
# @param [String] from_path A local path to a file.
# @param [String] to_path A remote path to copy *from_path* to.
# @!macro common_opts
#
# @return [Result] Returns the result of the SCP operation
def scp_to host, from_path, to_path, opts = {}
if host.is_a? Array
host.each { |h| scp_to h, from_path, to_path, opts }
else
@result = host.do_scp_to(from_path, to_path, opts)
@result.log logger
end
end
# Check to see if a package is installed on a remote host
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to check for.
#
# @return [Boolean] true/false if the package is found
def check_for_package host, package_name
host.check_for_package package_name
end
# Install a package on a host
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to install
#
# @return [Result] An object representing the outcome of *install command*.
def install_package host, package_name
host.install_package package_name
end
# Upgrade a package on a host. The package must already be installed
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to install
#
# @return [Result] An object representing the outcome of *upgrade command*.
def upgrade_package host, package_name
host.upgrade_package package_name
end
# Deploy packaging configurations generated by
# https://github.com/puppetlabs/packaging to a host.
#
# @note To ensure the repo configs are available for deployment,
# you should run `rake pl:jenkins:deb_repo_configs` and
# `rake pl:jenkins:rpm_repo_configs` on your project checkout
#
# @param [Host] host
# @param [String] path The path to the generated repository config
# files. ex: /myproject/pkg/repo_configs
# @param [String] name A human-readable name for the repository
      # @param [String] version The version of the project, as used by the
# packaging tools. This can be determined with
# `rake pl:print_build_params` from the packaging
# repo.
def deploy_package_repo host, path, name, version
host.deploy_package_repo path, name, version
end
# Create a remote file out of a string
# @note This method uses Tempfile in Ruby's STDLIB as well as {#scp_to}.
#
# @param [Host, #do_scp_to] hosts One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] file_path A remote path to place *file_content* at.
# @param [String] file_content The contents of the file to be placed.
# @!macro common_opts
#
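      # @example Place a small (illustrative) file on every host
      #   create_remote_file(hosts, '/tmp/motd', "managed by beaker\n")
      #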
# @return [Result] Returns the result of the underlying SCP operation.
def create_remote_file(hosts, file_path, file_content, opts = {})
Tempfile.open 'beaker' do |tempfile|
File.open(tempfile.path, 'w') {|file| file.puts file_content }
scp_to hosts, tempfile.path, file_path, opts
end
end
# Move a local script to a remote host and execute it
# @note this relies on {#on} and {#scp_to}
#
# @param [Host, #do_scp_to] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] script A local path to find an executable script at.
# @!macro common_opts
# @param [Proc] block Additional tests to run after script has executed
#
# @return [Result] Returns the result of the underlying SCP operation.
def run_script_on(host, script, opts = {}, &block)
        # this is unsafe as it uses File::SEPARATOR, which will be set to that
        # of the coordinator node. This works for us because we use cygwin
# which will properly convert the paths. Otherwise this would not
# work for running tests on a windows machine when the coordinator
# that the harness is running on is *nix. We should use
# {Beaker::Host#temp_path} instead. TODO
remote_path = File.join("", "tmp", File.basename(script))
scp_to host, script, remote_path
on host, remote_path, opts, &block
end
# Move a local script to default host and execute it
# @see #run_script_on
def run_script(script, opts = {}, &block)
run_script_on(default, script, opts, &block)
end
# Limit the hosts a test case is run against
# @note This will modify the {Beaker::TestCase#hosts} member
      #   in place unless an array of hosts is passed into it. It requires
      #   {Beaker::TestCase#logger} to yield an object that responds
# like {Beaker::Logger#warn}, as well as
# {Beaker::DSL::Outcomes#skip_test}, and optionally
# {Beaker::TestCase#hosts}.
#
# @param [Symbol] type The type of confinement to do. Valid parameters
# are *:to* to confine the hosts to only those that
# match *criteria* or *:except* to confine the test
# case to only those hosts that do not match
# criteria.
# @param [Hash{Symbol,String=>String,Regexp,Array<String,Regexp>}]
# criteria Specify the criteria with which a host should be
# considered for inclusion or exclusion. The key is any attribute
# of the host that will be yielded by {Beaker::Host#[]}.
# The value can be any string/regex or array of strings/regexp.
# The values are compared using [Enumerable#any?] so that if one
      #   value of an array matches, the host is considered a match for that
# criteria.
# @param [Array<Host>] host_array This creatively named parameter is
# an optional array of hosts to confine to. If not passed in, this
# method will modify {Beaker::TestCase#hosts} in place.
# @param [Proc] block Addition checks to determine suitability of hosts
# for confinement. Each host that is still valid after checking
# *criteria* is then passed in turn into this block. The block
# should return true if the host matches this additional criteria.
#
# @example Basic usage to confine to debian OSes.
# confine :to, :platform => 'debian'
#
# @example Confining to anything but Windows and Solaris
# confine :except, :platform => ['windows', 'solaris']
#
# @example Using additional block to confine to Solaris global zone.
# confine :to, :platform => 'solaris' do |solaris|
# on( solaris, 'zonename' ) =~ /global/
# end
#
# @return [Array<Host>] Returns an array of hosts that are still valid
# targets for this tests case.
# @raise [SkipTest] Raises skip test if there are no valid hosts for
# this test case after confinement.
def confine(type, criteria, host_array = nil, &block)
provided_hosts = host_array ? true : false
hosts_to_modify = host_array || hosts
criteria.each_pair do |property, value|
case type
when :except
hosts_to_modify = hosts_to_modify.reject do |host|
inspect_host host, property, value
end
if block_given?
hosts_to_modify = hosts_to_modify.reject do |host|
yield host
end
end
when :to
hosts_to_modify = hosts_to_modify.select do |host|
inspect_host host, property, value
end
if block_given?
hosts_to_modify = hosts_to_modify.select do |host|
yield host
end
end
else
raise "Unknown option #{type}"
end
end
if hosts_to_modify.empty?
logger.warn "No suitable hosts with: #{criteria.inspect}"
skip_test 'No suitable hosts found'
end
self.hosts = hosts_to_modify
hosts_to_modify
end
      # Ensures that host restrictions as specified by type, criteria and
      # host_array are confined to activity within the passed block.
      # TestCase#hosts is reset after the block has executed.
#
# @see #confine
def confine_block(type, criteria, host_array = nil, &block)
begin
original_hosts = self.hosts.dup
confine(type, criteria, host_array)
yield
ensure
self.hosts = original_hosts
end
end
# @!visibility private
def inspect_host(host, property, one_or_more_values)
values = Array(one_or_more_values)
return values.any? do |value|
true_false = false
case value
when String
true_false = host[property.to_s].include? value
when Regexp
true_false = host[property.to_s] =~ value
end
true_false
end
end
# Test Puppet running in a certain run mode with specific options.
# This ensures the following steps are performed:
# 1. The pre-test Puppet configuration is backed up
      # 2. A new Puppet configuration file is laid down
# 3. Puppet is started or restarted in the specified run mode
# 4. Ensure Puppet has started correctly
# 5. Further tests are yielded to
# 6. Revert Puppet to the pre-test state
# 7. Testing artifacts are saved in a folder named for the test
#
# @param [Host] host One object that act like Host
#
# @param [Hash{Symbol=>String}] conf_opts Represents puppet settings.
# Sections of the puppet.conf may be
      #                        specified; if no section is specified,
# a puppet.conf file will be written with the
# options put in a section named after [mode]
#
# There is a special setting for command_line
# arguments such as --debug or --logdest, which
# cannot be set in puppet.conf. For example:
#
# :__commandline_args__ => '--logdest /tmp/a.log'
#
# These will only be applied when starting a FOSS
# master, as a pe master is just bounced.
#
# @param [File] testdir The temporary directory which will hold backup
# configuration, and other test artifacts.
#
# @param [Block] block The point of this method, yields so
      #                      tests may be run. After the block is finished
# puppet will revert to a previous state.
#
# @example A simple use case to ensure a master is running
# with_puppet_running_on( master ) do
# ...tests that require a master...
# end
#
      # @example Fully utilizing the possibilities of config options
# with_puppet_running_on( master,
# :main => {:logdest => '/var/blah'},
# :master => {:masterlog => '/elswhere'},
# :agent => {:server => 'localhost'} ) do
#
# ...tests to be ran...
# end
#
# @api dsl
def with_puppet_running_on host, conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
raise(ArgumentError, "with_puppet_running_on's conf_opts must be a Hash. You provided a #{conf_opts.class}: '#{conf_opts}'") if !conf_opts.kind_of?(Hash)
cmdline_args = conf_opts.delete(:__commandline_args__)
begin
backup_file = backup_the_file(host, host['puppetpath'], testdir, 'puppet.conf')
lay_down_new_puppet_conf host, conf_opts, testdir
if host.is_pe?
bounce_service( host, 'pe-httpd' )
else
puppet_master_started = start_puppet_from_source_on!( host, cmdline_args )
end
yield self if block_given?
rescue Exception => early_exception
original_exception = RuntimeError.new("PuppetAcceptance::DSL::Helpers.with_puppet_running_on failed (check backtrace for location) because: #{early_exception}\n#{early_exception.backtrace.join("\n")}\n")
raise(original_exception)
ensure
begin
restore_puppet_conf_from_backup( host, backup_file )
if host.is_pe?
bounce_service( host, 'pe-httpd' )
else
if puppet_master_started
stop_puppet_from_source_on( host )
else
dump_puppet_log(host)
end
end
rescue Exception => teardown_exception
if original_exception
logger.error("Raised during attempt to teardown with_puppet_running_on: #{teardown_exception}\n---\n")
raise original_exception
else
raise teardown_exception
end
end
end
end
# Test Puppet running in a certain run mode with specific options,
# on the default host
# @api dsl
# @see #with_puppet_running_on
      def with_puppet_running conf_opts, testdir = default.tmpdir(File.basename(@path)), &block
with_puppet_running_on(default, conf_opts, testdir, &block)
end
# @!visibility private
def restore_puppet_conf_from_backup( host, backup_file )
puppetpath = host['puppetpath']
puppet_conf = File.join(puppetpath, "puppet.conf")
if backup_file
host.exec( Command.new( "if [ -f '#{backup_file}' ]; then " +
"cat '#{backup_file}' > " +
"'#{puppet_conf}'; " +
"rm -f '#{backup_file}'; " +
"fi" ) )
else
host.exec( Command.new( "rm -f '#{puppet_conf}'" ))
end
end
# Back up the given file in the current_dir to the new_dir
#
# @!visibility private
#
# @param host [Beaker::Host] The target host
# @param current_dir [String] The directory containing the file to back up
# @param new_dir [String] The directory to copy the file to
# @param filename [String] The file to back up. Defaults to 'puppet.conf'
#
# @return [String, nil] The path to the file if the file exists, nil if it
# doesn't exist.
def backup_the_file host, current_dir, new_dir, filename = 'puppet.conf'
old_location = current_dir + '/' + filename
new_location = new_dir + '/' + filename + '.bak'
if host.file_exist? old_location
host.exec( Command.new( "cp #{old_location} #{new_location}" ) )
return new_location
else
logger.warn "Could not backup file '#{old_location}': no such file"
nil
end
end
# @!visibility private
def start_puppet_from_source_on! host, args = ''
host.exec( puppet( 'master', args ) )
logger.debug 'Waiting for the puppet master to start'
unless port_open_within?( host, 8140, 10 )
raise Beaker::DSL::FailTest, 'Puppet master did not start in a timely fashion'
end
logger.debug 'The puppet master has started'
return true
end
# @!visibility private
def stop_puppet_from_source_on( host )
pid = host.exec( Command.new('cat `puppet master --configprint pidfile`') ).stdout.chomp
host.exec( Command.new( "kill #{pid}" ) )
Timeout.timeout(10) do
while host.exec( Command.new( "kill -0 #{pid}"), :acceptable_exit_codes => [0,1] ).exit_code == 0 do
# until kill -0 finds no process and we know that puppet has finished cleaning up
sleep 1
end
end
end
# @!visibility private
def dump_puppet_log(host)
syslogfile = case host['platform']
when /fedora|centos|el/ then '/var/log/messages'
when /ubuntu|debian/ then '/var/log/syslog'
else return
end
logger.notify "\n*************************"
logger.notify "* Dumping master log *"
logger.notify "*************************"
host.exec( Command.new( "tail -n 100 #{syslogfile}" ), :acceptable_exit_codes => [0,1])
logger.notify "*************************\n"
end
# @!visibility private
def lay_down_new_puppet_conf( host, configuration_options, testdir )
new_conf = puppet_conf_for( host, configuration_options )
create_remote_file host, "#{testdir}/puppet.conf", new_conf.to_s
host.exec(
Command.new( "cat #{testdir}/puppet.conf > #{host['puppetpath']}/puppet.conf" ),
:silent => true
)
host.exec( Command.new( "cat #{host['puppetpath']}/puppet.conf" ) )
end
# @!visibility private
def puppet_conf_for host, conf_opts
puppetconf = host.exec( Command.new( "cat #{host['puppetpath']}/puppet.conf" ) ).stdout
new_conf = IniFile.new( puppetconf ).merge( conf_opts )
new_conf
end
# @!visibility private
def bounce_service host, service
# Any reason to not
# host.exec puppet_resource( 'service', service, 'ensure=stopped' )
# host.exec puppet_resource( 'service', service, 'ensure=running' )
host.exec( Command.new( "/etc/init.d/#{service} restart" ) )
end
# Blocks until the port is open on the host specified, returns false
# on failure
def port_open_within?( host, port = 8140, seconds = 120 )
repeat_for( seconds ) do
host.port_open?( port )
end
end
      # Runs 'puppet apply' on a remote host, writing the manifest to a temporary file on that host
#
# @param [Host] host The host that this command should be run on
#
# @param [String] manifest The puppet manifest to apply
#
# @!macro common_opts
# @option opts [Boolean] :parseonly (false) If this key is true, the
# "--parseonly" command line parameter will
# be passed to the 'puppet apply' command.
#
# @option opts [Boolean] :trace (false) If this key exists in the Hash,
# the "--trace" command line parameter will be
# passed to the 'puppet apply' command.
#
# @option opts [Array<Integer>] :acceptable_exit_codes ([0]) The list of exit
# codes that will NOT raise an error when found upon
# command completion. If provided, these values will
# be combined with those used in :catch_failures and
# :expect_failures to create the full list of
# passing exit codes.
#
      # @option opts [Hash] :environment Additional environment variables to be
# passed to the 'puppet apply' command
#
# @option opts [Boolean] :catch_failures (false) By default `puppet
      #                     apply` will exit with 0, which does not count
# as a test failure, even if there were errors or
# changes when applying the manifest. This option
# enables detailed exit codes and causes a test
      #                     failure if `puppet apply` indicates there was
# a failure during its execution.
#
# @option opts [Boolean] :catch_changes (false) This option enables
# detailed exit codes and causes a test failure
      #                     if `puppet apply` indicates that there were
# changes or failures during its execution.
#
# @option opts [Boolean] :expect_failures (false) This option enables
# detailed exit codes and causes a test failure
      #                     if `puppet apply` indicates there were no
      #                     failures during its execution.
#
# @param [Block] block This method will yield to a block of code passed
# by the caller; this can be used for additional
# validation, etc.
#
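      # @example Apply an (illustrative) manifest and fail on any resource errors
      #   apply_manifest_on(agents, 'include ntp', :catch_failures => true)
      #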
def apply_manifest_on(host, manifest, opts = {}, &block)
on_options = {}
on_options[:acceptable_exit_codes] = Array(opts.delete(:acceptable_exit_codes))
args = ["--verbose"]
args << "--parseonly" if opts[:parseonly]
args << "--trace" if opts[:trace]
# From puppet help:
# "... an exit code of '2' means there were changes, an exit code of
# '4' means there were failures during the transaction, and an exit
# code of '6' means there were both changes and failures."
if [opts[:catch_changes],opts[:catch_failures],opts[:expect_failures]].select{|x|x}.length > 1
raise(ArgumentError, "Cannot specify more than one of `catch_failures`, `catch_changes`, or `expect_failures` for a single manifest")
end
if opts[:catch_changes]
args << '--detailed-exitcodes'
# We're after idempotency so allow exit code 0 only.
on_options[:acceptable_exit_codes] |= [0]
elsif opts[:catch_failures]
args << '--detailed-exitcodes'
# We're after only complete success so allow exit codes 0 and 2 only.
on_options[:acceptable_exit_codes] |= [0, 2]
elsif opts[:expect_failures]
args << '--detailed-exitcodes'
# We're after failures specifically so allow exit codes 1, 4, and 6 only.
on_options[:acceptable_exit_codes] |= [1, 4, 6]
else
# Either use the provided acceptable_exit_codes or default to [0]
on_options[:acceptable_exit_codes] |= [0]
end
# Not really thrilled with this implementation, might want to improve it
# later. Basically, there is a magic trick in the constructor of
# PuppetCommand which allows you to pass in a Hash for the last value in
# the *args Array; if you do so, it will be treated specially. So, here
# we check to see if our caller passed us a hash of environment variables
# that they want to set for the puppet command. If so, we set the final
# value of *args to a new hash with just one entry (the value of which
# is our environment variables hash)
if opts.has_key?(:environment)
args << { :environment => opts[:environment]}
end
file_path = "/tmp/apply_manifest.#{rand(1000000000).to_s}.pp"
create_remote_file(host, file_path, manifest + "\n")
args << file_path
on host, puppet( 'apply', *args), on_options, &block
end
      # Runs 'puppet apply' on the default host, writing the manifest to a remote temporary file
# @see #apply_manifest_on
def apply_manifest(manifest, opts = {}, &block)
apply_manifest_on(default, manifest, opts, &block)
end
# @deprecated
def run_agent_on(host, arg='--no-daemonize --verbose --onetime --test',
options={}, &block)
if host.is_a? Array
host.each { |h| run_agent_on h, arg, options, &block }
else
on host, puppet_agent(arg), options, &block
end
end
# FIX: this should be moved into host/platform
      # @!visibility private
def run_cron_on(host, action, user, entry="", &block)
platform = host['platform']
if platform.include?('solaris') || platform.include?('aix') then
case action
when :list then args = '-l'
when :remove then args = '-r'
when :add
on( host,
"echo '#{entry}' > /var/spool/cron/crontabs/#{user}",
&block )
end
else # default for GNU/Linux platforms
case action
when :list then args = '-l -u'
when :remove then args = '-r -u'
when :add
on( host,
"echo '#{entry}' > /tmp/#{user}.cron && " +
"crontab -u #{user} /tmp/#{user}.cron",
&block )
end
end
if args
case action
when :list, :remove then on(host, "crontab #{args} #{user}", &block)
end
end
end
# Using the puppet resource 'host', this method sets up host aliases on the
# given machine for the provided IP mappings.
#
# A teardown step is also added to make sure the host stubbing is
# always removed.
#
# @param machine [String] the host to execute this stub
# @param ip_spec [Hash{String=>String}] a hash containing the host to ip
# mappings
# @example Stub puppetlabs.com on the master to 127.0.0.1
# stub_hosts_on(master, 'puppetlabs.com' => '127.0.0.1')
def stub_hosts_on(machine, ip_spec)
ip_spec.each do |host, ip|
logger.notify("Stubbing host #{host} to IP #{ip} on machine #{machine}")
on( machine,
puppet('resource', 'host', host, 'ensure=present', "ip=#{ip}") )
end
teardown do
ip_spec.each do |host, ip|
logger.notify("Unstubbing host #{host} to IP #{ip} on machine #{machine}")
on( machine,
puppet('resource', 'host', host, 'ensure=absent') )
end
end
end
# Using the puppet resource 'host', this method sets up host aliases on the
# default host for the provided IP mappings.
#
# @example Stub puppetlabs.com on the default host to 127.0.0.1
# stub_hosts('puppetlabs.com' => '127.0.0.1')
# @see #stub_hosts_on
def stub_hosts(ip_spec)
stub_hosts_on(default, ip_spec)
end
# This wraps the method `stub_hosts_on` and makes the stub specific to
# the forge alias.
#
# @param machine [String] the host to perform the stub on
def stub_forge_on(machine)
@forge_ip ||= Resolv.getaddress(forge)
stub_hosts_on(machine, 'forge.puppetlabs.com' => @forge_ip)
end
# This wraps the method `stub_hosts` and makes the stub specific to
# the forge alias.
#
# @see #stub_forge_on
def stub_forge
stub_forge_on(default)
end
def sleep_until_puppetdb_started(host)
curl_with_retries("start puppetdb", host, "http://localhost:8080", 0, 120)
curl_with_retries("start puppetdb (ssl)",
host, "https://#{host.node_name}:8081", [35, 60])
end
def curl_with_retries(desc, host, url, desired_exit_codes, max_retries = 60, retry_interval = 1)
retry_command(desc, host, "curl #{url}", desired_exit_codes, max_retries, retry_interval)
end
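# Repeatedly runs +command+ on +host+ until it exits with one of the
# +desired_exit_codes+, sleeping +retry_interval+ seconds between attempts and
# failing the test ("Unable to <desc>") once +max_retries+ is exceeded.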
def retry_command(desc, host, command, desired_exit_codes = 0, max_retries = 60, retry_interval = 1)
desired_exit_codes = [desired_exit_codes].flatten
result = on host, command, :acceptable_exit_codes => (0...127)
num_retries = 0
until desired_exit_codes.include?(result.exit_code)
sleep retry_interval
result = on host, command, :acceptable_exit_codes => (0...127)
num_retries += 1
if (num_retries > max_retries)
fail("Unable to #{desc}")
end
end
end
#stops the puppet agent running on the host
def stop_agent_on(agent)
vardir = agent.puppet['vardir']
agent_running = true
while agent_running
result = on agent, "[ -e '#{vardir}/state/agent_catalog_run.lock' ]", :acceptable_exit_codes => [0,1]
agent_running = (result.exit_code == 0)
sleep 2 unless agent_running
end
if agent['platform'].include?('solaris')
on(agent, '/usr/sbin/svcadm disable -s svc:/network/pe-puppet:default')
elsif agent['platform'].include?('aix')
on(agent, '/usr/bin/stopsrc -s pe-puppet')
elsif agent['platform'].include?('windows')
on(agent, 'net stop pe-puppet', :acceptable_exit_codes => [0,2])
else
# For the sake of not passing the PE version into this method,
# we just query the system to find out which service we want to
# stop
result = on agent, "[ -e /etc/init.d/pe-puppet-agent ]", :acceptable_exit_codes => [0,1]
service = (result.exit_code == 0) ? 'pe-puppet-agent' : 'pe-puppet'
on(agent, "/etc/init.d/#{service} stop")
end
end
#stops the puppet agent running on the default host
# @see #stop_agent_on
def stop_agent
stop_agent_on(default)
end
#wait for a given host to appear in the dashboard
def wait_for_host_in_dashboard(host)
hostname = host.node_name
retry_command("Wait for #{hostname} to be in the console", dashboard, "! curl --sslv3 -k -I https://#{dashboard}/nodes/#{hostname} | grep '404 Not Found'")
end
# Ensure the host has requested a cert, then sign it
#
# @param [Host] host The host to sign for
#
# @return nil
# @raise [FailTest] if process times out
def sign_certificate_for(host)
if [master, dashboard, database].include? host
on host, puppet( 'agent -t' ), :acceptable_exit_codes => [0,1,2]
on master, puppet( "cert --allow-dns-alt-names sign #{host}" ), :acceptable_exit_codes => [0,24]
else
hostname = Regexp.escape host.node_name
last_sleep = 0
next_sleep = 1
(0..10).each do |i|
fail_test("Failed to sign cert for #{hostname}") if i == 10
on master, puppet("cert --sign --all"), :acceptable_exit_codes => [0,24]
break if on(master, puppet("cert --list --all")).stdout =~ /\+ "?#{hostname}"?/
sleep next_sleep
(last_sleep, next_sleep) = next_sleep, last_sleep+next_sleep
end
end
end
#prompt the master to sign certs then check to confirm the cert for the default host is signed
#@see #sign_certificate_for
def sign_certificate
sign_certificate_for(default)
end
# Get a facter fact from a provided host
#
# @param [Host] host The host to query the fact for
# @param [String] name The name of the fact to query for
# @!macro common_opts
#
# @return String The value of the fact 'name' on the provided host
# @raise [FailTest] Raises an exception if call to facter fails
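# @example Minimal sketch (illustrative, not from the original docs); assumes a
#   `master` host object is in scope
#   os_family = fact_on(master, 'osfamily')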
def fact_on(host, name, opts = {})
result = on host, facter(name, opts)
result.stdout.chomp if result.stdout
end
# Get a facter fact from the default host
# @see #fact_on
def fact(name, opts = {})
fact_on(default, name, opts)
end
end
end
end
| 1 | 5,026 | Do we need to continue to support the old link, or is it dead dead dead? | voxpupuli-beaker | rb |
@@ -108,6 +108,14 @@ namespace Datadog.Trace.Configuration
GlobalTags = GlobalTags.Where(kvp => !string.IsNullOrEmpty(kvp.Key) && !string.IsNullOrEmpty(kvp.Value))
.ToDictionary(kvp => kvp.Key.Trim(), kvp => kvp.Value.Trim());
+ HeaderTags = source?.GetDictionary(ConfigurationKeys.HeaderTags) ??
+ // default value (empty)
+ new ConcurrentDictionary<string, string>();
+
+ // Filter out tags with empty keys or empty values, and trim whitespace
+ HeaderTags = HeaderTags.Where(kvp => !string.IsNullOrEmpty(kvp.Key) && !string.IsNullOrEmpty(kvp.Value))
+ .ToDictionary(kvp => kvp.Key.Trim(), kvp => kvp.Value.Trim());
+
DogStatsdPort = source?.GetInt32(ConfigurationKeys.DogStatsdPort) ??
// default value
8125; | 1 | using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using Datadog.Trace.Sampling;
using Datadog.Trace.Util;
namespace Datadog.Trace.Configuration
{
/// <summary>
/// Contains Tracer settings.
/// </summary>
public class TracerSettings
{
/// <summary>
/// The default host value for <see cref="AgentUri"/>.
/// </summary>
public const string DefaultAgentHost = "localhost";
/// <summary>
/// The default port value for <see cref="AgentUri"/>.
/// </summary>
public const int DefaultAgentPort = 8126;
/// <summary>
/// Initializes a new instance of the <see cref="TracerSettings"/> class with default values.
/// </summary>
public TracerSettings()
: this(null)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="TracerSettings"/> class
/// using the specified <see cref="IConfigurationSource"/> to initialize values.
/// </summary>
/// <param name="source">The <see cref="IConfigurationSource"/> to use when retrieving configuration values.</param>
public TracerSettings(IConfigurationSource source)
{
Environment = source?.GetString(ConfigurationKeys.Environment);
ServiceName = source?.GetString(ConfigurationKeys.ServiceName) ??
// backwards compatibility for names used in the past
source?.GetString("DD_SERVICE_NAME");
ServiceVersion = source?.GetString(ConfigurationKeys.ServiceVersion);
TraceEnabled = source?.GetBool(ConfigurationKeys.TraceEnabled) ??
// default value
true;
var disabledIntegrationNames = source?.GetString(ConfigurationKeys.DisabledIntegrations)
?.Split(new[] { ';' }, StringSplitOptions.RemoveEmptyEntries) ??
Enumerable.Empty<string>();
DisabledIntegrationNames = new HashSet<string>(disabledIntegrationNames, StringComparer.OrdinalIgnoreCase);
var agentHost = source?.GetString(ConfigurationKeys.AgentHost) ??
// backwards compatibility for names used in the past
source?.GetString("DD_TRACE_AGENT_HOSTNAME") ??
source?.GetString("DATADOG_TRACE_AGENT_HOSTNAME") ??
// default value
DefaultAgentHost;
var agentPort = source?.GetInt32(ConfigurationKeys.AgentPort) ??
// backwards compatibility for names used in the past
source?.GetInt32("DATADOG_TRACE_AGENT_PORT") ??
// default value
DefaultAgentPort;
var agentUri = source?.GetString(ConfigurationKeys.AgentUri) ??
// default value
$"http://{agentHost}:{agentPort}";
AgentUri = new Uri(agentUri);
AnalyticsEnabled = source?.GetBool(ConfigurationKeys.GlobalAnalyticsEnabled) ??
// default value
false;
LogsInjectionEnabled = source?.GetBool(ConfigurationKeys.LogsInjectionEnabled) ??
// default value
false;
var maxTracesPerSecond = source?.GetInt32(ConfigurationKeys.MaxTracesSubmittedPerSecond);
if (maxTracesPerSecond != null)
{
// Ensure our flag for the rate limiter is enabled
RuleBasedSampler.OptInTracingWithoutLimits();
}
else
{
maxTracesPerSecond = 100; // default
}
MaxTracesSubmittedPerSecond = maxTracesPerSecond.Value;
Integrations = new IntegrationSettingsCollection(source);
GlobalTags = source?.GetDictionary(ConfigurationKeys.GlobalTags) ??
// backwards compatibility for names used in the past
source?.GetDictionary("DD_TRACE_GLOBAL_TAGS") ??
// default value (empty)
new ConcurrentDictionary<string, string>();
// Filter out tags with empty keys or empty values, and trim whitespace
GlobalTags = GlobalTags.Where(kvp => !string.IsNullOrEmpty(kvp.Key) && !string.IsNullOrEmpty(kvp.Value))
.ToDictionary(kvp => kvp.Key.Trim(), kvp => kvp.Value.Trim());
DogStatsdPort = source?.GetInt32(ConfigurationKeys.DogStatsdPort) ??
// default value
8125;
TracerMetricsEnabled = source?.GetBool(ConfigurationKeys.TracerMetricsEnabled) ??
// default value
false;
CustomSamplingRules = source?.GetString(ConfigurationKeys.CustomSamplingRules);
GlobalSamplingRate = source?.GetDouble(ConfigurationKeys.GlobalSamplingRate);
DiagnosticSourceEnabled = source?.GetBool(ConfigurationKeys.DiagnosticSourceEnabled) ??
// default value
true;
StartupDiagnosticLogEnabled = source?.GetBool(ConfigurationKeys.StartupDiagnosticLogEnabled) ??
// default value
true;
}
/// <summary>
/// Gets or sets the default environment name applied to all spans.
/// </summary>
/// <seealso cref="ConfigurationKeys.Environment"/>
public string Environment { get; set; }
/// <summary>
/// Gets or sets the service name applied to top-level spans and used to build derived service names.
/// </summary>
/// <seealso cref="ConfigurationKeys.ServiceName"/>
public string ServiceName { get; set; }
/// <summary>
/// Gets or sets the version tag applied to all spans.
/// </summary>
/// <seealso cref="ConfigurationKeys.ServiceVersion"/>
public string ServiceVersion { get; set; }
/// <summary>
/// Gets or sets a value indicating whether tracing is enabled.
/// Default is <c>true</c>.
/// </summary>
/// <seealso cref="ConfigurationKeys.TraceEnabled"/>
public bool TraceEnabled { get; set; }
/// <summary>
/// Gets or sets a value indicating whether debug is enabled for a tracer.
/// This property is obsolete. Manage the debug setting through GlobalSettings.
/// </summary>
/// <seealso cref="GlobalSettings.DebugEnabled"/>
[Obsolete]
public bool DebugEnabled { get; set; }
/// <summary>
/// Gets or sets the names of disabled integrations.
/// </summary>
/// <seealso cref="ConfigurationKeys.DisabledIntegrations"/>
public HashSet<string> DisabledIntegrationNames { get; set; }
/// <summary>
/// Gets or sets the Uri where the Tracer can connect to the Agent.
/// Default is <c>"http://localhost:8126"</c>.
/// </summary>
/// <seealso cref="ConfigurationKeys.AgentUri"/>
/// <seealso cref="ConfigurationKeys.AgentHost"/>
/// <seealso cref="ConfigurationKeys.AgentPort"/>
public Uri AgentUri { get; set; }
/// <summary>
/// Gets or sets a value indicating whether default Analytics are enabled.
/// Setting this value is a shortcut for setting
/// <see cref="Configuration.IntegrationSettings.AnalyticsEnabled"/> on some predetermined integrations.
/// See the documentation for more details.
/// </summary>
/// <seealso cref="ConfigurationKeys.GlobalAnalyticsEnabled"/>
public bool AnalyticsEnabled { get; set; }
/// <summary>
/// Gets or sets a value indicating whether correlation identifiers are
/// automatically injected into the logging context.
/// Default is <c>false</c>.
/// </summary>
/// <seealso cref="ConfigurationKeys.LogsInjectionEnabled"/>
public bool LogsInjectionEnabled { get; set; }
/// <summary>
/// Gets or sets a value indicating the maximum number of traces set to AutoKeep (p1) per second.
/// Default is <c>100</c>.
/// </summary>
/// <seealso cref="ConfigurationKeys.MaxTracesSubmittedPerSecond"/>
public int MaxTracesSubmittedPerSecond { get; set; }
/// <summary>
/// Gets or sets a value indicating custom sampling rules.
/// </summary>
/// <seealso cref="ConfigurationKeys.CustomSamplingRules"/>
public string CustomSamplingRules { get; set; }
/// <summary>
/// Gets or sets a value indicating a global rate for sampling.
/// </summary>
/// <seealso cref="ConfigurationKeys.GlobalSamplingRate"/>
public double? GlobalSamplingRate { get; set; }
/// <summary>
/// Gets a collection of <see cref="Integrations"/> keyed by integration name.
/// </summary>
public IntegrationSettingsCollection Integrations { get; }
/// <summary>
/// Gets or sets the global tags, which are applied to all <see cref="Span"/>s.
/// </summary>
public IDictionary<string, string> GlobalTags { get; set; }
/// <summary>
/// Gets or sets the port where the DogStatsd server is listening for connections.
/// Default is <c>8125</c>.
/// </summary>
/// <seealso cref="ConfigurationKeys.DogStatsdPort"/>
public int DogStatsdPort { get; set; }
/// <summary>
/// Gets or sets a value indicating whether internal metrics
/// are enabled and sent to DogStatsd.
/// </summary>
public bool TracerMetricsEnabled { get; set; }
/// <summary>
/// Gets or sets a value indicating whether the use
/// of <see cref="System.Diagnostics.DiagnosticSource"/> is enabled.
/// </summary>
public bool DiagnosticSourceEnabled { get; set; }
/// <summary>
/// Gets or sets a value indicating whether the diagnostic log at startup is enabled
/// </summary>
public bool StartupDiagnosticLogEnabled { get; set; }
/// <summary>
/// Create a <see cref="TracerSettings"/> populated from the default sources
/// returned by <see cref="CreateDefaultConfigurationSource"/>.
/// </summary>
/// <returns>A <see cref="TracerSettings"/> populated from the default sources.</returns>
public static TracerSettings FromDefaultSources()
{
var source = CreateDefaultConfigurationSource();
return new TracerSettings(source);
}
/// <summary>
/// Creates a <see cref="IConfigurationSource"/> by combining environment variables,
/// AppSettings where available, and a local datadog.json file, if present.
/// </summary>
/// <returns>A new <see cref="IConfigurationSource"/> instance.</returns>
public static CompositeConfigurationSource CreateDefaultConfigurationSource()
{
return GlobalSettings.CreateDefaultConfigurationSource();
}
internal bool IsIntegrationEnabled(string name)
{
if (TraceEnabled && !DomainMetadata.ShouldAvoidAppDomain())
{
bool disabled = Integrations[name].Enabled == false || DisabledIntegrationNames.Contains(name);
return !disabled;
}
return false;
}
internal bool IsOptInIntegrationEnabled(string name)
{
if (TraceEnabled && !DomainMetadata.ShouldAvoidAppDomain())
{
var disabled = Integrations[name].Enabled != true || DisabledIntegrationNames.Contains(name);
return !disabled;
}
return false;
}
internal double? GetIntegrationAnalyticsSampleRate(string name, bool enabledWithGlobalSetting)
{
var integrationSettings = Integrations[name];
var analyticsEnabled = integrationSettings.AnalyticsEnabled ?? (enabledWithGlobalSetting && AnalyticsEnabled);
return analyticsEnabled ? integrationSettings.AnalyticsSampleRate : (double?)null;
}
}
}
| 1 | 17,320 | Do we need `ConcurrentDictionary`? `Dictionary` can be safely read from multiple threads. See `GlobalTags`. | DataDog-dd-trace-dotnet | .cs |
@@ -325,10 +325,12 @@ func validateRegisteredAttributes(expectedAttributes, actualAttributes []*ecs.At
}
func (client *APIECSClient) getAdditionalAttributes() []*ecs.Attribute {
+ osFamilyStr := config.GetOSFamilyType()
+ seelog.Infof("Server OSFamily string: %s", osFamilyStr)
return []*ecs.Attribute{
{
Name: aws.String("ecs.os-type"),
- Value: aws.String(config.OSType),
+ Value: aws.String(osFamilyStr),
},
}
} | 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package ecsclient
import (
"errors"
"fmt"
"runtime"
"strings"
"time"
"github.com/aws/amazon-ecs-agent/agent/api"
apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
"github.com/aws/amazon-ecs-agent/agent/async"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/httpclient"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/cihub/seelog"
"github.com/docker/docker/pkg/system"
)
const (
ecsMaxImageDigestLength = 255
ecsMaxReasonLength = 255
ecsMaxRuntimeIDLength = 255
pollEndpointCacheSize = 1
pollEndpointCacheTTL = 20 * time.Minute
roundtripTimeout = 5 * time.Second
azAttrName = "ecs.availability-zone"
)
// APIECSClient implements ECSClient
type APIECSClient struct {
credentialProvider *credentials.Credentials
config *config.Config
standardClient api.ECSSDK
submitStateChangeClient api.ECSSubmitStateSDK
ec2metadata ec2.EC2MetadataClient
pollEndpoinCache async.Cache
}
// NewECSClient creates a new ECSClient interface object
func NewECSClient(
credentialProvider *credentials.Credentials,
config *config.Config,
ec2MetadataClient ec2.EC2MetadataClient) api.ECSClient {
var ecsConfig aws.Config
ecsConfig.Credentials = credentialProvider
ecsConfig.Region = &config.AWSRegion
ecsConfig.HTTPClient = httpclient.New(roundtripTimeout, config.AcceptInsecureCert)
if config.APIEndpoint != "" {
ecsConfig.Endpoint = &config.APIEndpoint
}
standardClient := ecs.New(session.New(&ecsConfig))
submitStateChangeClient := newSubmitStateChangeClient(&ecsConfig)
pollEndpoinCache := async.NewLRUCache(pollEndpointCacheSize, pollEndpointCacheTTL)
return &APIECSClient{
credentialProvider: credentialProvider,
config: config,
standardClient: standardClient,
submitStateChangeClient: submitStateChangeClient,
ec2metadata: ec2MetadataClient,
pollEndpoinCache: pollEndpoinCache,
}
}
// SetSDK overrides the SDK to the given one. This is useful for injecting a
// test implementation
func (client *APIECSClient) SetSDK(sdk api.ECSSDK) {
client.standardClient = sdk
}
// SetSubmitStateChangeSDK overrides the SDK to the given one. This is useful
// for injecting a test implementation
func (client *APIECSClient) SetSubmitStateChangeSDK(sdk api.ECSSubmitStateSDK) {
client.submitStateChangeClient = sdk
}
// CreateCluster creates a cluster from a given name and returns its arn
func (client *APIECSClient) CreateCluster(clusterName string) (string, error) {
resp, err := client.standardClient.CreateCluster(&ecs.CreateClusterInput{ClusterName: &clusterName})
if err != nil {
seelog.Criticalf("Could not create cluster: %v", err)
return "", err
}
seelog.Infof("Created a cluster named: %s", clusterName)
return *resp.Cluster.ClusterName, nil
}
// RegisterContainerInstance calculates the appropriate resources, creates
// the default cluster if necessary, and returns the registered
// ContainerInstanceARN if successful. Supplying a non-empty container
// instance ARN allows a container instance to update its registered
// resources.
func (client *APIECSClient) RegisterContainerInstance(containerInstanceArn string, attributes []*ecs.Attribute,
tags []*ecs.Tag, registrationToken string, platformDevices []*ecs.PlatformDevice,
outpostARN string) (string, string, error) {
clusterRef := client.config.Cluster
// If our clusterRef is empty, we should try to create the default
if clusterRef == "" {
clusterRef = config.DefaultClusterName
defer func() {
// Update the config value to reflect the cluster we end up in
client.config.Cluster = clusterRef
}()
// Attempt to register without checking existence of the cluster so we don't require
// excess permissions in the case where the cluster already exists and is active
containerInstanceArn, availabilityzone, err := client.registerContainerInstance(clusterRef,
containerInstanceArn, attributes, tags, registrationToken, platformDevices, outpostARN)
if err == nil {
return containerInstanceArn, availabilityzone, nil
}
// If trying to register fails because the default cluster doesn't exist, try to create the cluster before calling
// register again
if apierrors.IsClusterNotFoundError(err) {
clusterRef, err = client.CreateCluster(clusterRef)
if err != nil {
return "", "", err
}
}
}
return client.registerContainerInstance(clusterRef, containerInstanceArn, attributes, tags, registrationToken,
platformDevices, outpostARN)
}
func (client *APIECSClient) registerContainerInstance(clusterRef string, containerInstanceArn string,
attributes []*ecs.Attribute, tags []*ecs.Tag, registrationToken string,
platformDevices []*ecs.PlatformDevice, outpostARN string) (string, string, error) {
registerRequest := ecs.RegisterContainerInstanceInput{Cluster: &clusterRef}
var registrationAttributes []*ecs.Attribute
if containerInstanceArn != "" {
// We are re-connecting a previously registered instance, restored from snapshot.
registerRequest.ContainerInstanceArn = &containerInstanceArn
} else {
// This is a new instance, not previously registered.
// Custom attribute registration only happens on initial instance registration.
for _, attribute := range client.getCustomAttributes() {
seelog.Debugf("Added a new custom attribute %v=%v",
aws.StringValue(attribute.Name),
aws.StringValue(attribute.Value),
)
registrationAttributes = append(registrationAttributes, attribute)
}
}
// Standard attributes are included with all registrations.
registrationAttributes = append(registrationAttributes, attributes...)
// Add additional attributes such as the os type
registrationAttributes = append(registrationAttributes, client.getAdditionalAttributes()...)
registrationAttributes = append(registrationAttributes, client.getOutpostAttribute(outpostARN)...)
registerRequest.Attributes = registrationAttributes
if len(tags) > 0 {
registerRequest.Tags = tags
}
registerRequest.PlatformDevices = platformDevices
registerRequest = client.setInstanceIdentity(registerRequest)
resources, err := client.getResources()
if err != nil {
return "", "", err
}
registerRequest.TotalResources = resources
registerRequest.ClientToken = ®istrationToken
resp, err := client.standardClient.RegisterContainerInstance(®isterRequest)
if err != nil {
seelog.Errorf("Unable to register as a container instance with ECS: %v", err)
return "", "", err
}
var availabilityzone = ""
if resp != nil {
for _, attr := range resp.ContainerInstance.Attributes {
if aws.StringValue(attr.Name) == azAttrName {
availabilityzone = aws.StringValue(attr.Value)
break
}
}
}
seelog.Info("Registered container instance with cluster!")
err = validateRegisteredAttributes(registerRequest.Attributes, resp.ContainerInstance.Attributes)
return aws.StringValue(resp.ContainerInstance.ContainerInstanceArn), availabilityzone, err
}
func (client *APIECSClient) setInstanceIdentity(registerRequest ecs.RegisterContainerInstanceInput) ecs.RegisterContainerInstanceInput {
instanceIdentityDoc := ""
instanceIdentitySignature := ""
if client.config.NoIID {
seelog.Info("Fetching Instance ID Document has been disabled")
registerRequest.InstanceIdentityDocument = &instanceIdentityDoc
registerRequest.InstanceIdentityDocumentSignature = &instanceIdentitySignature
return registerRequest
}
iidRetrieved := true
instanceIdentityDoc, err := client.ec2metadata.GetDynamicData(ec2.InstanceIdentityDocumentResource)
if err != nil {
seelog.Errorf("Unable to get instance identity document: %v", err)
iidRetrieved = false
}
registerRequest.InstanceIdentityDocument = &instanceIdentityDoc
if iidRetrieved {
instanceIdentitySignature, err = client.ec2metadata.GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource)
if err != nil {
seelog.Errorf("Unable to get instance identity signature: %v", err)
}
}
registerRequest.InstanceIdentityDocumentSignature = &instanceIdentitySignature
return registerRequest
}
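// attributesToMap converts a slice of ECS attributes into a name-to-value map.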
func attributesToMap(attributes []*ecs.Attribute) map[string]string {
attributeMap := make(map[string]string)
attribs := attributes
for _, attribute := range attribs {
attributeMap[aws.StringValue(attribute.Name)] = aws.StringValue(attribute.Value)
}
return attributeMap
}
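// findMissingAttributes returns the expected attribute keys whose values are absent or
// different in actualAttributes; err is non-nil when any such keys are found.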
func findMissingAttributes(expectedAttributes, actualAttributes map[string]string) ([]string, error) {
missingAttributes := make([]string, 0)
var err error
for key, val := range expectedAttributes {
if actualAttributes[key] != val {
missingAttributes = append(missingAttributes, key)
} else {
seelog.Tracef("Response contained expected value for attribute %v", key)
}
}
if len(missingAttributes) > 0 {
err = apierrors.NewAttributeError("Attribute validation failed")
}
return missingAttributes, err
}
func (client *APIECSClient) getResources() ([]*ecs.Resource, error) {
// Micro-optimization, the pointer to this is used multiple times below
integerStr := "INTEGER"
cpu, mem := getCpuAndMemory()
remainingMem := mem - int64(client.config.ReservedMemory)
seelog.Infof("Remaining mem: %d", remainingMem)
if remainingMem < 0 {
return nil, fmt.Errorf(
"api register-container-instance: reserved memory is higher than available memory on the host, total memory: %d, reserved: %d",
mem, client.config.ReservedMemory)
}
cpuResource := ecs.Resource{
Name: utils.Strptr("CPU"),
Type: &integerStr,
IntegerValue: &cpu,
}
memResource := ecs.Resource{
Name: utils.Strptr("MEMORY"),
Type: &integerStr,
IntegerValue: &remainingMem,
}
portResource := ecs.Resource{
Name: utils.Strptr("PORTS"),
Type: utils.Strptr("STRINGSET"),
StringSetValue: utils.Uint16SliceToStringSlice(client.config.ReservedPorts),
}
udpPortResource := ecs.Resource{
Name: utils.Strptr("PORTS_UDP"),
Type: utils.Strptr("STRINGSET"),
StringSetValue: utils.Uint16SliceToStringSlice(client.config.ReservedPortsUDP),
}
return []*ecs.Resource{&cpuResource, &memResource, &portResource, &udpPortResource}, nil
}
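// getCpuAndMemory returns the host's CPU units (NumCPU * 1024) and total memory in MiB,
// logging an error and reporting zero memory if meminfo cannot be read.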
func getCpuAndMemory() (int64, int64) {
memInfo, err := system.ReadMemInfo()
mem := int64(0)
if err == nil {
mem = memInfo.MemTotal / 1024 / 1024 // MiB
} else {
seelog.Errorf("Unable to get memory info: %v", err)
}
cpu := runtime.NumCPU() * 1024
return int64(cpu), mem
}
func validateRegisteredAttributes(expectedAttributes, actualAttributes []*ecs.Attribute) error {
var err error
expectedAttributesMap := attributesToMap(expectedAttributes)
actualAttributesMap := attributesToMap(actualAttributes)
missingAttributes, err := findMissingAttributes(expectedAttributesMap, actualAttributesMap)
if err != nil {
msg := strings.Join(missingAttributes, ",")
seelog.Errorf("Error registering attributes: %v", msg)
}
return err
}
func (client *APIECSClient) getAdditionalAttributes() []*ecs.Attribute {
return []*ecs.Attribute{
{
Name: aws.String("ecs.os-type"),
Value: aws.String(config.OSType),
},
}
}
func (client *APIECSClient) getOutpostAttribute(outpostARN string) []*ecs.Attribute {
if len(outpostARN) > 0 {
return []*ecs.Attribute{
{
Name: aws.String("ecs.outpost-arn"),
Value: aws.String(outpostARN),
},
}
}
return []*ecs.Attribute{}
}
func (client *APIECSClient) getCustomAttributes() []*ecs.Attribute {
var attributes []*ecs.Attribute
for attribute, value := range client.config.InstanceAttributes {
attributes = append(attributes, &ecs.Attribute{
Name: aws.String(attribute),
Value: aws.String(value),
})
}
return attributes
}
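// SubmitTaskStateChange submits a task state change to the ECS backend. Attachment-only
// changes are submitted on their own; otherwise the task status is reported together with
// its container and managed agent state changes.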
func (client *APIECSClient) SubmitTaskStateChange(change api.TaskStateChange) error {
// Submit attachment state change
if change.Attachment != nil {
var attachments []*ecs.AttachmentStateChange
eniStatus := change.Attachment.Status.String()
attachments = []*ecs.AttachmentStateChange{
{
AttachmentArn: aws.String(change.Attachment.AttachmentARN),
Status: aws.String(eniStatus),
},
}
_, err := client.submitStateChangeClient.SubmitTaskStateChange(&ecs.SubmitTaskStateChangeInput{
Cluster: aws.String(client.config.Cluster),
Task: aws.String(change.TaskARN),
Attachments: attachments,
})
if err != nil {
seelog.Warnf("Could not submit an attachment state change: %v", err)
return err
}
return nil
}
status := change.Status.BackendStatus()
req := ecs.SubmitTaskStateChangeInput{
Cluster: aws.String(client.config.Cluster),
Task: aws.String(change.TaskARN),
Status: aws.String(status),
Reason: aws.String(change.Reason),
PullStartedAt: change.PullStartedAt,
PullStoppedAt: change.PullStoppedAt,
ExecutionStoppedAt: change.ExecutionStoppedAt,
}
for _, managedAgentEvent := range change.ManagedAgents {
if mgspl := client.buildManagedAgentStateChangePayload(managedAgentEvent); mgspl != nil {
req.ManagedAgents = append(req.ManagedAgents, mgspl)
}
}
containerEvents := make([]*ecs.ContainerStateChange, len(change.Containers))
for i, containerEvent := range change.Containers {
containerEvents[i] = client.buildContainerStateChangePayload(containerEvent)
}
req.Containers = containerEvents
_, err := client.submitStateChangeClient.SubmitTaskStateChange(&req)
if err != nil {
seelog.Warnf("Could not submit task state change: [%s]: %v", change.String(), err)
return err
}
return nil
}
func trimString(inputString string, maxLen int) string {
if len(inputString) > maxLen {
trimmed := inputString[0:maxLen]
return trimmed
} else {
return inputString
}
}
func (client *APIECSClient) buildManagedAgentStateChangePayload(change api.ManagedAgentStateChange) *ecs.ManagedAgentStateChange {
if !change.Status.ShouldReportToBackend() {
seelog.Warnf("Not submitting unsupported managed agent state %s for container %s in task %s",
change.Status.String(), change.Container.Name, change.TaskArn)
return nil
}
var trimmedReason *string
if change.Reason != "" {
trimmedReason = aws.String(trimString(change.Reason, ecsMaxReasonLength))
}
return &ecs.ManagedAgentStateChange{
ManagedAgentName: aws.String(change.Name),
ContainerName: aws.String(change.Container.Name),
Status: aws.String(change.Status.String()),
Reason: trimmedReason,
}
}
func (client *APIECSClient) buildContainerStateChangePayload(change api.ContainerStateChange) *ecs.ContainerStateChange {
statechange := &ecs.ContainerStateChange{
ContainerName: aws.String(change.ContainerName),
}
if change.RuntimeID != "" {
trimmedRuntimeID := trimString(change.RuntimeID, ecsMaxRuntimeIDLength)
statechange.RuntimeId = aws.String(trimmedRuntimeID)
}
if change.Reason != "" {
trimmedReason := trimString(change.Reason, ecsMaxReasonLength)
statechange.Reason = aws.String(trimmedReason)
}
if change.ImageDigest != "" {
trimmedImageDigest := trimString(change.ImageDigest, ecsMaxImageDigestLength)
statechange.ImageDigest = aws.String(trimmedImageDigest)
}
status := change.Status
if status != apicontainerstatus.ContainerStopped && status != apicontainerstatus.ContainerRunning {
seelog.Warnf("Not submitting unsupported upstream container state %s for container %s in task %s",
status.String(), change.ContainerName, change.TaskArn)
return nil
}
stat := change.Status.String()
if stat == "DEAD" {
stat = apicontainerstatus.ContainerStopped.String()
}
statechange.Status = aws.String(stat)
if change.ExitCode != nil {
exitCode := int64(aws.IntValue(change.ExitCode))
statechange.ExitCode = aws.Int64(exitCode)
}
networkBindings := make([]*ecs.NetworkBinding, len(change.PortBindings))
for i, binding := range change.PortBindings {
hostPort := int64(binding.HostPort)
containerPort := int64(binding.ContainerPort)
bindIP := binding.BindIP
protocol := binding.Protocol.String()
networkBindings[i] = &ecs.NetworkBinding{
BindIP: aws.String(bindIP),
ContainerPort: aws.Int64(containerPort),
HostPort: aws.Int64(hostPort),
Protocol: aws.String(protocol),
}
}
statechange.NetworkBindings = networkBindings
return statechange
}
func (client *APIECSClient) SubmitContainerStateChange(change api.ContainerStateChange) error {
pl := client.buildContainerStateChangePayload(change)
if pl == nil {
return nil
}
_, err := client.submitStateChangeClient.SubmitContainerStateChange(&ecs.SubmitContainerStateChangeInput{
Cluster: aws.String(client.config.Cluster),
ContainerName: aws.String(change.ContainerName),
ExitCode: pl.ExitCode,
ManagedAgents: pl.ManagedAgents,
NetworkBindings: pl.NetworkBindings,
Reason: pl.Reason,
RuntimeId: pl.RuntimeId,
Status: pl.Status,
Task: aws.String(change.TaskArn),
})
if err != nil {
seelog.Warnf("Could not submit container state change: [%s]: %v", change.String(), err)
return err
}
return nil
}
func (client *APIECSClient) SubmitAttachmentStateChange(change api.AttachmentStateChange) error {
attachmentStatus := change.Attachment.Status.String()
req := ecs.SubmitAttachmentStateChangesInput{
Cluster: &client.config.Cluster,
Attachments: []*ecs.AttachmentStateChange{
{
AttachmentArn: aws.String(change.Attachment.AttachmentARN),
Status: aws.String(attachmentStatus),
},
},
}
_, err := client.submitStateChangeClient.SubmitAttachmentStateChanges(&req)
if err != nil {
seelog.Warnf("Could not submit attachment state change [%s]: %v", change.String(), err)
return err
}
return nil
}
func (client *APIECSClient) DiscoverPollEndpoint(containerInstanceArn string) (string, error) {
resp, err := client.discoverPollEndpoint(containerInstanceArn)
if err != nil {
return "", err
}
return aws.StringValue(resp.Endpoint), nil
}
func (client *APIECSClient) DiscoverTelemetryEndpoint(containerInstanceArn string) (string, error) {
resp, err := client.discoverPollEndpoint(containerInstanceArn)
if err != nil {
return "", err
}
if resp.TelemetryEndpoint == nil {
return "", errors.New("No telemetry endpoint returned; nil")
}
return aws.StringValue(resp.TelemetryEndpoint), nil
}
func (client *APIECSClient) discoverPollEndpoint(containerInstanceArn string) (*ecs.DiscoverPollEndpointOutput, error) {
// Try getting an entry from the cache
cachedEndpoint, found := client.pollEndpoinCache.Get(containerInstanceArn)
if found {
// Cache hit. Return the output.
if output, ok := cachedEndpoint.(*ecs.DiscoverPollEndpointOutput); ok {
return output, nil
}
}
// Cache miss, invoke the ECS DiscoverPollEndpoint API.
seelog.Debugf("Invoking DiscoverPollEndpoint for '%s'", containerInstanceArn)
output, err := client.standardClient.DiscoverPollEndpoint(&ecs.DiscoverPollEndpointInput{
ContainerInstance: &containerInstanceArn,
Cluster: &client.config.Cluster,
})
if err != nil {
return nil, err
}
// Cache the response from ECS.
client.pollEndpoinCache.Set(containerInstanceArn, output)
return output, nil
}
func (client *APIECSClient) GetResourceTags(resourceArn string) ([]*ecs.Tag, error) {
output, err := client.standardClient.ListTagsForResource(&ecs.ListTagsForResourceInput{
ResourceArn: &resourceArn,
})
if err != nil {
return nil, err
}
return output.Tags, nil
}
func (client *APIECSClient) UpdateContainerInstancesState(instanceARN string, status string) error {
seelog.Debugf("Invoking UpdateContainerInstancesState, status='%s' instanceARN='%s'", status, instanceARN)
_, err := client.standardClient.UpdateContainerInstancesState(&ecs.UpdateContainerInstancesStateInput{
ContainerInstances: []*string{aws.String(instanceARN)},
Status: aws.String(status),
Cluster: &client.config.Cluster,
})
return err
}
| 1 | 25,841 | this seems like more of a Debugf statement | aws-amazon-ecs-agent | go |
@@ -972,10 +972,11 @@ void CoreChecks::RecordBarrierArrayValidationInfo(const char *func_name, CMD_BUF
bool mode_concurrent = handle_state ? handle_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT : false;
if (!mode_concurrent) {
const auto typed_handle = BarrierTypedHandle(barrier);
- cb_state->eventUpdates.emplace_back(
- [this, func_name, cb_state, typed_handle, src_queue_family, dst_queue_family](VkQueue queue) {
- return ValidateConcurrentBarrierAtSubmit(queue, func_name, cb_state, typed_handle, src_queue_family,
- dst_queue_family);
+ cb_state->queue_submit_functions.emplace_back(
+ [func_name, cb_state, typed_handle, src_queue_family, dst_queue_family](
+ const ValidationStateTracker *device_data, const QUEUE_STATE *queue_state) {
+ return ValidateConcurrentBarrierAtSubmit(device_data, queue_state, func_name, cb_state, typed_handle,
+ src_queue_family, dst_queue_family);
});
}
} | 1 | /* Copyright (c) 2015-2019 The Khronos Group Inc.
* Copyright (c) 2015-2019 Valve Corporation
* Copyright (c) 2015-2019 LunarG, Inc.
* Copyright (C) 2015-2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <[email protected]>
* Author: Dave Houlton <[email protected]>
* Shannon McPherson <[email protected]>
*/
// Allow use of STL min and max functions in Windows
#define NOMINMAX
#include <cmath>
#include <set>
#include <sstream>
#include <string>
#include "vk_enum_string_helper.h"
#include "vk_format_utils.h"
#include "vk_layer_data.h"
#include "vk_layer_utils.h"
#include "vk_layer_logging.h"
#include "vk_typemap_helper.h"
#include "chassis.h"
#include "core_validation.h"
#include "shader_validation.h"
#include "descriptor_sets.h"
#include "buffer_validation.h"
// Transfer VkImageSubresourceLayers into VkImageSubresourceRange struct
static VkImageSubresourceRange RangeFromLayers(const VkImageSubresourceLayers &subresource_layers) {
VkImageSubresourceRange subresource_range;
subresource_range.aspectMask = subresource_layers.aspectMask;
subresource_range.baseArrayLayer = subresource_layers.baseArrayLayer;
subresource_range.layerCount = subresource_layers.layerCount;
subresource_range.baseMipLevel = subresource_layers.mipLevel;
subresource_range.levelCount = 1;
return subresource_range;
}
IMAGE_STATE::IMAGE_STATE(VkImage img, const VkImageCreateInfo *pCreateInfo)
: image(img),
createInfo(*pCreateInfo),
valid(false),
acquired(false),
shared_presentable(false),
layout_locked(false),
get_sparse_reqs_called(false),
sparse_metadata_required(false),
sparse_metadata_bound(false),
imported_ahb(false),
has_ahb_format(false),
ahb_format(0),
full_range{},
create_from_swapchain(VK_NULL_HANDLE),
bind_swapchain(VK_NULL_HANDLE),
bind_swapchain_imageIndex(0),
sparse_requirements{} {
if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
uint32_t *pQueueFamilyIndices = new uint32_t[createInfo.queueFamilyIndexCount];
for (uint32_t i = 0; i < createInfo.queueFamilyIndexCount; i++) {
pQueueFamilyIndices[i] = pCreateInfo->pQueueFamilyIndices[i];
}
createInfo.pQueueFamilyIndices = pQueueFamilyIndices;
}
if (createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
sparse = true;
}
const auto format = createInfo.format;
VkImageSubresourceRange init_range{0, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS};
if (FormatIsColor(format) || FormatIsMultiplane(format)) {
init_range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Normalization will expand this for multiplane
} else {
init_range.aspectMask =
(FormatHasDepth(format) ? VK_IMAGE_ASPECT_DEPTH_BIT : 0) | (FormatHasStencil(format) ? VK_IMAGE_ASPECT_STENCIL_BIT : 0);
}
full_range = NormalizeSubresourceRange(*this, init_range);
#ifdef VK_USE_PLATFORM_ANDROID_KHR
auto external_format = lvl_find_in_chain<VkExternalFormatANDROID>(createInfo.pNext);
if (external_format) {
external_format_android = external_format->externalFormat;
} else {
// If externalFormat is zero, the effect is as if the VkExternalFormatANDROID structure was not present.
external_format_android = 0;
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
}
bool IMAGE_STATE::IsCreateInfoEqual(const VkImageCreateInfo &other_createInfo) const {
bool is_equal = (createInfo.sType == other_createInfo.sType) && (createInfo.flags == other_createInfo.flags);
is_equal = is_equal && IsImageTypeEqual(other_createInfo) && IsFormatEqual(other_createInfo);
is_equal = is_equal && IsMipLevelsEqual(other_createInfo) && IsArrayLayersEqual(other_createInfo);
is_equal = is_equal && IsUsageEqual(other_createInfo) && IsInitialLayoutEqual(other_createInfo);
is_equal = is_equal && IsExtentEqual(other_createInfo) && IsTilingEqual(other_createInfo);
is_equal = is_equal && IsSamplesEqual(other_createInfo) && IsSharingModeEqual(other_createInfo);
return is_equal && IsQueueFamilyIndicesEqual(other_createInfo);
}
// Check image compatibility rules for VK_NV_dedicated_allocation_image_aliasing
bool IMAGE_STATE::IsCreateInfoDedicatedAllocationImageAliasingCompatible(const VkImageCreateInfo &other_createInfo) const {
bool is_compatible = (createInfo.sType == other_createInfo.sType) && (createInfo.flags == other_createInfo.flags);
is_compatible = is_compatible && IsImageTypeEqual(other_createInfo) && IsFormatEqual(other_createInfo);
is_compatible = is_compatible && IsMipLevelsEqual(other_createInfo);
is_compatible = is_compatible && IsUsageEqual(other_createInfo) && IsInitialLayoutEqual(other_createInfo);
is_compatible = is_compatible && IsSamplesEqual(other_createInfo) && IsSharingModeEqual(other_createInfo);
is_compatible = is_compatible && IsQueueFamilyIndicesEqual(other_createInfo) && IsTilingEqual(other_createInfo);
is_compatible = is_compatible && createInfo.extent.width <= other_createInfo.extent.width &&
createInfo.extent.height <= other_createInfo.extent.height &&
createInfo.extent.depth <= other_createInfo.extent.depth &&
createInfo.arrayLayers <= other_createInfo.arrayLayers;
return is_compatible;
}
bool IMAGE_STATE::IsCompatibleAliasing(IMAGE_STATE *other_image_state) {
if (!(createInfo.flags & other_image_state->createInfo.flags & VK_IMAGE_CREATE_ALIAS_BIT)) return false;
if ((create_from_swapchain == VK_NULL_HANDLE) && (binding.mem == other_image_state->binding.mem) &&
(binding.mem != VK_NULL_HANDLE) && (binding.offset == other_image_state->binding.offset) &&
IsCreateInfoEqual(other_image_state->createInfo)) {
return true;
}
if ((bind_swapchain == other_image_state->bind_swapchain) && (bind_swapchain != VK_NULL_HANDLE)) {
return true;
}
return false;
}
IMAGE_VIEW_STATE::IMAGE_VIEW_STATE(const IMAGE_STATE *image_state, VkImageView iv, const VkImageViewCreateInfo *ci)
: image_view(iv), create_info(*ci), normalized_subresource_range(ci->subresourceRange), samplerConversion(VK_NULL_HANDLE) {
auto *conversionInfo = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(create_info.pNext);
if (conversionInfo) samplerConversion = conversionInfo->conversion;
if (image_state) {
// A light normalization of the createInfo range
auto &sub_res_range = create_info.subresourceRange;
sub_res_range.levelCount = ResolveRemainingLevels(&sub_res_range, image_state->createInfo.mipLevels);
sub_res_range.layerCount = ResolveRemainingLayers(&sub_res_range, image_state->createInfo.arrayLayers);
// Cache a full normalization (for "full image/whole image" comparisons)
normalized_subresource_range = NormalizeSubresourceRange(*image_state, ci->subresourceRange);
samples = image_state->createInfo.samples;
descriptor_format_bits = DescriptorRequirementsBitsFromFormat(create_info.format);
}
}
uint32_t FullMipChainLevels(uint32_t height, uint32_t width, uint32_t depth) {
// uint cast applies floor()
return 1u + (uint32_t)log2(std::max({height, width, depth}));
}
uint32_t FullMipChainLevels(VkExtent3D extent) { return FullMipChainLevels(extent.height, extent.width, extent.depth); }
uint32_t FullMipChainLevels(VkExtent2D extent) { return FullMipChainLevels(extent.height, extent.width); }
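// Worked example for FullMipChainLevels (illustrative, not from the original source):
// a 1024x768 extent gives 1 + (uint32_t)log2(1024) = 11 mip levels (1024, 512, ..., 1).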
VkImageSubresourceRange NormalizeSubresourceRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &range) {
const VkImageCreateInfo &image_create_info = image_state.createInfo;
VkImageSubresourceRange norm = range;
norm.levelCount = ResolveRemainingLevels(&range, image_create_info.mipLevels);
// Special case for 3D images with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR flag bit, where <extent.depth> and
// <arrayLayers> can potentially alias.
uint32_t layer_limit = (0 != (image_create_info.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR))
? image_create_info.extent.depth
: image_create_info.arrayLayers;
norm.layerCount = ResolveRemainingLayers(&range, layer_limit);
// For multiplanar formats, IMAGE_ASPECT_COLOR is equivalent to adding the aspect of the individual planes
VkImageAspectFlags &aspect_mask = norm.aspectMask;
if (FormatIsMultiplane(image_create_info.format)) {
if (aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
aspect_mask &= ~VK_IMAGE_ASPECT_COLOR_BIT;
aspect_mask |= (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT);
if (FormatPlaneCount(image_create_info.format) > 2) {
aspect_mask |= VK_IMAGE_ASPECT_PLANE_2_BIT;
}
}
}
return norm;
}
template <class OBJECT, class LAYOUT>
void CoreChecks::SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
ImageSubresourcePair imgpair = {image, true, range};
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
}
template <class OBJECT, class LAYOUT>
void CoreChecks::SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
if (imgpair.subresource.aspectMask & aspectMask) {
imgpair.subresource.aspectMask = aspectMask;
SetLayout(pObject, imgpair, layout);
}
}
// Set the layout in supplied map
void CoreChecks::SetLayout(ImageSubresPairLayoutMap &imageLayoutMap, ImageSubresourcePair imgpair, VkImageLayout layout) {
auto it = imageLayoutMap.find(imgpair);
if (it != imageLayoutMap.end()) {
it->second.layout = layout; // Update
} else {
imageLayoutMap[imgpair].layout = layout; // Insert
}
}
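// Looks up the layout recorded for the requested aspect of the subresource pair, logging an
// error if it conflicts with a layout already resolved for another aspect of the same query.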
bool CoreChecks::FindLayoutVerifyLayout(ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
if (!(imgpair.subresource.aspectMask & aspectMask)) {
return false;
}
VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
imgpair.subresource.aspectMask = aspectMask;
auto imgsubIt = imageLayoutMap.find(imgpair);
if (imgsubIt == imageLayoutMap.end()) {
return false;
}
if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(imgpair.image),
kVUID_Core_DrawState_InvalidLayout,
"Cannot query for %s layout when combined aspect mask %d has multiple layout types: %s and %s",
report_data->FormatHandle(imgpair.image).c_str(), oldAspectMask, string_VkImageLayout(layout),
string_VkImageLayout(imgsubIt->second.layout));
}
layout = imgsubIt->second.layout;
return true;
}
// Find layout(s) on the global level
bool CoreChecks::FindGlobalLayout(ImageSubresourcePair imgpair, VkImageLayout &layout) {
layout = VK_IMAGE_LAYOUT_MAX_ENUM;
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
FindLayoutVerifyLayout(imgpair, layout, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
imgpair = {imgpair.image, false, VkImageSubresource()};
auto imgsubIt = imageLayoutMap.find(imgpair);
if (imgsubIt == imageLayoutMap.end()) return false;
layout = imgsubIt->second.layout;
}
return true;
}
bool CoreChecks::FindLayouts(VkImage image, std::vector<VkImageLayout> &layouts) {
auto sub_data = imageSubresourceMap.find(image);
if (sub_data == imageSubresourceMap.end()) return false;
auto image_state = GetImageState(image);
if (!image_state) return false;
bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. For now, just ignore potential errors in this case.
if (sub_data->second.size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) {
ignoreGlobal = true;
}
for (auto imgsubpair : sub_data->second) {
if (ignoreGlobal && !imgsubpair.hasSubresource) continue;
auto img_data = imageLayoutMap.find(imgsubpair);
if (img_data != imageLayoutMap.end()) {
layouts.push_back(img_data->second.layout);
}
}
return true;
}
bool CoreChecks::FindLayout(const ImageSubresPairLayoutMap &imageLayoutMap, ImageSubresourcePair imgpair, VkImageLayout &layout,
const VkImageAspectFlags aspectMask) {
if (!(imgpair.subresource.aspectMask & aspectMask)) {
return false;
}
imgpair.subresource.aspectMask = aspectMask;
auto imgsubIt = imageLayoutMap.find(imgpair);
if (imgsubIt == imageLayoutMap.end()) {
return false;
}
layout = imgsubIt->second.layout;
return true;
}
// find layout in supplied map
bool CoreChecks::FindLayout(const ImageSubresPairLayoutMap &imageLayoutMap, ImageSubresourcePair imgpair,
VkImageLayout &layout) const {
layout = VK_IMAGE_LAYOUT_MAX_ENUM;
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
// Image+subresource not found, look for image handle w/o subresource
if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
imgpair = {imgpair.image, false, VkImageSubresource()};
auto imgsubIt = imageLayoutMap.find(imgpair);
if (imgsubIt == imageLayoutMap.end()) return false;
layout = imgsubIt->second.layout;
}
return true;
}
// Set the layout on the global level
void CoreChecks::SetGlobalLayout(ImageSubresourcePair imgpair, const VkImageLayout &layout) {
VkImage &image = imgpair.image;
auto data = imageLayoutMap.find(imgpair);
if (data != imageLayoutMap.end()) {
data->second.layout = layout; // Update
} else {
imageLayoutMap[imgpair].layout = layout; // Insert
}
auto &image_subresources = imageSubresourceMap[image];
auto subresource = std::find(image_subresources.begin(), image_subresources.end(), imgpair);
if (subresource == image_subresources.end()) {
image_subresources.push_back(imgpair);
}
}
// Set image layout for given VkImageSubresourceRange struct
void CoreChecks::SetImageLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceRange &image_subresource_range, VkImageLayout layout,
VkImageLayout expected_layout) {
auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state);
assert(subresource_map); // the non-const getter must return a valid pointer
if (subresource_map->SetSubresourceRangeLayout(*cb_node, image_subresource_range, layout, expected_layout)) {
cb_node->image_layout_change_count++; // Change the version of this data to force revalidation
}
}
// Set the initial image layout for all slices of an image view
void CoreChecks::SetImageViewInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &view_state, VkImageLayout layout) {
if (disabled.image_layout_validation) {
return;
}
IMAGE_STATE *image_state = GetImageState(view_state.create_info.image);
if (image_state) {
auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, *image_state);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, view_state.normalized_subresource_range, layout, &view_state);
}
}
// Set the initial image layout for a passed non-normalized subresource range
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceRange &range, VkImageLayout layout) {
auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state);
assert(subresource_map);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, NormalizeSubresourceRange(image_state, range), layout);
}
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, VkImage image, const VkImageSubresourceRange &range,
VkImageLayout layout) {
const IMAGE_STATE *image_state = GetImageState(image);
if (!image_state) return;
SetImageInitialLayout(cb_node, *image_state, range, layout);
};
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceLayers &layers, VkImageLayout layout) {
SetImageInitialLayout(cb_node, image_state, RangeFromLayers(layers), layout);
}
// Set image layout for all slices of an image view
void CoreChecks::SetImageViewLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &view_state, VkImageLayout layout) {
IMAGE_STATE *image_state = GetImageState(view_state.create_info.image);
if (!image_state) return; // TODO: track/report stale image references
VkImageSubresourceRange sub_range = view_state.normalized_subresource_range;
    // When changing the layout of a 3D image subresource via a 2D or 2D_ARRAY image view, all depth slices of
// the subresource mip level(s) are transitioned, ignoring any layers restriction in the subresource info.
if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) && (view_state.create_info.viewType != VK_IMAGE_VIEW_TYPE_3D)) {
sub_range.baseArrayLayer = 0;
sub_range.layerCount = image_state->createInfo.extent.depth;
}
SetImageLayout(cb_node, *image_state, sub_range, layout);
}
bool CoreChecks::ValidateRenderPassLayoutAgainstFramebufferImageUsage(RenderPassCreateVersion rp_version, VkImageLayout layout,
VkImage image, VkImageView image_view,
VkFramebuffer framebuffer, VkRenderPass renderpass,
uint32_t attachment_index, const char *variable_name) const {
bool skip = false;
auto image_state = GetImageState(image);
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
if (!image_state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"Render Pass begin with %s uses %s where pAttachments[%" PRIu32 "] = %s, which refers to an invalid image",
report_data->FormatHandle(renderpass).c_str(), report_data->FormatHandle(framebuffer).c_str(),
attachment_index, report_data->FormatHandle(image_view).c_str());
return skip;
}
auto image_usage = image_state->createInfo.usage;
// Check for layouts that mismatch image usages in the framebuffer
if (layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03094" : "VUID-vkCmdBeginRenderPass-initialLayout-00895";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT",
attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout),
report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
!(image_usage & (VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT))) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03097" : "VUID-vkCmdBeginRenderPass-initialLayout-00897";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT or VK_IMAGE_USAGE_SAMPLED_BIT",
attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout),
report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03098" : "VUID-vkCmdBeginRenderPass-initialLayout-00898";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_TRANSFER_SRC_BIT",
attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout),
report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03099" : "VUID-vkCmdBeginRenderPass-initialLayout-00899";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_TRANSFER_DST_BIT",
attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout),
report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str());
}
if (device_extensions.vk_khr_maintenance2) {
if ((layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
!(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03096" : "VUID-vkCmdBeginRenderPass-initialLayout-01758";
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name, string_VkImageLayout(layout),
report_data->FormatHandle(framebuffer).c_str(), report_data->FormatHandle(image_view).c_str());
}
} else {
// The create render pass 2 extension requires maintenance 2 (the previous branch), so no vuid switch needed here.
if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
!(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), "VUID-vkCmdBeginRenderPass-initialLayout-00896",
"Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
}
return skip;
}
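// Validate a render pass's declared attachment layouts (initial, final, and per-subpass reference layouts) against the
// framebuffer's attachments and against any layouts already recorded in the command buffer.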
bool CoreChecks::VerifyFramebufferAndRenderPassLayouts(RenderPassCreateVersion rp_version, const CMD_BUFFER_STATE *pCB,
const VkRenderPassBeginInfo *pRenderPassBegin,
const FRAMEBUFFER_STATE *framebuffer_state) const {
bool skip = false;
auto const pRenderPassInfo = GetRenderPassState(pRenderPassBegin->renderPass)->createInfo.ptr();
auto const &framebufferInfo = framebuffer_state->createInfo;
const VkImageView *attachments = framebufferInfo.pAttachments;
auto render_pass = GetRenderPassState(pRenderPassBegin->renderPass)->renderPass;
auto framebuffer = framebuffer_state->framebuffer;
if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidRenderpass,
"You cannot start a render pass using a framebuffer with a different number of attachments.");
}
const auto *attachmentInfo = lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(pRenderPassBegin->pNext);
if (((framebufferInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) != 0) && attachmentInfo != nullptr) {
attachments = attachmentInfo->pAttachments;
}
if (attachments != nullptr) {
const auto *const_pCB = static_cast<const CMD_BUFFER_STATE *>(pCB);
for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
auto image_view = attachments[i];
auto view_state = GetImageViewState(image_view);
if (!view_state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"vkCmdBeginRenderPass(): %s pAttachments[%" PRIu32 "] = %s is not a valid VkImageView handle",
report_data->FormatHandle(framebuffer_state->framebuffer).c_str(), i,
report_data->FormatHandle(image_view).c_str());
continue;
}
const VkImage image = view_state->create_info.image;
const IMAGE_STATE *image_state = GetImageState(image);
if (!image_state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"vkCmdBeginRenderPass(): %s pAttachments[%" PRIu32 "] = %s references non-extant %s.",
report_data->FormatHandle(framebuffer_state->framebuffer).c_str(), i,
report_data->FormatHandle(image_view).c_str(), report_data->FormatHandle(image).c_str());
continue;
}
auto attachment_initial_layout = pRenderPassInfo->pAttachments[i].initialLayout;
auto final_layout = pRenderPassInfo->pAttachments[i].finalLayout;
            // Use the const overload of GetImageSubresourceLayoutMap (via const_pCB) so the lookup does not create
            // layout map entries that don't already exist.
const ImageSubresourceLayoutMap *subresource_map =
(attachment_initial_layout != VK_IMAGE_LAYOUT_UNDEFINED) ? GetImageSubresourceLayoutMap(const_pCB, image) : nullptr;
if (subresource_map) { // If no layout information for image yet, will be checked at QueueSubmit time
LayoutUseCheckAndMessage layout_check(subresource_map);
bool subres_skip = false;
auto subresource_cb = [this, i, attachment_initial_layout, &layout_check, &subres_skip](
const VkImageSubresource &subres, VkImageLayout layout, VkImageLayout initial_layout) {
if (!layout_check.Check(subres, attachment_initial_layout, layout, initial_layout)) {
subres_skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"You cannot start a render pass using attachment %u where the render pass initial layout is %s "
"and the %s layout of the attachment is %s. The layouts must match, or the render "
"pass initial layout for the attachment must be VK_IMAGE_LAYOUT_UNDEFINED",
i, string_VkImageLayout(attachment_initial_layout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
return !subres_skip; // quit checking subresources once we fail once
};
subresource_map->ForRange(view_state->normalized_subresource_range, subresource_cb);
skip |= subres_skip;
}
            skip |= ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_initial_layout, image, image_view,
                                                                         framebuffer, render_pass, i, "initial layout");
            skip |= ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, final_layout, image, image_view, framebuffer,
                                                                         render_pass, i, "final layout");
}
for (uint32_t j = 0; j < pRenderPassInfo->subpassCount; ++j) {
auto &subpass = pRenderPassInfo->pSubpasses[j];
for (uint32_t k = 0; k < pRenderPassInfo->pSubpasses[j].inputAttachmentCount; ++k) {
auto &attachment_ref = subpass.pInputAttachments[k];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
                        skip |= ValidateRenderPassLayoutAgainstFramebufferImageUsage(
                            rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass,
                            attachment_ref.attachment, "input attachment layout");
}
}
}
for (uint32_t k = 0; k < pRenderPassInfo->pSubpasses[j].colorAttachmentCount; ++k) {
auto &attachment_ref = subpass.pColorAttachments[k];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
                        skip |= ValidateRenderPassLayoutAgainstFramebufferImageUsage(
                            rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass,
                            attachment_ref.attachment, "color attachment layout");
                        if (subpass.pResolveAttachments) {
                            skip |= ValidateRenderPassLayoutAgainstFramebufferImageUsage(
                                rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass,
                                attachment_ref.attachment, "resolve attachment layout");
                        }
}
}
}
}
if (pRenderPassInfo->pSubpasses[j].pDepthStencilAttachment) {
auto &attachment_ref = *subpass.pDepthStencilAttachment;
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
                        skip |= ValidateRenderPassLayoutAgainstFramebufferImageUsage(
                            rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass,
                            attachment_ref.attachment, "depth/stencil attachment layout");
}
}
}
}
}
return skip;
}
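// Record the layout transition for a single attachment reference, skipping VK_ATTACHMENT_UNUSED references.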
void CoreChecks::TransitionAttachmentRefLayout(CMD_BUFFER_STATE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
const safe_VkAttachmentReference2KHR &ref) {
if (ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = GetAttachmentImageViewState(pFramebuffer, ref.attachment);
if (image_view) {
SetImageViewLayout(pCB, *image_view, ref.layout);
}
}
}
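// Record the layout transitions for all attachment references (input, color, depth/stencil) used by the given subpass.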
void CoreChecks::TransitionSubpassLayouts(CMD_BUFFER_STATE *pCB, const RENDER_PASS_STATE *render_pass_state,
const int subpass_index, FRAMEBUFFER_STATE *framebuffer_state) {
assert(render_pass_state);
if (framebuffer_state) {
auto const &subpass = render_pass_state->createInfo.pSubpasses[subpass_index];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pInputAttachments[j]);
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pColorAttachments[j]);
}
if (subpass.pDepthStencilAttachment) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, *subpass.pDepthStencilAttachment);
}
}
}
// Transition the layout state for renderpass attachments based on the BeginRenderPass() call. This includes:
// 1. Transition into initialLayout state
// 2. Transition from initialLayout to layout used in subpass 0
void CoreChecks::TransitionBeginRenderPassLayouts(CMD_BUFFER_STATE *cb_state, const RENDER_PASS_STATE *render_pass_state,
FRAMEBUFFER_STATE *framebuffer_state) {
// First transition into initialLayout
auto const rpci = render_pass_state->createInfo.ptr();
for (uint32_t i = 0; i < rpci->attachmentCount; ++i) {
auto view_state = GetAttachmentImageViewState(framebuffer_state, i);
if (view_state) {
SetImageViewLayout(cb_state, *view_state, rpci->pAttachments[i].initialLayout);
}
}
// Now transition for first subpass (index 0)
TransitionSubpassLayouts(cb_state, render_pass_state, 0, framebuffer_state);
}
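// Return true only if every aspect bit requested in aspect_mask is actually present in the given format.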
bool VerifyAspectsPresent(VkImageAspectFlags aspect_mask, VkFormat format) {
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != 0) {
if (!(FormatIsColor(format) || FormatIsMultiplane(format))) return false;
}
if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0) {
if (!FormatHasDepth(format)) return false;
}
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0) {
if (!FormatHasStencil(format)) return false;
}
if (0 !=
(aspect_mask & (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | VK_IMAGE_ASPECT_PLANE_2_BIT_KHR))) {
if (FormatPlaneCount(format) == 1) return false;
}
return true;
}
// Verify an ImageMemoryBarrier's old/new ImageLayouts are compatible with the Image's ImageUsageFlags.
bool CoreChecks::ValidateBarrierLayoutToImageUsage(const VkImageMemoryBarrier &img_barrier, bool new_not_old,
VkImageUsageFlags usage_flags, const char *func_name,
const char *barrier_pname) const {
bool skip = false;
const VkImageLayout layout = (new_not_old) ? img_barrier.newLayout : img_barrier.oldLayout;
const char *msg_code = kVUIDUndefined; // sentinel value meaning "no error"
switch (layout) {
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01208";
}
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01209";
}
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01210";
}
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
if ((usage_flags & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01211";
}
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01212";
}
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01213";
}
break;
case VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV:
if ((usage_flags & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-02088";
}
break;
default:
// Other VkImageLayout values do not have VUs defined in this context.
break;
}
if (msg_code != kVUIDUndefined) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(img_barrier.image), msg_code,
"%s: Image barrier %s %s Layout=%s is not compatible with %s usage flags 0x%" PRIx32 ".", func_name,
barrier_pname, ((new_not_old) ? "new" : "old"), string_VkImageLayout(layout),
report_data->FormatHandle(img_barrier.image).c_str(), usage_flags);
}
return skip;
}
// Verify image barriers are compatible with the images they reference.
bool CoreChecks::ValidateBarriersToImages(const CMD_BUFFER_STATE *cb_state, uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers, const char *func_name) const {
bool skip = false;
// Scoreboard for checking for duplicate and inconsistent barriers to images
struct ImageBarrierScoreboardEntry {
uint32_t index;
// This is designed for temporary storage within the scope of the API call. If retained storage of the barriers is
        // required, copies should be made and smart or unique pointers used in some other structure (or this one refactored)
const VkImageMemoryBarrier *barrier;
};
using ImageBarrierScoreboardSubresMap = std::unordered_map<VkImageSubresourceRange, ImageBarrierScoreboardEntry>;
using ImageBarrierScoreboardImageMap = std::unordered_map<VkImage, ImageBarrierScoreboardSubresMap>;
// Scoreboard for duplicate layout transition barriers within the list
// Pointers retained in the scoreboard only have the lifetime of *this* call (i.e. within the scope of the API call)
ImageBarrierScoreboardImageMap layout_transitions;
for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i) {
const auto &img_barrier = pImageMemoryBarriers[i];
const std::string barrier_pname = "pImageMemoryBarrier[" + std::to_string(i) + "]";
// Update the scoreboard of layout transitions and check for barriers affecting the same image and subresource
// TODO: a higher precision could be gained by adapting the command_buffer image_layout_map logic looking for conflicts
// at a per sub-resource level
if (img_barrier.oldLayout != img_barrier.newLayout) {
const ImageBarrierScoreboardEntry new_entry{i, &img_barrier};
const auto image_it = layout_transitions.find(img_barrier.image);
if (image_it != layout_transitions.end()) {
auto &subres_map = image_it->second;
auto subres_it = subres_map.find(img_barrier.subresourceRange);
if (subres_it != subres_map.end()) {
auto &entry = subres_it->second;
if ((entry.barrier->newLayout != img_barrier.oldLayout) &&
(img_barrier.oldLayout != VK_IMAGE_LAYOUT_UNDEFINED)) {
const VkImageSubresourceRange &range = img_barrier.subresourceRange;
                        skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-oldLayout-01197",
"%s: %s conflicts with earlier entry pImageMemoryBarrier[%u]. %s"
" subresourceRange: aspectMask=%u baseMipLevel=%u levelCount=%u, baseArrayLayer=%u, layerCount=%u; "
"conflicting barrier transitions image layout from %s when earlier barrier transitioned to layout %s.",
func_name, barrier_pname.c_str(), entry.index, report_data->FormatHandle(img_barrier.image).c_str(),
range.aspectMask, range.baseMipLevel, range.levelCount, range.baseArrayLayer, range.layerCount,
string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(entry.barrier->newLayout));
}
entry = new_entry;
} else {
subres_map[img_barrier.subresourceRange] = new_entry;
}
} else {
layout_transitions[img_barrier.image][img_barrier.subresourceRange] = new_entry;
}
}
auto image_state = GetImageState(img_barrier.image);
if (image_state) {
VkImageUsageFlags usage_flags = image_state->createInfo.usage;
skip |= ValidateBarrierLayoutToImageUsage(img_barrier, false, usage_flags, func_name, barrier_pname.c_str());
skip |= ValidateBarrierLayoutToImageUsage(img_barrier, true, usage_flags, func_name, barrier_pname.c_str());
// Make sure layout is able to be transitioned, currently only presented shared presentable images are locked
if (image_state->layout_locked) {
// TODO: Add unique id for error when available
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                    HandleToUint64(img_barrier.image), kVUIDUndefined,
"Attempting to transition shared presentable %s"
" from layout %s to layout %s, but image has already been presented and cannot have its layout transitioned.",
report_data->FormatHandle(img_barrier.image).c_str(), string_VkImageLayout(img_barrier.oldLayout),
string_VkImageLayout(img_barrier.newLayout));
}
const VkImageCreateInfo &image_create_info = image_state->createInfo;
// For a Depth/Stencil image both aspects MUST be set
if (FormatIsDepthAndStencil(image_create_info.format)) {
auto const aspect_mask = img_barrier.subresourceRange.aspectMask;
auto const ds_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if ((aspect_mask & ds_mask) != (ds_mask)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(img_barrier.image), "VUID-VkImageMemoryBarrier-image-01207",
"%s: Image barrier %s references %s of format %s that must have the depth and stencil "
"aspects set, but its aspectMask is 0x%" PRIx32 ".",
func_name, barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_create_info.format), aspect_mask);
}
}
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_state, img_barrier.image);
if (img_barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
// Not sure if this needs to be in the ForRange traversal, pulling it out as it is currently invariant with
// subresource.
} else if (subresource_map) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map);
VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, img_barrier.subresourceRange);
auto subres_callback = [this, img_barrier, cb_state, &layout_check, &subres_skip](
const VkImageSubresource &subres, VkImageLayout layout, VkImageLayout initial_layout) {
if (!layout_check.Check(subres, img_barrier.oldLayout, layout, initial_layout)) {
                        subres_skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-oldLayout-01197",
"For %s you cannot transition the layout of aspect=%d level=%d layer=%d from %s when the "
"%s layout is %s.",
report_data->FormatHandle(img_barrier.image).c_str(), subres.aspectMask, subres.mipLevel,
subres.arrayLayer, string_VkImageLayout(img_barrier.oldLayout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
return !subres_skip;
};
subresource_map->ForRange(normalized_isr, subres_callback);
skip |= subres_skip;
}
}
}
return skip;
}
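// Return true if the barrier describes a queue family ownership release performed from this command buffer's pool.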
bool CoreChecks::IsReleaseOp(CMD_BUFFER_STATE *cb_state, const VkImageMemoryBarrier &barrier) const {
if (!IsTransferOp(&barrier)) return false;
auto pool = GetCommandPoolState(cb_state->createInfo.commandPool);
return pool && TempIsReleaseOp<VkImageMemoryBarrier, true>(pool, &barrier);
}
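// Check the barriers recorded in this command buffer for queue family ownership transfer (QFO) release/acquire
// operations that duplicate a barrier already recorded in the same command buffer (reported as a warning).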
template <typename Barrier>
bool CoreChecks::ValidateQFOTransferBarrierUniqueness(const char *func_name, const CMD_BUFFER_STATE *cb_state,
uint32_t barrier_count, const Barrier *barriers) const {
using BarrierRecord = QFOTransferBarrier<Barrier>;
bool skip = false;
auto pool = GetCommandPoolState(cb_state->createInfo.commandPool);
auto &barrier_sets = GetQFOBarrierSets(cb_state, typename BarrierRecord::Tag());
const char *barrier_name = BarrierRecord::BarrierName();
const char *handle_name = BarrierRecord::HandleName();
const char *transfer_type = nullptr;
for (uint32_t b = 0; b < barrier_count; b++) {
if (!IsTransferOp(&barriers[b])) continue;
const BarrierRecord *barrier_record = nullptr;
if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer */>(pool, &barriers[b]) &&
!QueueFamilyIsSpecial(barriers[b].dstQueueFamilyIndex)) {
const auto found = barrier_sets.release.find(barriers[b]);
if (found != barrier_sets.release.cend()) {
barrier_record = &(*found);
transfer_type = "releasing";
}
} else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barriers[b]) &&
!QueueFamilyIsSpecial(barriers[b].srcQueueFamilyIndex)) {
const auto found = barrier_sets.acquire.find(barriers[b]);
if (found != barrier_sets.acquire.cend()) {
barrier_record = &(*found);
transfer_type = "acquiring";
}
}
if (barrier_record != nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgDuplicateQFOInCB(),
"%s: %s at index %" PRIu32 " %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier recorded in this command buffer.",
func_name, barrier_name, b, transfer_type, handle_name,
report_data->FormatHandle(barrier_record->handle).c_str(), barrier_record->srcQueueFamilyIndex,
barrier_record->dstQueueFamilyIndex);
}
}
return skip;
}
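// Helpers that map a memory barrier to the typed handle, state object, or raw handle it refers to, so the
// barrier-validation templates below can operate generically on image and buffer barriers.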
VulkanTypedHandle BarrierTypedHandle(const VkImageMemoryBarrier &barrier) {
return VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage);
}
const IMAGE_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkImageMemoryBarrier &barrier) {
return device_state.GetImageState(barrier.image);
}
VulkanTypedHandle BarrierTypedHandle(const VkBufferMemoryBarrier &barrier) {
return VulkanTypedHandle(barrier.buffer, kVulkanObjectTypeBuffer);
}
const BUFFER_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkBufferMemoryBarrier &barrier) {
return device_state.GetBufferState(barrier.buffer);
}
VkBuffer BarrierHandle(const VkBufferMemoryBarrier &barrier) { return barrier.buffer; }
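// Record QFO release/acquire barriers for submit-time validation and, for barriers that name two explicit queue
// families on a non-concurrently-shared resource, enqueue a submit-time queue family compatibility check.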
template <typename Barrier>
void CoreChecks::RecordBarrierArrayValidationInfo(const char *func_name, CMD_BUFFER_STATE *cb_state, uint32_t barrier_count,
const Barrier *barriers) {
auto pool = GetCommandPoolState(cb_state->createInfo.commandPool);
auto &barrier_sets = GetQFOBarrierSets(cb_state, typename QFOTransferBarrier<Barrier>::Tag());
for (uint32_t b = 0; b < barrier_count; b++) {
auto &barrier = barriers[b];
if (IsTransferOp(&barrier)) {
if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer*/>(pool, &barrier) &&
!QueueFamilyIsSpecial(barrier.dstQueueFamilyIndex)) {
barrier_sets.release.emplace(barrier);
} else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barrier) &&
!QueueFamilyIsSpecial(barrier.srcQueueFamilyIndex)) {
barrier_sets.acquire.emplace(barrier);
}
}
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
if (!QueueFamilyIsIgnored(src_queue_family) && !QueueFamilyIsIgnored(dst_queue_family)) {
// Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria
// TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
auto handle_state = BarrierHandleState(*this, barrier);
bool mode_concurrent = handle_state ? handle_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT : false;
if (!mode_concurrent) {
const auto typed_handle = BarrierTypedHandle(barrier);
cb_state->eventUpdates.emplace_back(
[this, func_name, cb_state, typed_handle, src_queue_family, dst_queue_family](VkQueue queue) {
return ValidateConcurrentBarrierAtSubmit(queue, func_name, cb_state, typed_handle, src_queue_family,
dst_queue_family);
});
}
}
}
}
bool CoreChecks::ValidateBarriersQFOTransferUniqueness(const char *func_name, const CMD_BUFFER_STATE *cb_state,
uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers,
uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) const {
bool skip = false;
skip |= ValidateQFOTransferBarrierUniqueness(func_name, cb_state, bufferBarrierCount, pBufferMemBarriers);
skip |= ValidateQFOTransferBarrierUniqueness(func_name, cb_state, imageMemBarrierCount, pImageMemBarriers);
return skip;
}
void CoreChecks::RecordBarrierValidationInfo(const char *func_name, CMD_BUFFER_STATE *cb_state, uint32_t bufferBarrierCount,
const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) {
RecordBarrierArrayValidationInfo(func_name, cb_state, bufferBarrierCount, pBufferMemBarriers);
RecordBarrierArrayValidationInfo(func_name, cb_state, imageMemBarrierCount, pImageMemBarriers);
}
template <typename BarrierRecord, typename Scoreboard>
bool CoreChecks::ValidateAndUpdateQFOScoreboard(const debug_report_data *report_data, const CMD_BUFFER_STATE *cb_state,
const char *operation, const BarrierRecord &barrier, Scoreboard *scoreboard) const {
// Record to the scoreboard or report that we have a duplication
bool skip = false;
auto inserted = scoreboard->insert(std::make_pair(barrier, cb_state));
if (!inserted.second && inserted.first->second != cb_state) {
        // This is a duplication (but don't report duplicates from the same CB, as we do that at record time).
skip = log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgDuplicateQFOInSubmit(),
"%s: %s %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32
" duplicates existing barrier submitted in this batch from %s.",
"vkQueueSubmit()", BarrierRecord::BarrierName(), operation, BarrierRecord::HandleName(),
report_data->FormatHandle(barrier.handle).c_str(), barrier.srcQueueFamilyIndex, barrier.dstQueueFamilyIndex,
report_data->FormatHandle(inserted.first->second->commandBuffer).c_str());
}
return skip;
}
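// At queue submit time, validate this command buffer's QFO barriers: a release must not duplicate a release that is
// still pending (warning), every acquire must have a matching pending release (error), and duplicates within the same
// submission batch are caught via the scoreboards.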
template <typename Barrier>
bool CoreChecks::ValidateQueuedQFOTransferBarriers(const CMD_BUFFER_STATE *cb_state,
QFOTransferCBScoreboards<Barrier> *scoreboards) const {
using BarrierRecord = QFOTransferBarrier<Barrier>;
using TypeTag = typename BarrierRecord::Tag;
bool skip = false;
const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag());
const GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers = GetGlobalQFOReleaseBarrierMap(TypeTag());
const char *barrier_name = BarrierRecord::BarrierName();
const char *handle_name = BarrierRecord::HandleName();
// No release should have an extant duplicate (WARNING)
for (const auto &release : cb_barriers.release) {
// Check the global pending release barriers
const auto set_it = global_release_barriers.find(release.handle);
if (set_it != global_release_barriers.cend()) {
const QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second;
const auto found = set_for_handle.find(release);
if (found != set_for_handle.cend()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgDuplicateQFOSubmitted(),
"%s: %s releasing queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32
" duplicates existing barrier queued for execution, without intervening acquire operation.",
"vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(found->handle).c_str(),
found->srcQueueFamilyIndex, found->dstQueueFamilyIndex);
}
}
skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "releasing", release, &scoreboards->release);
}
// Each acquire must have a matching release (ERROR)
for (const auto &acquire : cb_barriers.acquire) {
const auto set_it = global_release_barriers.find(acquire.handle);
bool matching_release_found = false;
if (set_it != global_release_barriers.cend()) {
const QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second;
matching_release_found = set_for_handle.find(acquire) != set_for_handle.cend();
}
if (!matching_release_found) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgMissingQFOReleaseInSubmit(),
"%s: in submitted command buffer %s acquiring ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32 " has no matching release barrier queued for execution.",
"vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(acquire.handle).c_str(),
acquire.srcQueueFamilyIndex, acquire.dstQueueFamilyIndex);
}
skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "acquiring", acquire, &scoreboards->acquire);
}
return skip;
}
bool CoreChecks::ValidateQueuedQFOTransfers(const CMD_BUFFER_STATE *cb_state,
QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) const {
bool skip = false;
skip |= ValidateQueuedQFOTransferBarriers<VkImageMemoryBarrier>(cb_state, qfo_image_scoreboards);
skip |= ValidateQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(cb_state, qfo_buffer_scoreboards);
return skip;
}
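// At queue submit time, add this command buffer's QFO releases to the global pending-release map and consume (erase)
// the pending releases matched by its acquires.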
template <typename Barrier>
void CoreChecks::RecordQueuedQFOTransferBarriers(CMD_BUFFER_STATE *cb_state) {
using BarrierRecord = QFOTransferBarrier<Barrier>;
using TypeTag = typename BarrierRecord::Tag;
const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag());
GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers = GetGlobalQFOReleaseBarrierMap(TypeTag());
// Add release barriers from this submit to the global map
for (const auto &release : cb_barriers.release) {
// the global barrier list is mapped by resource handle to allow cleanup on resource destruction
// NOTE: We're using [] because creation of a Set is a needed side effect for new handles
global_release_barriers[release.handle].insert(release);
}
// Erase acquired barriers from this submit from the global map -- essentially marking releases as consumed
for (const auto &acquire : cb_barriers.acquire) {
// NOTE: We're not using [] because we don't want to create entries for missing releases
auto set_it = global_release_barriers.find(acquire.handle);
if (set_it != global_release_barriers.end()) {
QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second;
set_for_handle.erase(acquire);
if (set_for_handle.size() == 0) { // Clean up empty sets
global_release_barriers.erase(set_it);
}
}
}
}
void CoreChecks::RecordQueuedQFOTransfers(CMD_BUFFER_STATE *cb_state) {
RecordQueuedQFOTransferBarriers<VkImageMemoryBarrier>(cb_state);
RecordQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(cb_state);
}
// Avoid making the template globally visible by exporting the one instance of it we need.
void CoreChecks::EraseQFOImageRelaseBarriers(const VkImage &image) { EraseQFOReleaseBarriers<VkImageMemoryBarrier>(image); }
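// Record the layout transitions described by a set of image memory barriers, skipping QFO release operations (the
// acquire side records the transition) and also updating any aliasing images.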
void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t memBarrierCount,
const VkImageMemoryBarrier *pImgMemBarriers) {
for (uint32_t i = 0; i < memBarrierCount; ++i) {
const auto &mem_barrier = pImgMemBarriers[i];
// For ownership transfers, the barrier is specified twice; as a release
// operation on the yielding queue family, and as an acquire operation
// on the acquiring queue family. This barrier may also include a layout
// transition, which occurs 'between' the two operations. For validation
// purposes it doesn't seem important which side performs the layout
// transition, but it must not be performed twice. We'll arbitrarily
// choose to perform it as part of the acquire operation.
if (IsReleaseOp(cb_state, mem_barrier)) {
continue;
}
auto *image_state = GetImageState(mem_barrier.image);
if (!image_state) continue;
RecordTransitionImageLayout(cb_state, image_state, mem_barrier);
for (const auto &image : image_state->aliasing_images) {
image_state = GetImageState(image);
RecordTransitionImageLayout(cb_state, image_state, mem_barrier);
}
}
}
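// Record a single image barrier's layout transition into the command buffer's layout map, normalizing the
// subresource range first.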
void CoreChecks::RecordTransitionImageLayout(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state,
const VkImageMemoryBarrier &mem_barrier) {
VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, mem_barrier.subresourceRange);
const auto &image_create_info = image_state->createInfo;
// Special case for 3D images with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR flag bit, where <extent.depth> and
// <arrayLayers> can potentially alias. When recording layout for the entire image, pre-emptively record layouts
// for all (potential) layer sub_resources.
if (0 != (image_create_info.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR)) {
normalized_isr.baseArrayLayer = 0;
normalized_isr.layerCount = image_create_info.extent.depth; // Treat each depth slice as a layer subresource
}
SetImageLayout(cb_state, *image_state, normalized_isr, mem_barrier.newLayout, mem_barrier.oldLayout);
}
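// Verify that the layout the caller claims for an image subresource range matches the layout tracked in the command
// buffer, and that it is compatible with the expected optimal layout for the operation.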
bool CoreChecks::VerifyImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &range, VkImageAspectFlags aspect_mask,
VkImageLayout explicit_layout, VkImageLayout optimal_layout, const char *caller,
const char *layout_invalid_msg_code, const char *layout_mismatch_msg_code, bool *error) const {
if (disabled.image_layout_validation) return false;
assert(cb_node);
assert(image_state);
const auto image = image_state->image;
bool skip = false;
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image);
if (subresource_map) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map, aspect_mask);
auto subresource_cb = [this, explicit_layout, cb_node, layout_mismatch_msg_code, caller, image, &layout_check, &error,
&subres_skip](const VkImageSubresource &subres, VkImageLayout layout, VkImageLayout initial_layout) {
if (!layout_check.Check(subres, explicit_layout, layout, initial_layout)) {
*error = true;
subres_skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), layout_mismatch_msg_code,
"%s: Cannot use %s (layer=%u mip=%u) with specific layout %s that doesn't match the "
"%s layout %s.",
caller, report_data->FormatHandle(image).c_str(), subres.arrayLayer, subres.mipLevel,
string_VkImageLayout(explicit_layout), layout_check.message, string_VkImageLayout(layout_check.layout));
}
return !subres_skip;
};
subresource_map->ForRange(range, subresource_cb);
skip |= subres_skip;
}
// If optimal_layout is not UNDEFINED, check that layout matches optimal for this case
if ((VK_IMAGE_LAYOUT_UNDEFINED != optimal_layout) && (explicit_layout != optimal_layout)) {
if (VK_IMAGE_LAYOUT_GENERAL == explicit_layout) {
if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
// LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer),
kVUID_Core_DrawState_InvalidImageLayout,
"%s: For optimal performance %s layout should be %s instead of GENERAL.", caller,
report_data->FormatHandle(image).c_str(), string_VkImageLayout(optimal_layout));
}
} else if (device_extensions.vk_khr_shared_presentable_image) {
if (image_state->shared_presentable) {
if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != explicit_layout) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
layout_invalid_msg_code,
"Layout for shared presentable image is %s but must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.",
                                    string_VkImageLayout(explicit_layout));
}
}
} else {
*error = true;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), layout_invalid_msg_code,
"%s: Layout for %s is %s but can only be %s or VK_IMAGE_LAYOUT_GENERAL.", caller,
report_data->FormatHandle(image).c_str(), string_VkImageLayout(explicit_layout),
string_VkImageLayout(optimal_layout));
}
}
return skip;
}
bool CoreChecks::VerifyImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceLayers &subLayers, VkImageLayout explicit_layout,
VkImageLayout optimal_layout, const char *caller, const char *layout_invalid_msg_code,
const char *layout_mismatch_msg_code, bool *error) const {
return VerifyImageLayout(cb_node, image_state, RangeFromLayers(subLayers), explicit_layout, optimal_layout, caller,
layout_invalid_msg_code, layout_mismatch_msg_code, error);
}
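// Record the transition of each framebuffer attachment to the render pass's finalLayout.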
void CoreChecks::TransitionFinalSubpassLayouts(CMD_BUFFER_STATE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
FRAMEBUFFER_STATE *framebuffer_state) {
auto renderPass = GetRenderPassState(pRenderPassBegin->renderPass);
if (!renderPass) return;
const VkRenderPassCreateInfo2KHR *pRenderPassInfo = renderPass->createInfo.ptr();
if (framebuffer_state) {
for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
auto view_state = GetAttachmentImageViewState(framebuffer_state, i);
if (view_state) {
SetImageViewLayout(pCB, *view_state, pRenderPassInfo->pAttachments[i].finalLayout);
}
}
}
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android-specific validation that uses types defined only with VK_USE_PLATFORM_ANDROID_KHR
// This could also move into a separate core_validation_android.cpp file.
//
// AHB-specific validation within non-AHB APIs
//
bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) {
bool skip = false;
const VkExternalFormatANDROID *ext_fmt_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
if (ext_fmt_android) {
if (0 != ext_fmt_android->externalFormat) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-01974",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with non-zero "
"externalFormat, but the VkImageCreateInfo's format is not VK_FORMAT_UNDEFINED.");
}
if (0 != (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT & create_info->flags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-02396",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but flags include VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.");
}
if (0 != (~VK_IMAGE_USAGE_SAMPLED_BIT & create_info->usage)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-02397",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but usage includes bits other than VK_IMAGE_USAGE_SAMPLED_BIT.");
}
if (VK_IMAGE_TILING_OPTIMAL != create_info->tiling) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-02398",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but layout is not VK_IMAGE_TILING_OPTIMAL.");
}
}
if ((0 != ext_fmt_android->externalFormat) && (0 == ahb_ext_formats_set.count(ext_fmt_android->externalFormat))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkExternalFormatANDROID-externalFormat-01894",
"vkCreateImage(): Chained VkExternalFormatANDROID struct contains a non-zero externalFormat which has "
"not been previously retrieved by vkGetAndroidHardwareBufferPropertiesANDROID().");
}
}
if ((nullptr == ext_fmt_android) || (0 == ext_fmt_android->externalFormat)) {
if (VK_FORMAT_UNDEFINED == create_info->format) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-01975",
"vkCreateImage(): VkImageCreateInfo struct's format is VK_FORMAT_UNDEFINED, but either does not have a "
"chained VkExternalFormatANDROID struct or the struct exists but has an externalFormat of 0.");
}
}
const VkExternalMemoryImageCreateInfo *emici = lvl_find_in_chain<VkExternalMemoryImageCreateInfo>(create_info->pNext);
if (emici && (emici->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) {
if (create_info->imageType != VK_IMAGE_TYPE_2D) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-02393",
"vkCreateImage(): VkImageCreateInfo struct with imageType %s has chained VkExternalMemoryImageCreateInfo "
"struct with handleType VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
string_VkImageType(create_info->imageType));
}
if ((create_info->mipLevels != 1) && (create_info->mipLevels != FullMipChainLevels(create_info->extent))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-pNext-02394",
"vkCreateImage(): VkImageCreateInfo struct with chained VkExternalMemoryImageCreateInfo struct of "
"handleType VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID "
"specifies mipLevels = %" PRId32 " (full chain mipLevels are %" PRId32 ").",
create_info->mipLevels, FullMipChainLevels(create_info->extent));
}
}
return skip;
}
bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) {
bool skip = false;
IMAGE_STATE *image_state = GetImageState(create_info->image);
if (image_state->has_ahb_format) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-02399",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but "
"format member is %s.",
string_VkFormat(create_info->format));
}
// Chain must include a compatible ycbcr conversion
bool conv_found = false;
uint64_t external_format = 0;
const VkSamplerYcbcrConversionInfo *ycbcr_conv_info = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(create_info->pNext);
if (ycbcr_conv_info != nullptr) {
VkSamplerYcbcrConversion conv_handle = ycbcr_conv_info->conversion;
if (ycbcr_conversion_ahb_fmt_map.find(conv_handle) != ycbcr_conversion_ahb_fmt_map.end()) {
conv_found = true;
external_format = ycbcr_conversion_ahb_fmt_map.at(conv_handle);
}
}
if ((!conv_found) || (external_format != image_state->ahb_format)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-02400",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but "
"without a chained VkSamplerYcbcrConversionInfo struct with the same external format.");
}
// Errors in create_info swizzles
if ((create_info->components.r != VK_COMPONENT_SWIZZLE_IDENTITY) ||
(create_info->components.g != VK_COMPONENT_SWIZZLE_IDENTITY) ||
(create_info->components.b != VK_COMPONENT_SWIZZLE_IDENTITY) ||
(create_info->components.a != VK_COMPONENT_SWIZZLE_IDENTITY)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-02401",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but "
"includes one or more non-identity component swizzles.");
}
}
return skip;
}
bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state->imported_ahb && (0 == image_state->GetBoundMemory().size())) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-vkGetImageSubresourceLayout-image-01895",
"vkGetImageSubresourceLayout(): Attempt to query layout from an image created with "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType which has not yet been "
"bound to memory.");
}
return skip;
}
#else
bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) {
return false;
}
bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) { return false; }
bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) const { return false; }
#endif // VK_USE_PLATFORM_ANDROID_KHR
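// Validate vkCreateImage(): extension-dependent format rules, image type/flag interactions, device and format limits,
// required feature enablement, and queue family indices for concurrent sharing.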
bool CoreChecks::PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateImageANDROID(report_data, pCreateInfo);
} else { // These checks are omitted or replaced when Android HW Buffer extension is active
if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-format-00943",
"vkCreateImage(): VkFormat for image must not be VK_FORMAT_UNDEFINED.");
}
}
if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) {
if (VK_IMAGE_TYPE_2D != pCreateInfo->imageType) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-flags-00949",
"vkCreateImage(): Image type must be VK_IMAGE_TYPE_2D when VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT "
"flag bit is set");
}
if ((pCreateInfo->extent.width != pCreateInfo->extent.height) || (pCreateInfo->arrayLayers < 6)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-00954",
"vkCreateImage(): If VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT flag bit is set, width (%d) must equal "
"height (%d) and arrayLayers (%d) must be >= 6.",
pCreateInfo->extent.width, pCreateInfo->extent.height, pCreateInfo->arrayLayers);
}
}
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
VkImageUsageFlags attach_flags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.width > device_limits->maxFramebufferWidth)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-usage-00964",
"vkCreateImage(): Image usage flags include a frame buffer attachment bit and image width exceeds device "
"maxFramebufferWidth.");
}
if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.height > device_limits->maxFramebufferHeight)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-usage-00965",
"vkCreateImage(): Image usage flags include a frame buffer attachment bit and image height exceeds device "
"maxFramebufferHeight");
}
if (device_extensions.vk_ext_fragment_density_map) {
uint32_t ceiling_width =
(uint32_t)ceil((float)device_limits->maxFramebufferWidth /
std::max((float)phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.width, 1.0f));
if ((pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) && (pCreateInfo->extent.width > ceiling_width)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-usage-02559",
"vkCreateImage(): Image usage flags include a fragment density map bit and image width (%u) exceeds the "
"ceiling of device "
"maxFramebufferWidth (%u) / minFragmentDensityTexelSize.width (%u). The ceiling value: %u",
pCreateInfo->extent.width, device_limits->maxFramebufferWidth,
phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.width, ceiling_width);
}
uint32_t ceiling_height =
(uint32_t)ceil((float)device_limits->maxFramebufferHeight /
std::max((float)phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.height, 1.0f));
if ((pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) && (pCreateInfo->extent.height > ceiling_height)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-usage-02560",
"vkCreateImage(): Image usage flags include a fragment density map bit and image height (%u) exceeds the "
"ceiling of device "
"maxFramebufferHeight (%u) / minFragmentDensityTexelSize.height (%u). The ceiling value: %u",
pCreateInfo->extent.height, device_limits->maxFramebufferHeight,
phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.height, ceiling_height);
}
}
VkImageFormatProperties format_limits = {};
VkResult res = GetPDImageFormatProperties(pCreateInfo, &format_limits);
if (res == VK_ERROR_FORMAT_NOT_SUPPORTED) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
if (!lvl_find_in_chain<VkExternalFormatANDROID>(pCreateInfo->pNext))
#endif // VK_USE_PLATFORM_ANDROID_KHR
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUIDUndefined,
"vkCreateImage(): Format %s is not supported for this combination of parameters.",
string_VkFormat(pCreateInfo->format));
} else {
if (pCreateInfo->mipLevels > format_limits.maxMipLevels) {
const char *format_string = string_VkFormat(pCreateInfo->format);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-mipLevels-02255",
"vkCreateImage(): Image mip levels=%d exceed image format maxMipLevels=%d for format %s.",
pCreateInfo->mipLevels, format_limits.maxMipLevels, format_string);
}
uint64_t texel_count = (uint64_t)pCreateInfo->extent.width * (uint64_t)pCreateInfo->extent.height *
(uint64_t)pCreateInfo->extent.depth * (uint64_t)pCreateInfo->arrayLayers *
(uint64_t)pCreateInfo->samples;
uint64_t total_size = (uint64_t)std::ceil(FormatTexelSize(pCreateInfo->format) * texel_count);
// Round up to imageGranularity boundary
VkDeviceSize imageGranularity = phys_dev_props.limits.bufferImageGranularity;
uint64_t ig_mask = imageGranularity - 1;
total_size = (total_size + ig_mask) & ~ig_mask;
if (total_size > format_limits.maxResourceSize) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0,
kVUID_Core_Image_InvalidFormatLimitsViolation,
"vkCreateImage(): resource size exceeds allowable maximum Image resource size = 0x%" PRIxLEAST64
", maximum resource size = 0x%" PRIxLEAST64 " ",
total_size, format_limits.maxResourceSize);
}
if (pCreateInfo->arrayLayers > format_limits.maxArrayLayers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0,
"VUID-VkImageCreateInfo-arrayLayers-02256",
"vkCreateImage(): arrayLayers=%d exceeds allowable maximum supported by format of %d.",
pCreateInfo->arrayLayers, format_limits.maxArrayLayers);
}
if (device_extensions.vk_khr_sampler_ycbcr_conversion && FormatRequiresYcbcrConversion(pCreateInfo->format) &&
!device_extensions.vk_ext_ycbcr_image_arrays && pCreateInfo->arrayLayers > 1) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0,
"VUID-VkImageCreateInfo-format-02653",
"vkCreateImage(): arrayLayers=%d exceeds the maximum allowed of 1 for formats requiring sampler ycbcr conversion",
pCreateInfo->arrayLayers);
}
if ((pCreateInfo->samples & format_limits.sampleCounts) == 0) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0,
"VUID-VkImageCreateInfo-samples-02258", "vkCreateImage(): samples %s is not supported by format 0x%.8X.",
string_VkSampleCountFlagBits(pCreateInfo->samples), format_limits.sampleCounts);
}
}
if ((pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_ALIASED_BIT) && (!enabled_features.core.sparseResidencyAliased)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-flags-01924",
"vkCreateImage(): the sparseResidencyAliased device feature is disabled: Images cannot be created with the "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT set.");
}
if (device_extensions.vk_khr_maintenance2) {
if (pCreateInfo->flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR) {
if (!(FormatIsCompressed_BC(pCreateInfo->format) || FormatIsCompressed_ASTC_LDR(pCreateInfo->format) ||
FormatIsCompressed_ETC2_EAC(pCreateInfo->format))) {
// TODO: Add Maintenance2 VUID
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUIDUndefined,
"vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR, "
"format must be block, ETC or ASTC compressed, but is %s",
string_VkFormat(pCreateInfo->format));
}
if (!(pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT)) {
// TODO: Add Maintenance2 VUID
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUIDUndefined,
"vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR, "
"flags must also contain VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.");
}
}
}
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) {
skip |= ValidateQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, "vkCreateImage",
"pCreateInfo->pQueueFamilyIndices", "VUID-VkImageCreateInfo-sharingMode-01420",
"VUID-VkImageCreateInfo-sharingMode-01420", false);
}
if (!FormatIsMultiplane(pCreateInfo->format) && !(pCreateInfo->flags & VK_IMAGE_CREATE_ALIAS_BIT) &&
(pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-format-01577",
"vkCreateImage(): format is %s and flags are %s. The flags should not include VK_IMAGE_CREATE_DISJOINT_BIT.",
string_VkFormat(pCreateInfo->format), string_VkImageCreateFlags(pCreateInfo->flags).c_str());
}
return skip;
}
void CoreChecks::PostCallRecordCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage, VkResult result) {
if (VK_SUCCESS != result) return;
StateTracker::PostCallRecordCreateImage(device, pCreateInfo, pAllocator, pImage, result);
IMAGE_LAYOUT_STATE image_state;
image_state.layout = pCreateInfo->initialLayout;
image_state.format = pCreateInfo->format;
ImageSubresourcePair subpair{*pImage, false, VkImageSubresource()};
imageSubresourceMap[*pImage].push_back(subpair);
imageLayoutMap[subpair] = image_state;
}
bool CoreChecks::PreCallValidateDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
IMAGE_STATE *image_state = GetImageState(image);
const VulkanTypedHandle obj_struct(image, kVulkanObjectTypeImage);
bool skip = false;
if (image_state) {
skip |= ValidateObjectNotInUse(image_state, obj_struct, "vkDestroyImage", "VUID-vkDestroyImage-image-01000");
}
return skip;
}
void CoreChecks::PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
// Clean up validation specific data
EraseQFOReleaseBarriers<VkImageMemoryBarrier>(image);
const auto &sub_entry = imageSubresourceMap.find(image);
if (sub_entry != imageSubresourceMap.end()) {
for (const auto &pair : sub_entry->second) {
imageLayoutMap.erase(pair);
}
imageSubresourceMap.erase(sub_entry);
}
// Clean up generic image state
StateTracker::PreCallRecordDestroyImage(device, image, pAllocator);
}
bool CoreChecks::ValidateImageAttributes(const IMAGE_STATE *image_state, const VkImageSubresourceRange &range) const {
bool skip = false;
if (range.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
char const str[] = "vkCmdClearColorImage aspectMasks for all subresource ranges must be set to VK_IMAGE_ASPECT_COLOR_BIT";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_DrawState_InvalidImageAspect, str);
}
if (FormatIsDepthOrStencil(image_state->createInfo.format)) {
char const str[] = "vkCmdClearColorImage called with depth/stencil image.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-vkCmdClearColorImage-image-00007", "%s.", str);
} else if (FormatIsCompressed(image_state->createInfo.format)) {
char const str[] = "vkCmdClearColorImage called with compressed image.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-vkCmdClearColorImage-image-00007", "%s.", str);
}
if (!(image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
char const str[] = "vkCmdClearColorImage called with image created without VK_IMAGE_USAGE_TRANSFER_DST_BIT.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-vkCmdClearColorImage-image-00002", "%s.", str);
}
return skip;
}
uint32_t ResolveRemainingLevels(const VkImageSubresourceRange *range, uint32_t mip_levels) {
// Return correct number of mip levels taking into account VK_REMAINING_MIP_LEVELS
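// For example, levelCount == VK_REMAINING_MIP_LEVELS with baseMipLevel == 2 on an image with 8 mip levels resolves to 6.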
uint32_t mip_level_count = range->levelCount;
if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
mip_level_count = mip_levels - range->baseMipLevel;
}
return mip_level_count;
}
uint32_t ResolveRemainingLayers(const VkImageSubresourceRange *range, uint32_t layers) {
// Return correct number of layers taking into account VK_REMAINING_ARRAY_LAYERS
uint32_t array_layer_count = range->layerCount;
if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
array_layer_count = layers - range->baseArrayLayer;
}
return array_layer_count;
}
bool CoreChecks::VerifyClearImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &range, VkImageLayout dest_image_layout,
const char *func_name) const {
bool skip = false;
if (dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
if (dest_image_layout == VK_IMAGE_LAYOUT_GENERAL) {
if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
// LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_DrawState_InvalidImageLayout,
"%s: Layout for cleared image should be TRANSFER_DST_OPTIMAL instead of GENERAL.", func_name);
}
} else if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR == dest_image_layout) {
if (!device_extensions.vk_khr_shared_presentable_image) {
// TODO: Add unique error id when available.
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUIDUndefined,
"Must enable VK_KHR_shared_presentable_image extension before creating images with a layout type "
"of VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.");
} else {
if (image_state->shared_presentable) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUIDUndefined,
"Layout for shared presentable cleared image is %s but can only be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.",
string_VkImageLayout(dest_image_layout));
}
}
} else {
const char *error_code = "VUID-vkCmdClearColorImage-imageLayout-00005";
if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
error_code = "VUID-vkCmdClearDepthStencilImage-imageLayout-00012";
} else {
assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
}
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), error_code,
"%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.", func_name,
string_VkImageLayout(dest_image_layout));
}
}
// Cast to const to prevent creation at validate time.
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state->image);
if (subresource_map) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map);
VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, range);
auto subres_callback = [this, cb_node, dest_image_layout, func_name, &layout_check, &subres_skip](
const VkImageSubresource &subres, VkImageLayout layout, VkImageLayout initial_layout) {
if (!layout_check.Check(subres, dest_image_layout, layout, initial_layout)) {
const char *error_code = "VUID-vkCmdClearColorImage-imageLayout-00004";
if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
error_code = "VUID-vkCmdClearDepthStencilImage-imageLayout-00011";
} else {
assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
}
subres_skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), error_code,
"%s: Cannot clear an image whose layout is %s and doesn't match the %s layout %s.",
func_name, string_VkImageLayout(dest_image_layout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
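// Returning false after the first mismatch is intended to stop ForRange from visiting further subresources,
// so only the first offending subresource in the range is reported.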
return !subres_skip;
};
subresource_map->ForRange(normalized_isr, subres_callback);
skip |= subres_skip;
}
return skip;
}
bool CoreChecks::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
bool skip = false;
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
const auto *cb_node = GetCBState(commandBuffer);
const auto *image_state = GetImageState(image);
if (cb_node && image_state) {
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-image-00003");
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearColorImage()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdClearColorImage-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
if (api_version >= VK_API_VERSION_1_1 || device_extensions.vk_khr_maintenance1) {
skip |=
ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearColorImage",
"VUID-vkCmdClearColorImage-image-01993", "VUID-vkCmdClearColorImage-image-01993");
}
skip |= InsideRenderPass(cb_node, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-renderpass");
for (uint32_t i = 0; i < rangeCount; ++i) {
std::string param_name = "pRanges[" + std::to_string(i) + "]";
skip |= ValidateCmdClearColorSubresourceRange(image_state, pRanges[i], param_name.c_str());
skip |= ValidateImageAttributes(image_state, pRanges[i]);
skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearColorImage()");
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
auto cb_node = GetCBState(commandBuffer);
auto image_state = GetImageState(image);
if (cb_node && image_state) {
for (uint32_t i = 0; i < rangeCount; ++i) {
SetImageInitialLayout(cb_node, image, pRanges[i], imageLayout);
}
}
}
bool CoreChecks::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
bool skip = false;
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
const auto *cb_node = GetCBState(commandBuffer);
const auto *image_state = GetImageState(image);
if (cb_node && image_state) {
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearDepthStencilImage()",
"VUID-vkCmdClearDepthStencilImage-image-00010");
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearDepthStencilImage()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdClearDepthStencilImage-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
if (api_version >= VK_API_VERSION_1_1 || device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearDepthStencilImage",
"VUID-vkCmdClearDepthStencilImage-image-01994",
"VUID-vkCmdClearDepthStencilImage-image-01994");
}
skip |= InsideRenderPass(cb_node, "vkCmdClearDepthStencilImage()", "VUID-vkCmdClearDepthStencilImage-renderpass");
for (uint32_t i = 0; i < rangeCount; ++i) {
std::string param_name = "pRanges[" + std::to_string(i) + "]";
skip |= ValidateCmdClearDepthSubresourceRange(image_state, pRanges[i], param_name.c_str());
skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()");
// Image aspect must be depth or stencil or both
VkImageAspectFlags valid_aspects = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if (((pRanges[i].aspectMask & valid_aspects) == 0) || ((pRanges[i].aspectMask & ~valid_aspects) != 0)) {
char const str[] =
"vkCmdClearDepthStencilImage aspectMasks for all subresource ranges must be set to VK_IMAGE_ASPECT_DEPTH_BIT "
"and/or VK_IMAGE_ASPECT_STENCIL_BIT";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), kVUID_Core_DrawState_InvalidImageAspect, str);
}
}
if (image_state && !FormatIsDepthOrStencil(image_state->createInfo.format)) {
char const str[] = "vkCmdClearDepthStencilImage called without a depth/stencil image.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), "VUID-vkCmdClearDepthStencilImage-image-00014", "%s.", str);
}
if (VK_IMAGE_USAGE_TRANSFER_DST_BIT != (VK_IMAGE_USAGE_TRANSFER_DST_BIT & image_state->createInfo.usage)) {
char const str[] =
"vkCmdClearDepthStencilImage() called with an image that was not created with the VK_IMAGE_USAGE_TRANSFER_DST_BIT "
"set.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), "VUID-vkCmdClearDepthStencilImage-image-00009", "%s.", str);
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
auto cb_node = GetCBState(commandBuffer);
auto image_state = GetImageState(image);
if (cb_node && image_state) {
for (uint32_t i = 0; i < rangeCount; ++i) {
SetImageInitialLayout(cb_node, image, pRanges[i], imageLayout);
}
}
}
// Returns true if the half-open ranges [start, start + start_offset) and [end, end + end_offset) overlap
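// For example, [2, 2+4) and [5, 5+3) overlap on [5, 6), while [2, 2+3) and [5, 5+3) do not.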
static bool RangesIntersect(int32_t start, uint32_t start_offset, int32_t end, uint32_t end_offset) {
bool result = false;
uint32_t intersection_min = std::max(static_cast<uint32_t>(start), static_cast<uint32_t>(end));
uint32_t intersection_max = std::min(static_cast<uint32_t>(start) + start_offset, static_cast<uint32_t>(end) + end_offset);
if (intersection_max > intersection_min) {
result = true;
}
return result;
}
// Returns true if source area of first copy region intersects dest area of second region
// It is assumed that these are copy regions within a single image (otherwise no possibility of collision)
static bool RegionIntersects(const VkImageCopy *rgn0, const VkImageCopy *rgn1, VkImageType type, bool is_multiplane) {
bool result = false;
// Separate planes within a multiplane image cannot intersect
if (is_multiplane && (rgn0->srcSubresource.aspectMask != rgn1->dstSubresource.aspectMask)) {
return result;
}
if ((rgn0->srcSubresource.mipLevel == rgn1->dstSubresource.mipLevel) &&
(RangesIntersect(rgn0->srcSubresource.baseArrayLayer, rgn0->srcSubresource.layerCount, rgn1->dstSubresource.baseArrayLayer,
rgn1->dstSubresource.layerCount))) {
result = true;
switch (type) {
case VK_IMAGE_TYPE_3D:
result &= RangesIntersect(rgn0->srcOffset.z, rgn0->extent.depth, rgn1->dstOffset.z, rgn1->extent.depth);
// fall through
case VK_IMAGE_TYPE_2D:
result &= RangesIntersect(rgn0->srcOffset.y, rgn0->extent.height, rgn1->dstOffset.y, rgn1->extent.height);
// fall through
case VK_IMAGE_TYPE_1D:
result &= RangesIntersect(rgn0->srcOffset.x, rgn0->extent.width, rgn1->dstOffset.x, rgn1->extent.width);
break;
default:
// Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
assert(false);
}
}
return result;
}
// Returns non-zero if offset and extent exceed image extents
static const uint32_t x_bit = 1;
static const uint32_t y_bit = 2;
static const uint32_t z_bit = 4;
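// Callers test the returned mask per dimension, e.g. (ExceedsBounds(...) & x_bit) indicates an x-axis violation.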
static uint32_t ExceedsBounds(const VkOffset3D *offset, const VkExtent3D *extent, const VkExtent3D *image_extent) {
uint32_t result = 0;
// Extents/depths cannot be negative but checks left in for clarity
if ((offset->z + extent->depth > image_extent->depth) || (offset->z < 0) ||
((offset->z + static_cast<int32_t>(extent->depth)) < 0)) {
result |= z_bit;
}
if ((offset->y + extent->height > image_extent->height) || (offset->y < 0) ||
((offset->y + static_cast<int32_t>(extent->height)) < 0)) {
result |= y_bit;
}
if ((offset->x + extent->width > image_extent->width) || (offset->x < 0) ||
((offset->x + static_cast<int32_t>(extent->width)) < 0)) {
result |= x_bit;
}
return result;
}
// Test if two VkExtent3D structs are equivalent
static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
bool result = true;
if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
(extent->depth != other_extent->depth)) {
result = false;
}
return result;
}
// For image copies between compressed/uncompressed formats, the extent is provided in source image texels
// Destination image texel extents must be adjusted by block size for the dest validation checks
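// For example, a copy of a 64x64 texel region from a BC1 source (4x4 texel blocks) covers a 16x16 texel region of an
// uncompressed destination; copies in the reverse direction scale the extent up by the block size instead.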
VkExtent3D GetAdjustedDestImageExtent(VkFormat src_format, VkFormat dst_format, VkExtent3D extent) {
VkExtent3D adjusted_extent = extent;
if ((FormatIsCompressed(src_format) && (!FormatIsCompressed(dst_format)))) {
VkExtent3D block_size = FormatTexelBlockExtent(src_format);
adjusted_extent.width /= block_size.width;
adjusted_extent.height /= block_size.height;
adjusted_extent.depth /= block_size.depth;
} else if ((!FormatIsCompressed(src_format) && (FormatIsCompressed(dst_format)))) {
VkExtent3D block_size = FormatTexelBlockExtent(dst_format);
adjusted_extent.width *= block_size.width;
adjusted_extent.height *= block_size.height;
adjusted_extent.depth *= block_size.depth;
}
return adjusted_extent;
}
// Returns the effective extent of an image subresource, adjusted for mip level and array depth.
static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) {
const uint32_t mip = subresource->mipLevel;
// Return zero extent if mip level doesn't exist
if (mip >= img->createInfo.mipLevels) {
return VkExtent3D{0, 0, 0};
}
// Don't allow mip adjustment to create 0 dim, but pass along a 0 if that's what subresource specified
VkExtent3D extent = img->createInfo.extent;
// If multi-plane, adjust per-plane extent
if (FormatIsMultiplane(img->createInfo.format)) {
VkExtent2D divisors = FindMultiplaneExtentDivisors(img->createInfo.format, subresource->aspectMask);
extent.width /= divisors.width;
extent.height /= divisors.height;
}
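// Corner-sampled images (VK_NV_corner_sampled_image) shrink (dim - 1) by half per mip level and clamp to a minimum of 2,
// unlike standard images which shift dim down per mip level with a minimum of 1.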
if (img->createInfo.flags & VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV) {
extent.width = (0 == extent.width ? 0 : std::max(2U, 1 + ((extent.width - 1) >> mip)));
extent.height = (0 == extent.height ? 0 : std::max(2U, 1 + ((extent.height - 1) >> mip)));
extent.depth = (0 == extent.depth ? 0 : std::max(2U, 1 + ((extent.depth - 1) >> mip)));
} else {
extent.width = (0 == extent.width ? 0 : std::max(1U, extent.width >> mip));
extent.height = (0 == extent.height ? 0 : std::max(1U, extent.height >> mip));
extent.depth = (0 == extent.depth ? 0 : std::max(1U, extent.depth >> mip));
}
// Image arrays have an effective z extent that isn't diminished by mip level
if (VK_IMAGE_TYPE_3D != img->createInfo.imageType) {
extent.depth = img->createInfo.arrayLayers;
}
return extent;
}
// Test if the extent argument has all dimensions set to 0.
static inline bool IsExtentAllZeroes(const VkExtent3D *extent) {
return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
}
// Test if the extent argument has any dimensions set to 0.
static inline bool IsExtentSizeZero(const VkExtent3D *extent) {
return ((extent->width == 0) || (extent->height == 0) || (extent->depth == 0));
}
// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
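// For example, a reported granularity of (1, 1, 1) compressed texel blocks on a BC-compressed format with 4x4 blocks
// scales to an effective granularity of (4, 4, 1) texels.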
VkExtent3D CoreChecks::GetScaledItg(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img) const {
// Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
VkExtent3D granularity = {0, 0, 0};
auto pPool = GetCommandPoolState(cb_node->createInfo.commandPool);
if (pPool) {
granularity = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
if (FormatIsCompressed(img->createInfo.format)) {
auto block_size = FormatTexelBlockExtent(img->createInfo.format);
granularity.width *= block_size.width;
granularity.height *= block_size.height;
}
}
return granularity;
}
// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
bool valid = true;
if ((SafeModulo(extent->depth, granularity->depth) != 0) || (SafeModulo(extent->width, granularity->width) != 0) ||
(SafeModulo(extent->height, granularity->height) != 0)) {
valid = false;
}
return valid;
}
// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
bool CoreChecks::CheckItgOffset(const CMD_BUFFER_STATE *cb_node, const VkOffset3D *offset, const VkExtent3D *granularity,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
VkExtent3D offset_extent = {};
offset_extent.width = static_cast<uint32_t>(abs(offset->x));
offset_extent.height = static_cast<uint32_t>(abs(offset->y));
offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
if (IsExtentAllZeroes(&offset_extent) == false) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) when the command buffer's queue family "
"image transfer granularity is (w=0, h=0, d=0).",
function, i, member, offset->x, offset->y, offset->z);
}
} else {
// If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
// integer multiples of the image transfer granularity.
if (IsExtentAligned(&offset_extent, granularity) == false) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer multiples of this command "
"buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
granularity->depth);
}
}
return skip;
}
// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
bool CoreChecks::CheckItgExtent(const CMD_BUFFER_STATE *cb_node, const VkExtent3D *extent, const VkOffset3D *offset,
const VkExtent3D *granularity, const VkExtent3D *subresource_extent, const VkImageType image_type,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
// subresource extent.
if (IsExtentEqual(extent, subresource_extent) == false) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
"when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
subresource_extent->height, subresource_extent->depth);
}
} else {
// If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
// integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
// subresource extent dimensions.
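// For example, with a granularity of (8, 8, 1) and a 60x60 subresource, an extent width of 60 at offset x == 0 is valid
// because offset + extent reaches the subresource edge, even though 60 is not a multiple of 8.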
VkExtent3D offset_extent_sum = {};
offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
bool x_ok = true;
bool y_ok = true;
bool z_ok = true;
switch (image_type) {
case VK_IMAGE_TYPE_3D:
z_ok = ((0 == SafeModulo(extent->depth, granularity->depth)) ||
(subresource_extent->depth == offset_extent_sum.depth));
// fall through
case VK_IMAGE_TYPE_2D:
y_ok = ((0 == SafeModulo(extent->height, granularity->height)) ||
(subresource_extent->height == offset_extent_sum.height));
// fall through
case VK_IMAGE_TYPE_1D:
x_ok = ((0 == SafeModulo(extent->width, granularity->width)) ||
(subresource_extent->width == offset_extent_sum.width));
break;
default:
// Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
assert(false);
}
if (!(x_ok && y_ok && z_ok)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command "
"buffer's queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
"extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
function, i, member, extent->width, extent->height, extent->depth, granularity->width,
granularity->height, granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height,
extent->depth, subresource_extent->width, subresource_extent->height, subresource_extent->depth);
}
}
return skip;
}
bool CoreChecks::ValidateImageMipLevel(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img, uint32_t mip_level,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
if (mip_level >= img->createInfo.mipLevels) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"In %s, pRegions[%u].%s.mipLevel is %u, but provided %s has %u mip levels.", function, i, member, mip_level,
report_data->FormatHandle(img->image).c_str(), img->createInfo.mipLevels);
}
return skip;
}
bool CoreChecks::ValidateImageArrayLayerRange(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img, const uint32_t base_layer,
const uint32_t layer_count, const uint32_t i, const char *function,
const char *member, const char *vuid) const {
bool skip = false;
if (base_layer >= img->createInfo.arrayLayers || layer_count > img->createInfo.arrayLayers ||
(base_layer + layer_count) > img->createInfo.arrayLayers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"In %s, pRegions[%u].%s.baseArrayLayer is %u and .layerCount is "
"%u, but provided %s has %u array layers.",
function, i, member, base_layer, layer_count, report_data->FormatHandle(img->image).c_str(),
img->createInfo.arrayLayers);
}
return skip;
}
// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
bool CoreChecks::ValidateCopyBufferImageTransferGranularityRequirements(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img,
const VkBufferImageCopy *region, const uint32_t i,
const char *function, const char *vuid) const {
bool skip = false;
VkExtent3D granularity = GetScaledItg(cb_node, img);
skip |= CheckItgOffset(cb_node, ®ion->imageOffset, &granularity, i, function, "imageOffset", vuid);
VkExtent3D subresource_extent = GetImageSubresourceExtent(img, ®ion->imageSubresource);
skip |= CheckItgExtent(cb_node, ®ion->imageExtent, ®ion->imageOffset, &granularity, &subresource_extent,
img->createInfo.imageType, i, function, "imageExtent", vuid);
return skip;
}
// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
bool CoreChecks::ValidateCopyImageTransferGranularityRequirements(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *src_img,
const IMAGE_STATE *dst_img, const VkImageCopy *region,
const uint32_t i, const char *function) const {
bool skip = false;
// Source image checks
VkExtent3D granularity = GetScaledItg(cb_node, src_img);
skip |=
CheckItgOffset(cb_node, ®ion->srcOffset, &granularity, i, function, "srcOffset", "VUID-vkCmdCopyImage-srcOffset-01783");
VkExtent3D subresource_extent = GetImageSubresourceExtent(src_img, ®ion->srcSubresource);
const VkExtent3D extent = region->extent;
skip |= CheckItgExtent(cb_node, &extent, ®ion->srcOffset, &granularity, &subresource_extent, src_img->createInfo.imageType,
i, function, "extent", "VUID-vkCmdCopyImage-srcOffset-01783");
// Destination image checks
granularity = GetScaledItg(cb_node, dst_img);
skip |=
CheckItgOffset(cb_node, ®ion->dstOffset, &granularity, i, function, "dstOffset", "VUID-vkCmdCopyImage-dstOffset-01784");
// Adjust dest extent, if necessary
const VkExtent3D dest_effective_extent =
GetAdjustedDestImageExtent(src_img->createInfo.format, dst_img->createInfo.format, extent);
subresource_extent = GetImageSubresourceExtent(dst_img, ®ion->dstSubresource);
skip |= CheckItgExtent(cb_node, &dest_effective_extent, ®ion->dstOffset, &granularity, &subresource_extent,
dst_img->createInfo.imageType, i, function, "extent", "VUID-vkCmdCopyImage-dstOffset-01784");
return skip;
}
// Validate contents of a VkImageCopy struct
bool CoreChecks::ValidateImageCopyData(const uint32_t regionCount, const VkImageCopy *ic_regions, const IMAGE_STATE *src_state,
const IMAGE_STATE *dst_state) const {
bool skip = false;
for (uint32_t i = 0; i < regionCount; i++) {
const VkImageCopy region = ic_regions[i];
// For comp<->uncomp copies, the copy extent for the dest image must be adjusted
const VkExtent3D src_copy_extent = region.extent;
const VkExtent3D dst_copy_extent =
GetAdjustedDestImageExtent(src_state->createInfo.format, dst_state->createInfo.format, region.extent);
bool slice_override = false;
uint32_t depth_slices = 0;
// Special case for copying between a 1D/2D array and a 3D image
// TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up.
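// For copies between a 2D array and a 3D image (allowed with maintenance1), the slice count comes from the 2D
// subresource's layerCount; the depth checks below substitute that value for extent.depth when it differs from 1.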
if ((VK_IMAGE_TYPE_3D == src_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != dst_state->createInfo.imageType)) {
depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
} else if ((VK_IMAGE_TYPE_3D == dst_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != src_state->createInfo.imageType)) {
depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
}
// Do all checks on source image
//
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((0 != region.srcOffset.y) || (1 != src_copy_extent.height)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-00146",
"vkCmdCopyImage(): pRegion[%d] srcOffset.y is %d and extent.height is %d. For 1D images these must "
"be 0 and 1, respectively.",
i, region.srcOffset.y, src_copy_extent.height);
}
}
// VUID-VkImageCopy-srcImage-01785
if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.srcOffset.z) || (1 != src_copy_extent.depth))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-01785",
"vkCmdCopyImage(): pRegion[%d] srcOffset.z is %d and extent.depth is %d. For 1D images "
"these must be 0 and 1, respectively.",
i, region.srcOffset.z, src_copy_extent.depth);
}
// VUID-VkImageCopy-srcImage-01787
if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.srcOffset.z)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-01787",
"vkCmdCopyImage(): pRegion[%d] srcOffset.z is %d. For 2D images the z-offset must be 0.", i,
region.srcOffset.z);
}
if (device_extensions.vk_khr_maintenance1) {
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-00141",
"vkCmdCopyImage(): pRegion[%d] srcSubresource.baseArrayLayer is %d and srcSubresource.layerCount "
"is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount);
}
}
} else { // Pre maint 1
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D || dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-00141",
"vkCmdCopyImage(): pRegion[%d] srcSubresource.baseArrayLayer is %d and "
"srcSubresource.layerCount is %d. For copies with either source or dest of type "
"VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.",
i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount);
}
}
}
// Source checks that apply only to compressed images (or to _422 images if ycbcr enabled)
bool ext_ycbcr = device_extensions.vk_khr_sampler_ycbcr_conversion;
if (FormatIsCompressed(src_state->createInfo.format) ||
(ext_ycbcr && FormatIsSinglePlane_422(src_state->createInfo.format))) {
const VkExtent3D block_size = FormatTexelBlockExtent(src_state->createInfo.format);
// image offsets must be multiples of block dimensions
if ((SafeModulo(region.srcOffset.x, block_size.width) != 0) ||
(SafeModulo(region.srcOffset.y, block_size.height) != 0) ||
(SafeModulo(region.srcOffset.z, block_size.depth) != 0)) {
const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-srcImage-01727" : "VUID-VkImageCopy-srcOffset-00157";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] srcOffset (%d, %d) must be multiples of the compressed image's "
"texel width & height (%d, %d).",
i, region.srcOffset.x, region.srcOffset.y, block_size.width, block_size.height);
}
const VkExtent3D mip_extent = GetImageSubresourceExtent(src_state, &(region.srcSubresource));
if ((SafeModulo(src_copy_extent.width, block_size.width) != 0) &&
(src_copy_extent.width + region.srcOffset.x != mip_extent.width)) {
const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-srcImage-01728" : "VUID-VkImageCopy-extent-00158";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block "
"width (%d), or when added to srcOffset.x (%d) must equal the image subresource width (%d).",
i, src_copy_extent.width, block_size.width, region.srcOffset.x, mip_extent.width);
}
// Extent height must be a multiple of block height, or extent+offset height must equal subresource height
if ((SafeModulo(src_copy_extent.height, block_size.height) != 0) &&
(src_copy_extent.height + region.srcOffset.y != mip_extent.height)) {
const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-srcImage-01729" : "VUID-VkImageCopy-extent-00159";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] extent height (%d) must be a multiple of the compressed texture block "
"height (%d), or when added to srcOffset.y (%d) must equal the image subresource height (%d).",
i, src_copy_extent.height, block_size.height, region.srcOffset.y, mip_extent.height);
}
// Extent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth
uint32_t copy_depth = (slice_override ? depth_slices : src_copy_extent.depth);
if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.srcOffset.z != mip_extent.depth)) {
const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-srcImage-01730" : "VUID-VkImageCopy-extent-00160";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block "
"depth (%d), or when added to srcOffset.z (%d) must equal the image subresource depth (%d).",
i, src_copy_extent.depth, block_size.depth, region.srcOffset.z, mip_extent.depth);
}
} // Compressed
// Do all checks on dest image
//
if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((0 != region.dstOffset.y) || (1 != dst_copy_extent.height)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), "VUID-VkImageCopy-dstImage-00152",
"vkCmdCopyImage(): pRegion[%d] dstOffset.y is %d and dst_copy_extent.height is %d. For 1D images "
"these must be 0 and 1, respectively.",
i, region.dstOffset.y, dst_copy_extent.height);
}
}
// VUID-VkImageCopy-dstImage-01786
if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.dstOffset.z) || (1 != dst_copy_extent.depth))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), "VUID-VkImageCopy-dstImage-01786",
"vkCmdCopyImage(): pRegion[%d] dstOffset.z is %d and extent.depth is %d. For 1D images these must be 0 "
"and 1, respectively.",
i, region.dstOffset.z, dst_copy_extent.depth);
}
// VUID-VkImageCopy-dstImage-01788
if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.dstOffset.z)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), "VUID-VkImageCopy-dstImage-01788",
"vkCmdCopyImage(): pRegion[%d] dstOffset.z is %d. For 2D images the z-offset must be 0.", i,
region.dstOffset.z);
}
// VU01199 changed with mnt1
if (device_extensions.vk_khr_maintenance1) {
if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), "VUID-VkImageCopy-srcImage-00141",
"vkCmdCopyImage(): pRegion[%d] dstSubresource.baseArrayLayer is %d and dstSubresource.layerCount "
"is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
}
}
} else { // Pre maint 1
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D || dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), "VUID-VkImageCopy-srcImage-00141",
"vkCmdCopyImage(): pRegion[%d] dstSubresource.baseArrayLayer is %d and "
"dstSubresource.layerCount is %d. For copies with either source or dest of type "
"VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.",
i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
}
}
}
// Dest checks that apply only to compressed images (or to _422 images if ycbcr enabled)
if (FormatIsCompressed(dst_state->createInfo.format) ||
(ext_ycbcr && FormatIsSinglePlane_422(dst_state->createInfo.format))) {
const VkExtent3D block_size = FormatTexelBlockExtent(dst_state->createInfo.format);
// image offsets must be multiples of block dimensions
if ((SafeModulo(region.dstOffset.x, block_size.width) != 0) ||
(SafeModulo(region.dstOffset.y, block_size.height) != 0) ||
(SafeModulo(region.dstOffset.z, block_size.depth) != 0)) {
const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-dstImage-01731" : "VUID-VkImageCopy-dstOffset-00162";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] dstOffset (%d, %d) must be multiples of the compressed image's "
"texel width & height (%d, %d).",
i, region.dstOffset.x, region.dstOffset.y, block_size.width, block_size.height);
}
const VkExtent3D mip_extent = GetImageSubresourceExtent(dst_state, &(region.dstSubresource));
if ((SafeModulo(dst_copy_extent.width, block_size.width) != 0) &&
(dst_copy_extent.width + region.dstOffset.x != mip_extent.width)) {
const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-dstImage-01732" : "VUID-VkImageCopy-extent-00163";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture "
"block width (%d), or when added to dstOffset.x (%d) must equal the image subresource width (%d).",
i, dst_copy_extent.width, block_size.width, region.dstOffset.x, mip_extent.width);
}
// Extent height must be a multiple of block height, or dst_copy_extent+offset height must equal subresource height
if ((SafeModulo(dst_copy_extent.height, block_size.height) != 0) &&
(dst_copy_extent.height + region.dstOffset.y != mip_extent.height)) {
const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-dstImage-01733" : "VUID-VkImageCopy-extent-00164";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] dst_copy_extent height (%d) must be a multiple of the compressed "
"texture block height (%d), or when added to dstOffset.y (%d) must equal the image subresource "
"height (%d).",
i, dst_copy_extent.height, block_size.height, region.dstOffset.y, mip_extent.height);
}
// Extent depth must be a multiple of block depth, or dst_copy_extent+offset depth must equal subresource depth
uint32_t copy_depth = (slice_override ? depth_slices : dst_copy_extent.depth);
if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.dstOffset.z != mip_extent.depth)) {
const char *vuid = ext_ycbcr ? "VUID-VkImageCopy-dstImage-01734" : "VUID-VkImageCopy-extent-00165";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture "
"block depth (%d), or when added to dstOffset.z (%d) must equal the image subresource depth (%d).",
i, dst_copy_extent.depth, block_size.depth, region.dstOffset.z, mip_extent.depth);
}
} // Compressed
}
return skip;
}
// vkCmdCopyImage checks that only apply if the multiplane extension is enabled
bool CoreChecks::CopyImageMultiplaneValidation(VkCommandBuffer command_buffer, const IMAGE_STATE *src_image_state,
const IMAGE_STATE *dst_image_state, const VkImageCopy region) const {
bool skip = false;
// Neither image is multiplane
if ((!FormatIsMultiplane(src_image_state->createInfo.format)) && (!FormatIsMultiplane(dst_image_state->createInfo.format))) {
// If neither image is multi-plane the aspectMask member of src and dst must match
if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Copy between non-multiplane images with differing aspectMasks ( 0x" << std::hex
<< region.srcSubresource.aspectMask << " and 0x" << region.dstSubresource.aspectMask << " )";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcImage-01551", "%s.", ss.str().c_str());
}
} else {
// Source image multiplane checks
uint32_t planes = FormatPlaneCount(src_image_state->createInfo.format);
VkImageAspectFlags aspect = region.srcSubresource.aspectMask;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Source image aspect mask (0x" << std::hex << aspect << ") is invalid for 2-plane format";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcImage-01552", "%s.", ss.str().c_str());
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Source image aspect mask (0x" << std::hex << aspect << ") is invalid for 3-plane format";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcImage-01553", "%s.", ss.str().c_str());
}
// Single-plane to multi-plane
if ((!FormatIsMultiplane(src_image_state->createInfo.format)) && (FormatIsMultiplane(dst_image_state->createInfo.format)) &&
(VK_IMAGE_ASPECT_COLOR_BIT != aspect)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Source image aspect mask (0x" << std::hex << aspect << ") is not VK_IMAGE_ASPECT_COLOR_BIT";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-dstImage-01557", "%s.", ss.str().c_str());
}
// Dest image multiplane checks
planes = FormatPlaneCount(dst_image_state->createInfo.format);
aspect = region.dstSubresource.aspectMask;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Dest image aspect mask (0x" << std::hex << aspect << ") is invalid for 2-plane format";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-dstImage-01554", "%s.", ss.str().c_str());
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Dest image aspect mask (0x" << std::hex << aspect << ") is invalid for 3-plane format";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-dstImage-01555", "%s.", ss.str().c_str());
}
// Multi-plane to single-plane
if ((FormatIsMultiplane(src_image_state->createInfo.format)) && (!FormatIsMultiplane(dst_image_state->createInfo.format)) &&
(VK_IMAGE_ASPECT_COLOR_BIT != aspect)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Dest image aspect mask (0x" << std::hex << aspect << ") is not VK_IMAGE_ASPECT_COLOR_BIT";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcImage-01556", "%s.", ss.str().c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) {
const auto *cb_node = GetCBState(commandBuffer);
const auto *src_image_state = GetImageState(srcImage);
const auto *dst_image_state = GetImageState(dstImage);
bool skip = false;
skip = ValidateImageCopyData(regionCount, pRegions, src_image_state, dst_image_state);
VkCommandBuffer command_buffer = cb_node->commandBuffer;
for (uint32_t i = 0; i < regionCount; i++) {
const VkImageCopy region = pRegions[i];
// For comp/uncomp copies, the copy extent for the dest image must be adjusted
VkExtent3D src_copy_extent = region.extent;
VkExtent3D dst_copy_extent =
GetAdjustedDestImageExtent(src_image_state->createInfo.format, dst_image_state->createInfo.format, region.extent);
bool slice_override = false;
uint32_t depth_slices = 0;
// Special case for copying between a 1D/2D array and a 3D image
// TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up.
if ((VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D != dst_image_state->createInfo.imageType)) {
depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
} else if ((VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D != src_image_state->createInfo.imageType)) {
depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
}
skip |= ValidateImageSubresourceLayers(cb_node, ®ion.srcSubresource, "vkCmdCopyImage", "srcSubresource", i);
skip |= ValidateImageSubresourceLayers(cb_node, ®ion.dstSubresource, "vkCmdCopyImage", "dstSubresource", i);
skip |= ValidateImageMipLevel(cb_node, src_image_state, region.srcSubresource.mipLevel, i, "vkCmdCopyImage",
"srcSubresource", "VUID-vkCmdCopyImage-srcSubresource-01696");
skip |= ValidateImageMipLevel(cb_node, dst_image_state, region.dstSubresource.mipLevel, i, "vkCmdCopyImage",
"dstSubresource", "VUID-vkCmdCopyImage-dstSubresource-01697");
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, region.srcSubresource.baseArrayLayer,
region.srcSubresource.layerCount, i, "vkCmdCopyImage", "srcSubresource",
"VUID-vkCmdCopyImage-srcSubresource-01698");
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, region.dstSubresource.baseArrayLayer,
region.dstSubresource.layerCount, i, "vkCmdCopyImage", "dstSubresource",
"VUID-vkCmdCopyImage-dstSubresource-01699");
if (device_extensions.vk_khr_maintenance1) {
// No chance of mismatch if we're overriding depth slice count
if (!slice_override) {
// The number of depth slices in srcSubresource and dstSubresource must match
// Depth comes from layerCount for 1D,2D resources, from extent.depth for 3D
uint32_t src_slices =
(VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType ? src_copy_extent.depth
: region.srcSubresource.layerCount);
uint32_t dst_slices =
(VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType ? dst_copy_extent.depth
: region.dstSubresource.layerCount);
if (src_slices != dst_slices) {
std::stringstream ss;
ss << "vkCmdCopyImage(): number of depth slices in source and destination subresources for pRegions[" << i
<< "] do not match";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-extent-00140", "%s.", ss.str().c_str());
}
}
} else {
// For each region the layerCount member of srcSubresource and dstSubresource must match
if (region.srcSubresource.layerCount != region.dstSubresource.layerCount) {
std::stringstream ss;
ss << "vkCmdCopyImage(): number of layers in source and destination subresources for pRegions[" << i
<< "] do not match";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-extent-00140", "%s.", ss.str().c_str());
}
}
// Do multiplane-specific checks, if extension enabled
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
skip |= CopyImageMultiplaneValidation(command_buffer, src_image_state, dst_image_state, region);
}
if (!device_extensions.vk_khr_sampler_ycbcr_conversion) {
// not multi-plane, the aspectMask member of srcSubresource and dstSubresource must match
if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) {
char const str[] = "vkCmdCopyImage(): Src and dest aspectMasks for each region must match";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-aspectMask-00137", "%s.", str);
}
}
// For each region, the aspectMask member of srcSubresource must be present in the source image
if (!VerifyAspectsPresent(region.srcSubresource.aspectMask, src_image_state->createInfo.format)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): pRegion[" << i
<< "] srcSubresource.aspectMask cannot specify aspects not present in source image";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-aspectMask-00142", "%s.", ss.str().c_str());
}
// For each region, the aspectMask member of dstSubresource must be present in the destination image
if (!VerifyAspectsPresent(region.dstSubresource.aspectMask, dst_image_state->createInfo.format)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): pRegion[" << i
<< "] dstSubresource.aspectMask cannot specify aspects not present in dest image";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-aspectMask-00143", "%s.", ss.str().c_str());
}
// Check region extents for 1D-1D, 2D-2D, and 3D-3D copies
if (src_image_state->createInfo.imageType == dst_image_state->createInfo.imageType) {
// The source region specified by a given element of regions must be a region that is contained within srcImage
VkExtent3D img_extent = GetImageSubresourceExtent(src_image_state, &(region.srcSubresource));
if (0 != ExceedsBounds(®ion.srcOffset, &src_copy_extent, &img_extent)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Source pRegion[" << i << "] with mipLevel [ " << region.srcSubresource.mipLevel
<< " ], offset [ " << region.srcOffset.x << ", " << region.srcOffset.y << ", " << region.srcOffset.z
<< " ], extent [ " << src_copy_extent.width << ", " << src_copy_extent.height << ", " << src_copy_extent.depth
<< " ] exceeds the source image dimensions";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-pRegions-00122", "%s.", ss.str().c_str());
}
// The destination region specified by a given element of regions must be a region that is contained within dst_image
img_extent = GetImageSubresourceExtent(dst_image_state, &(region.dstSubresource));
if (0 != ExceedsBounds(®ion.dstOffset, &dst_copy_extent, &img_extent)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Dest pRegion[" << i << "] with mipLevel [ " << region.dstSubresource.mipLevel
<< " ], offset [ " << region.dstOffset.x << ", " << region.dstOffset.y << ", " << region.dstOffset.z
<< " ], extent [ " << dst_copy_extent.width << ", " << dst_copy_extent.height << ", " << dst_copy_extent.depth
<< " ] exceeds the destination image dimensions";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-pRegions-00123", "%s.", ss.str().c_str());
}
}
// Each dimension offset + extent limits must fall with image subresource extent
VkExtent3D subresource_extent = GetImageSubresourceExtent(src_image_state, &(region.srcSubresource));
if (slice_override) src_copy_extent.depth = depth_slices;
uint32_t extent_check = ExceedsBounds(&(region.srcOffset), &src_copy_extent, &subresource_extent);
if (extent_check & x_bit) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcOffset-00144",
"vkCmdCopyImage(): Source image pRegion %1d x-dimension offset [%1d] + extent [%1d] exceeds subResource "
"width [%1d].",
i, region.srcOffset.x, src_copy_extent.width, subresource_extent.width);
}
if (extent_check & y_bit) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcOffset-00145",
"vkCmdCopyImage(): Source image pRegion %1d y-dimension offset [%1d] + extent [%1d] exceeds subResource "
"height [%1d].",
i, region.srcOffset.y, src_copy_extent.height, subresource_extent.height);
}
if (extent_check & z_bit) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcOffset-00147",
"vkCmdCopyImage(): Source image pRegion %1d z-dimension offset [%1d] + extent [%1d] exceeds subResource "
"depth [%1d].",
i, region.srcOffset.z, src_copy_extent.depth, subresource_extent.depth);
}
// Adjust dest extent if necessary
subresource_extent = GetImageSubresourceExtent(dst_image_state, &(region.dstSubresource));
if (slice_override) dst_copy_extent.depth = depth_slices;
extent_check = ExceedsBounds(&(region.dstOffset), &dst_copy_extent, &subresource_extent);
if (extent_check & x_bit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-dstOffset-00150",
"vkCmdCopyImage(): Dest image pRegion %1d x-dimension offset [%1d] + extent [%1d] exceeds subResource "
"width [%1d].",
i, region.dstOffset.x, dst_copy_extent.width, subresource_extent.width);
}
if (extent_check & y_bit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-dstOffset-00151",
"vkCmdCopyImage(): Dest image pRegion %1d y-dimension offset [%1d] + extent [%1d] exceeds subResource "
"height [%1d].",
i, region.dstOffset.y, dst_copy_extent.height, subresource_extent.height);
}
if (extent_check & z_bit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-dstOffset-00153",
"vkCmdCopyImage(): Dest image pRegion %1d z-dimension offset [%1d] + extent [%1d] exceeds subResource "
"depth [%1d].",
i, region.dstOffset.z, dst_copy_extent.depth, subresource_extent.depth);
}
// The union of all source regions, and the union of all destination regions, specified by the elements of regions,
// must not overlap in memory
if (src_image_state->image == dst_image_state->image) {
for (uint32_t j = 0; j < regionCount; j++) {
if (RegionIntersects(®ion, &pRegions[j], src_image_state->createInfo.imageType,
FormatIsMultiplane(src_image_state->createInfo.format))) {
std::stringstream ss;
ss << "vkCmdCopyImage(): pRegions[" << i << "] src overlaps with pRegions[" << j << "].";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-pRegions-00124", "%s.", ss.str().c_str());
}
}
}
}
// The formats of src_image and dst_image must be compatible. Formats are considered compatible if their texel size in bytes
// is the same between both formats. For example, VK_FORMAT_R8G8B8A8_UNORM is compatible with VK_FORMAT_R32_UINT because
// both texels are 4 bytes in size. Depth/stencil formats must match exactly.
if (FormatIsDepthOrStencil(src_image_state->createInfo.format) || FormatIsDepthOrStencil(dst_image_state->createInfo.format)) {
if (src_image_state->createInfo.format != dst_image_state->createInfo.format) {
char const str[] = "vkCmdCopyImage called with unmatched source and dest image depth/stencil formats.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), kVUID_Core_DrawState_MismatchedImageFormat, str);
}
} else {
if (!FormatSizesAreEqual(src_image_state->createInfo.format, dst_image_state->createInfo.format, regionCount, pRegions)) {
char const str[] = "vkCmdCopyImage called with unmatched source and dest image format sizes.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-srcImage-00135", "%s.", str);
}
}
// Source and dest image sample counts must match
if (src_image_state->createInfo.samples != dst_image_state->createInfo.samples) {
char const str[] = "vkCmdCopyImage() called on image pair with non-identical sample counts.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-srcImage-00136", "%s", str);
}
skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-srcImage-00127");
skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-dstImage-00132");
// Validate that SRC & DST images have correct usage flags set
skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyImage-srcImage-00126",
"vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyImage-dstImage-00131",
"vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
if (api_version >= VK_API_VERSION_1_1 || device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, "vkCmdCopyImage()",
"VUID-vkCmdCopyImage-srcImage-01995", "VUID-vkCmdCopyImage-srcImage-01995");
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdCopyImage()",
"VUID-vkCmdCopyImage-dstImage-01996", "VUID-vkCmdCopyImage-dstImage-01996");
}
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdCopyImage()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyImage-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
skip |= InsideRenderPass(cb_node, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-renderpass");
bool hit_error = false;
const char *invalid_src_layout_vuid = (src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyImage-srcImageLayout-01917"
: "VUID-vkCmdCopyImage-srcImageLayout-00129";
const char *invalid_dst_layout_vuid = (dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyImage-dstImageLayout-01395"
: "VUID-vkCmdCopyImage-dstImageLayout-00134";
for (uint32_t i = 0; i < regionCount; ++i) {
skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].srcSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdCopyImage()", invalid_src_layout_vuid,
"VUID-vkCmdCopyImage-srcImageLayout-00128", &hit_error);
skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].dstSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdCopyImage()", invalid_dst_layout_vuid,
"VUID-vkCmdCopyImage-dstImageLayout-00133", &hit_error);
skip |= ValidateCopyImageTransferGranularityRequirements(cb_node, src_image_state, dst_image_state, &pRegions[i], i,
"vkCmdCopyImage()");
}
return skip;
}
void CoreChecks::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
auto dst_image_state = GetImageState(dstImage);
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].srcSubresource, srcImageLayout);
SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].dstSubresource, dstImageLayout);
}
}
// Returns true if sub_rect is entirely contained within rect
static inline bool ContainsRect(VkRect2D rect, VkRect2D sub_rect) {
if ((sub_rect.offset.x < rect.offset.x) || (sub_rect.offset.x + sub_rect.extent.width > rect.offset.x + rect.extent.width) ||
(sub_rect.offset.y < rect.offset.y) || (sub_rect.offset.y + sub_rect.extent.height > rect.offset.y + rect.extent.height))
return false;
return true;
}
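// Check that every clear rect lies within the render area and, when an image view is available for the attachment,
// that the requested layer range is contained in the attachment's layer count.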
bool CoreChecks::ValidateClearAttachmentExtent(VkCommandBuffer command_buffer, uint32_t attachment_index,
const FRAMEBUFFER_STATE *framebuffer, uint32_t fb_attachment,
const VkRect2D &render_area, uint32_t rect_count,
const VkClearRect *clear_rects) const {
bool skip = false;
const IMAGE_VIEW_STATE *image_view_state = nullptr;
if (framebuffer && (fb_attachment != VK_ATTACHMENT_UNUSED) && (fb_attachment < framebuffer->createInfo.attachmentCount)) {
image_view_state = GetImageViewState(framebuffer->createInfo.pAttachments[fb_attachment]);
}
for (uint32_t j = 0; j < rect_count; j++) {
if (!ContainsRect(render_area, clear_rects[j].rect)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-vkCmdClearAttachments-pRects-00016",
"vkCmdClearAttachments(): The area defined by pRects[%d] is not contained in the area of "
"the current render pass instance.",
j);
}
if (image_view_state) {
// The layers specified by a given element of pRects must be contained within every attachment that
// pAttachments refers to
const auto attachment_layer_count = image_view_state->create_info.subresourceRange.layerCount;
if ((clear_rects[j].baseArrayLayer >= attachment_layer_count) ||
(clear_rects[j].baseArrayLayer + clear_rects[j].layerCount > attachment_layer_count)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-vkCmdClearAttachments-pRects-00017",
"vkCmdClearAttachments(): The layers defined in pRects[%d] are not contained in the layers "
"of pAttachment[%d].",
j, attachment_index);
}
}
}
return skip;
}
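// Validate vkCmdClearAttachments(): queue capabilities, render pass state, per-attachment aspect masks, and (for
// primary command buffers only) that each clear rect stays inside the render area and attachment layers.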
bool CoreChecks::PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkClearAttachment *pAttachments, uint32_t rectCount,
const VkClearRect *pRects) {
bool skip = false;
const CMD_BUFFER_STATE *cb_node = GetCBState(commandBuffer); // TODO: Should be const, and never modified during validation
if (!cb_node) return skip;
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearAttachments()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdClearAttachments-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
// Warn if this is issued prior to Draw Cmd and clearing the entire attachment
if (!cb_node->hasDrawCmd && (cb_node->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
(cb_node->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
// There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
// This warning should be made more specific. It'd be best to avoid triggering this test if it's a use that must call
// CmdClearAttachments.
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), kVUID_Core_DrawState_ClearCmdBeforeDraw,
"vkCmdClearAttachments() issued on %s prior to any Draw Cmds. It is recommended you "
"use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
report_data->FormatHandle(commandBuffer).c_str());
}
skip |= OutsideRenderPass(cb_node, "vkCmdClearAttachments()", "VUID-vkCmdClearAttachments-renderpass");
// Validate that attachment is in reference list of active subpass
if (cb_node->activeRenderPass) {
const VkRenderPassCreateInfo2KHR *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr();
const uint32_t renderpass_attachment_count = renderpass_create_info->attachmentCount;
const VkSubpassDescription2KHR *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass];
const auto *framebuffer = GetFramebufferState(cb_node->activeFramebuffer);
const auto &render_area = cb_node->activeRenderPassBeginInfo.renderArea;
for (uint32_t attachment_index = 0; attachment_index < attachmentCount; attachment_index++) {
auto clear_desc = &pAttachments[attachment_index];
uint32_t fb_attachment = VK_ATTACHMENT_UNUSED;
if (0 == clear_desc->aspectMask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkClearAttachment-aspectMask-requiredbitmask", " ");
} else if (clear_desc->aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkClearAttachment-aspectMask-00020", " ");
} else if (clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
uint32_t color_attachment = VK_ATTACHMENT_UNUSED;
if (clear_desc->colorAttachment < subpass_desc->colorAttachmentCount) {
color_attachment = subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment;
if ((color_attachment != VK_ATTACHMENT_UNUSED) && (color_attachment >= renderpass_attachment_count)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdClearAttachments-aspectMask-02501",
"vkCmdClearAttachments() pAttachments[%u].colorAttachment=%u is not VK_ATTACHMENT_UNUSED "
"and not a valid attachment for %s attachmentCount=%u. Subpass %u pColorAttachment[%u]=%u.",
attachment_index, clear_desc->colorAttachment,
report_data->FormatHandle(cb_node->activeRenderPass->renderPass).c_str(),
cb_node->activeSubpass, clear_desc->colorAttachment, color_attachment,
renderpass_attachment_count);
color_attachment = VK_ATTACHMENT_UNUSED; // Defensive, prevent lookup past end of renderpass attachment
}
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdClearAttachments-aspectMask-02501",
"vkCmdClearAttachments() pAttachments[%u].colorAttachment=%u out of range for %s"
" subpass %u. colorAttachmentCount=%u",
attachment_index, clear_desc->colorAttachment,
report_data->FormatHandle(cb_node->activeRenderPass->renderPass).c_str(),
cb_node->activeSubpass, subpass_desc->colorAttachmentCount);
}
fb_attachment = color_attachment;
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
(clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
char const str[] =
"vkCmdClearAttachments() aspectMask [%d] must set only VK_IMAGE_ASPECT_COLOR_BIT of a color attachment.";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkClearAttachment-aspectMask-00019", str, attachment_index);
}
} else { // Must be depth and/or stencil
if (((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) &&
((clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT)) {
char const str[] = "vkCmdClearAttachments() aspectMask [%d] is not a valid combination of bits.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkClearAttachment-aspectMask-parameter", str,
attachment_index);
}
if (!subpass_desc->pDepthStencilAttachment ||
(subpass_desc->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), kVUID_Core_DrawState_MissingAttachmentReference,
"vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
} else {
fb_attachment = subpass_desc->pDepthStencilAttachment->attachment;
}
}
if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
skip |= ValidateClearAttachmentExtent(commandBuffer, attachment_index, framebuffer, fb_attachment, render_area,
rectCount, pRects);
}
}
}
return skip;
}
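// For secondary command buffers the framebuffer may be inherited from the primary, so the clear-rect extent checks
// are captured in lambdas (cmd_execute_commands_functions) and replayed against the primary command buffer's state.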
void CoreChecks::PreCallRecordCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkClearAttachment *pAttachments, uint32_t rectCount,
const VkClearRect *pRects) {
auto *cb_node = GetCBState(commandBuffer);
if (cb_node->activeRenderPass && (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)) {
const VkRenderPassCreateInfo2KHR *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr();
const VkSubpassDescription2KHR *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass];
std::shared_ptr<std::vector<VkClearRect>> clear_rect_copy;
for (uint32_t attachment_index = 0; attachment_index < attachmentCount; attachment_index++) {
const auto clear_desc = &pAttachments[attachment_index];
uint32_t fb_attachment = VK_ATTACHMENT_UNUSED;
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) &&
(clear_desc->colorAttachment < subpass_desc->colorAttachmentCount)) {
fb_attachment = subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment;
} else if ((clear_desc->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) &&
subpass_desc->pDepthStencilAttachment) {
fb_attachment = subpass_desc->pDepthStencilAttachment->attachment;
}
if (fb_attachment != VK_ATTACHMENT_UNUSED) {
if (!clear_rect_copy) {
// We need a copy of the clear rectangles that will persist until the last lambda executes
// but we want to create it as lazily as possible
clear_rect_copy.reset(new std::vector<VkClearRect>(pRects, pRects + rectCount));
}
// if a secondary level command buffer inherits the framebuffer from the primary command buffer
// (see VkCommandBufferInheritanceInfo), this validation must be deferred until queue submit time
auto val_fn = [this, commandBuffer, attachment_index, fb_attachment, rectCount, clear_rect_copy](
const CMD_BUFFER_STATE *prim_cb, VkFramebuffer fb) {
assert(rectCount == clear_rect_copy->size());
const FRAMEBUFFER_STATE *framebuffer = GetFramebufferState(fb);
const auto &render_area = prim_cb->activeRenderPassBeginInfo.renderArea;
bool skip = false;
skip = ValidateClearAttachmentExtent(commandBuffer, attachment_index, framebuffer, fb_attachment, render_area,
rectCount, clear_rect_copy->data());
return skip;
};
cb_node->cmd_execute_commands_functions.emplace_back(val_fn);
}
}
}
}
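// Validate vkCmdResolveImage(): memory bindings, queue and render pass restrictions, per-region subresource and
// layout checks, matching layer counts, color-only aspect masks, and multisampled-source / single-sample-dest rules.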
bool CoreChecks::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageResolve *pRegions) {
const auto *cb_node = GetCBState(commandBuffer);
const auto *src_image_state = GetImageState(srcImage);
const auto *dst_image_state = GetImageState(dstImage);
bool skip = false;
if (cb_node && src_image_state && dst_image_state) {
skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-srcImage-00256");
skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-dstImage-00258");
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdResolveImage()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdResolveImage-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
skip |= InsideRenderPass(cb_node, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-renderpass");
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, "vkCmdResolveImage()",
"VUID-vkCmdResolveImage-dstImage-02003", "VUID-vkCmdResolveImage-dstImage-02003");
bool hit_error = false;
const char *invalid_src_layout_vuid =
(src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdResolveImage-srcImageLayout-01400"
: "VUID-vkCmdResolveImage-srcImageLayout-00261";
const char *invalid_dst_layout_vuid =
(dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdResolveImage-dstImageLayout-01401"
: "VUID-vkCmdResolveImage-dstImageLayout-00263";
// For each region, the number of layers in the image subresource should not be zero
// For each region, src and dest image aspect must be color only
for (uint32_t i = 0; i < regionCount; i++) {
skip |=
ValidateImageSubresourceLayers(cb_node, &pRegions[i].srcSubresource, "vkCmdResolveImage()", "srcSubresource", i);
skip |=
ValidateImageSubresourceLayers(cb_node, &pRegions[i].dstSubresource, "vkCmdResolveImage()", "dstSubresource", i);
skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].srcSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdResolveImage()", invalid_src_layout_vuid,
"VUID-vkCmdResolveImage-srcImageLayout-00260", &hit_error);
skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].dstSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdResolveImage()", invalid_dst_layout_vuid,
"VUID-vkCmdResolveImage-dstImageLayout-00262", &hit_error);
skip |= ValidateImageMipLevel(cb_node, src_image_state, pRegions[i].srcSubresource.mipLevel, i, "vkCmdResolveImage()",
"srcSubresource", "VUID-vkCmdResolveImage-srcSubresource-01709");
skip |= ValidateImageMipLevel(cb_node, dst_image_state, pRegions[i].dstSubresource.mipLevel, i, "vkCmdResolveImage()",
"dstSubresource", "VUID-vkCmdResolveImage-dstSubresource-01710");
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, pRegions[i].srcSubresource.baseArrayLayer,
pRegions[i].srcSubresource.layerCount, i, "vkCmdResolveImage()", "srcSubresource",
"VUID-vkCmdResolveImage-srcSubresource-01711");
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, pRegions[i].dstSubresource.baseArrayLayer,
pRegions[i].dstSubresource.layerCount, i, "vkCmdResolveImage()", "dstSubresource",
"VUID-vkCmdResolveImage-dstSubresource-01712");
// layer counts must match
if (pRegions[i].srcSubresource.layerCount != pRegions[i].dstSubresource.layerCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageResolve-layerCount-00267",
"vkCmdResolveImage(): layerCount in source and destination subresource of pRegions[%d] does not match.", i);
}
// For each region, src and dest image aspect must be color only
if ((pRegions[i].srcSubresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) ||
(pRegions[i].dstSubresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT)) {
char const str[] =
"vkCmdResolveImage(): src and dest aspectMasks for each region must specify only VK_IMAGE_ASPECT_COLOR_BIT";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageResolve-aspectMask-00266", "%s.", str);
}
}
if (src_image_state->createInfo.format != dst_image_state->createInfo.format) {
char const str[] = "vkCmdResolveImage called with unmatched source and dest formats.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_MismatchedImageFormat, str);
}
if (src_image_state->createInfo.imageType != dst_image_state->createInfo.imageType) {
char const str[] = "vkCmdResolveImage called with unmatched source and dest image types.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_MismatchedImageType, str);
}
if (src_image_state->createInfo.samples == VK_SAMPLE_COUNT_1_BIT) {
char const str[] = "vkCmdResolveImage called with source sample count less than 2.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdResolveImage-srcImage-00257", "%s.", str);
}
if (dst_image_state->createInfo.samples != VK_SAMPLE_COUNT_1_BIT) {
char const str[] = "vkCmdResolveImage called with dest sample count greater than 1.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdResolveImage-dstImage-00259", "%s.", str);
}
} else {
assert(0);
}
return skip;
}
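// Validate vkCmdBlitImage(): usage and format-feature flags, filter restrictions, signed/unsigned and depth/stencil
// format compatibility, plus per-region layout, subresource, and offset/extent checks against both images.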
bool CoreChecks::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) {
const auto *cb_node = GetCBState(commandBuffer);
const auto *src_image_state = GetImageState(srcImage);
const auto *dst_image_state = GetImageState(dstImage);
bool skip = false;
if (cb_node) {
skip |= ValidateCmd(cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
}
if (cb_node && src_image_state && dst_image_state) {
skip |= ValidateImageSampleCount(src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage",
"VUID-vkCmdBlitImage-srcImage-00233");
skip |= ValidateImageSampleCount(dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage",
"VUID-vkCmdBlitImage-dstImage-00234");
skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-srcImage-00220");
skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-dstImage-00225");
skip |=
ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdBlitImage-srcImage-00219",
"vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
skip |=
ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdBlitImage-dstImage-00224",
"vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
skip |=
ValidateCmdQueueFlags(cb_node, "vkCmdBlitImage()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdBlitImage-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
skip |= InsideRenderPass(cb_node, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-renderpass");
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_BLIT_SRC_BIT, "vkCmdBlitImage()",
"VUID-vkCmdBlitImage-srcImage-01999", "VUID-vkCmdBlitImage-srcImage-01999");
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_BLIT_DST_BIT, "vkCmdBlitImage()",
"VUID-vkCmdBlitImage-dstImage-02000", "VUID-vkCmdBlitImage-dstImage-02000");
// TODO: Need to validate image layouts, which will include layout validation for shared presentable images
VkFormat src_format = src_image_state->createInfo.format;
VkFormat dst_format = dst_image_state->createInfo.format;
VkImageType src_type = src_image_state->createInfo.imageType;
VkImageType dst_type = dst_image_state->createInfo.imageType;
if (VK_FILTER_LINEAR == filter) {
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT,
"vkCmdBlitImage()", "VUID-vkCmdBlitImage-filter-02001",
"VUID-vkCmdBlitImage-filter-02001");
} else if (VK_FILTER_CUBIC_IMG == filter) {
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG,
"vkCmdBlitImage()", "VUID-vkCmdBlitImage-filter-02002",
"VUID-vkCmdBlitImage-filter-02002");
}
if ((VK_FILTER_CUBIC_IMG == filter) && (VK_IMAGE_TYPE_3D != src_type)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-filter-00237",
"vkCmdBlitImage(): source image type must be VK_IMAGE_TYPE_3D when cubic filtering is specified.");
}
if ((VK_SAMPLE_COUNT_1_BIT != src_image_state->createInfo.samples) ||
(VK_SAMPLE_COUNT_1_BIT != dst_image_state->createInfo.samples)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00228",
"vkCmdBlitImage(): source or dest image has sample count other than VK_SAMPLE_COUNT_1_BIT.");
}
// Validate consistency for unsigned formats
if (FormatIsUInt(src_format) != FormatIsUInt(dst_format)) {
std::stringstream ss;
ss << "vkCmdBlitImage(): If one of srcImage and dstImage images has unsigned integer format, "
<< "the other one must also have unsigned integer format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00230", "%s.", ss.str().c_str());
}
// Validate consistency for signed formats
if (FormatIsSInt(src_format) != FormatIsSInt(dst_format)) {
std::stringstream ss;
ss << "vkCmdBlitImage(): If one of srcImage and dstImage images has signed integer format, "
<< "the other one must also have signed integer format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00229", "%s.", ss.str().c_str());
}
// Validate filter for Depth/Stencil formats
if (FormatIsDepthOrStencil(src_format) && (filter != VK_FILTER_NEAREST)) {
std::stringstream ss;
ss << "vkCmdBlitImage(): If the format of srcImage is a depth, stencil, or depth stencil "
<< "then filter must be VK_FILTER_NEAREST.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00232", "%s.", ss.str().c_str());
}
// Validate aspect bits and formats for depth/stencil images
if (FormatIsDepthOrStencil(src_format) || FormatIsDepthOrStencil(dst_format)) {
if (src_format != dst_format) {
std::stringstream ss;
ss << "vkCmdBlitImage(): If one of srcImage and dstImage images has a format of depth, stencil or depth "
<< "stencil, the other one must have exactly the same format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is "
<< string_VkFormat(dst_format);
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00231", "%s.", ss.str().c_str());
}
} // Depth or Stencil
// Do per-region checks
const char *invalid_src_layout_vuid =
(src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdBlitImage-srcImageLayout-01398"
: "VUID-vkCmdBlitImage-srcImageLayout-00222";
const char *invalid_dst_layout_vuid =
(dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdBlitImage-dstImageLayout-01399"
: "VUID-vkCmdBlitImage-dstImageLayout-00227";
for (uint32_t i = 0; i < regionCount; i++) {
const VkImageBlit rgn = pRegions[i];
bool hit_error = false;
skip |= VerifyImageLayout(cb_node, src_image_state, rgn.srcSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdBlitImage()", invalid_src_layout_vuid,
"VUID-vkCmdBlitImage-srcImageLayout-00221", &hit_error);
skip |= VerifyImageLayout(cb_node, dst_image_state, rgn.dstSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdBlitImage()", invalid_dst_layout_vuid,
"VUID-vkCmdBlitImage-dstImageLayout-00226", &hit_error);
skip |= ValidateImageSubresourceLayers(cb_node, &rgn.srcSubresource, "vkCmdBlitImage()", "srcSubresource", i);
skip |= ValidateImageSubresourceLayers(cb_node, &rgn.dstSubresource, "vkCmdBlitImage()", "dstSubresource", i);
skip |= ValidateImageMipLevel(cb_node, src_image_state, rgn.srcSubresource.mipLevel, i, "vkCmdBlitImage()",
"srcSubresource", "VUID-vkCmdBlitImage-srcSubresource-01705");
skip |= ValidateImageMipLevel(cb_node, dst_image_state, rgn.dstSubresource.mipLevel, i, "vkCmdBlitImage()",
"dstSubresource", "VUID-vkCmdBlitImage-dstSubresource-01706");
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, rgn.srcSubresource.baseArrayLayer,
rgn.srcSubresource.layerCount, i, "vkCmdBlitImage()", "srcSubresource",
"VUID-vkCmdBlitImage-srcSubresource-01707");
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, rgn.dstSubresource.baseArrayLayer,
rgn.dstSubresource.layerCount, i, "vkCmdBlitImage()", "dstSubresource",
"VUID-vkCmdBlitImage-dstSubresource-01708");
// Warn for zero-sized regions
if ((rgn.srcOffsets[0].x == rgn.srcOffsets[1].x) || (rgn.srcOffsets[0].y == rgn.srcOffsets[1].y) ||
(rgn.srcOffsets[0].z == rgn.srcOffsets[1].z)) {
std::stringstream ss;
ss << "vkCmdBlitImage(): pRegions[" << i << "].srcOffsets specify a zero-volume area.";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str());
}
if ((rgn.dstOffsets[0].x == rgn.dstOffsets[1].x) || (rgn.dstOffsets[0].y == rgn.dstOffsets[1].y) ||
(rgn.dstOffsets[0].z == rgn.dstOffsets[1].z)) {
std::stringstream ss;
ss << "vkCmdBlitImage(): pRegions[" << i << "].dstOffsets specify a zero-volume area.";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str());
}
// Check that src/dst layercounts match
if (rgn.srcSubresource.layerCount != rgn.dstSubresource.layerCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-layerCount-00239",
"vkCmdBlitImage(): layerCount in source and destination subresource of pRegions[%d] does not match.", i);
}
if (rgn.srcSubresource.aspectMask != rgn.dstSubresource.aspectMask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-aspectMask-00238",
"vkCmdBlitImage(): aspectMask members for pRegion[%d] do not match.", i);
}
if (!VerifyAspectsPresent(rgn.srcSubresource.aspectMask, src_format)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-aspectMask-00241",
"vkCmdBlitImage(): region [%d] source aspectMask (0x%x) specifies aspects not present in source "
"image format %s.",
i, rgn.srcSubresource.aspectMask, string_VkFormat(src_format));
}
if (!VerifyAspectsPresent(rgn.dstSubresource.aspectMask, dst_format)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-aspectMask-00242",
"vkCmdBlitImage(): region [%d] dest aspectMask (0x%x) specifies aspects not present in dest image format %s.",
i, rgn.dstSubresource.aspectMask, string_VkFormat(dst_format));
}
// Validate source image offsets
VkExtent3D src_extent = GetImageSubresourceExtent(src_image_state, &(rgn.srcSubresource));
if (VK_IMAGE_TYPE_1D == src_type) {
if ((0 != rgn.srcOffsets[0].y) || (1 != rgn.srcOffsets[1].y)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcImage-00245",
"vkCmdBlitImage(): region [%d], source image of type VK_IMAGE_TYPE_1D with srcOffset[].y values "
"of (%1d, %1d). These must be (0, 1).",
i, rgn.srcOffsets[0].y, rgn.srcOffsets[1].y);
}
}
if ((VK_IMAGE_TYPE_1D == src_type) || (VK_IMAGE_TYPE_2D == src_type)) {
if ((0 != rgn.srcOffsets[0].z) || (1 != rgn.srcOffsets[1].z)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcImage-00247",
"vkCmdBlitImage(): region [%d], source image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with "
"srcOffset[].z values of (%1d, %1d). These must be (0, 1).",
i, rgn.srcOffsets[0].z, rgn.srcOffsets[1].z);
}
}
bool oob = false;
if ((rgn.srcOffsets[0].x < 0) || (rgn.srcOffsets[0].x > static_cast<int32_t>(src_extent.width)) ||
(rgn.srcOffsets[1].x < 0) || (rgn.srcOffsets[1].x > static_cast<int32_t>(src_extent.width))) {
oob = true;
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcOffset-00243",
"vkCmdBlitImage(): region [%d] srcOffset[].x values (%1d, %1d) exceed srcSubresource width extent (%1d).", i,
rgn.srcOffsets[0].x, rgn.srcOffsets[1].x, src_extent.width);
}
if ((rgn.srcOffsets[0].y < 0) || (rgn.srcOffsets[0].y > static_cast<int32_t>(src_extent.height)) ||
(rgn.srcOffsets[1].y < 0) || (rgn.srcOffsets[1].y > static_cast<int32_t>(src_extent.height))) {
oob = true;
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcOffset-00244",
"vkCmdBlitImage(): region [%d] srcOffset[].y values (%1d, %1d) exceed srcSubresource height extent (%1d).", i,
rgn.srcOffsets[0].y, rgn.srcOffsets[1].y, src_extent.height);
}
if ((rgn.srcOffsets[0].z < 0) || (rgn.srcOffsets[0].z > static_cast<int32_t>(src_extent.depth)) ||
(rgn.srcOffsets[1].z < 0) || (rgn.srcOffsets[1].z > static_cast<int32_t>(src_extent.depth))) {
oob = true;
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcOffset-00246",
"vkCmdBlitImage(): region [%d] srcOffset[].z values (%1d, %1d) exceed srcSubresource depth extent (%1d).", i,
rgn.srcOffsets[0].z, rgn.srcOffsets[1].z, src_extent.depth);
}
if (oob) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-pRegions-00215",
"vkCmdBlitImage(): region [%d] source image blit region exceeds image dimensions.", i);
}
// Validate dest image offsets
VkExtent3D dst_extent = GetImageSubresourceExtent(dst_image_state, &(rgn.dstSubresource));
if (VK_IMAGE_TYPE_1D == dst_type) {
if ((0 != rgn.dstOffsets[0].y) || (1 != rgn.dstOffsets[1].y)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstImage-00250",
"vkCmdBlitImage(): region [%d], dest image of type VK_IMAGE_TYPE_1D with dstOffset[].y values of "
"(%1d, %1d). These must be (0, 1).",
i, rgn.dstOffsets[0].y, rgn.dstOffsets[1].y);
}
}
if ((VK_IMAGE_TYPE_1D == dst_type) || (VK_IMAGE_TYPE_2D == dst_type)) {
if ((0 != rgn.dstOffsets[0].z) || (1 != rgn.dstOffsets[1].z)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstImage-00252",
"vkCmdBlitImage(): region [%d], dest image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with "
"dstOffset[].z values of (%1d, %1d). These must be (0, 1).",
i, rgn.dstOffsets[0].z, rgn.dstOffsets[1].z);
}
}
oob = false;
if ((rgn.dstOffsets[0].x < 0) || (rgn.dstOffsets[0].x > static_cast<int32_t>(dst_extent.width)) ||
(rgn.dstOffsets[1].x < 0) || (rgn.dstOffsets[1].x > static_cast<int32_t>(dst_extent.width))) {
oob = true;
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstOffset-00248",
"vkCmdBlitImage(): region [%d] dstOffset[].x values (%1d, %1d) exceed dstSubresource width extent (%1d).", i,
rgn.dstOffsets[0].x, rgn.dstOffsets[1].x, dst_extent.width);
}
if ((rgn.dstOffsets[0].y < 0) || (rgn.dstOffsets[0].y > static_cast<int32_t>(dst_extent.height)) ||
(rgn.dstOffsets[1].y < 0) || (rgn.dstOffsets[1].y > static_cast<int32_t>(dst_extent.height))) {
oob = true;
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstOffset-00249",
"vkCmdBlitImage(): region [%d] dstOffset[].y values (%1d, %1d) exceed dstSubresource height extent (%1d).", i,
rgn.dstOffsets[0].y, rgn.dstOffsets[1].y, dst_extent.height);
}
if ((rgn.dstOffsets[0].z < 0) || (rgn.dstOffsets[0].z > static_cast<int32_t>(dst_extent.depth)) ||
(rgn.dstOffsets[1].z < 0) || (rgn.dstOffsets[1].z > static_cast<int32_t>(dst_extent.depth))) {
oob = true;
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstOffset-00251",
"vkCmdBlitImage(): region [%d] dstOffset[].z values (%1d, %1d) exceed dstSubresource depth extent (%1d).", i,
rgn.dstOffsets[0].z, rgn.dstOffsets[1].z, dst_extent.depth);
}
if (oob) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-pRegions-00216",
"vkCmdBlitImage(): region [%d] destination image blit region exceeds image dimensions.", i);
}
if ((VK_IMAGE_TYPE_3D == src_type) || (VK_IMAGE_TYPE_3D == dst_type)) {
if ((0 != rgn.srcSubresource.baseArrayLayer) || (1 != rgn.srcSubresource.layerCount) ||
(0 != rgn.dstSubresource.baseArrayLayer) || (1 != rgn.dstSubresource.layerCount)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcImage-00240",
"vkCmdBlitImage(): region [%d] blit to/from a 3D image type with a non-zero baseArrayLayer, or a "
"layerCount other than 1.",
i);
}
}
} // per-region checks
} else {
assert(0);
}
return skip;
}
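// Record-time counterpart for vkCmdBlitImage(): same pattern as the copy path, i.e. update the state tracker and
// set the expected initial layouts for all blitted source and destination subresources.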
void CoreChecks::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) {
StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions, filter);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
auto dst_image_state = GetImageState(dstImage);
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].srcSubresource, srcImageLayout);
SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].dstSubresource, dstImageLayout);
}
}
// This validates that the initial layout specified in the command buffer for the IMAGE is the same as the global IMAGE layout
bool CoreChecks::ValidateCmdBufImageLayouts(const CMD_BUFFER_STATE *pCB, const ImageSubresPairLayoutMap &globalImageLayoutMap,
ImageSubresPairLayoutMap *overlayLayoutMap_arg) const {
if (disabled.image_layout_validation) return false;
bool skip = false;
ImageSubresPairLayoutMap &overlayLayoutMap = *overlayLayoutMap_arg;
// Iterate over the layout maps for each referenced image
for (const auto &layout_map_entry : pCB->image_layout_map) {
const auto image = layout_map_entry.first;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't check layouts of a dead image
const auto &subres_map = layout_map_entry.second;
ImageSubresourcePair isr_pair;
isr_pair.image = image;
isr_pair.hasSubresource = true;
// Validate the initial_uses for each subresource referenced
for (auto it_init = subres_map->BeginInitialUse(); !it_init.AtEnd(); ++it_init) {
isr_pair.subresource = (*it_init).subresource;
VkImageLayout initial_layout = (*it_init).layout;
VkImageLayout image_layout;
if (FindLayout(overlayLayoutMap, isr_pair, image_layout) || FindLayout(globalImageLayoutMap, isr_pair, image_layout)) {
if (initial_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
} else if (image_layout != initial_layout) {
// Need to look up the initial layout *state* to get a bit more information
const auto *initial_layout_state = subres_map->GetSubresourceInitialLayoutState(isr_pair.subresource);
assert(initial_layout_state); // There's no way we should have an initial layout without matching state...
bool matches = ImageLayoutMatches(initial_layout_state->aspect_mask, image_layout, initial_layout);
if (!matches) {
std::string formatted_label = FormatDebugLabel(" ", pCB->debug_label);
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidImageLayout,
"Submitted command buffer expects %s (subresource: aspectMask 0x%X array layer %u, mip level %u) "
"to be in layout %s--instead, current layout is %s.%s",
report_data->FormatHandle(image).c_str(), isr_pair.subresource.aspectMask,
isr_pair.subresource.arrayLayer, isr_pair.subresource.mipLevel, string_VkImageLayout(initial_layout),
string_VkImageLayout(image_layout), formatted_label.c_str());
}
}
}
}
// Update all layout set operations (which will be a subset of the initial_layouts)
for (auto it_set = subres_map->BeginSetLayout(); !it_set.AtEnd(); ++it_set) {
VkImageLayout layout = (*it_set).layout;
isr_pair.subresource = (*it_set).subresource;
SetLayout(overlayLayoutMap, isr_pair, layout);
}
}
return skip;
}
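// Propagate each layout-set operation recorded in the command buffer into the global per-subresource layout map.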
void CoreChecks::UpdateCmdBufImageLayouts(CMD_BUFFER_STATE *pCB) {
for (const auto &layout_map_entry : pCB->image_layout_map) {
const auto image = layout_map_entry.first;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't set layouts of a dead image
const auto &subres_map = layout_map_entry.second;
ImageSubresourcePair isr_pair;
isr_pair.image = image;
isr_pair.hasSubresource = true;
// Update all layout set operations (which will be a subset of the initial_layouts)
for (auto it_set = subres_map->BeginSetLayout(); !it_set.AtEnd(); ++it_set) {
VkImageLayout layout = (*it_set).layout;
isr_pair.subresource = (*it_set).subresource;
SetGlobalLayout(isr_pair, layout);
}
}
}
// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that READ_ONLY
// layout attachments don't have CLEAR as their loadOp.
bool CoreChecks::ValidateLayoutVsAttachmentDescription(const debug_report_data *report_data, RenderPassCreateVersion rp_version,
const VkImageLayout first_layout, const uint32_t attachment,
const VkAttachmentDescription2KHR &attachment_description) const {
bool skip = false;
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
// Verify that initial loadOp on READ_ONLY attachments is not CLEAR
if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if (use_rp2 && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo2KHR-pAttachments-02522",
"Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
} else if (!use_rp2 && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo-pAttachments-00836",
"Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
}
}
if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if (first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL) {
vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pAttachments-01566";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
}
}
if (attachment_description.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if (first_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL) {
vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pAttachments-01567";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
}
}
return skip;
}
bool CoreChecks::ValidateLayouts(RenderPassCreateVersion rp_version, VkDevice device,
const VkRenderPassCreateInfo2KHR *pCreateInfo) const {
bool skip = false;
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
VkFormat format = pCreateInfo->pAttachments[i].format;
if (pCreateInfo->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
if ((FormatIsColor(format) || FormatHasDepth(format)) &&
pCreateInfo->pAttachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"Render pass has an attachment with loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout == "
"VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using "
"VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the "
"render pass.");
}
if (FormatHasStencil(format) && pCreateInfo->pAttachments[i].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"Render pass has an attachment with stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout "
"== VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using "
"VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the "
"render pass.");
}
}
}
// Track when we're observing the first use of an attachment
std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
// Check input attachments first, so we can detect first-use-as-input for VU #00349
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
auto attach_index = subpass.pInputAttachments[j].attachment;
if (attach_index == VK_ATTACHMENT_UNUSED) continue;
switch (subpass.pInputAttachments[j].layout) {
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// These are ideal.
break;
case VK_IMAGE_LAYOUT_GENERAL:
// May not be optimal. TODO: reconsider this warning based on other constraints.
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidImageLayout,
"Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
break;
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
vuid = use_rp2 ? "VUID-VkAttachmentReference2KHR-layout-03077" : "VUID-VkAttachmentReference-layout-00857";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Layout for input attachment reference %u in subpass %u is %s but must be "
"DEPTH_STENCIL_READ_ONLY, SHADER_READ_ONLY_OPTIMAL, or GENERAL.",
j, i, string_VkImageLayout(subpass.pInputAttachments[j].layout));
break;
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR:
if (device_extensions.vk_khr_maintenance2) {
break;
} else {
// Intentionally fall through to generic error message
}
// fall through
default:
// No other layouts are acceptable
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidImageLayout,
"Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
string_VkImageLayout(subpass.pInputAttachments[j].layout));
}
if (attach_first_use[attach_index]) {
skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pInputAttachments[j].layout,
attach_index, pCreateInfo->pAttachments[attach_index]);
bool used_as_depth =
(subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attach_index);
bool used_as_color = false;
for (uint32_t k = 0; !used_as_depth && !used_as_color && k < subpass.colorAttachmentCount; ++k) {
used_as_color = (subpass.pColorAttachments[k].attachment == attach_index);
}
if (!used_as_depth && !used_as_color &&
pCreateInfo->pAttachments[attach_index].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-loadOp-03064" : "VUID-VkSubpassDescription-loadOp-00846";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: attachment %u is first used as an input attachment in subpass %u with loadOp=CLEAR.",
function_name, attach_index, i);
}
}
attach_first_use[attach_index] = false;
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
auto attach_index = subpass.pColorAttachments[j].attachment;
if (attach_index == VK_ATTACHMENT_UNUSED) continue;
// TODO: Need a way to validate shared presentable images here, currently just allowing
// VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR
// as an acceptable layout, but need to make sure shared presentable images ONLY use that layout
switch (subpass.pColorAttachments[j].layout) {
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// This is ideal.
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
// TODO: See note above, just assuming that attachment is shared presentable and allowing this for now.
break;
case VK_IMAGE_LAYOUT_GENERAL:
// May not be optimal; TODO: reconsider this warning based on other constraints?
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidImageLayout,
"Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
break;
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
vuid = use_rp2 ? "VUID-VkAttachmentReference2KHR-layout-03077" : "VUID-VkAttachmentReference-layout-00857";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Layout for color attachment reference %u in subpass %u is %s but should be "
"COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
j, i, string_VkImageLayout(subpass.pColorAttachments[j].layout));
break;
default:
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidImageLayout,
"Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
string_VkImageLayout(subpass.pColorAttachments[j].layout));
}
if (subpass.pResolveAttachments && (subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) &&
(subpass.pResolveAttachments[j].layout == VK_IMAGE_LAYOUT_UNDEFINED ||
subpass.pResolveAttachments[j].layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) {
vuid = use_rp2 ? "VUID-VkAttachmentReference2KHR-layout-03077" : "VUID-VkAttachmentReference-layout-00857";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Layout for resolve attachment reference %u in subpass %u is %s but should be "
"COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
j, i, string_VkImageLayout(subpass.pResolveAttachments[j].layout));
}
if (attach_first_use[attach_index]) {
skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pColorAttachments[j].layout,
attach_index, pCreateInfo->pAttachments[attach_index]);
}
attach_first_use[attach_index] = false;
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
switch (subpass.pDepthStencilAttachment->layout) {
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
// These are ideal.
break;
case VK_IMAGE_LAYOUT_GENERAL:
// May not be optimal; TODO: reconsider this warning based on other constraints? GENERAL can be better than
// doing a bunch of transitions.
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidImageLayout,
"GENERAL layout for depth attachment may not give optimal performance.");
break;
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
vuid = use_rp2 ? "VUID-VkAttachmentReference2KHR-layout-03077" : "VUID-VkAttachmentReference-layout-00857";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Layout for depth attachment reference in subpass %u is %s but must be a valid depth/stencil "
"layout or GENERAL.",
i, string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
break;
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR:
if (device_extensions.vk_khr_maintenance2) {
break;
} else {
// Intentionally fall through to generic error message
}
// fall through
default:
// No other layouts are acceptable
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidImageLayout,
"Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
"DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
}
auto attach_index = subpass.pDepthStencilAttachment->attachment;
if (attach_first_use[attach_index]) {
skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pDepthStencilAttachment->layout,
attach_index, pCreateInfo->pAttachments[attach_index]);
}
attach_first_use[attach_index] = false;
}
}
return skip;
}
// Helper function to validate correct usage bits set for buffers or images. Verify that (actual & desired) flags != 0 or, if strict
// is true, verify that (actual & desired) flags == desired
bool CoreChecks::ValidateUsageFlags(VkFlags actual, VkFlags desired, VkBool32 strict, const VulkanTypedHandle &typed_handle,
const char *msgCode, char const *func_name, char const *usage_str) const {
bool correct_usage = false;
bool skip = false;
const char *type_str = object_string[typed_handle.type];
if (strict) {
correct_usage = ((actual & desired) == desired);
} else {
correct_usage = ((actual & desired) != 0);
}
if (!correct_usage) {
if (msgCode == kVUIDUndefined) {
// TODO: Fix callers with kVUIDUndefined to use correct validation checks.
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[typed_handle.type],
typed_handle.handle, kVUID_Core_MemTrack_InvalidUsageFlag,
"Invalid usage flag for %s used by %s. In this case, %s should have %s set during creation.",
report_data->FormatHandle(typed_handle).c_str(), func_name, type_str, usage_str);
} else {
skip =
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[typed_handle.type], typed_handle.handle,
msgCode, "Invalid usage flag for %s used by %s. In this case, %s should have %s set during creation.",
report_data->FormatHandle(typed_handle).c_str(), func_name, type_str, usage_str);
}
}
return skip;
}
// Helper function to validate usage flags for images. For given image_state send actual vs. desired usage off to helper above
// where an error will be flagged if usage is not correct
bool CoreChecks::ValidateImageUsageFlags(IMAGE_STATE const *image_state, VkFlags desired, bool strict, const char *msgCode,
char const *func_name, char const *usage_string) const {
return ValidateUsageFlags(image_state->createInfo.usage, desired, strict,
VulkanTypedHandle(image_state->image, kVulkanObjectTypeImage), msgCode, func_name, usage_string);
}
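// Helper function to validate that the requested format features are supported for the image's format and tiling, reporting the
// tiling-specific VUID (linear vs. optimal) when they are not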
bool CoreChecks::ValidateImageFormatFeatureFlags(IMAGE_STATE const *image_state, VkFormatFeatureFlags desired,
char const *func_name, const char *linear_vuid, const char *optimal_vuid) const {
VkFormatProperties format_properties = GetPDFormatProperties(image_state->createInfo.format);
bool skip = false;
if (image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR) {
if ((format_properties.linearTilingFeatures & desired) != desired) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), linear_vuid,
"In %s, invalid linearTilingFeatures (0x%08X) for format %u used by %s.", func_name,
format_properties.linearTilingFeatures, image_state->createInfo.format,
report_data->FormatHandle(image_state->image).c_str());
}
} else if (image_state->createInfo.tiling == VK_IMAGE_TILING_OPTIMAL) {
if ((format_properties.optimalTilingFeatures & desired) != desired) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), optimal_vuid,
"In %s, invalid optimalTilingFeatures (0x%08X) for format %u used by %s.", func_name,
format_properties.optimalTilingFeatures, image_state->createInfo.format,
report_data->FormatHandle(image_state->image).c_str());
}
}
return skip;
}
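// Validate a VkImageSubresourceLayers used by a copy/blit region: layerCount must be non-zero, aspectMask must not include
// METADATA, and COLOR must not be combined with DEPTH or STENCIL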
bool CoreChecks::ValidateImageSubresourceLayers(const CMD_BUFFER_STATE *cb_node, const VkImageSubresourceLayers *subresource_layers,
char const *func_name, char const *member, uint32_t i) const {
bool skip = false;
// layerCount must not be zero
if (subresource_layers->layerCount == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageSubresourceLayers-layerCount-01700",
"In %s, pRegions[%u].%s.layerCount must not be zero.", func_name, i, member);
}
// aspectMask must not contain VK_IMAGE_ASPECT_METADATA_BIT
if (subresource_layers->aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageSubresourceLayers-aspectMask-00168",
"In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_METADATA_BIT set.", func_name, i, member);
}
// if aspectMask contains COLOR, it must not contain either DEPTH or STENCIL
if ((subresource_layers->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) &&
(subresource_layers->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageSubresourceLayers-aspectMask-00167",
"In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_COLOR_BIT and either VK_IMAGE_ASPECT_DEPTH_BIT or "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, i, member);
}
return skip;
}
// Helper function to validate usage flags for buffers. For given buffer_state send actual vs. desired usage off to helper above
// where an error will be flagged if usage is not correct
bool CoreChecks::ValidateBufferUsageFlags(BUFFER_STATE const *buffer_state, VkFlags desired, bool strict, const char *msgCode,
char const *func_name, char const *usage_string) const {
return ValidateUsageFlags(buffer_state->createInfo.usage, desired, strict,
VulkanTypedHandle(buffer_state->buffer, kVulkanObjectTypeBuffer), msgCode, func_name, usage_string);
}
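// Validate a buffer view range that is not VK_WHOLE_SIZE: it must be non-zero, a multiple of the format's element size, within
// maxTexelBufferElements, and (together with offset) must fit inside the buffer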
bool CoreChecks::ValidateBufferViewRange(const BUFFER_STATE *buffer_state, const VkBufferViewCreateInfo *pCreateInfo,
const VkPhysicalDeviceLimits *device_limits) {
bool skip = false;
const VkDeviceSize &range = pCreateInfo->range;
if (range != VK_WHOLE_SIZE) {
// Range must be greater than 0
if (range <= 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-range-00928",
"If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range must be greater than 0.",
range);
}
// Range must be a multiple of the element size of format
const uint32_t format_size = FormatElementSize(pCreateInfo->format);
if (SafeModulo(range, format_size) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-range-00929",
"If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range must be a multiple of the element size of the format "
"(%" PRIu32 ").",
range, format_size);
}
// Range divided by the element size of format must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements
if (SafeDivision(range, format_size) > device_limits->maxTexelBufferElements) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-range-00930",
"If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range divided by the element size of the format (%" PRIu32
") must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements (%" PRIuLEAST32 ").",
range, format_size, device_limits->maxTexelBufferElements);
}
// The sum of range and offset must be less than or equal to the size of buffer
if (range + pCreateInfo->offset > buffer_state->createInfo.size) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-offset-00931",
"If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, the sum of offset (%" PRIuLEAST64
") and range must be less than or equal to the size of the buffer (%" PRIuLEAST64 ").",
range, pCreateInfo->offset, buffer_state->createInfo.size);
}
}
return skip;
}
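// Verify that the buffer view's format supports the texel buffer features implied by the buffer's usage flags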
bool CoreChecks::ValidateBufferViewBuffer(const BUFFER_STATE *buffer_state, const VkBufferViewCreateInfo *pCreateInfo) {
bool skip = false;
const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->format);
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) &&
!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-buffer-00933",
"If buffer was created with `usage` containing VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, format must "
"be supported for uniform texel buffers");
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) &&
!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-buffer-00934",
"If buffer was created with `usage` containing VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, format must "
"be supported for storage texel buffers");
}
return skip;
}
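// Validate vkCreateBuffer(): sparse flags require the corresponding device features, device-address flags/usage require the
// buffer_address features, and concurrent sharing mode requires valid queue family indices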
bool CoreChecks::PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
bool skip = false;
// TODO: Add check for "VUID-vkCreateBuffer-flags-00911" (sparse address space accounting)
if ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) && (!enabled_features.core.sparseBinding)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkBufferCreateInfo-flags-00915",
"vkCreateBuffer(): the sparseBinding device feature is disabled: Buffers cannot be created with the "
"VK_BUFFER_CREATE_SPARSE_BINDING_BIT set.");
}
if ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT) && (!enabled_features.core.sparseResidencyBuffer)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkBufferCreateInfo-flags-00916",
"vkCreateBuffer(): the sparseResidencyBuffer device feature is disabled: Buffers cannot be created with "
"the VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT set.");
}
if ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_ALIASED_BIT) && (!enabled_features.core.sparseResidencyAliased)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkBufferCreateInfo-flags-00917",
"vkCreateBuffer(): the sparseResidencyAliased device feature is disabled: Buffers cannot be created with "
"the VK_BUFFER_CREATE_SPARSE_ALIASED_BIT set.");
}
auto chained_devaddr_struct = lvl_find_in_chain<VkBufferDeviceAddressCreateInfoEXT>(pCreateInfo->pNext);
if (chained_devaddr_struct) {
if (!(pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT) &&
chained_devaddr_struct->deviceAddress != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkBufferCreateInfo-deviceAddress-02604",
"vkCreateBuffer(): Non-zero VkBufferDeviceAddressCreateInfoEXT::deviceAddress "
"requires VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT.");
}
}
if ((pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT) &&
!enabled_features.buffer_address.bufferDeviceAddressCaptureReplay) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkBufferCreateInfo-flags-02605",
"vkCreateBuffer(): the bufferDeviceAddressCaptureReplay device feature is disabled: Buffers cannot be created with "
"the VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT set.");
}
if ((pCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) &&
!enabled_features.buffer_address.bufferDeviceAddress) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkBufferCreateInfo-usage-02606",
"vkCreateBuffer(): the bufferDeviceAddress device feature is disabled: Buffers cannot be created with "
"the VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT set.");
}
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) {
skip |= ValidateQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, "vkCreateBuffer",
"pCreateInfo->pQueueFamilyIndices", "VUID-VkBufferCreateInfo-sharingMode-01419",
"VUID-VkBufferCreateInfo-sharingMode-01419", false);
}
return skip;
}
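// Validate vkCreateBufferView(): buffer must be memory-backed with texel buffer usage, and the view's offset/range must satisfy
// size, alignment (including VK_EXT_texel_buffer_alignment rules), and format feature requirements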
bool CoreChecks::PreCallValidateCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
bool skip = false;
BUFFER_STATE *buffer_state = GetBufferState(pCreateInfo->buffer);
// If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
if (buffer_state) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCreateBufferView()", "VUID-VkBufferViewCreateInfo-buffer-00935");
// In order to create a valid buffer view, the buffer must have been created with at least one of the following flags:
// UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
skip |= ValidateBufferUsageFlags(buffer_state,
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
"VUID-VkBufferViewCreateInfo-buffer-00932", "vkCreateBufferView()",
"VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
// Buffer view offset must be less than the size of buffer
if (pCreateInfo->offset >= buffer_state->createInfo.size) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-offset-00925",
"VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be less than the size of the buffer (%" PRIuLEAST64 ").",
pCreateInfo->offset, buffer_state->createInfo.size);
}
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
// Buffer view offset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment
if ((pCreateInfo->offset % device_limits->minTexelBufferOffsetAlignment) != 0 &&
!enabled_features.texel_buffer_alignment_features.texelBufferAlignment) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-offset-02749",
"VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment (%" PRIuLEAST64 ").",
pCreateInfo->offset, device_limits->minTexelBufferOffsetAlignment);
}
if (enabled_features.texel_buffer_alignment_features.texelBufferAlignment) {
VkDeviceSize elementSize = FormatElementSize(pCreateInfo->format);
if ((elementSize % 3) == 0) {
elementSize /= 3;
}
if (buffer_state->createInfo.usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) {
VkDeviceSize alignmentRequirement =
phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetAlignmentBytes;
if (phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetSingleTexelAlignment) {
alignmentRequirement = std::min(alignmentRequirement, elementSize);
}
if (SafeModulo(pCreateInfo->offset, alignmentRequirement) != 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-buffer-02750",
"If buffer was created with usage containing VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, "
"VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of the lesser of "
"VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::storageTexelBufferOffsetAlignmentBytes (%" PRIuLEAST64
") or, if VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::storageTexelBufferOffsetSingleTexelAlignment "
"(%" PRId32
") is VK_TRUE, the size of a texel of the requested format. "
"If the size of a texel is a multiple of three bytes, then the size of a "
"single component of format is used instead",
pCreateInfo->offset, phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetAlignmentBytes,
phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetSingleTexelAlignment);
}
}
if (buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) {
VkDeviceSize alignmentRequirement =
phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetAlignmentBytes;
if (phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetSingleTexelAlignment) {
alignmentRequirement = std::min(alignmentRequirement, elementSize);
}
if (SafeModulo(pCreateInfo->offset, alignmentRequirement) != 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-buffer-02751",
"If buffer was created with usage containing VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, "
"VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of the lesser of "
"VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::uniformTexelBufferOffsetAlignmentBytes (%" PRIuLEAST64
") or, if VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::uniformTexelBufferOffsetSingleTexelAlignment "
"(%" PRId32
") is VK_TRUE, the size of a texel of the requested format. "
"If the size of a texel is a multiple of three bytes, then the size of a "
"single component of format is used instead",
pCreateInfo->offset, phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetAlignmentBytes,
phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetSingleTexelAlignment);
}
}
}
skip |= ValidateBufferViewRange(buffer_state, pCreateInfo, device_limits);
skip |= ValidateBufferViewBuffer(buffer_state, pCreateInfo);
}
return skip;
}
// For the given format verify that the aspect masks make sense
bool CoreChecks::ValidateImageAspectMask(VkImage image, VkFormat format, VkImageAspectFlags aspect_mask, const char *func_name,
const char *vuid) const {
bool skip = false;
VkDebugReportObjectTypeEXT objectType = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT;
if (image != VK_NULL_HANDLE) {
objectType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
}
if (FormatIsColor(format)) {
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set.", func_name);
} else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set.", func_name);
}
} else if (FormatIsDepthAndStencil(format)) {
if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Depth/stencil image formats must have at least one of VK_IMAGE_ASPECT_DEPTH_BIT and "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name);
} else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name);
}
} else if (FormatIsDepthOnly(format)) {
if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set.", func_name);
} else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set.", func_name);
}
} else if (FormatIsStencilOnly(format)) {
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set.", func_name);
} else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set.", func_name);
}
} else if (FormatIsMultiplane(format)) {
VkImageAspectFlags valid_flags = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT;
if (3 == FormatPlaneCount(format)) {
valid_flags = valid_flags | VK_IMAGE_ASPECT_PLANE_2_BIT;
}
if ((aspect_mask & valid_flags) != aspect_mask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Multi-plane image formats may have only VK_IMAGE_ASPECT_COLOR_BIT or VK_IMAGE_ASPECT_PLANE_n_BITs "
"set, where n = [0, 1, 2].",
func_name);
}
}
return skip;
}
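// Generic check of a VkImageSubresourceRange against the image's mip and layer counts; errorCodes carries the caller-specific
// VUIDs to report on failure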
bool CoreChecks::ValidateImageSubresourceRange(const uint32_t image_mip_count, const uint32_t image_layer_count,
const VkImageSubresourceRange &subresourceRange, const char *cmd_name,
const char *param_name, const char *image_layer_count_var_name,
const uint64_t image_handle, SubresourceRangeErrorCodes errorCodes) const {
bool skip = false;
// Validate mip levels
if (subresourceRange.baseMipLevel >= image_mip_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
errorCodes.base_mip_err,
"%s: %s.baseMipLevel (= %" PRIu32
") is greater or equal to the mip level count of the image (i.e. greater or equal to %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseMipLevel, image_mip_count);
}
if (subresourceRange.levelCount != VK_REMAINING_MIP_LEVELS) {
if (subresourceRange.levelCount == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
errorCodes.mip_count_err, "%s: %s.levelCount is 0.", cmd_name, param_name);
} else {
const uint64_t necessary_mip_count = uint64_t{subresourceRange.baseMipLevel} + uint64_t{subresourceRange.levelCount};
if (necessary_mip_count > image_mip_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
errorCodes.mip_count_err,
"%s: %s.baseMipLevel + .levelCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than the mip level count of the image (i.e. greater than %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseMipLevel, subresourceRange.levelCount,
necessary_mip_count, image_mip_count);
}
}
}
// Validate array layers
if (subresourceRange.baseArrayLayer >= image_layer_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
errorCodes.base_layer_err,
"%s: %s.baseArrayLayer (= %" PRIu32
") is greater or equal to the %s of the image when it was created (i.e. greater or equal to %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseArrayLayer, image_layer_count_var_name, image_layer_count);
}
if (subresourceRange.layerCount != VK_REMAINING_ARRAY_LAYERS) {
if (subresourceRange.layerCount == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
errorCodes.layer_count_err, "%s: %s.layerCount is 0.", cmd_name, param_name);
} else {
const uint64_t necessary_layer_count =
uint64_t{subresourceRange.baseArrayLayer} + uint64_t{subresourceRange.layerCount};
if (necessary_layer_count > image_layer_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
errorCodes.layer_count_err,
"%s: %s.baseArrayLayer + .layerCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than the %s of the image when it was created (i.e. greater than %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseArrayLayer, subresourceRange.layerCount,
necessary_layer_count, image_layer_count_var_name, image_layer_count);
}
}
}
return skip;
}
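// Determine the effective layer count (extent.depth for 2D[_ARRAY] views of 2D-array-compatible 3D images) and the
// vkCreateImageView-specific VUIDs, then defer to ValidateImageSubresourceRange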
bool CoreChecks::ValidateCreateImageViewSubresourceRange(const IMAGE_STATE *image_state, bool is_imageview_2d_type,
const VkImageSubresourceRange &subresourceRange) {
bool is_khr_maintenance1 = device_extensions.vk_khr_maintenance1;
bool is_image_slicable = image_state->createInfo.imageType == VK_IMAGE_TYPE_3D &&
(image_state->createInfo.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR);
bool is_3D_to_2D_map = is_khr_maintenance1 && is_image_slicable && is_imageview_2d_type;
const auto image_layer_count = is_3D_to_2D_map ? image_state->createInfo.extent.depth : image_state->createInfo.arrayLayers;
const auto image_layer_count_var_name = is_3D_to_2D_map ? "extent.depth" : "arrayLayers";
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-VkImageViewCreateInfo-subresourceRange-01478";
subresourceRangeErrorCodes.mip_count_err = "VUID-VkImageViewCreateInfo-subresourceRange-01718";
subresourceRangeErrorCodes.base_layer_err = is_khr_maintenance1 ? (is_3D_to_2D_map ? "VUID-VkImageViewCreateInfo-image-02724"
: "VUID-VkImageViewCreateInfo-image-01482")
: "VUID-VkImageViewCreateInfo-subresourceRange-01480";
subresourceRangeErrorCodes.layer_count_err = is_khr_maintenance1
? (is_3D_to_2D_map ? "VUID-VkImageViewCreateInfo-subresourceRange-02725"
: "VUID-VkImageViewCreateInfo-subresourceRange-01483")
: "VUID-VkImageViewCreateInfo-subresourceRange-01719";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_layer_count, subresourceRange,
"vkCreateImageView", "pCreateInfo->subresourceRange", image_layer_count_var_name,
HandleToUint64(image_state->image), subresourceRangeErrorCodes);
}
bool CoreChecks::ValidateCmdClearColorSubresourceRange(const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange,
const char *param_name) const {
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-vkCmdClearColorImage-baseMipLevel-01470";
subresourceRangeErrorCodes.mip_count_err = "VUID-vkCmdClearColorImage-pRanges-01692";
subresourceRangeErrorCodes.base_layer_err = "VUID-vkCmdClearColorImage-baseArrayLayer-01472";
subresourceRangeErrorCodes.layer_count_err = "VUID-vkCmdClearColorImage-pRanges-01693";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange,
"vkCmdClearColorImage", param_name, "arrayLayers", HandleToUint64(image_state->image),
subresourceRangeErrorCodes);
}
bool CoreChecks::ValidateCmdClearDepthSubresourceRange(const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange,
const char *param_name) const {
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474";
subresourceRangeErrorCodes.mip_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01694";
subresourceRangeErrorCodes.base_layer_err = "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476";
subresourceRangeErrorCodes.layer_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01695";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange,
"vkCmdClearDepthStencilImage", param_name, "arrayLayers",
HandleToUint64(image_state->image), subresourceRangeErrorCodes);
}
bool CoreChecks::ValidateImageBarrierSubresourceRange(const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange, const char *cmd_name,
const char *param_name) const {
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-VkImageMemoryBarrier-subresourceRange-01486";
subresourceRangeErrorCodes.mip_count_err = "VUID-VkImageMemoryBarrier-subresourceRange-01724";
subresourceRangeErrorCodes.base_layer_err = "VUID-VkImageMemoryBarrier-subresourceRange-01488";
subresourceRangeErrorCodes.layer_count_err = "VUID-VkImageMemoryBarrier-subresourceRange-01725";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange,
cmd_name, param_name, "arrayLayers", HandleToUint64(image_state->image),
subresourceRangeErrorCodes);
}
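// Validate vkCreateImageView(): usage flags, memory binding, subresource range, view/image format compatibility, view type vs.
// image type, format feature support for the (possibly overridden) usage, and shading rate image restrictions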
bool CoreChecks::PreCallValidateCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
bool skip = false;
IMAGE_STATE *image_state = GetImageState(pCreateInfo->image);
if (image_state) {
skip |=
ValidateImageUsageFlags(image_state,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV | VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT,
false, kVUIDUndefined, "vkCreateImageView()",
"VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT|"
"SHADING_RATE_IMAGE|FRAGMENT_DENSITY_MAP]_BIT");
// If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCreateImageView()", "VUID-VkImageViewCreateInfo-image-01020");
// Checks imported from image layer
skip |= ValidateCreateImageViewSubresourceRange(
image_state, pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_2D || pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY,
pCreateInfo->subresourceRange);
VkImageCreateFlags image_flags = image_state->createInfo.flags;
VkFormat image_format = image_state->createInfo.format;
VkImageUsageFlags image_usage = image_state->createInfo.usage;
VkImageTiling image_tiling = image_state->createInfo.tiling;
VkFormat view_format = pCreateInfo->format;
VkImageAspectFlags aspect_mask = pCreateInfo->subresourceRange.aspectMask;
VkImageType image_type = image_state->createInfo.imageType;
VkImageViewType view_type = pCreateInfo->viewType;
// If there's a chained VkImageViewUsageCreateInfo struct, modify image_usage to match
auto chained_ivuci_struct = lvl_find_in_chain<VkImageViewUsageCreateInfoKHR>(pCreateInfo->pNext);
if (chained_ivuci_struct) {
image_usage = chained_ivuci_struct->usage;
}
// Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state, if view/image formats differ
if ((image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) && (image_format != view_format)) {
if (FormatIsMultiplane(image_format)) {
VkFormat compat_format = FindMultiplaneCompatibleFormat(image_format, aspect_mask);
if (view_format != compat_format) {
// View format must match the multiplane compatible format
std::stringstream ss;
ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
<< " is not compatible with plane " << GetPlaneIndex(aspect_mask) << " of underlying image format "
<< string_VkFormat(image_format) << ", must be " << string_VkFormat(compat_format) << ".";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-01586", "%s",
ss.str().c_str());
}
} else {
if ((!device_extensions.vk_khr_maintenance2 ||
!(image_flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR))) {
// Format MUST be compatible (in the same format compatibility class) as the format the image was created with
if (FormatCompatibilityClass(image_format) != FormatCompatibilityClass(view_format)) {
std::stringstream ss;
ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
<< " is not in the same format compatibility class as "
<< report_data->FormatHandle(pCreateInfo->image).c_str() << " format " << string_VkFormat(image_format)
<< ". Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT BIT "
<< "can support ImageViews with differing formats but they must be in the same compatibility class.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-01018", "%s",
ss.str().c_str());
}
}
}
} else {
// Format MUST be IDENTICAL to the format the image was created with
if (image_format != view_format) {
std::stringstream ss;
ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from "
<< report_data->FormatHandle(pCreateInfo->image).c_str() << " format " << string_VkFormat(image_format)
<< ". Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT BIT was set on image creation.";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-01019", "%s", ss.str().c_str());
}
}
// Validate correct image aspect bits for desired formats and format consistency
skip |= ValidateImageAspectMask(image_state->image, image_format, aspect_mask, "vkCreateImageView()");
switch (image_type) {
case VK_IMAGE_TYPE_1D:
if (view_type != VK_IMAGE_VIEW_TYPE_1D && view_type != VK_IMAGE_VIEW_TYPE_1D_ARRAY) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
break;
case VK_IMAGE_TYPE_2D:
if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
if ((view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) &&
!(image_flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-01003",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
} else if (view_type != VK_IMAGE_VIEW_TYPE_CUBE && view_type != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
break;
case VK_IMAGE_TYPE_3D:
if (device_extensions.vk_khr_maintenance1) {
if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
if ((view_type == VK_IMAGE_VIEW_TYPE_2D || view_type == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
if (!(image_flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-01005",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
} else if ((image_flags & (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
VK_IMAGE_CREATE_SPARSE_ALIASED_BIT))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s "
"when the VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT flags are enabled.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
} else {
if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
break;
default:
break;
}
// External format checks needed when VK_ANDROID_external_memory_android_hardware_buffer enabled
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateImageViewANDROID(pCreateInfo);
}
VkFormatProperties format_properties = GetPDFormatProperties(view_format);
VkFormatFeatureFlags tiling_features = (image_tiling & VK_IMAGE_TILING_LINEAR) ? format_properties.linearTilingFeatures
: format_properties.optimalTilingFeatures;
if (tiling_features == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-None-02273",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s has no supported format features on this "
"physical device.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-usage-02274",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_SAMPLED_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-usage-02275",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_STORAGE_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-usage-02276",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-usage-02277",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
}
if (image_usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) {
if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-02086",
"vkCreateImageView() If image was created with usage containing "
"VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, viewType must be "
"VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
}
if (view_format != VK_FORMAT_R8_UINT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(pCreateInfo->image), "VUID-VkImageViewCreateInfo-image-02087",
"vkCreateImageView() If image was created with usage containing "
"VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, format must be VK_FORMAT_R8_UINT.");
}
}
}
return skip;
}
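// Check each VkBufferCopy region against the source and destination buffer sizes, and flag copies whose source and destination
// ranges overlap within the same buffer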
static inline bool ValidateCmdCopyBufferBounds(const debug_report_data *report_data, const BUFFER_STATE *src_buffer_state,
const BUFFER_STATE *dst_buffer_state, uint32_t regionCount,
const VkBufferCopy *pRegions) {
bool skip = false;
VkDeviceSize src_buffer_size = src_buffer_state->createInfo.size;
VkDeviceSize dst_buffer_size = dst_buffer_state->createInfo.size;
VkDeviceSize src_min = UINT64_MAX;
VkDeviceSize src_max = 0;
VkDeviceSize dst_min = UINT64_MAX;
VkDeviceSize dst_max = 0;
for (uint32_t i = 0; i < regionCount; i++) {
src_min = std::min(src_min, pRegions[i].srcOffset);
src_max = std::max(src_max, (pRegions[i].srcOffset + pRegions[i].size));
dst_min = std::min(dst_min, pRegions[i].dstOffset);
dst_max = std::max(dst_max, (pRegions[i].dstOffset + pRegions[i].size));
// The srcOffset member of each element of pRegions must be less than the size of srcBuffer
if (pRegions[i].srcOffset >= src_buffer_size) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(src_buffer_state->buffer), "VUID-vkCmdCopyBuffer-srcOffset-00113",
"vkCmdCopyBuffer(): pRegions[%d].srcOffset (%" PRIuLEAST64
") is greater than pRegions[%d].size (%" PRIuLEAST64 ").",
i, pRegions[i].srcOffset, i, pRegions[i].size);
}
// The dstOffset member of each element of pRegions must be less than the size of dstBuffer
if (pRegions[i].dstOffset >= dst_buffer_size) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(dst_buffer_state->buffer), "VUID-vkCmdCopyBuffer-dstOffset-00114",
"vkCmdCopyBuffer(): pRegions[%d].dstOffset (%" PRIuLEAST64
") is greater than pRegions[%d].size (%" PRIuLEAST64 ").",
i, pRegions[i].dstOffset, i, pRegions[i].size);
}
// The size member of each element of pRegions must be less than or equal to the size of srcBuffer minus srcOffset
if (pRegions[i].size > (src_buffer_size - pRegions[i].srcOffset)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(src_buffer_state->buffer), "VUID-vkCmdCopyBuffer-size-00115",
"vkCmdCopyBuffer(): pRegions[%d].size (%" PRIuLEAST64
") is greater than the source buffer size (%" PRIuLEAST64
") minus pRegions[%d].srcOffset (%" PRIuLEAST64 ").",
i, pRegions[i].size, src_buffer_size, i, pRegions[i].srcOffset);
}
// The size member of each element of pRegions must be less than or equal to the size of dstBuffer minus dstOffset
if (pRegions[i].size > (dst_buffer_size - pRegions[i].dstOffset)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(dst_buffer_state->buffer), "VUID-vkCmdCopyBuffer-size-00116",
"vkCmdCopyBuffer(): pRegions[%d].size (%" PRIuLEAST64
") is greater than the destination buffer size (%" PRIuLEAST64
") minus pRegions[%d].dstOffset (%" PRIuLEAST64 ").",
i, pRegions[i].size, dst_buffer_size, i, pRegions[i].dstOffset);
}
}
// The union of the source regions, and the union of the destination regions, must not overlap in memory
if (src_buffer_state->buffer == dst_buffer_state->buffer) {
if (((src_min > dst_min) && (src_min < dst_max)) || ((src_max > dst_min) && (src_max < dst_max))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(src_buffer_state->buffer), "VUID-vkCmdCopyBuffer-pRegions-00117",
"vkCmdCopyBuffer(): Detected overlap between source and dest regions in memory.");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
uint32_t regionCount, const VkBufferCopy *pRegions) {
const auto cb_node = GetCBState(commandBuffer);
const auto src_buffer_state = GetBufferState(srcBuffer);
const auto dst_buffer_state = GetBufferState(dstBuffer);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(src_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-srcBuffer-00119");
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-dstBuffer-00121");
// Validate that SRC & DST buffers have correct usage flags set
skip |=
ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyBuffer-srcBuffer-00118",
"vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
skip |=
ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyBuffer-dstBuffer-00120",
"vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |=
ValidateCmdQueueFlags(cb_node, "vkCmdCopyBuffer()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
skip |= InsideRenderPass(cb_node, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-renderpass");
skip |= ValidateCmdCopyBufferBounds(report_data, src_buffer_state, dst_buffer_state, regionCount, pRegions);
return skip;
}
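// A buffer can only be destroyed if it is known to the validation state tracker and is not in use by any command buffer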
bool CoreChecks::ValidateIdleBuffer(VkBuffer buffer) {
bool skip = false;
auto buffer_state = GetBufferState(buffer);
if (!buffer_state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, HandleToUint64(buffer),
kVUID_Core_DrawState_DoubleDestroy, "Cannot free %s that has not been allocated.",
report_data->FormatHandle(buffer).c_str());
} else {
if (buffer_state->in_use.load()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer), "VUID-vkDestroyBuffer-buffer-00922",
"Cannot free %s that is in use by a command buffer.", report_data->FormatHandle(buffer).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
IMAGE_VIEW_STATE *image_view_state = GetImageViewState(imageView);
const VulkanTypedHandle obj_struct(imageView, kVulkanObjectTypeImageView);
bool skip = false;
if (image_view_state) {
skip |=
ValidateObjectNotInUse(image_view_state, obj_struct, "vkDestroyImageView", "VUID-vkDestroyImageView-imageView-01026");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
auto buffer_state = GetBufferState(buffer);
bool skip = false;
if (buffer_state) {
skip |= ValidateIdleBuffer(buffer);
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyBufferView(VkDevice device, VkBufferView bufferView,
const VkAllocationCallbacks *pAllocator) {
auto buffer_view_state = GetBufferViewState(bufferView);
const VulkanTypedHandle obj_struct(bufferView, kVulkanObjectTypeBufferView);
bool skip = false;
if (buffer_view_state) {
skip |= ValidateObjectNotInUse(buffer_view_state, obj_struct, "vkDestroyBufferView",
"VUID-vkDestroyBufferView-bufferView-00936");
}
return skip;
}
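// Validate vkCmdFillBuffer(): destination buffer must be memory-backed with TRANSFER_DST usage, the command buffer must come
// from a transfer/graphics/compute-capable pool, and the command must be recorded outside a render pass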
bool CoreChecks::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize size, uint32_t data) {
auto cb_node = GetCBState(commandBuffer);
auto buffer_state = GetBufferState(dstBuffer);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-dstBuffer-00031");
skip |=
ValidateCmdQueueFlags(cb_node, "vkCmdFillBuffer()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdFillBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
// Validate that DST buffer has correct usage flags set
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdFillBuffer-dstBuffer-00029",
"vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= InsideRenderPass(cb_node, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-renderpass");
return skip;
}
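// Validate the VkBufferImageCopy regions of a buffer<->image copy: offsets/extents must match the image type, buffer
// offset/rowLength/imageHeight must satisfy the alignment rules, aspect masks must match the format, and compressed and
// single-plane _422 formats add texel-block granularity requirements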
bool CoreChecks::ValidateBufferImageCopyData(uint32_t regionCount, const VkBufferImageCopy *pRegions, IMAGE_STATE *image_state,
const char *function) {
bool skip = false;
for (uint32_t i = 0; i < regionCount; i++) {
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((pRegions[i].imageOffset.y != 0) || (pRegions[i].imageExtent.height != 1)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-srcImage-00199",
"%s(): pRegion[%d] imageOffset.y is %d and imageExtent.height is %d. For 1D images these must be 0 "
"and 1, respectively.",
function, i, pRegions[i].imageOffset.y, pRegions[i].imageExtent.height);
}
}
if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) || (image_state->createInfo.imageType == VK_IMAGE_TYPE_2D)) {
if ((pRegions[i].imageOffset.z != 0) || (pRegions[i].imageExtent.depth != 1)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-srcImage-00201",
"%s(): pRegion[%d] imageOffset.z is %d and imageExtent.depth is %d. For 1D and 2D images these "
"must be 0 and 1, respectively.",
function, i, pRegions[i].imageOffset.z, pRegions[i].imageExtent.depth);
}
}
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != pRegions[i].imageSubresource.baseArrayLayer) || (1 != pRegions[i].imageSubresource.layerCount)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-baseArrayLayer-00213",
"%s(): pRegion[%d] imageSubresource.baseArrayLayer is %d and imageSubresource.layerCount is %d. "
"For 3D images these must be 0 and 1, respectively.",
function, i, pRegions[i].imageSubresource.baseArrayLayer, pRegions[i].imageSubresource.layerCount);
}
}
        // If the calling command's VkImage parameter's format is not a depth/stencil format,
// then bufferOffset must be a multiple of the calling command's VkImage parameter's element size
uint32_t element_size = FormatElementSize(image_state->createInfo.format, pRegions[i].imageSubresource.aspectMask);
if (!FormatIsDepthAndStencil(image_state->createInfo.format) && SafeModulo(pRegions[i].bufferOffset, element_size) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferOffset-00193",
"%s(): pRegion[%d] bufferOffset 0x%" PRIxLEAST64
" must be a multiple of this format's texel size (%" PRIu32 ").",
function, i, pRegions[i].bufferOffset, element_size);
}
// BufferOffset must be a multiple of 4
if (SafeModulo(pRegions[i].bufferOffset, 4) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferOffset-00194",
"%s(): pRegion[%d] bufferOffset 0x%" PRIxLEAST64 " must be a multiple of 4.", function, i,
pRegions[i].bufferOffset);
}
// BufferRowLength must be 0, or greater than or equal to the width member of imageExtent
if ((pRegions[i].bufferRowLength != 0) && (pRegions[i].bufferRowLength < pRegions[i].imageExtent.width)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferRowLength-00195",
"%s(): pRegion[%d] bufferRowLength (%d) must be zero or greater-than-or-equal-to imageExtent.width (%d).",
function, i, pRegions[i].bufferRowLength, pRegions[i].imageExtent.width);
}
// BufferImageHeight must be 0, or greater than or equal to the height member of imageExtent
if ((pRegions[i].bufferImageHeight != 0) && (pRegions[i].bufferImageHeight < pRegions[i].imageExtent.height)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferImageHeight-00196",
"%s(): pRegion[%d] bufferImageHeight (%d) must be zero or greater-than-or-equal-to imageExtent.height (%d).",
function, i, pRegions[i].bufferImageHeight, pRegions[i].imageExtent.height);
}
// Calculate adjusted image extent, accounting for multiplane image factors
        VkExtent3D adjusted_image_extent = GetImageSubresourceExtent(image_state, &pRegions[i].imageSubresource);
        // imageOffset.x and (imageExtent.width + imageOffset.x) must both be >= 0 and <= image subresource width
        if ((pRegions[i].imageOffset.x < 0) || (pRegions[i].imageOffset.x > static_cast<int32_t>(adjusted_image_extent.width)) ||
            ((pRegions[i].imageOffset.x + pRegions[i].imageExtent.width) > static_cast<int32_t>(adjusted_image_extent.width))) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageOffset-00197",
                            "%s(): Both pRegion[%d] imageOffset.x (%d) and (imageExtent.width + imageOffset.x) (%d) must be >= "
                            "zero and <= image subresource width (%d).",
                            function, i, pRegions[i].imageOffset.x, (pRegions[i].imageOffset.x + pRegions[i].imageExtent.width),
                            adjusted_image_extent.width);
        }
        // imageOffset.y and (imageExtent.height + imageOffset.y) must both be >= 0 and <= image subresource height
        if ((pRegions[i].imageOffset.y < 0) || (pRegions[i].imageOffset.y > static_cast<int32_t>(adjusted_image_extent.height)) ||
            ((pRegions[i].imageOffset.y + pRegions[i].imageExtent.height) > static_cast<int32_t>(adjusted_image_extent.height))) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageOffset-00198",
                            "%s(): Both pRegion[%d] imageOffset.y (%d) and (imageExtent.height + imageOffset.y) (%d) must be >= "
                            "zero and <= image subresource height (%d).",
                            function, i, pRegions[i].imageOffset.y, (pRegions[i].imageOffset.y + pRegions[i].imageExtent.height),
                            adjusted_image_extent.height);
        }
        // imageOffset.z and (imageExtent.depth + imageOffset.z) must both be >= 0 and <= image subresource depth
        if ((pRegions[i].imageOffset.z < 0) || (pRegions[i].imageOffset.z > static_cast<int32_t>(adjusted_image_extent.depth)) ||
            ((pRegions[i].imageOffset.z + pRegions[i].imageExtent.depth) > static_cast<int32_t>(adjusted_image_extent.depth))) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageOffset-00200",
                            "%s(): Both pRegion[%d] imageOffset.z (%d) and (imageExtent.depth + imageOffset.z) (%d) must be >= "
                            "zero and <= image subresource depth (%d).",
                            function, i, pRegions[i].imageOffset.z, (pRegions[i].imageOffset.z + pRegions[i].imageExtent.depth),
                            adjusted_image_extent.depth);
        }
// subresource aspectMask must have exactly 1 bit set
const int num_bits = sizeof(VkFlags) * CHAR_BIT;
std::bitset<num_bits> aspect_mask_bits(pRegions[i].imageSubresource.aspectMask);
if (aspect_mask_bits.count() != 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-aspectMask-00212",
"%s: aspectMasks for imageSubresource in each region must have only a single bit set.", function);
}
// image subresource aspect bit must match format
if (!VerifyAspectsPresent(pRegions[i].imageSubresource.aspectMask, image_state->createInfo.format)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-aspectMask-00211",
"%s(): pRegion[%d] subresource aspectMask 0x%x specifies aspects that are not present in image format 0x%x.",
function, i, pRegions[i].imageSubresource.aspectMask, image_state->createInfo.format);
}
// Checks that apply only to compressed images
if (FormatIsCompressed(image_state->createInfo.format) || FormatIsSinglePlane_422(image_state->createInfo.format)) {
auto block_size = FormatTexelBlockExtent(image_state->createInfo.format);
// BufferRowLength must be a multiple of block width
if (SafeModulo(pRegions[i].bufferRowLength, block_size.width) != 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferRowLength-00203",
"%s(): pRegion[%d] bufferRowLength (%d) must be a multiple of the compressed image's texel width (%d)..",
function, i, pRegions[i].bufferRowLength, block_size.width);
}
// BufferRowHeight must be a multiple of block height
if (SafeModulo(pRegions[i].bufferImageHeight, block_size.height) != 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferImageHeight-00204",
"%s(): pRegion[%d] bufferImageHeight (%d) must be a multiple of the compressed image's texel height (%d)..",
function, i, pRegions[i].bufferImageHeight, block_size.height);
}
// image offsets must be multiples of block dimensions
if ((SafeModulo(pRegions[i].imageOffset.x, block_size.width) != 0) ||
(SafeModulo(pRegions[i].imageOffset.y, block_size.height) != 0) ||
(SafeModulo(pRegions[i].imageOffset.z, block_size.depth) != 0)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageOffset-00205",
"%s(): pRegion[%d] imageOffset(x,y) (%d, %d) must be multiples of the compressed image's texel "
"width & height (%d, %d)..",
function, i, pRegions[i].imageOffset.x, pRegions[i].imageOffset.y, block_size.width, block_size.height);
}
// bufferOffset must be a multiple of block size (linear bytes)
uint32_t block_size_in_bytes = FormatElementSize(image_state->createInfo.format);
if (SafeModulo(pRegions[i].bufferOffset, block_size_in_bytes) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferOffset-00206",
"%s(): pRegion[%d] bufferOffset (0x%" PRIxLEAST64
") must be a multiple of the compressed image's texel block size (%" PRIu32 ")..",
function, i, pRegions[i].bufferOffset, block_size_in_bytes);
}
// imageExtent width must be a multiple of block width, or extent+offset width must equal subresource width
VkExtent3D mip_extent = GetImageSubresourceExtent(image_state, &(pRegions[i].imageSubresource));
if ((SafeModulo(pRegions[i].imageExtent.width, block_size.width) != 0) &&
(pRegions[i].imageExtent.width + pRegions[i].imageOffset.x != mip_extent.width)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageExtent-00207",
"%s(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block width "
"(%d), or when added to offset.x (%d) must equal the image subresource width (%d)..",
function, i, pRegions[i].imageExtent.width, block_size.width, pRegions[i].imageOffset.x,
mip_extent.width);
}
// imageExtent height must be a multiple of block height, or extent+offset height must equal subresource height
if ((SafeModulo(pRegions[i].imageExtent.height, block_size.height) != 0) &&
(pRegions[i].imageExtent.height + pRegions[i].imageOffset.y != mip_extent.height)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageExtent-00208",
"%s(): pRegion[%d] extent height (%d) must be a multiple of the compressed texture block height "
"(%d), or when added to offset.y (%d) must equal the image subresource height (%d)..",
function, i, pRegions[i].imageExtent.height, block_size.height, pRegions[i].imageOffset.y,
mip_extent.height);
}
// imageExtent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth
if ((SafeModulo(pRegions[i].imageExtent.depth, block_size.depth) != 0) &&
(pRegions[i].imageExtent.depth + pRegions[i].imageOffset.z != mip_extent.depth)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageExtent-00209",
"%s(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block depth "
"(%d), or when added to offset.z (%d) must equal the image subresource depth (%d)..",
function, i, pRegions[i].imageExtent.depth, block_size.depth, pRegions[i].imageOffset.z,
mip_extent.depth);
}
}
}
return skip;
}
static bool ValidateImageBounds(const debug_report_data *report_data, const IMAGE_STATE *image_state, const uint32_t regionCount,
const VkBufferImageCopy *pRegions, const char *func_name, const char *msg_code) {
bool skip = false;
const VkImageCreateInfo *image_info = &(image_state->createInfo);
for (uint32_t i = 0; i < regionCount; i++) {
VkExtent3D extent = pRegions[i].imageExtent;
VkOffset3D offset = pRegions[i].imageOffset;
if (IsExtentSizeZero(&extent)) // Warn on zero area subresource
{
skip |=
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)0,
kVUID_Core_Image_ZeroAreaSubregion, "%s: pRegion[%d] imageExtent of {%1d, %1d, %1d} has zero area",
func_name, i, extent.width, extent.height, extent.depth);
}
VkExtent3D image_extent = GetImageSubresourceExtent(image_state, &(pRegions[i].imageSubresource));
// If we're using a compressed format, valid extent is rounded up to multiple of block size (per 18.1)
if (FormatIsCompressed(image_info->format)) {
auto block_extent = FormatTexelBlockExtent(image_info->format);
if (image_extent.width % block_extent.width) {
image_extent.width += (block_extent.width - (image_extent.width % block_extent.width));
}
if (image_extent.height % block_extent.height) {
image_extent.height += (block_extent.height - (image_extent.height % block_extent.height));
}
if (image_extent.depth % block_extent.depth) {
image_extent.depth += (block_extent.depth - (image_extent.depth % block_extent.depth));
}
}
if (0 != ExceedsBounds(&offset, &extent, &image_extent)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)0,
msg_code, "%s: pRegion[%d] exceeds image bounds..", func_name, i);
}
}
return skip;
}
static inline bool ValidateBufferBounds(const debug_report_data *report_data, IMAGE_STATE *image_state, BUFFER_STATE *buff_state,
uint32_t regionCount, const VkBufferImageCopy *pRegions, const char *func_name,
const char *msg_code) {
bool skip = false;
VkDeviceSize buffer_size = buff_state->createInfo.size;
for (uint32_t i = 0; i < regionCount; i++) {
VkExtent3D copy_extent = pRegions[i].imageExtent;
VkDeviceSize buffer_width = (0 == pRegions[i].bufferRowLength ? copy_extent.width : pRegions[i].bufferRowLength);
VkDeviceSize buffer_height = (0 == pRegions[i].bufferImageHeight ? copy_extent.height : pRegions[i].bufferImageHeight);
VkDeviceSize unit_size = FormatElementSize(image_state->createInfo.format,
pRegions[i].imageSubresource.aspectMask); // size (bytes) of texel or block
if (FormatIsCompressed(image_state->createInfo.format) || FormatIsSinglePlane_422(image_state->createInfo.format)) {
// Switch to texel block units, rounding up for any partially-used blocks
auto block_dim = FormatTexelBlockExtent(image_state->createInfo.format);
buffer_width = (buffer_width + block_dim.width - 1) / block_dim.width;
buffer_height = (buffer_height + block_dim.height - 1) / block_dim.height;
copy_extent.width = (copy_extent.width + block_dim.width - 1) / block_dim.width;
copy_extent.height = (copy_extent.height + block_dim.height - 1) / block_dim.height;
copy_extent.depth = (copy_extent.depth + block_dim.depth - 1) / block_dim.depth;
}
// Either depth or layerCount may be greater than 1 (not both). This is the number of 'slices' to copy
uint32_t z_copies = std::max(copy_extent.depth, pRegions[i].imageSubresource.layerCount);
if (IsExtentSizeZero(&copy_extent) || (0 == z_copies)) {
// TODO: Issue warning here? Already warned in ValidateImageBounds()...
} else {
// Calculate buffer offset of final copied byte, + 1.
VkDeviceSize max_buffer_offset = (z_copies - 1) * buffer_height * buffer_width; // offset to slice
max_buffer_offset += ((copy_extent.height - 1) * buffer_width) + copy_extent.width; // add row,col
max_buffer_offset *= unit_size; // convert to bytes
max_buffer_offset += pRegions[i].bufferOffset; // add initial offset (bytes)
if (buffer_size < max_buffer_offset) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)0,
msg_code, "%s: pRegion[%d] exceeds buffer size of %" PRIu64 " bytes..", func_name, i, buffer_size);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
const auto cb_node = GetCBState(commandBuffer);
const auto src_image_state = GetImageState(srcImage);
const auto dst_buffer_state = GetBufferState(dstBuffer);
bool skip = ValidateBufferImageCopyData(regionCount, pRegions, src_image_state, "vkCmdCopyImageToBuffer");
// Validate command buffer state
skip |= ValidateCmd(cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
// Command pool must support graphics, compute, or transfer operations
const auto pPool = GetCommandPoolState(cb_node->createInfo.commandPool);
VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].queueFlags;
if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->createInfo.commandPool), "VUID-vkCmdCopyImageToBuffer-commandBuffer-cmdpool",
"Cannot call vkCmdCopyImageToBuffer() on a command buffer allocated from a pool without graphics, compute, "
"or transfer capabilities..");
}
skip |= ValidateImageBounds(report_data, src_image_state, regionCount, pRegions, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-pRegions-00182");
skip |= ValidateBufferBounds(report_data, src_image_state, dst_buffer_state, regionCount, pRegions, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-pRegions-00183");
skip |= ValidateImageSampleCount(src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage",
"VUID-vkCmdCopyImageToBuffer-srcImage-00188");
skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-srcImage-00187");
skip |=
ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-dstBuffer-00192");
// Validate that SRC image & DST buffer have correct usage flags set
skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
"VUID-vkCmdCopyImageToBuffer-srcImage-00186", "vkCmdCopyImageToBuffer()",
"VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyImageToBuffer-dstBuffer-00191", "vkCmdCopyImageToBuffer()",
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
if (api_version >= VK_API_VERSION_1_1 || device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-srcImage-01998",
"VUID-vkCmdCopyImageToBuffer-srcImage-01998");
}
skip |= InsideRenderPass(cb_node, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-renderpass");
bool hit_error = false;
const char *src_invalid_layout_vuid = (src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyImageToBuffer-srcImageLayout-01397"
: "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00190";
for (uint32_t i = 0; i < regionCount; ++i) {
skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].imageSubresource, "vkCmdCopyImageToBuffer()",
"imageSubresource", i);
skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].imageSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdCopyImageToBuffer()", src_invalid_layout_vuid,
"VUID-vkCmdCopyImageToBuffer-srcImageLayout-00189", &hit_error);
skip |= ValidateCopyBufferImageTransferGranularityRequirements(
cb_node, src_image_state, &pRegions[i], i, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-imageOffset-01794");
skip |=
ValidateImageMipLevel(cb_node, src_image_state, pRegions[i].imageSubresource.mipLevel, i, "vkCmdCopyImageToBuffer()",
"imageSubresource", "VUID-vkCmdCopyImageToBuffer-imageSubresource-01703");
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, pRegions[i].imageSubresource.baseArrayLayer,
pRegions[i].imageSubresource.layerCount, i, "vkCmdCopyImageToBuffer()",
"imageSubresource", "VUID-vkCmdCopyImageToBuffer-imageSubresource-01704");
}
return skip;
}
void CoreChecks::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
// Make sure that all image slices record the referenced layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].imageSubresource, srcImageLayout);
}
}
bool CoreChecks::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) {
const auto cb_node = GetCBState(commandBuffer);
const auto src_buffer_state = GetBufferState(srcBuffer);
const auto dst_image_state = GetImageState(dstImage);
bool skip = ValidateBufferImageCopyData(regionCount, pRegions, dst_image_state, "vkCmdCopyBufferToImage");
// Validate command buffer state
skip |= ValidateCmd(cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
// Command pool must support graphics, compute, or transfer operations
const auto pPool = GetCommandPoolState(cb_node->createInfo.commandPool);
VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].queueFlags;
if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->createInfo.commandPool), "VUID-vkCmdCopyBufferToImage-commandBuffer-cmdpool",
"Cannot call vkCmdCopyBufferToImage() on a command buffer allocated from a pool without graphics, compute, "
"or transfer capabilities..");
}
skip |= ValidateImageBounds(report_data, dst_image_state, regionCount, pRegions, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-pRegions-00172");
skip |= ValidateBufferBounds(report_data, dst_image_state, src_buffer_state, regionCount, pRegions, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-pRegions-00171");
skip |= ValidateImageSampleCount(dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage",
"VUID-vkCmdCopyBufferToImage-dstImage-00179");
skip |=
ValidateMemoryIsBoundToBuffer(src_buffer_state, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-srcBuffer-00176");
skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-dstImage-00178");
skip |= ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
"VUID-vkCmdCopyBufferToImage-srcBuffer-00174", "vkCmdCopyBufferToImage()",
"VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyBufferToImage-dstImage-00177", "vkCmdCopyBufferToImage()",
"VK_IMAGE_USAGE_TRANSFER_DST_BIT");
if (api_version >= VK_API_VERSION_1_1 || device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-dstImage-01997",
"VUID-vkCmdCopyBufferToImage-dstImage-01997");
}
skip |= InsideRenderPass(cb_node, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-renderpass");
bool hit_error = false;
const char *dst_invalid_layout_vuid = (dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyBufferToImage-dstImageLayout-01396"
: "VUID-vkCmdCopyBufferToImage-dstImageLayout-00181";
for (uint32_t i = 0; i < regionCount; ++i) {
skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].imageSubresource, "vkCmdCopyBufferToImage()",
"imageSubresource", i);
skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].imageSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdCopyBufferToImage()", dst_invalid_layout_vuid,
"VUID-vkCmdCopyBufferToImage-dstImageLayout-00180", &hit_error);
skip |= ValidateCopyBufferImageTransferGranularityRequirements(
cb_node, dst_image_state, &pRegions[i], i, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-imageOffset-01793");
skip |=
ValidateImageMipLevel(cb_node, dst_image_state, pRegions[i].imageSubresource.mipLevel, i, "vkCmdCopyBufferToImage()",
"imageSubresource", "VUID-vkCmdCopyBufferToImage-imageSubresource-01701");
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, pRegions[i].imageSubresource.baseArrayLayer,
pRegions[i].imageSubresource.layerCount, i, "vkCmdCopyBufferToImage()",
"imageSubresource", "VUID-vkCmdCopyBufferToImage-imageSubresource-01702");
}
return skip;
}
void CoreChecks::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
auto cb_node = GetCBState(commandBuffer);
auto dst_image_state = GetImageState(dstImage);
// Make sure that all image slices record the referenced layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].imageSubresource, dstImageLayout);
}
}
bool CoreChecks::PreCallValidateGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
VkSubresourceLayout *pLayout) {
bool skip = false;
const VkImageAspectFlags sub_aspect = pSubresource->aspectMask;
// The aspectMask member of pSubresource must only have a single bit set
const int num_bits = sizeof(sub_aspect) * CHAR_BIT;
std::bitset<num_bits> aspect_mask_bits(sub_aspect);
if (aspect_mask_bits.count() != 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-vkGetImageSubresourceLayout-aspectMask-00997",
"vkGetImageSubresourceLayout(): VkImageSubresource.aspectMask must have exactly 1 bit set.");
}
const IMAGE_STATE *image_entry = GetImageState(image);
if (!image_entry) {
return skip;
}
// image must have been created with tiling equal to VK_IMAGE_TILING_LINEAR
if (image_entry->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-vkGetImageSubresourceLayout-image-00996",
"vkGetImageSubresourceLayout(): Image must have tiling of VK_IMAGE_TILING_LINEAR.");
}
// mipLevel must be less than the mipLevels specified in VkImageCreateInfo when the image was created
if (pSubresource->mipLevel >= image_entry->createInfo.mipLevels) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-vkGetImageSubresourceLayout-mipLevel-01716",
"vkGetImageSubresourceLayout(): pSubresource.mipLevel (%d) must be less than %d.", pSubresource->mipLevel,
image_entry->createInfo.mipLevels);
}
// arrayLayer must be less than the arrayLayers specified in VkImageCreateInfo when the image was created
if (pSubresource->arrayLayer >= image_entry->createInfo.arrayLayers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-vkGetImageSubresourceLayout-arrayLayer-01717",
"vkGetImageSubresourceLayout(): pSubresource.arrayLayer (%d) must be less than %d.",
pSubresource->arrayLayer, image_entry->createInfo.arrayLayers);
}
// subresource's aspect must be compatible with image's format.
const VkFormat img_format = image_entry->createInfo.format;
if (FormatIsMultiplane(img_format)) {
VkImageAspectFlags allowed_flags = (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
const char *vuid = "VUID-vkGetImageSubresourceLayout-format-01581"; // 2-plane version
if (FormatPlaneCount(img_format) > 2u) {
allowed_flags |= VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
vuid = "VUID-vkGetImageSubresourceLayout-format-01582"; // 3-plane version
}
if (sub_aspect != (sub_aspect & allowed_flags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), vuid,
"vkGetImageSubresourceLayout(): For multi-planar images, VkImageSubresource.aspectMask (0x%" PRIx32
") must be a single-plane specifier flag.",
sub_aspect);
}
} else if (FormatIsColor(img_format)) {
if (sub_aspect != VK_IMAGE_ASPECT_COLOR_BIT) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-VkImageSubresource-aspectMask-parameter",
"vkGetImageSubresourceLayout(): For color formats, VkImageSubresource.aspectMask must be VK_IMAGE_ASPECT_COLOR.");
}
} else if (FormatIsDepthOrStencil(img_format)) {
if ((sub_aspect != VK_IMAGE_ASPECT_DEPTH_BIT) && (sub_aspect != VK_IMAGE_ASPECT_STENCIL_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), "VUID-VkImageSubresource-aspectMask-parameter",
"vkGetImageSubresourceLayout(): For depth/stencil formats, VkImageSubresource.aspectMask must be "
"either VK_IMAGE_ASPECT_DEPTH_BIT or VK_IMAGE_ASPECT_STENCIL_BIT.");
}
}
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateGetImageSubresourceLayoutANDROID(image);
}
return skip;
}
| 1 | 12,054 | We're capturing cb_state non-const, but the function is const, so it's safe enough. Still, after we move to reader/writer locks we're all going to have to keep a careful eye on anything in this pattern. | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -194,6 +194,12 @@ func (c *AwsEndpointDiscoveryTest) TestDiscoveryIdentifiersRequiredRequest(input
output = &TestDiscoveryIdentifiersRequiredOutput{}
req = c.newRequest(op, input, output)
+
+ // if a custom endpoint is provided for the request,
+ // we skip endpoint discovery workflow
+ if req.Config.Endpoint != nil {
+ return
+ }
de := discovererDescribeEndpoints{
Required: true,
EndpointCache: c.endpointCache, | 1 | // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package awsendpointdiscoverytest
import (
"fmt"
"net/url"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/crr"
"github.com/aws/aws-sdk-go/aws/request"
)
const opDescribeEndpoints = "DescribeEndpoints"
// DescribeEndpointsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeEndpoints operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeEndpoints for more information on using the DescribeEndpoints
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DescribeEndpointsRequest method.
// req, resp := client.DescribeEndpointsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
func (c *AwsEndpointDiscoveryTest) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req *request.Request, output *DescribeEndpointsOutput) {
op := &request.Operation{
Name: opDescribeEndpoints,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DescribeEndpointsInput{}
}
output = &DescribeEndpointsOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeEndpoints API operation for AwsEndpointDiscoveryTest.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AwsEndpointDiscoveryTest's
// API operation DescribeEndpoints for usage and error information.
func (c *AwsEndpointDiscoveryTest) DescribeEndpoints(input *DescribeEndpointsInput) (*DescribeEndpointsOutput, error) {
req, out := c.DescribeEndpointsRequest(input)
return out, req.Send()
}
// DescribeEndpointsWithContext is the same as DescribeEndpoints with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeEndpoints for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *AwsEndpointDiscoveryTest) DescribeEndpointsWithContext(ctx aws.Context, input *DescribeEndpointsInput, opts ...request.Option) (*DescribeEndpointsOutput, error) {
req, out := c.DescribeEndpointsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
type discovererDescribeEndpoints struct {
Client *AwsEndpointDiscoveryTest
Required bool
EndpointCache *crr.EndpointCache
Params map[string]*string
Key string
req *request.Request
}
func (d *discovererDescribeEndpoints) Discover() (crr.Endpoint, error) {
input := &DescribeEndpointsInput{
Operation: d.Params["op"],
}
resp, err := d.Client.DescribeEndpoints(input)
if err != nil {
return crr.Endpoint{}, err
}
endpoint := crr.Endpoint{
Key: d.Key,
}
for _, e := range resp.Endpoints {
if e.Address == nil {
continue
}
address := *e.Address
var scheme string
if idx := strings.Index(address, "://"); idx != -1 {
scheme = address[:idx]
}
if len(scheme) == 0 {
address = fmt.Sprintf("%s://%s", d.req.HTTPRequest.URL.Scheme, address)
}
cachedInMinutes := aws.Int64Value(e.CachePeriodInMinutes)
u, err := url.Parse(address)
if err != nil {
continue
}
addr := crr.WeightedAddress{
URL: u,
Expired: time.Now().Add(time.Duration(cachedInMinutes) * time.Minute),
}
endpoint.Add(addr)
}
d.EndpointCache.Add(endpoint)
return endpoint, nil
}
func (d *discovererDescribeEndpoints) Handler(r *request.Request) {
endpointKey := crr.BuildEndpointKey(d.Params)
d.Key = endpointKey
d.req = r
endpoint, err := d.EndpointCache.Get(d, endpointKey, d.Required)
if err != nil {
r.Error = err
return
}
if endpoint.URL != nil && len(endpoint.URL.String()) > 0 {
r.HTTPRequest.URL = endpoint.URL
}
}
const opTestDiscoveryIdentifiersRequired = "TestDiscoveryIdentifiersRequired"
// TestDiscoveryIdentifiersRequiredRequest generates a "aws/request.Request" representing the
// client's request for the TestDiscoveryIdentifiersRequired operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See TestDiscoveryIdentifiersRequired for more information on using the TestDiscoveryIdentifiersRequired
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the TestDiscoveryIdentifiersRequiredRequest method.
// req, resp := client.TestDiscoveryIdentifiersRequiredRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
func (c *AwsEndpointDiscoveryTest) TestDiscoveryIdentifiersRequiredRequest(input *TestDiscoveryIdentifiersRequiredInput) (req *request.Request, output *TestDiscoveryIdentifiersRequiredOutput) {
op := &request.Operation{
Name: opTestDiscoveryIdentifiersRequired,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &TestDiscoveryIdentifiersRequiredInput{}
}
output = &TestDiscoveryIdentifiersRequiredOutput{}
req = c.newRequest(op, input, output)
de := discovererDescribeEndpoints{
Required: true,
EndpointCache: c.endpointCache,
Params: map[string]*string{
"op": aws.String(req.Operation.Name),
"Sdk": input.Sdk,
},
Client: c,
}
for k, v := range de.Params {
if v == nil {
delete(de.Params, k)
}
}
req.Handlers.Build.PushFrontNamed(request.NamedHandler{
Name: "crr.endpointdiscovery",
Fn: de.Handler,
})
return
}
// TestDiscoveryIdentifiersRequired API operation for AwsEndpointDiscoveryTest.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AwsEndpointDiscoveryTest's
// API operation TestDiscoveryIdentifiersRequired for usage and error information.
func (c *AwsEndpointDiscoveryTest) TestDiscoveryIdentifiersRequired(input *TestDiscoveryIdentifiersRequiredInput) (*TestDiscoveryIdentifiersRequiredOutput, error) {
req, out := c.TestDiscoveryIdentifiersRequiredRequest(input)
return out, req.Send()
}
// TestDiscoveryIdentifiersRequiredWithContext is the same as TestDiscoveryIdentifiersRequired with the addition of
// the ability to pass a context and additional request options.
//
// See TestDiscoveryIdentifiersRequired for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *AwsEndpointDiscoveryTest) TestDiscoveryIdentifiersRequiredWithContext(ctx aws.Context, input *TestDiscoveryIdentifiersRequiredInput, opts ...request.Option) (*TestDiscoveryIdentifiersRequiredOutput, error) {
req, out := c.TestDiscoveryIdentifiersRequiredRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opTestDiscoveryOptional = "TestDiscoveryOptional"
// TestDiscoveryOptionalRequest generates a "aws/request.Request" representing the
// client's request for the TestDiscoveryOptional operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See TestDiscoveryOptional for more information on using the TestDiscoveryOptional
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the TestDiscoveryOptionalRequest method.
// req, resp := client.TestDiscoveryOptionalRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
func (c *AwsEndpointDiscoveryTest) TestDiscoveryOptionalRequest(input *TestDiscoveryOptionalInput) (req *request.Request, output *TestDiscoveryOptionalOutput) {
op := &request.Operation{
Name: opTestDiscoveryOptional,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &TestDiscoveryOptionalInput{}
}
output = &TestDiscoveryOptionalOutput{}
req = c.newRequest(op, input, output)
if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
de := discovererDescribeEndpoints{
Required: false,
EndpointCache: c.endpointCache,
Params: map[string]*string{
"op": aws.String(req.Operation.Name),
},
Client: c,
}
for k, v := range de.Params {
if v == nil {
delete(de.Params, k)
}
}
req.Handlers.Build.PushFrontNamed(request.NamedHandler{
Name: "crr.endpointdiscovery",
Fn: de.Handler,
})
}
return
}
// TestDiscoveryOptional API operation for AwsEndpointDiscoveryTest.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AwsEndpointDiscoveryTest's
// API operation TestDiscoveryOptional for usage and error information.
func (c *AwsEndpointDiscoveryTest) TestDiscoveryOptional(input *TestDiscoveryOptionalInput) (*TestDiscoveryOptionalOutput, error) {
req, out := c.TestDiscoveryOptionalRequest(input)
return out, req.Send()
}
// TestDiscoveryOptionalWithContext is the same as TestDiscoveryOptional with the addition of
// the ability to pass a context and additional request options.
//
// See TestDiscoveryOptional for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *AwsEndpointDiscoveryTest) TestDiscoveryOptionalWithContext(ctx aws.Context, input *TestDiscoveryOptionalInput, opts ...request.Option) (*TestDiscoveryOptionalOutput, error) {
req, out := c.TestDiscoveryOptionalRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opTestDiscoveryRequired = "TestDiscoveryRequired"
// TestDiscoveryRequiredRequest generates a "aws/request.Request" representing the
// client's request for the TestDiscoveryRequired operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See TestDiscoveryRequired for more information on using the TestDiscoveryRequired
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the TestDiscoveryRequiredRequest method.
// req, resp := client.TestDiscoveryRequiredRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
func (c *AwsEndpointDiscoveryTest) TestDiscoveryRequiredRequest(input *TestDiscoveryRequiredInput) (req *request.Request, output *TestDiscoveryRequiredOutput) {
op := &request.Operation{
Name: opTestDiscoveryRequired,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &TestDiscoveryRequiredInput{}
}
output = &TestDiscoveryRequiredOutput{}
req = c.newRequest(op, input, output)
if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
de := discovererDescribeEndpoints{
Required: false,
EndpointCache: c.endpointCache,
Params: map[string]*string{
"op": aws.String(req.Operation.Name),
},
Client: c,
}
for k, v := range de.Params {
if v == nil {
delete(de.Params, k)
}
}
req.Handlers.Build.PushFrontNamed(request.NamedHandler{
Name: "crr.endpointdiscovery",
Fn: de.Handler,
})
}
return
}
// TestDiscoveryRequired API operation for AwsEndpointDiscoveryTest.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AwsEndpointDiscoveryTest's
// API operation TestDiscoveryRequired for usage and error information.
func (c *AwsEndpointDiscoveryTest) TestDiscoveryRequired(input *TestDiscoveryRequiredInput) (*TestDiscoveryRequiredOutput, error) {
req, out := c.TestDiscoveryRequiredRequest(input)
return out, req.Send()
}
// TestDiscoveryRequiredWithContext is the same as TestDiscoveryRequired with the addition of
// the ability to pass a context and additional request options.
//
// See TestDiscoveryRequired for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *AwsEndpointDiscoveryTest) TestDiscoveryRequiredWithContext(ctx aws.Context, input *TestDiscoveryRequiredInput, opts ...request.Option) (*TestDiscoveryRequiredOutput, error) {
req, out := c.TestDiscoveryRequiredRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
type DescribeEndpointsInput struct {
_ struct{} `type:"structure"`
Operation *string `type:"string"`
}
// String returns the string representation
func (s DescribeEndpointsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeEndpointsInput) GoString() string {
return s.String()
}
// SetOperation sets the Operation field's value.
func (s *DescribeEndpointsInput) SetOperation(v string) *DescribeEndpointsInput {
s.Operation = &v
return s
}
type DescribeEndpointsOutput struct {
_ struct{} `type:"structure"`
// Endpoints is a required field
Endpoints []*Endpoint `type:"list" required:"true"`
}
// String returns the string representation
func (s DescribeEndpointsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeEndpointsOutput) GoString() string {
return s.String()
}
// SetEndpoints sets the Endpoints field's value.
func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpointsOutput {
s.Endpoints = v
return s
}
type Endpoint struct {
_ struct{} `type:"structure"`
// Address is a required field
Address *string `type:"string" required:"true"`
// CachePeriodInMinutes is a required field
CachePeriodInMinutes *int64 `type:"long" required:"true"`
}
// String returns the string representation
func (s Endpoint) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Endpoint) GoString() string {
return s.String()
}
// SetAddress sets the Address field's value.
func (s *Endpoint) SetAddress(v string) *Endpoint {
s.Address = &v
return s
}
// SetCachePeriodInMinutes sets the CachePeriodInMinutes field's value.
func (s *Endpoint) SetCachePeriodInMinutes(v int64) *Endpoint {
s.CachePeriodInMinutes = &v
return s
}
type TestDiscoveryIdentifiersRequiredInput struct {
_ struct{} `type:"structure"`
// Sdk is a required field
Sdk *string `type:"string" required:"true"`
}
// String returns the string representation
func (s TestDiscoveryIdentifiersRequiredInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TestDiscoveryIdentifiersRequiredInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *TestDiscoveryIdentifiersRequiredInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "TestDiscoveryIdentifiersRequiredInput"}
if s.Sdk == nil {
invalidParams.Add(request.NewErrParamRequired("Sdk"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetSdk sets the Sdk field's value.
func (s *TestDiscoveryIdentifiersRequiredInput) SetSdk(v string) *TestDiscoveryIdentifiersRequiredInput {
s.Sdk = &v
return s
}
type TestDiscoveryIdentifiersRequiredOutput struct {
_ struct{} `type:"structure"`
RequestSuccessful *bool `type:"boolean"`
}
// String returns the string representation
func (s TestDiscoveryIdentifiersRequiredOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TestDiscoveryIdentifiersRequiredOutput) GoString() string {
return s.String()
}
// SetRequestSuccessful sets the RequestSuccessful field's value.
func (s *TestDiscoveryIdentifiersRequiredOutput) SetRequestSuccessful(v bool) *TestDiscoveryIdentifiersRequiredOutput {
s.RequestSuccessful = &v
return s
}
type TestDiscoveryOptionalInput struct {
_ struct{} `type:"structure"`
Sdk *string `type:"string"`
}
// String returns the string representation
func (s TestDiscoveryOptionalInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TestDiscoveryOptionalInput) GoString() string {
return s.String()
}
// SetSdk sets the Sdk field's value.
func (s *TestDiscoveryOptionalInput) SetSdk(v string) *TestDiscoveryOptionalInput {
s.Sdk = &v
return s
}
type TestDiscoveryOptionalOutput struct {
_ struct{} `type:"structure"`
RequestSuccessful *bool `type:"boolean"`
}
// String returns the string representation
func (s TestDiscoveryOptionalOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TestDiscoveryOptionalOutput) GoString() string {
return s.String()
}
// SetRequestSuccessful sets the RequestSuccessful field's value.
func (s *TestDiscoveryOptionalOutput) SetRequestSuccessful(v bool) *TestDiscoveryOptionalOutput {
s.RequestSuccessful = &v
return s
}
type TestDiscoveryRequiredInput struct {
_ struct{} `type:"structure"`
Sdk *string `type:"string"`
}
// String returns the string representation
func (s TestDiscoveryRequiredInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TestDiscoveryRequiredInput) GoString() string {
return s.String()
}
// SetSdk sets the Sdk field's value.
func (s *TestDiscoveryRequiredInput) SetSdk(v string) *TestDiscoveryRequiredInput {
s.Sdk = &v
return s
}
type TestDiscoveryRequiredOutput struct {
_ struct{} `type:"structure"`
RequestSuccessful *bool `type:"boolean"`
}
// String returns the string representation
func (s TestDiscoveryRequiredOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TestDiscoveryRequiredOutput) GoString() string {
return s.String()
}
// SetRequestSuccessful sets the RequestSuccessful field's value.
func (s *TestDiscoveryRequiredOutput) SetRequestSuccessful(v bool) *TestDiscoveryRequiredOutput {
s.RequestSuccessful = &v
return s
}
| 1 | 10,154 | Probably want to wrap the endpoint discovery work that follows in this `if` statement instead of returning early. This will make it easier to add additional request code generation logic in the future; e.g. if we add any code generation after the endpoint discovery block, this early return will cause it to be skipped. | aws-aws-sdk-go | go |
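A minimal sketch of what the reviewer is suggesting, assuming the generated constructor shape and imports shown in the file above; this is not actual regenerated output, just the same discovery block gated on the custom-endpoint check rather than an early return:

// Sketch only: gate endpoint discovery on the absence of a custom endpoint
// without returning early, so code generated after this block still runs.
func (c *AwsEndpointDiscoveryTest) TestDiscoveryIdentifiersRequiredRequest(input *TestDiscoveryIdentifiersRequiredInput) (req *request.Request, output *TestDiscoveryIdentifiersRequiredOutput) {
	op := &request.Operation{
		Name:       opTestDiscoveryIdentifiersRequired,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &TestDiscoveryIdentifiersRequiredInput{}
	}

	output = &TestDiscoveryIdentifiersRequiredOutput{}
	req = c.newRequest(op, input, output)

	// Skip the endpoint discovery workflow when a custom endpoint is provided,
	// but keep the work inside an if block instead of returning early.
	if req.Config.Endpoint == nil {
		de := discovererDescribeEndpoints{
			Required:      true,
			EndpointCache: c.endpointCache,
			Params: map[string]*string{
				"op":  aws.String(req.Operation.Name),
				"Sdk": input.Sdk,
			},
			Client: c,
		}

		for k, v := range de.Params {
			if v == nil {
				delete(de.Params, k)
			}
		}

		req.Handlers.Build.PushFrontNamed(request.NamedHandler{
			Name: "crr.endpointdiscovery",
			Fn:   de.Handler,
		})
	}

	// Any future generated code appended here still executes.
	return
}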
@@ -80,7 +80,7 @@ describe('debug', () => {
it('should print an error on double jsx conversion', () => {
let Foo = <div />;
let fn = () => render(h(<Foo />), scratch);
- expect(fn).to.throw(/createElement/);
+ expect(fn).to.throw(/JSX twice/);
});
it('should add __source to the vnode in debug mode.', () => { | 1 | import { createElement, render, createRef, Component, Fragment } from 'preact';
import {
setupScratch,
teardown,
serializeHtml
} from '../../../test/_util/helpers';
import 'preact/debug';
import * as PropTypes from 'prop-types';
const h = createElement;
/** @jsx createElement */
describe('debug', () => {
let scratch;
let errors = [];
let warnings = [];
beforeEach(() => {
errors = [];
warnings = [];
scratch = setupScratch();
sinon.stub(console, 'error').callsFake(e => errors.push(e));
sinon.stub(console, 'warn').callsFake(w => warnings.push(w));
});
afterEach(() => {
/** @type {*} */
(console.error).restore();
console.warn.restore();
teardown(scratch);
});
it('should print an error on rendering on undefined parent', () => {
let fn = () => render(<div />, undefined);
expect(fn).to.throw(/render/);
});
it('should print an error on rendering on invalid parent', () => {
let fn = () => render(<div />, 6);
expect(fn).to.throw(/valid HTML node/);
expect(fn).to.throw(/<div/);
});
it('should print an error with (function) component name when available', () => {
const App = () => <div />;
let fn = () => render(<App />, 6);
expect(fn).to.throw(/<App/);
expect(fn).to.throw(/6/);
fn = () => render(<App />, {});
expect(fn).to.throw(/<App/);
expect(fn).to.throw(/[object Object]/);
});
it('should print an error with (class) component name when available', () => {
class App extends Component {
render() {
return <div />;
}
}
let fn = () => render(<App />, 6);
expect(fn).to.throw(/<App/);
});
it('should print an error on undefined component', () => {
let fn = () => render(h(undefined), scratch);
expect(fn).to.throw(/createElement/);
});
it('should print an error on invalid object component', () => {
let fn = () => render(h({}), scratch);
expect(fn).to.throw(/createElement/);
});
it('should print an error when component is an array', () => {
let fn = () => render(h([<div />]), scratch);
expect(fn).to.throw(/createElement/);
});
it('should print an error on double jsx conversion', () => {
let Foo = <div />;
let fn = () => render(h(<Foo />), scratch);
expect(fn).to.throw(/createElement/);
});
it('should add __source to the vnode in debug mode.', () => {
const vnode = h('div', {
__source: {
fileName: 'div.jsx',
lineNumber: 3
}
});
expect(vnode.__source).to.deep.equal({
fileName: 'div.jsx',
lineNumber: 3
});
expect(vnode.props.__source).to.be.undefined;
});
it('should add __self to the vnode in debug mode.', () => {
const vnode = h('div', {
__self: {}
});
expect(vnode.__self).to.deep.equal({});
expect(vnode.props.__self).to.be.undefined;
});
it('should throw errors when accessing certain attributes', () => {
const vnode = h('div', null);
expect(() => vnode).to.not.throw();
expect(() => vnode.attributes).to.throw(/use vnode.props/);
expect(() => vnode.nodeName).to.throw(/use vnode.type/);
expect(() => vnode.children).to.throw(/use vnode.props.children/);
expect(() => (vnode.attributes = {})).to.throw(/use vnode.props/);
expect(() => (vnode.nodeName = 'test')).to.throw(/use vnode.type/);
expect(() => (vnode.children = [<div />])).to.throw(
/use vnode.props.children/
);
});
it('should warn when calling setState inside the constructor', () => {
class Foo extends Component {
constructor(props) {
super(props);
this.setState({ foo: true });
}
render() {
return <div>foo</div>;
}
}
render(<Foo />, scratch);
expect(console.warn).to.be.calledOnce;
expect(console.warn.args[0]).to.match(/no-op/);
});
it('should warn when calling setState on an unmounted Component', () => {
let setState;
class Foo extends Component {
constructor(props) {
super(props);
setState = () => this.setState({ foo: true });
}
render() {
return <div>foo</div>;
}
}
render(<Foo />, scratch);
expect(console.warn).to.not.be.called;
render(null, scratch);
setState();
expect(console.warn).to.be.calledOnce;
expect(console.warn.args[0]).to.match(/no-op/);
});
it('should warn when calling forceUpdate inside the constructor', () => {
class Foo extends Component {
constructor(props) {
super(props);
this.forceUpdate();
}
render() {
return <div>foo</div>;
}
}
render(<Foo />, scratch);
expect(console.warn).to.be.calledOnce;
expect(console.warn.args[0]).to.match(/no-op/);
});
it('should warn when calling forceUpdate on an unmounted Component', () => {
let forceUpdate;
class Foo extends Component {
constructor(props) {
super(props);
forceUpdate = () => this.forceUpdate();
}
render() {
return <div>foo</div>;
}
}
render(<Foo />, scratch);
forceUpdate();
expect(console.warn).to.not.be.called;
render(null, scratch);
forceUpdate();
expect(console.warn).to.be.calledOnce;
expect(console.warn.args[0]).to.match(/no-op/);
});
it('should print an error when child is a plain object', () => {
let fn = () => render(<div>{{}}</div>, scratch);
expect(fn).to.throw(/not valid/);
});
it('should print an error on invalid refs', () => {
let fn = () => render(<div ref="a" />, scratch);
expect(fn).to.throw(/createRef/);
});
it('should not print for null as a handler', () => {
let fn = () => render(<div onclick={null} />, scratch);
expect(fn).not.to.throw();
});
it('should not print for undefined as a handler', () => {
let fn = () => render(<div onclick={undefined} />, scratch);
expect(fn).not.to.throw();
});
it('should not print for attributes starting with on for Components', () => {
const Comp = () => <p>online</p>;
let fn = () => render(<Comp online={false} />, scratch);
expect(fn).not.to.throw();
});
it('should print an error on invalid handler', () => {
let fn = () => render(<div onclick="a" />, scratch);
expect(fn).to.throw(/"onclick" property should be a function/);
});
it('should NOT print an error on valid refs', () => {
let noop = () => {};
render(<div ref={noop} />, scratch);
let ref = createRef();
render(<div ref={ref} />, scratch);
expect(console.error).to.not.be.called;
});
describe('duplicate keys', () => {
const List = props => <ul>{props.children}</ul>;
const ListItem = props => <li>{props.children}</li>;
it('should print an error on duplicate keys with DOM nodes', () => {
render(
<div>
<span key="a" />
<span key="a" />
</div>,
scratch
);
expect(console.error).to.be.calledOnce;
});
it('should allow distinct object keys', () => {
const A = { is: 'A' };
const B = { is: 'B' };
render(
<div>
<span key={A} />
<span key={B} />
</div>,
scratch
);
expect(console.error).not.to.be.called;
});
it('should print an error for duplicate object keys', () => {
const A = { is: 'A' };
render(
<div>
<span key={A} />
<span key={A} />
</div>,
scratch
);
expect(console.error).to.be.calledOnce;
});
it('should print an error on duplicate keys with Components', () => {
function App() {
return (
<List>
<ListItem key="a">a</ListItem>
<ListItem key="b">b</ListItem>
<ListItem key="b">d</ListItem>
<ListItem key="d">d</ListItem>
</List>
);
}
render(<App />, scratch);
expect(console.error).to.be.calledOnce;
});
it('should print an error on duplicate keys with Fragments', () => {
function App() {
return (
<Fragment>
<List key="list">
<ListItem key="a">a</ListItem>
<ListItem key="b">b</ListItem>
<Fragment key="b">
{/* Should be okay to duplicate keys since these are inside a Fragment */}
<ListItem key="a">c</ListItem>
<ListItem key="b">d</ListItem>
<ListItem key="c">e</ListItem>
</Fragment>
<ListItem key="f">f</ListItem>
</List>
<div key="list">sibling</div>
</Fragment>
);
}
render(<App />, scratch);
expect(console.error).to.be.calledTwice;
});
});
describe('table markup', () => {
it('missing <tbody>/<thead>/<tfoot>/<table>', () => {
const Table = () => (
<tr>
<td>hi</td>
</tr>
);
render(<Table />, scratch);
expect(console.error).to.be.calledOnce;
});
it('missing <table> with <thead>', () => {
const Table = () => (
<thead>
<tr>
<td>hi</td>
</tr>
</thead>
);
render(<Table />, scratch);
expect(console.error).to.be.calledOnce;
});
it('missing <table> with <tbody>', () => {
const Table = () => (
<tbody>
<tr>
<td>hi</td>
</tr>
</tbody>
);
render(<Table />, scratch);
expect(console.error).to.be.calledOnce;
});
it('missing <table> with <tfoot>', () => {
const Table = () => (
<tfoot>
<tr>
<td>hi</td>
</tr>
</tfoot>
);
render(<Table />, scratch);
expect(console.error).to.be.calledOnce;
});
it('missing <tr>', () => {
const Table = () => (
<table>
<tbody>
<td>Hi</td>
</tbody>
</table>
);
render(<Table />, scratch);
expect(console.error).to.be.calledOnce;
});
it('missing <tr> with td component', () => {
const Cell = ({ children }) => <td>{children}</td>;
const Table = () => (
<table>
<tbody>
<Cell>Hi</Cell>
</tbody>
</table>
);
render(<Table />, scratch);
expect(console.error).to.be.calledOnce;
});
it('missing <tr> with th component', () => {
const Cell = ({ children }) => <th>{children}</th>;
const Table = () => (
<table>
<tbody>
<Cell>Hi</Cell>
</tbody>
</table>
);
render(<Table />, scratch);
expect(console.error).to.be.calledOnce;
});
it('Should accept <td> instead of <th> in <thead>', () => {
const Table = () => (
<table>
<thead>
<tr>
<td>Hi</td>
</tr>
</thead>
</table>
);
render(<Table />, scratch);
expect(console.error).to.not.be.called;
});
it('Accepts well formed table with TD components', () => {
const Cell = ({ children }) => <td>{children}</td>;
const Table = () => (
<table>
<thead>
<tr>
<th>Head</th>
</tr>
</thead>
<tbody>
<tr>
<td>Body</td>
</tr>
</tbody>
<tfoot>
<tr>
<Cell>Body</Cell>
</tr>
</tfoot>
</table>
);
render(<Table />, scratch);
expect(console.error).to.not.be.called;
});
it('Accepts well formed table', () => {
const Table = () => (
<table>
<thead>
<tr>
<th>Head</th>
</tr>
</thead>
<tbody>
<tr>
<td>Body</td>
</tr>
</tbody>
<tfoot>
<tr>
<td>Body</td>
</tr>
</tfoot>
</table>
);
render(<Table />, scratch);
expect(console.error).to.not.be.called;
});
it('Accepts minimal well formed table', () => {
const Table = () => (
<table>
<tr>
<th>Head</th>
</tr>
<tr>
<td>Body</td>
</tr>
</table>
);
render(<Table />, scratch);
expect(console.error).to.not.be.called;
});
});
describe('PropTypes', () => {
it("should fail if props don't match prop-types", () => {
function Foo(props) {
return <h1>{props.text}</h1>;
}
Foo.propTypes = {
text: PropTypes.string.isRequired
};
render(<Foo />, scratch);
expect(console.error).to.be.calledOnce;
expect(errors[0].includes('required')).to.equal(true);
});
it('should render with error logged when validator gets signal and throws exception', () => {
function Baz(props) {
return <h1>{props.unhappy}</h1>;
}
Baz.propTypes = {
unhappy: function alwaysThrows(obj, key) {
if (obj[key] === 'signal') throw Error('got prop');
}
};
render(<Baz unhappy={'signal'} />, scratch);
expect(console.error).to.be.calledOnce;
expect(errors[0].includes('got prop')).to.equal(true);
expect(serializeHtml(scratch)).to.equal('<h1>signal</h1>');
});
it('should not print to console when types are correct', () => {
function Bar(props) {
return <h1>{props.text}</h1>;
}
Bar.propTypes = {
text: PropTypes.string.isRequired
};
render(<Bar text="foo" />, scratch);
expect(console.error).to.not.be.called;
});
});
});
| 1 | 15,137 | This test was giving a false positive: my change caused it to throw a different error that contained `createElement` when it should've been throwing this one. I caught this by looking at the code coverage and noticing that the line under the condition I changed was no longer covered, lol. | preactjs-preact | js |
@@ -40,3 +40,8 @@ func (i *Initializer) initHostNetworkFlows() error {
func (i *Initializer) getTunnelPortLocalIP() net.IP {
return nil
}
+
+// registerServiceforOS returns immediately on Linux.
+func (i *Initializer) registerServiceforOS() error {
+ return nil
+} | 1 | // +build linux
// Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"net"
)
// prepareHostNetwork returns immediately on Linux.
func (i *Initializer) prepareHostNetwork() error {
return nil
}
// prepareOVSBridge returns immediately on Linux.
func (i *Initializer) prepareOVSBridge() error {
return nil
}
// initHostNetworkFlows returns immediately on Linux.
func (i *Initializer) initHostNetworkFlows() error {
return nil
}
// getTunnelLocalIP returns local_ip of tunnel port.
// On the Linux platform, the local_ip option is not needed.
func (i *Initializer) getTunnelPortLocalIP() net.IP {
return nil
}
| 1 | 37,871 | I think this is added to the wrong file. You wanted to add it to cmd/agent/, right? | antrea-io-antrea | go |
@@ -348,10 +348,12 @@ public class ExpectedConditions {
final String text) {
return new ExpectedCondition<Boolean>() {
+ private String elementText = null;
+
@Override
public Boolean apply(WebDriver driver) {
try {
- String elementText = element.getText();
+ elementText = element.getText();
return elementText.contains(text);
} catch (StaleElementReferenceException e) {
return null; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.support.ui;
import com.google.common.base.Joiner;
import org.openqa.selenium.Alert;
import org.openqa.selenium.By;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.NoAlertPresentException;
import org.openqa.selenium.NoSuchElementException;
import org.openqa.selenium.NoSuchFrameException;
import org.openqa.selenium.StaleElementReferenceException;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.WebElement;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Canned {@link ExpectedCondition}s which are generally useful within webdriver tests.
*/
public class ExpectedConditions {
private static final Logger log = Logger.getLogger(ExpectedConditions.class.getName());
private ExpectedConditions() {
// Utility class
}
/**
* An expectation for checking the title of a page.
*
* @param title the expected title, which must be an exact match
* @return true when the title matches, false otherwise
*/
public static ExpectedCondition<Boolean> titleIs(final String title) {
return new ExpectedCondition<Boolean>() {
private String currentTitle = "";
@Override
public Boolean apply(WebDriver driver) {
currentTitle = driver.getTitle();
return title.equals(currentTitle);
}
@Override
public String toString() {
return String.format("title to be \"%s\". Current title: \"%s\"", title, currentTitle);
}
};
}
/**
* An expectation for checking that the title contains a case-sensitive substring
*
* @param title the fragment of title expected
* @return true when the title matches, false otherwise
*/
public static ExpectedCondition<Boolean> titleContains(final String title) {
return new ExpectedCondition<Boolean>() {
private String currentTitle = "";
@Override
public Boolean apply(WebDriver driver) {
currentTitle = driver.getTitle();
return currentTitle != null && currentTitle.contains(title);
}
@Override
public String toString() {
return String.format("title to contain \"%s\". Current title: \"%s\"", title, currentTitle);
}
};
}
/**
* An expectation for the URL of the current page to be a specific url.
*
* @param url the url that the page should be on
* @return <code>true</code> when the URL is what it should be
*/
public static ExpectedCondition<Boolean> urlToBe(final String url) {
return new ExpectedCondition<Boolean>() {
private String currentUrl = "";
@Override
public Boolean apply(WebDriver driver) {
currentUrl = driver.getCurrentUrl();
return currentUrl != null && currentUrl.equals(url);
}
@Override
public String toString() {
return String.format("url to be \"%s\". Current url: \"%s\"", url, currentUrl);
}
};
}
/**
* An expectation for the URL of the current page to contain specific text.
*
* @param fraction the fraction of the url that the page should be on
* @return <code>true</code> when the URL contains the text
*/
public static ExpectedCondition<Boolean> urlContains(final String fraction) {
return new ExpectedCondition<Boolean>() {
private String currentUrl = "";
@Override
public Boolean apply(WebDriver driver) {
currentUrl = driver.getCurrentUrl();
return currentUrl != null && currentUrl.contains(fraction);
}
@Override
public String toString() {
return String.format("url to contain \"%s\". Current url: \"%s\"", fraction, currentUrl);
}
};
}
/**
* Expectation for the URL to match a specific regular expression
*
* @param regex the regular expression that the URL should match
* @return <code>true</code> if the URL matches the specified regular expression
*/
public static ExpectedCondition<Boolean> urlMatches(final String regex) {
return new ExpectedCondition<Boolean>() {
private String currentUrl;
private Pattern pattern;
private Matcher matcher;
@Override
public Boolean apply(WebDriver driver) {
currentUrl = driver.getCurrentUrl();
pattern = Pattern.compile(regex);
matcher = pattern.matcher(currentUrl);
return matcher.find();
}
@Override
public String toString() {
return String
.format("url to match the regex \"%s\". Current url: \"%s\"", regex, currentUrl);
}
};
}
/**
* An expectation for checking that an element is present on the DOM of a page. This does not
* necessarily mean that the element is visible.
*
* @param locator used to find the element
* @return the WebElement once it is located
*/
public static ExpectedCondition<WebElement> presenceOfElementLocated(final By locator) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver driver) {
return driver.findElement(locator);
}
@Override
public String toString() {
return "presence of element located by: " + locator;
}
};
}
/**
* An expectation for checking that an element is present on the DOM of a page and visible.
* Visibility means that the element is not only displayed but also has a height and width that is
* greater than 0.
*
* @param locator used to find the element
* @return the WebElement once it is located and visible
*/
public static ExpectedCondition<WebElement> visibilityOfElementLocated(final By locator) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver driver) {
try {
return elementIfVisible(driver.findElement(locator));
} catch (StaleElementReferenceException | NoSuchElementException e) {
// Returns null because the element is no longer or not present in DOM.
return null;
}
}
@Override
public String toString() {
return "visibility of element located by " + locator;
}
};
}
/**
* An expectation for checking that all elements present on the web page that match the locator
* are visible. Visibility means that the elements are not only displayed but also have a height
* and width that is greater than 0.
*
* @param locator used to find the element
* @return the list of WebElements once they are located
*/
public static ExpectedCondition<List<WebElement>> visibilityOfAllElementsLocatedBy(
final By locator) {
return new ExpectedCondition<List<WebElement>>() {
@Override
public List<WebElement> apply(WebDriver driver) {
List<WebElement> elements = driver.findElements(locator);
for (WebElement element : elements) {
if (!element.isDisplayed()) {
return null;
}
}
return elements.size() > 0 ? elements : null;
}
@Override
public String toString() {
return "visibility of all elements located by " + locator;
}
};
}
/**
* An expectation for checking that all elements present on the web page that match the locator
* are visible. Visibility means that the elements are not only displayed but also have a height
* and width that is greater than 0.
*
* @param elements list of WebElements
* @return the list of WebElements once they are located
*/
public static ExpectedCondition<List<WebElement>> visibilityOfAllElements(
final WebElement... elements) {
return visibilityOfAllElements(Arrays.asList(elements));
}
/**
* An expectation for checking that all elements present on the web page that match the locator
* are visible. Visibility means that the elements are not only displayed but also have a height
* and width that is greater than 0.
*
* @param elements list of WebElements
* @return the list of WebElements once they are located
*/
public static ExpectedCondition<List<WebElement>> visibilityOfAllElements(
final List<WebElement> elements) {
return new ExpectedCondition<List<WebElement>>() {
@Override
public List<WebElement> apply(WebDriver driver) {
for (WebElement element : elements) {
if (!element.isDisplayed()) {
return null;
}
}
return elements.size() > 0 ? elements : null;
}
@Override
public String toString() {
return "visibility of all " + elements;
}
};
}
/**
* An expectation for checking that an element, known to be present on the DOM of a page, is
* visible. Visibility means that the element is not only displayed but also has a height and
* width that is greater than 0.
*
* @param element the WebElement
* @return the (same) WebElement once it is visible
*/
public static ExpectedCondition<WebElement> visibilityOf(final WebElement element) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver driver) {
return elementIfVisible(element);
}
@Override
public String toString() {
return "visibility of " + element;
}
};
}
/**
* @return the given element if it is visible and has non-zero size, otherwise null.
*/
private static WebElement elementIfVisible(WebElement element) {
return element.isDisplayed() ? element : null;
}
/**
* An expectation for checking that there is at least one element present on a web page.
*
* @param locator used to find the element
* @return the list of WebElements once they are located
*/
public static ExpectedCondition<List<WebElement>> presenceOfAllElementsLocatedBy(
final By locator) {
return new ExpectedCondition<List<WebElement>>() {
@Override
public List<WebElement> apply(WebDriver driver) {
List<WebElement> elements = driver.findElements(locator);
return elements.size() > 0 ? elements : null;
}
@Override
public String toString() {
return "presence of any elements located by " + locator;
}
};
}
/**
* An expectation for checking if the given text is present in the specified element.
*
* @param element the WebElement
* @param text to be present in the element
* @return true once the element contains the given text
*/
public static ExpectedCondition<Boolean> textToBePresentInElement(final WebElement element,
final String text) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
String elementText = element.getText();
return elementText.contains(text);
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return String.format("text ('%s') to be present in element %s", text, element);
}
};
}
/**
* An expectation for checking if the given text is present in the element that matches the given
* locator.
*
* @param locator used to find the element
* @param text to be present in the element found by the locator
* @return true once the first element located by locator contains the given text
*/
public static ExpectedCondition<Boolean> textToBePresentInElementLocated(final By locator,
final String text) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
String elementText = driver.findElement(locator).getText();
return elementText.contains(text);
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return String.format("text ('%s') to be present in element found by %s",
text, locator);
}
};
}
/**
   * An expectation for checking if the given text is present in the specified element's value
* attribute.
*
* @param element the WebElement
* @param text to be present in the element's value attribute
* @return true once the element's value attribute contains the given text
*/
public static ExpectedCondition<Boolean> textToBePresentInElementValue(final WebElement element,
final String text) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
String elementText = element.getAttribute("value");
if (elementText != null) {
return elementText.contains(text);
}
return false;
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return String.format("text ('%s') to be the value of element %s", text, element);
}
};
}
/**
   * An expectation for checking if the given text is present in the specified element's value
* attribute.
*
* @param locator used to find the element
* @param text to be present in the value attribute of the element found by the locator
* @return true once the value attribute of the first element located by locator contains the
* given text
*/
public static ExpectedCondition<Boolean> textToBePresentInElementValue(final By locator,
final String text) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
String elementText = driver.findElement(locator).getAttribute("value");
if (elementText != null) {
return elementText.contains(text);
}
return false;
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return String.format("text ('%s') to be the value of element located by %s",
text, locator);
}
};
}
/**
* An expectation for checking whether the given frame is available to switch to. <p> If the frame
* is available it switches the given driver to the specified frame.
*
* @param frameLocator used to find the frame (id or name)
* @return WebDriver instance after frame has been switched
*/
public static ExpectedCondition<WebDriver> frameToBeAvailableAndSwitchToIt(
final String frameLocator) {
return new ExpectedCondition<WebDriver>() {
@Override
public WebDriver apply(WebDriver driver) {
try {
return driver.switchTo().frame(frameLocator);
} catch (NoSuchFrameException e) {
return null;
}
}
@Override
public String toString() {
return "frame to be available: " + frameLocator;
}
};
}
/**
* An expectation for checking whether the given frame is available to switch to. <p> If the frame
* is available it switches the given driver to the specified frame.
*
* @param locator used to find the frame
* @return WebDriver instance after frame has been switched
*/
public static ExpectedCondition<WebDriver> frameToBeAvailableAndSwitchToIt(final By locator) {
return new ExpectedCondition<WebDriver>() {
@Override
public WebDriver apply(WebDriver driver) {
try {
return driver.switchTo().frame(driver.findElement(locator));
} catch (NoSuchFrameException e) {
return null;
}
}
@Override
public String toString() {
return "frame to be available: " + locator;
}
};
}
/**
* An expectation for checking whether the given frame is available to switch to. <p> If the frame
* is available it switches the given driver to the specified frameIndex.
*
* @param frameLocator used to find the frame (index)
* @return WebDriver instance after frame has been switched
*/
public static ExpectedCondition<WebDriver> frameToBeAvailableAndSwitchToIt(
final int frameLocator) {
return new ExpectedCondition<WebDriver>() {
@Override
public WebDriver apply(WebDriver driver) {
try {
return driver.switchTo().frame(frameLocator);
} catch (NoSuchFrameException e) {
return null;
}
}
@Override
public String toString() {
return "frame to be available: " + frameLocator;
}
};
}
/**
* An expectation for checking whether the given frame is available to switch to. <p> If the frame
* is available it switches the given driver to the specified webelement.
*
* @param frameLocator used to find the frame (webelement)
* @return WebDriver instance after frame has been switched
*/
public static ExpectedCondition<WebDriver> frameToBeAvailableAndSwitchToIt(
final WebElement frameLocator) {
return new ExpectedCondition<WebDriver>() {
@Override
public WebDriver apply(WebDriver driver) {
try {
return driver.switchTo().frame(frameLocator);
} catch (NoSuchFrameException e) {
return null;
}
}
@Override
public String toString() {
return "frame to be available: " + frameLocator;
}
};
}
/**
* An expectation for checking that an element is either invisible or not present on the DOM.
*
* @param locator used to find the element
* @return true if the element is not displayed or the element doesn't exist or stale element
*/
public static ExpectedCondition<Boolean> invisibilityOfElementLocated(final By locator) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
return !(driver.findElement(locator).isDisplayed());
} catch (NoSuchElementException e) {
// Returns true because the element is not present in DOM. The
// try block checks if the element is present but is invisible.
return true;
} catch (StaleElementReferenceException e) {
// Returns true because stale element reference implies that element
// is no longer visible.
return true;
}
}
@Override
public String toString() {
return "element to no longer be visible: " + locator;
}
};
}
/**
* An expectation for checking that an element with text is either invisible or not present on the
* DOM.
*
* @param locator used to find the element
* @param text of the element
* @return true if no such element, stale element or displayed text not equal that provided
*/
public static ExpectedCondition<Boolean> invisibilityOfElementWithText(final By locator,
final String text) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
return !driver.findElement(locator).getText().equals(text);
} catch (NoSuchElementException e) {
// Returns true because the element with text is not present in DOM. The
// try block checks if the element is present but is invisible.
return true;
} catch (StaleElementReferenceException e) {
// Returns true because stale element reference implies that element
// is no longer visible.
return true;
}
}
@Override
public String toString() {
return String.format("element containing '%s' to no longer be visible: %s",
text, locator);
}
};
}
/**
* An expectation for checking an element is visible and enabled such that you can click it.
*
* @param locator used to find the element
* @return the WebElement once it is located and clickable (visible and enabled)
*/
public static ExpectedCondition<WebElement> elementToBeClickable(final By locator) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver driver) {
WebElement element = visibilityOfElementLocated(locator).apply(driver);
try {
if (element != null && element.isEnabled()) {
return element;
}
return null;
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return "element to be clickable: " + locator;
}
};
}
/**
* An expectation for checking an element is visible and enabled such that you can click it.
*
* @param element the WebElement
* @return the (same) WebElement once it is clickable (visible and enabled)
*/
public static ExpectedCondition<WebElement> elementToBeClickable(final WebElement element) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver driver) {
WebElement visibleElement = visibilityOf(element).apply(driver);
try {
if (visibleElement != null && visibleElement.isEnabled()) {
return visibleElement;
}
return null;
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return "element to be clickable: " + element;
}
};
}
/**
* Wait until an element is no longer attached to the DOM.
*
* @param element The element to wait for.
* @return false if the element is still attached to the DOM, true otherwise.
*/
public static ExpectedCondition<Boolean> stalenessOf(final WebElement element) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver ignored) {
try {
// Calling any method forces a staleness check
element.isEnabled();
return false;
} catch (StaleElementReferenceException expected) {
return true;
}
}
@Override
public String toString() {
return String.format("element (%s) to become stale", element);
}
};
}
/**
* Wrapper for a condition, which allows for elements to update by redrawing.
*
* This works around the problem of conditions which have two parts: find an element and then
* check for some condition on it. For these conditions it is possible that an element is located
* and then subsequently it is redrawn on the client. When this happens a {@link
* StaleElementReferenceException} is thrown when the second part of the condition is checked.
*
* @param condition ExpectedCondition to wrap
* @param <T> return type of the condition provided
* @return the result of the provided condition
*/
public static <T> ExpectedCondition<T> refreshed(final ExpectedCondition<T> condition) {
return new ExpectedCondition<T>() {
@Override
public T apply(WebDriver driver) {
try {
return condition.apply(driver);
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return String.format("condition (%s) to be refreshed", condition);
}
};
}
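  // Usage sketch, not part of the original file: assuming a configured WebDriverWait named
  // "wait" and an illustrative locator, wrapping a two-part condition in refreshed() turns a
  // StaleElementReferenceException thrown between locating the element and checking it into a
  // "not yet" result, so the wait retries instead of propagating the exception:
  //
  //   wait.until(ExpectedConditions.refreshed(
  //       ExpectedConditions.elementToBeClickable(By.id("submit"))));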
/**
* An expectation for checking if the given element is selected.
*
* @param element WebElement to be selected
* @return true once the element is selected
*/
public static ExpectedCondition<Boolean> elementToBeSelected(final WebElement element) {
return elementSelectionStateToBe(element, true);
}
/**
* An expectation for checking if the given element is selected.
*
* @param element WebElement to be selected
* @param selected boolean state of the selection state of the element
* @return true once the element's selection stated is that of selected
*/
public static ExpectedCondition<Boolean> elementSelectionStateToBe(final WebElement element,
final boolean selected) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
return element.isSelected() == selected;
}
@Override
public String toString() {
return String.format("element (%s) to %sbe selected", element, (selected ? "" : "not "));
}
};
}
public static ExpectedCondition<Boolean> elementToBeSelected(final By locator) {
return elementSelectionStateToBe(locator, true);
}
public static ExpectedCondition<Boolean> elementSelectionStateToBe(final By locator,
final boolean selected) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
WebElement element = driver.findElement(locator);
return element.isSelected() == selected;
} catch (StaleElementReferenceException e) {
return null;
}
}
@Override
public String toString() {
return String.format("element found by %s to %sbe selected",
locator, (selected ? "" : "not "));
}
};
}
public static ExpectedCondition<Alert> alertIsPresent() {
return new ExpectedCondition<Alert>() {
@Override
public Alert apply(WebDriver driver) {
try {
return driver.switchTo().alert();
} catch (NoAlertPresentException e) {
return null;
}
}
@Override
public String toString() {
return "alert to be present";
}
};
}
public static ExpectedCondition<Boolean> numberOfWindowsToBe(final int expectedNumberOfWindows) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
return driver.getWindowHandles().size() == expectedNumberOfWindows;
} catch (WebDriverException e) {
return null;
}
}
@Override
public String toString() {
return "number of open windows to be " + expectedNumberOfWindows;
}
};
}
/**
* An expectation with the logical opposite condition of the given condition.
*
* Note that if the Condition you are inverting throws an exception that is caught by the Ignored
   * Exceptions, the inversion will not take place, which can lead to confusing results.
*
* @param condition ExpectedCondition to be inverted
* @return true once the condition is satisfied
*/
public static ExpectedCondition<Boolean> not(final ExpectedCondition<?> condition) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
Object result = condition.apply(driver);
return result == null || result.equals(Boolean.FALSE);
}
@Override
public String toString() {
return "condition to not be valid: " + condition;
}
};
}
/**
* An expectation for checking WebElement with given locator has attribute with a specific value
*
* @param locator used to find the element
* @param attribute used to define css or html attribute
* @param value used as expected attribute value
* @return Boolean true when element has css or html attribute with the value
*/
public static ExpectedCondition<Boolean> attributeToBe(final By locator, final String attribute,
final String value) {
return new ExpectedCondition<Boolean>() {
private String currentValue = null;
@Override
public Boolean apply(WebDriver driver) {
WebElement element = driver.findElement(locator);
currentValue = element.getAttribute(attribute);
if (currentValue == null||currentValue.isEmpty()) {
currentValue = element.getCssValue(attribute);
}
return value.equals(currentValue);
}
@Override
public String toString() {
return String.format("element found by %s to have value \"%s\". Current value: \"%s\"",
locator, value, currentValue);
}
};
}
/**
* An expectation for checking WebElement with given locator has specific text
*
* @param locator used to find the element
* @param value used as expected text
* @return Boolean true when element has text value equal to @value
*/
public static ExpectedCondition<Boolean> textToBe(final By locator, final String value) {
return new ExpectedCondition<Boolean>() {
private String currentValue = null;
@Override
public Boolean apply(WebDriver driver) {
try {
currentValue = driver.findElement(locator).getText();
return currentValue.equals(value);
} catch (Exception e) {
return false;
}
}
@Override
public String toString() {
return String.format("element found by %s to have text \"%s\". Current text: \"%s\"",
locator, value, currentValue);
}
};
}
/**
   * An expectation for checking that the WebElement with the given locator has text matching the
   * given pattern
   *
   * @param locator used to find the element
   * @param pattern used as expected text matcher pattern
   * @return Boolean true when the element's text matches the given pattern
*/
public static ExpectedCondition<Boolean> textMatches(final By locator, final Pattern pattern) {
return new ExpectedCondition<Boolean>() {
private String currentValue = null;
@Override
public Boolean apply(WebDriver driver) {
try {
currentValue = driver.findElement(locator).getText();
return pattern.matcher(currentValue).find();
} catch (Exception e) {
return false;
}
}
@Override
public String toString() {
return String
.format("text found by %s to match pattern \"%s\". Current text: \"%s\"",
locator, pattern.pattern(), currentValue);
}
};
}
/**
* An expectation for checking number of WebElements with given locator being more than defined number
*
* @param locator used to find the element
* @param number used to define minimum number of elements
* @return Boolean true when size of elements list is more than defined
*/
public static ExpectedCondition<List<WebElement>> numberOfElementsToBeMoreThan(final By locator,
final Integer number) {
return new ExpectedCondition<List<WebElement>>() {
private Integer currentNumber = 0;
@Override
public List<WebElement> apply(WebDriver webDriver) {
List<WebElement> elements = webDriver.findElements(locator);
currentNumber = elements.size();
return currentNumber > number ? elements : null;
}
@Override
public String toString() {
return String.format("number of elements found by %s to be more than \"%s\". Current number: \"%s\"",
locator, number, currentNumber);
}
};
}
/**
* An expectation for checking number of WebElements with given locator being less than defined
* number
*
* @param locator used to find the element
* @param number used to define maximum number of elements
* @return Boolean true when size of elements list is less than defined
*/
public static ExpectedCondition<List<WebElement>> numberOfElementsToBeLessThan(final By locator,
final Integer number) {
return new ExpectedCondition<List<WebElement>>() {
private Integer currentNumber = 0;
@Override
public List<WebElement> apply(WebDriver webDriver) {
List<WebElement> elements = webDriver.findElements(locator);
currentNumber = elements.size();
return currentNumber < number ? elements : null;
}
@Override
public String toString() {
return String.format("number of elements found by %s to be less than \"%s\". Current number: \"%s\"",
locator, number, currentNumber);
}
};
}
/**
* An expectation for checking number of WebElements with given locator
*
* @param locator used to find the element
* @param number used to define number of elements
* @return Boolean true when size of elements list is equal to defined
*/
public static ExpectedCondition<List<WebElement>> numberOfElementsToBe(final By locator,
final Integer number) {
return new ExpectedCondition<List<WebElement>>() {
private Integer currentNumber = 0;
@Override
public List<WebElement> apply(WebDriver webDriver) {
List<WebElement> elements = webDriver.findElements(locator);
currentNumber = elements.size();
return currentNumber.equals(number) ? elements : null;
}
@Override
public String toString() {
return String
.format("number of elements found by %s to be \"%s\". Current number: \"%s\"",
locator, number, currentNumber);
}
};
}
/**
* An expectation for checking given WebElement has attribute with a specific value
*
* @param element used to check its parameters
* @param attribute used to define css or html attribute
* @param value used as expected attribute value
* @return Boolean true when element has css or html attribute with the value
*/
public static ExpectedCondition<Boolean> attributeToBe(final WebElement element,
final String attribute,
final String value) {
return new ExpectedCondition<Boolean>() {
private String currentValue = null;
@Override
public Boolean apply(WebDriver driver) {
currentValue = element.getAttribute(attribute);
if (currentValue == null || currentValue.isEmpty()) {
currentValue = element.getCssValue(attribute);
}
return value.equals(currentValue);
}
@Override
public String toString() {
return String.format(attribute + " to be \"%s\". Current " + attribute + ": \"%s\"", value,
currentValue);
}
};
}
/**
* An expectation for checking WebElement with given locator has attribute which contains specific
* value
*
* @param element used to check its parameters
* @param attribute used to define css or html attribute
* @param value used as expected attribute value
* @return Boolean true when element has css or html attribute which contains the value
*/
public static ExpectedCondition<Boolean> attributeContains(final WebElement element,
final String attribute,
final String value) {
return new ExpectedCondition<Boolean>() {
private String currentValue = null;
@Override
public Boolean apply(WebDriver driver) {
return getAttributeOrCssValue(element, attribute)
.map(seen -> seen.contains(value))
.orElse(false);
}
@Override
public String toString() {
return String.format("value to contain \"%s\". Current value: \"%s\"", value, currentValue);
}
};
}
/**
* An expectation for checking WebElement with given locator has attribute which contains specific
* value
*
* @param locator used to define WebElement to check its parameters
* @param attribute used to define css or html attribute
* @param value used as expected attribute value
* @return Boolean true when element has css or html attribute which contains the value
*/
public static ExpectedCondition<Boolean> attributeContains(final By locator,
final String attribute,
final String value) {
return new ExpectedCondition<Boolean>() {
private String currentValue = null;
@Override
public Boolean apply(WebDriver driver) {
return getAttributeOrCssValue(driver.findElement(locator), attribute)
.map(seen -> seen.contains(value))
.orElse(false);
}
@Override
public String toString() {
return String.format("value found by %s to contain \"%s\". Current value: \"%s\"",
locator, value, currentValue);
}
};
}
/**
   * An expectation for checking that the given WebElement has a non-empty value for the given attribute
*
* @param element used to check its parameters
* @param attribute used to define css or html attribute
* @return Boolean true when element has css or html attribute with non empty value
*/
public static ExpectedCondition<Boolean> attributeToBeNotEmpty(final WebElement element,
final String attribute) {
return driver -> getAttributeOrCssValue(element, attribute).isPresent();
}
private static Optional<String> getAttributeOrCssValue(WebElement element, String name) {
String value = element.getAttribute(name);
if (value == null || value.isEmpty()) {
value = element.getCssValue(name);
}
if (value == null || value.isEmpty()) {
return Optional.empty();
}
return Optional.of(value);
}
/**
* An expectation for checking child WebElement as a part of parent element to be visible
*
* @param parent used to check parent element. For example table with locator
* By.id("fish")
* @param childLocator used to find the ultimate child element.
* @return visible nested element
*/
public static ExpectedCondition<List<WebElement>> visibilityOfNestedElementsLocatedBy(
final By parent,
final By childLocator) {
return new ExpectedCondition<List<WebElement>>() {
@Override
public List<WebElement> apply(WebDriver driver) {
WebElement current = driver.findElement(parent);
List<WebElement> allChildren = current.findElements(childLocator);
// The original code only checked the first element. Fair enough.
if (!allChildren.isEmpty() && allChildren.get(0).isDisplayed()) {
return allChildren;
}
return null;
}
@Override
public String toString() {
return String.format("visibility of elements located by %s -> %s", parent, childLocator);
}
};
}
/**
* An expectation for checking child WebElement as a part of parent element to be visible
*
* @param element used as parent element. For example table with locator By.xpath("//table")
* @param childLocator used to find child element. For example td By.xpath("./tr/td")
* @return visible subelement
*/
public static ExpectedCondition<List<WebElement>> visibilityOfNestedElementsLocatedBy(
final WebElement element, final By childLocator) {
return new ExpectedCondition<List<WebElement>>() {
@Override
public List<WebElement> apply(WebDriver webDriver) {
List<WebElement> allChildren = element.findElements(childLocator);
// The original code only checked the visibility of the first element.
if (!allChildren.isEmpty() && allChildren.get(0).isDisplayed()) {
return allChildren;
}
return null;
}
@Override
public String toString() {
return String.format("visibility of element located by %s -> %s", element, childLocator);
}
};
}
/**
* An expectation for checking child WebElement as a part of parent element to present
*
* @param locator used to check parent element. For example table with locator
* By.xpath("//table")
* @param childLocator used to find child element. For example td By.xpath("./tr/td")
* @return subelement
*/
public static ExpectedCondition<WebElement> presenceOfNestedElementLocatedBy(
final By locator,
final By childLocator) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver webDriver) {
return webDriver.findElement(locator).findElement(childLocator);
}
@Override
public String toString() {
return String.format("visibility of element located by %s -> %s", locator, childLocator);
}
};
}
/**
* An expectation for checking child WebElement as a part of parent element to be present
*
* @param element used as parent element
* @param childLocator used to find child element. For example td By.xpath("./tr/td")
* @return subelement
*/
public static ExpectedCondition<WebElement> presenceOfNestedElementLocatedBy(
final WebElement element,
final By childLocator) {
return new ExpectedCondition<WebElement>() {
@Override
public WebElement apply(WebDriver webDriver) {
return element.findElement(childLocator);
}
@Override
public String toString() {
return String.format("visibility of element located by %s", childLocator);
}
};
}
/**
* An expectation for checking child WebElement as a part of parent element to present
*
* @param parent used to check parent element. For example table with locator
* By.xpath("//table")
* @param childLocator used to find child element. For example td By.xpath("./tr/td")
* @return subelement
*/
public static ExpectedCondition<List<WebElement>> presenceOfNestedElementsLocatedBy(
final By parent,
final By childLocator) {
return new ExpectedCondition<List<WebElement>>() {
@Override
public List<WebElement> apply(WebDriver driver) {
List<WebElement> allChildren = driver.findElement(parent).findElements(childLocator);
return allChildren.isEmpty() ? null : allChildren;
}
@Override
public String toString() {
return String.format("visibility of element located by %s -> %s", parent, childLocator);
}
};
}
/**
* An expectation for checking all elements from given list to be invisible
*
* @param elements used to check their invisibility
* @return Boolean true when all elements are not visible anymore
*/
public static ExpectedCondition<Boolean> invisibilityOfAllElements(
final WebElement... elements) {
return invisibilityOfAllElements(Arrays.asList(elements));
}
/**
* An expectation for checking all elements from given list to be invisible
*
* @param elements used to check their invisibility
* @return Boolean true when all elements are not visible anymore
*/
public static ExpectedCondition<Boolean> invisibilityOfAllElements(
final List<WebElement> elements) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver webDriver) {
return elements.stream().allMatch(ExpectedConditions::isInvisible);
}
@Override
public String toString() {
return "invisibility of all elements " + elements;
}
};
}
/**
* An expectation for checking the element to be invisible
*
* @param element used to check its invisibility
* @return Boolean true when elements is not visible anymore
*/
public static ExpectedCondition<Boolean> invisibilityOf(final WebElement element) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver webDriver) {
return isInvisible(element);
}
@Override
public String toString() {
return "invisibility of " + element;
}
};
}
private static boolean isInvisible(final WebElement element) {
try {
return !element.isDisplayed();
} catch (StaleElementReferenceException | NoSuchElementException ignored) {
// We can assume a stale element isn't displayed.
return true;
}
}
/**
* An expectation with the logical or condition of the given list of conditions.
*
* Each condition is checked until at least one of them returns true or not null.
*
* @param conditions ExpectedCondition is a list of alternative conditions
* @return true once one of conditions is satisfied
*/
public static ExpectedCondition<Boolean> or(final ExpectedCondition<?>... conditions) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
RuntimeException lastException = null;
for (ExpectedCondition<?> condition : conditions) {
try {
Object result = condition.apply(driver);
if (result != null) {
if (result instanceof Boolean) {
if (Boolean.TRUE.equals(result)) {
return true;
}
} else {
return true;
}
}
} catch (RuntimeException e) {
lastException = e;
}
}
if (lastException != null) {
throw lastException;
}
return false;
}
@Override
public String toString() {
StringBuilder message = new StringBuilder("at least one condition to be valid: ");
Joiner.on(" || ").appendTo(message, conditions);
return message.toString();
}
};
}
/**
* An expectation with the logical and condition of the given list of conditions.
*
* Each condition is checked until all of them return true or not null
*
* @param conditions ExpectedCondition is a list of alternative conditions
* @return true once all conditions are satisfied
*/
public static ExpectedCondition<Boolean> and(final ExpectedCondition<?>... conditions) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
for (ExpectedCondition<?> condition : conditions) {
Object result = condition.apply(driver);
if (result instanceof Boolean) {
if (Boolean.FALSE.equals(result)) {
return false;
}
}
if (result == null) {
return false;
}
}
return true;
}
@Override
public String toString() {
StringBuilder message = new StringBuilder("all conditions to be valid: ");
Joiner.on(" && ").appendTo(message, conditions);
return message.toString();
}
};
}
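  // Usage sketch for the or()/and() combinators above, not part of the original file (assumes a
  // configured WebDriverWait named "wait" and illustrative titles/locators):
  //
  //   // succeeds as soon as either condition holds
  //   wait.until(ExpectedConditions.or(
  //       ExpectedConditions.titleIs("Receipt"),
  //       ExpectedConditions.alertIsPresent()));
  //
  //   // succeeds only when both conditions hold on the same poll
  //   wait.until(ExpectedConditions.and(
  //       ExpectedConditions.urlContains("/checkout"),
  //       ExpectedConditions.elementToBeClickable(By.id("pay"))));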
/**
   * An expectation to check if js is executable.
*
   * Useful when you know that there should be a Javascript value or something present at this stage.
*
* @param javaScript used as executable script
* @return true once javaScript executed without errors
*/
public static ExpectedCondition<Boolean> javaScriptThrowsNoExceptions(final String javaScript) {
return new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
((JavascriptExecutor) driver).executeScript(javaScript);
return true;
} catch (WebDriverException e) {
return false;
}
}
@Override
public String toString() {
return String.format("js %s to be executable", javaScript);
}
};
}
/**
   * An expectation for a String value from javascript
   *
   * @param javaScript as executable js line
   * @return the result once js returns a non-empty string
*/
public static ExpectedCondition<Object> jsReturnsValue(final String javaScript) {
return new ExpectedCondition<Object>() {
@Override
public Object apply(WebDriver driver) {
try {
Object value = ((JavascriptExecutor) driver).executeScript(javaScript);
if (value instanceof List) {
return ((List<?>) value).isEmpty() ? null : value;
}
if (value instanceof String) {
return ((String) value).isEmpty() ? null : value;
}
return value;
} catch (WebDriverException e) {
return null;
}
}
@Override
public String toString() {
return String.format("js %s to be executable", javaScript);
}
};
}
}
| 1 | 18,003 | Each `ExpectedCondition` implements `java.util.Function`. These are expected to be stateless. This condition will leak the previous `elementText` on the second usage, which doesn't seem ideal. | SeleniumHQ-selenium | java |
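To make the statelessness concern concrete, here is a minimal Java sketch. It mirrors the shape of the patched textToBePresentInElement above; the enriched toString() message and the idea of reusing one condition instance across two waits are illustrative assumptions, not part of the patch shown. Because the anonymous ExpectedCondition keeps elementText as instance state, a reused instance whose getText() call throws can describe text captured during an earlier apply():

import org.openqa.selenium.StaleElementReferenceException;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.ui.ExpectedCondition;

class StatefulConditionSketch {
  // Same shape as the patched condition: the field lives on the anonymous class instance.
  static ExpectedCondition<Boolean> textToBePresentInElement(final WebElement element, final String text) {
    return new ExpectedCondition<Boolean>() {
      private String elementText = null; // shared across every apply() call on this instance

      @Override
      public Boolean apply(WebDriver driver) {
        try {
          elementText = element.getText();
          return elementText.contains(text);
        } catch (StaleElementReferenceException e) {
          // elementText still holds whatever the previous apply() saw.
          return null;
        }
      }

      @Override
      public String toString() {
        // Hypothetical enriched failure message: if apply() threw before reassigning the
        // field, this reports text from a page state that no longer exists.
        return String.format("text ('%s') to be present in element %s (last seen: \"%s\")",
            text, element, elementText);
      }
    };
  }
}

Note that the same field-per-instance pattern already appears in titleIs, titleContains, urlToBe, and attributeToBe earlier in this file.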
@@ -117,6 +117,7 @@ type NetworkConfig struct {
CNIVersion string `json:"cniVersion,omitempty"`
Name string `json:"name,omitempty"`
Type string `json:"type,omitempty"`
+ DeviceID string `json:"deviceID"` // PCI address of a VF
MTU int `json:"mtu,omitempty"`
DNS cnitypes.DNS `json:"dns"`
IPAM ipam.IPAMConfig `json:"ipam,omitempty"` | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cniserver
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"strings"
"sync"
cnitypes "github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/cni/pkg/version"
"github.com/containernetworking/plugins/pkg/ip"
"google.golang.org/grpc"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/agent/cniserver/ipam"
"github.com/vmware-tanzu/antrea/pkg/agent/config"
"github.com/vmware-tanzu/antrea/pkg/agent/interfacestore"
"github.com/vmware-tanzu/antrea/pkg/agent/openflow"
"github.com/vmware-tanzu/antrea/pkg/agent/route"
"github.com/vmware-tanzu/antrea/pkg/agent/util"
cnipb "github.com/vmware-tanzu/antrea/pkg/apis/cni/v1beta1"
"github.com/vmware-tanzu/antrea/pkg/apis/networking/v1beta1"
"github.com/vmware-tanzu/antrea/pkg/cni"
"github.com/vmware-tanzu/antrea/pkg/ovs/ovsconfig"
)
// containerAccessArbitrator is used to ensure that concurrent goroutines cannot perform operations
// on the same containerID. Other parts of the code make this assumption (in particular the
// InstallPodFlows / UninstallPodFlows methods of the OpenFlow client, which are invoked
// respectively by CmdAdd and CmdDel). The idea is to simplify the locking requirements for the rest
// of the code by ensuring that all the requests for a given container are serialized.
type containerAccessArbitrator struct {
mutex sync.Mutex
cond *sync.Cond
busyContainerKeys map[string]bool // used as a set of container keys
}
func newContainerAccessArbitrator() *containerAccessArbitrator {
arbitrator := &containerAccessArbitrator{
busyContainerKeys: make(map[string]bool),
}
arbitrator.cond = sync.NewCond(&arbitrator.mutex)
return arbitrator
}
// lockContainer prevents other goroutines from accessing containerKey. If containerKey is already
// locked by another goroutine, this function will block until the container is available. Every
// call to lockContainer must be followed by a call to unlockContainer on the same containerKey.
func (arbitrator *containerAccessArbitrator) lockContainer(containerKey string) {
arbitrator.cond.L.Lock()
defer arbitrator.cond.L.Unlock()
for {
_, ok := arbitrator.busyContainerKeys[containerKey]
if !ok {
break
}
arbitrator.cond.Wait()
}
arbitrator.busyContainerKeys[containerKey] = true
}
// unlockContainer releases access to containerKey.
func (arbitrator *containerAccessArbitrator) unlockContainer(containerKey string) {
arbitrator.cond.L.Lock()
defer arbitrator.cond.L.Unlock()
delete(arbitrator.busyContainerKeys, containerKey)
arbitrator.cond.Broadcast()
}
type CNIServer struct {
cniSocket string
supportedCNIVersions map[string]bool
serverVersion string
nodeConfig *config.NodeConfig
hostProcPathPrefix string
kubeClient clientset.Interface
containerAccess *containerAccessArbitrator
podConfigurator *podConfigurator
	// podUpdates is a channel for notifying Pod updates to other components, i.e. NetworkPolicyController.
podUpdates chan<- v1beta1.PodReference
isChaining bool
routeClient route.Interface
}
var supportedCNIVersionSet map[string]bool
type RuntimeDNS struct {
Nameservers []string `json:"servers,omitempty"`
Search []string `json:"searches,omitempty"`
}
type RuntimeConfig struct {
DNS RuntimeDNS `json:"dns"`
}
type NetworkConfig struct {
CNIVersion string `json:"cniVersion,omitempty"`
Name string `json:"name,omitempty"`
Type string `json:"type,omitempty"`
MTU int `json:"mtu,omitempty"`
DNS cnitypes.DNS `json:"dns"`
IPAM ipam.IPAMConfig `json:"ipam,omitempty"`
// Options to be passed in by the runtime.
RuntimeConfig RuntimeConfig `json:"runtimeConfig"`
RawPrevResult map[string]interface{} `json:"prevResult,omitempty"`
PrevResult cnitypes.Result `json:"-"`
}
type CNIConfig struct {
*NetworkConfig
*cnipb.CniCmdArgs
*k8sArgs
}
// updateResultIfaceConfig processes the result from the IPAM plugin and does the following:
// * updates the IP configuration for each assigned IP address: this includes computing the
// gateway (if missing) based on the subnet and setting the interface pointer to the container
// interface
// * if there is no default route, add one using the provided default gateway
func updateResultIfaceConfig(result *current.Result, defaultV4Gateway net.IP) {
for _, ipc := range result.IPs {
// result.Interfaces[0] is host interface, and result.Interfaces[1] is container interface
ipc.Interface = current.Int(1)
if ipc.Gateway == nil {
ipn := ipc.Address
netID := ipn.IP.Mask(ipn.Mask)
ipc.Gateway = ip.NextIP(netID)
}
}
foundDefaultRoute := false
defaultRouteDst := "0.0.0.0/0"
if result.Routes != nil {
for _, rt := range result.Routes {
if rt.Dst.String() == defaultRouteDst {
foundDefaultRoute = true
break
}
}
} else {
result.Routes = []*cnitypes.Route{}
}
if !foundDefaultRoute {
_, defaultRouteDstNet, _ := net.ParseCIDR(defaultRouteDst)
result.Routes = append(result.Routes, &cnitypes.Route{Dst: *defaultRouteDstNet, GW: defaultV4Gateway})
}
}
func (s *CNIServer) loadNetworkConfig(request *cnipb.CniCmdRequest) (*CNIConfig, error) {
cniConfig := &CNIConfig{}
cniConfig.CniCmdArgs = request.CniArgs
if err := json.Unmarshal(request.CniArgs.NetworkConfiguration, cniConfig); err != nil {
return cniConfig, err
}
cniConfig.k8sArgs = &k8sArgs{}
if err := cnitypes.LoadArgs(request.CniArgs.Args, cniConfig.k8sArgs); err != nil {
return cniConfig, err
}
if !s.isChaining {
s.updateLocalIPAMSubnet(cniConfig)
}
if cniConfig.MTU == 0 {
cniConfig.MTU = s.nodeConfig.NodeMTU
}
klog.Infof("Load network configurations: %v", cniConfig)
return cniConfig, nil
}
func (s *CNIServer) isCNIVersionSupported(reqVersion string) bool {
_, exist := s.supportedCNIVersions[reqVersion]
return exist
}
func (s *CNIServer) checkRequestMessage(request *cnipb.CniCmdRequest) (*CNIConfig, *cnipb.CniCmdResponse) {
cniConfig, err := s.loadNetworkConfig(request)
if err != nil {
klog.Errorf("Failed to parse network configuration: %v", err)
return nil, s.decodingFailureResponse("network config")
}
cniVersion := cniConfig.CNIVersion
// Check if CNI version in the request is supported
if !s.isCNIVersionSupported(cniVersion) {
klog.Errorf(fmt.Sprintf("Unsupported CNI version [%s], supported CNI versions %s", cniVersion, version.All.SupportedVersions()))
return cniConfig, s.incompatibleCniVersionResponse(cniVersion)
}
if s.isChaining {
return cniConfig, nil
}
	// Find the IPAM service according to the configuration
ipamType := cniConfig.IPAM.Type
isValid := ipam.IsIPAMTypeValid(ipamType)
if !isValid {
klog.Errorf("Unsupported IPAM type %s", ipamType)
return cniConfig, s.unsupportedFieldResponse("ipam/type", ipamType)
}
return cniConfig, nil
}
func (s *CNIServer) updateLocalIPAMSubnet(cniConfig *CNIConfig) {
cniConfig.NetworkConfig.IPAM.Gateway = s.nodeConfig.GatewayConfig.IP.String()
cniConfig.NetworkConfig.IPAM.Subnet = s.nodeConfig.PodCIDR.String()
cniConfig.NetworkConfiguration, _ = json.Marshal(cniConfig.NetworkConfig)
}
func (s *CNIServer) generateCNIErrorResponse(cniErrorCode cnipb.ErrorCode, cniErrorMsg string) *cnipb.CniCmdResponse {
return &cnipb.CniCmdResponse{
Error: &cnipb.Error{
Code: cniErrorCode,
Message: cniErrorMsg,
},
}
}
func (s *CNIServer) decodingFailureResponse(what string) *cnipb.CniCmdResponse {
return s.generateCNIErrorResponse(
cnipb.ErrorCode_DECODING_FAILURE,
fmt.Sprintf("Failed to decode %s", what),
)
}
func (s *CNIServer) incompatibleCniVersionResponse(cniVersion string) *cnipb.CniCmdResponse {
cniErrorCode := cnipb.ErrorCode_INCOMPATIBLE_CNI_VERSION
cniErrorMsg := fmt.Sprintf("Unsupported CNI version [%s], supported versions %s", cniVersion, version.All.SupportedVersions())
return s.generateCNIErrorResponse(cniErrorCode, cniErrorMsg)
}
func (s *CNIServer) unsupportedFieldResponse(key string, value interface{}) *cnipb.CniCmdResponse {
cniErrorCode := cnipb.ErrorCode_UNSUPPORTED_FIELD
cniErrorMsg := fmt.Sprintf("Network configuration does not support key %s and value %v", key, value)
return s.generateCNIErrorResponse(cniErrorCode, cniErrorMsg)
}
func (s *CNIServer) unknownContainerResponse(containerID string) *cnipb.CniCmdResponse {
cniErrorCode := cnipb.ErrorCode_UNKNOWN_CONTAINER
cniErrorMsg := fmt.Sprintf("Container id %s is unknown or non-existent", containerID)
return s.generateCNIErrorResponse(cniErrorCode, cniErrorMsg)
}
func (s *CNIServer) tryAgainLaterResponse() *cnipb.CniCmdResponse {
cniErrorCode := cnipb.ErrorCode_TRY_AGAIN_LATER
cniErrorMsg := "Server is busy, please retry later"
return s.generateCNIErrorResponse(cniErrorCode, cniErrorMsg)
}
func (s *CNIServer) ipamFailureResponse(err error) *cnipb.CniCmdResponse {
cniErrorCode := cnipb.ErrorCode_IPAM_FAILURE
cniErrorMsg := err.Error()
return s.generateCNIErrorResponse(cniErrorCode, cniErrorMsg)
}
func (s *CNIServer) configInterfaceFailureResponse(err error) *cnipb.CniCmdResponse {
cniErrorCode := cnipb.ErrorCode_CONFIG_INTERFACE_FAILURE
cniErrorMsg := err.Error()
return s.generateCNIErrorResponse(cniErrorCode, cniErrorMsg)
}
func (s *CNIServer) checkInterfaceFailureResponse(err error) *cnipb.CniCmdResponse {
cniErrorCode := cnipb.ErrorCode_CHECK_INTERFACE_FAILURE
cniErrorMsg := err.Error()
return s.generateCNIErrorResponse(cniErrorCode, cniErrorMsg)
}
func (s *CNIServer) invalidNetworkConfigResponse(msg string) *cnipb.CniCmdResponse {
return s.generateCNIErrorResponse(
cnipb.ErrorCode_INVALID_NETWORK_CONFIG,
msg,
)
}
func buildVersionSet() map[string]bool {
versionSet := make(map[string]bool)
for _, ver := range version.All.SupportedVersions() {
versionSet[strings.Trim(ver, " ")] = true
}
return versionSet
}
func (s *CNIServer) parsePrevResultFromRequest(networkConfig *NetworkConfig) (*current.Result, *cnipb.CniCmdResponse) {
if networkConfig.PrevResult == nil && networkConfig.RawPrevResult == nil {
klog.Errorf("Previous network configuration not specified")
return nil, s.unsupportedFieldResponse("prevResult", "")
}
if err := parsePrevResult(networkConfig); err != nil {
klog.Errorf("Failed to parse previous network configuration")
return nil, s.decodingFailureResponse("prevResult")
}
// Convert whatever the IPAM result was into the current Result type (for the current CNI
// version)
prevResult, err := current.NewResultFromResult(networkConfig.PrevResult)
if err != nil {
klog.Errorf("Failed to construct prevResult using previous network configuration")
return nil, s.unsupportedFieldResponse("prevResult", networkConfig.PrevResult)
}
return prevResult, nil
}
// validatePrevResult validates container and host interfaces configuration
// the return value is nil if prevResult is valid
func (s *CNIServer) validatePrevResult(cfgArgs *cnipb.CniCmdArgs, k8sCNIArgs *k8sArgs, prevResult *current.Result) *cnipb.CniCmdResponse {
containerID := cfgArgs.ContainerId
netNS := s.hostNetNsPath(cfgArgs.Netns)
// Find interfaces from previous configuration
containerIntf := parseContainerIfaceFromResults(cfgArgs, prevResult)
if containerIntf == nil {
klog.Errorf("Failed to find interface %s of container %s", cfgArgs.Ifname, containerID)
return s.invalidNetworkConfigResponse("prevResult does not match network configuration")
}
if err := s.podConfigurator.checkInterfaces(
containerID,
netNS,
containerIntf,
prevResult); err != nil {
return s.checkInterfaceFailureResponse(err)
}
return nil
}
func (s *CNIServer) CmdAdd(ctx context.Context, request *cnipb.CniCmdRequest) (*cnipb.CniCmdResponse, error) {
klog.Infof("Received CmdAdd request %v", request)
cniConfig, response := s.checkRequestMessage(request)
if response != nil {
return response, nil
}
cniVersion := cniConfig.CNIVersion
result := ¤t.Result{CNIVersion: cniVersion}
netNS := s.hostNetNsPath(cniConfig.Netns)
isInfraContainer := isInfraContainer(netNS)
success := false
defer func() {
		// Roll back to delete configurations if the ADD fails.
if !success {
if isInfraContainer {
klog.Warningf("CmdAdd has failed, and try to rollback")
if _, err := s.CmdDel(ctx, request); err != nil {
klog.Warningf("Failed to rollback after CNI add failure: %v", err)
}
} else {
klog.Warningf("CmdAdd has failed")
}
}
}()
infraContainer := cniConfig.getInfraContainer()
s.containerAccess.lockContainer(infraContainer)
defer s.containerAccess.unlockContainer(infraContainer)
if s.isChaining {
resp, err := s.interceptAdd(cniConfig)
if err == nil {
success = true
}
return resp, err
}
var ipamResult *current.Result
var err error
// Only allocate IP when handling CNI request from infra container.
	// On the Windows platform, the CNI plugin is called for all containers in a Pod.
if !isInfraContainer {
if ipamResult, _ = ipam.GetIPFromCache(infraContainer); ipamResult == nil {
return nil, fmt.Errorf("allocated IP address not found")
}
} else {
// Request IP Address from IPAM driver.
ipamResult, err = ipam.ExecIPAMAdd(cniConfig.CniCmdArgs, cniConfig.IPAM.Type, infraContainer)
if err != nil {
klog.Errorf("Failed to add IP addresses from IPAM driver: %v", err)
return s.ipamFailureResponse(err), nil
}
}
klog.Infof("Added ip addresses from IPAM driver, %v", ipamResult)
result.IPs = ipamResult.IPs
result.Routes = ipamResult.Routes
// Ensure interface gateway setting and mapping relations between result.Interfaces and result.IPs
updateResultIfaceConfig(result, s.nodeConfig.GatewayConfig.IP)
// Setup pod interfaces and connect to ovs bridge
podName := string(cniConfig.K8S_POD_NAME)
podNamespace := string(cniConfig.K8S_POD_NAMESPACE)
updateResultDNSConfig(result, cniConfig)
if err = s.podConfigurator.configureInterfaces(
podName,
podNamespace,
cniConfig.ContainerId,
netNS,
cniConfig.Ifname,
cniConfig.MTU,
result,
isInfraContainer,
); err != nil {
klog.Errorf("Failed to configure interfaces for container %s: %v", cniConfig.ContainerId, err)
return s.configInterfaceFailureResponse(err), nil
}
// Notify the Pod update event to required components.
s.podUpdates <- v1beta1.PodReference{Name: podName, Namespace: podNamespace}
var resultBytes bytes.Buffer
_ = result.PrintTo(&resultBytes)
klog.Infof("CmdAdd succeeded")
// mark success as true to avoid rollback
success = true
return &cnipb.CniCmdResponse{CniResult: resultBytes.Bytes()}, nil
}
func (s *CNIServer) CmdDel(_ context.Context, request *cnipb.CniCmdRequest) (
*cnipb.CniCmdResponse, error) {
klog.Infof("Received CmdDel request %v", request)
cniConfig, response := s.checkRequestMessage(request)
if response != nil {
return response, nil
}
infraContainer := cniConfig.getInfraContainer()
s.containerAccess.lockContainer(infraContainer)
defer s.containerAccess.unlockContainer(infraContainer)
if s.isChaining {
return s.interceptDel(cniConfig)
}
// Release IP to IPAM driver
if err := ipam.ExecIPAMDelete(cniConfig.CniCmdArgs, cniConfig.IPAM.Type, infraContainer); err != nil {
klog.Errorf("Failed to delete IP addresses by IPAM driver: %v", err)
return s.ipamFailureResponse(err), nil
}
klog.Info("Deleted IP addresses by IPAM driver")
// Remove host interface and OVS configuration
if err := s.podConfigurator.removeInterfaces(cniConfig.ContainerId); err != nil {
klog.Errorf("Failed to remove interfaces for container %s: %v", cniConfig.ContainerId, err)
return s.configInterfaceFailureResponse(err), nil
}
return &cnipb.CniCmdResponse{CniResult: []byte("")}, nil
}
func (s *CNIServer) CmdCheck(_ context.Context, request *cnipb.CniCmdRequest) (
*cnipb.CniCmdResponse, error) {
klog.Infof("Received CmdCheck request %v", request)
cniConfig, response := s.checkRequestMessage(request)
if response != nil {
return response, nil
}
infraContainer := cniConfig.getInfraContainer()
s.containerAccess.lockContainer(infraContainer)
defer s.containerAccess.unlockContainer(infraContainer)
if s.isChaining {
return s.interceptCheck(cniConfig)
}
if err := ipam.ExecIPAMCheck(cniConfig.CniCmdArgs, cniConfig.IPAM.Type); err != nil {
klog.Errorf("Failed to check IPAM configuration: %v", err)
return s.ipamFailureResponse(err), nil
}
cniVersion := cniConfig.CNIVersion
if valid, _ := version.GreaterThanOrEqualTo(cniVersion, "0.4.0"); valid {
if prevResult, response := s.parsePrevResultFromRequest(cniConfig.NetworkConfig); response != nil {
return response, nil
} else if response := s.validatePrevResult(cniConfig.CniCmdArgs, cniConfig.k8sArgs, prevResult); response != nil {
return response, nil
}
}
klog.Info("Succeed to check network configuration")
return &cnipb.CniCmdResponse{CniResult: []byte("")}, nil
}
func New(
cniSocket, hostProcPathPrefix string,
nodeConfig *config.NodeConfig,
kubeClient clientset.Interface,
podUpdates chan<- v1beta1.PodReference,
isChaining bool,
routeClient route.Interface,
) *CNIServer {
return &CNIServer{
cniSocket: cniSocket,
supportedCNIVersions: supportedCNIVersionSet,
serverVersion: cni.AntreaCNIVersion,
nodeConfig: nodeConfig,
hostProcPathPrefix: hostProcPathPrefix,
kubeClient: kubeClient,
containerAccess: newContainerAccessArbitrator(),
podUpdates: podUpdates,
isChaining: isChaining,
routeClient: routeClient,
}
}
func (s *CNIServer) Initialize(
ovsBridgeClient ovsconfig.OVSBridgeClient,
ofClient openflow.Client,
ifaceStore interfacestore.InterfaceStore,
ovsDatapathType string,
) error {
var err error
s.podConfigurator, err = newPodConfigurator(ovsBridgeClient, ofClient, s.routeClient, ifaceStore, s.nodeConfig.GatewayConfig.MAC, ovsDatapathType)
if err != nil {
return fmt.Errorf("error during initialize podConfigurator: %v", err)
}
if err := s.reconcile(); err != nil {
return fmt.Errorf("error during initial reconciliation for CNI server: %v", err)
}
return nil
}
func (s *CNIServer) Run(stopCh <-chan struct{}) {
klog.Info("Starting CNI server")
defer klog.Info("Shutting down CNI server")
listener, err := util.ListenLocalSocket(s.cniSocket)
if err != nil {
klog.Fatalf("Failed to bind on %s: %v", s.cniSocket, err)
}
rpcServer := grpc.NewServer()
cnipb.RegisterCniServer(rpcServer, s)
klog.Info("CNI server is listening ...")
go func() {
if err := rpcServer.Serve(listener); err != nil {
klog.Errorf("Failed to serve connections: %v", err)
}
}()
<-stopCh
}
// interceptAdd handles Add request in policy only mode. Another CNI must already
// be called prior to Antrea CNI to allocate IP and ports. Antrea takes allocated port
// and hooks it to OVS br-int.
func (s *CNIServer) interceptAdd(cniConfig *CNIConfig) (*cnipb.CniCmdResponse, error) {
klog.Infof("CNI Chaining: add")
prevResult, response := s.parsePrevResultFromRequest(cniConfig.NetworkConfig)
if response != nil {
klog.Infof("Failed to parse prev result for container %s", cniConfig.ContainerId)
return response, nil
}
podName := string(cniConfig.K8S_POD_NAME)
podNamespace := string(cniConfig.K8S_POD_NAMESPACE)
result := make([]byte, 0, 0)
if err := s.podConfigurator.connectInterceptedInterface(
podName,
podNamespace,
cniConfig.ContainerId,
s.hostNetNsPath(cniConfig.Netns),
cniConfig.Ifname,
prevResult.IPs); err != nil {
return &cnipb.CniCmdResponse{CniResult: result}, fmt.Errorf("failed to connect container %s to ovs: %w", cniConfig.ContainerId, err)
}
// Notify the Pod update event to required components.
s.podUpdates <- v1beta1.PodReference{Name: podName, Namespace: podNamespace}
return &cnipb.CniCmdResponse{CniResult: cniConfig.NetworkConfiguration}, nil
}
func (s *CNIServer) interceptDel(cniConfig *CNIConfig) (*cnipb.CniCmdResponse, error) {
klog.Infof("CNI Chaining: delete")
return &cnipb.CniCmdResponse{CniResult: make([]byte, 0, 0)}, s.podConfigurator.disconnectInterceptedInterface(
string(cniConfig.K8S_POD_NAME),
string(cniConfig.K8S_POD_NAMESPACE),
cniConfig.ContainerId)
}
func (s *CNIServer) interceptCheck(_ *CNIConfig) (*cnipb.CniCmdResponse, error) {
klog.Infof("CNI Chaining: check")
// TODO, check for host interface setup later
return &cnipb.CniCmdResponse{CniResult: make([]byte, 0, 0)}, nil
}
// reconcile performs startup reconciliation for the CNI server. The CNI server is in charge of
// installing Pod flows, so as part of this reconciliation process we retrieve the Pod list from the
// K8s apiserver and replay the necessary flows.
func (s *CNIServer) reconcile() error {
klog.Infof("Reconciliation for CNI server")
pods, err := s.kubeClient.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
FieldSelector: "spec.nodeName=" + s.nodeConfig.Name,
})
if err != nil {
return fmt.Errorf("failed to list Pods running on Node %s: %v", s.nodeConfig.Name, err)
}
return s.podConfigurator.reconcile(pods.Items)
}
func init() {
supportedCNIVersionSet = buildVersionSet()
}
| 1 | 18,646 | Could we consider a more explicit name, like `devicePciAddress`? | antrea-io-antrea | go |
@@ -109,10 +109,14 @@ public class WindowsUtils {
* quote (\"?)
*/
// TODO We should be careful, in case Windows has ~1-ified the executable name as well
- pattern.append("\"?.*?\\\\");
- pattern.append(executable.getName());
+ pattern.append("(\"?.*?\\\\)?");
+ String execName = executable.getName();
+ pattern.append(execName);
+ if (!execName.endsWith(".exe")) {
+ pattern.append("(\\.exe)?");
+ }
pattern.append("\"?");
- for (String arg : cmdarray) {
+ for (int i = 1; i < cmdarray.length; i++) {
/*
* There may be a space, but maybe not (\\s?), may be a quote or maybe not (\"?), but then
* turn on block quotation (as if *everything* had a regex backslash in front of it) with \Q. | 1 | /*
* Copyright 2011 Software Freedom Conservancy.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.openqa.selenium.os;
import static org.openqa.selenium.Platform.WINDOWS;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.openqa.selenium.Platform;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.w3c.dom.Text;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.xml.parsers.DocumentBuilderFactory;
public class WindowsUtils {
public static Boolean regVersion1 = null;
private static Logger LOG = Logger.getLogger(WindowsUtils.class.getName());
private static final boolean THIS_IS_WINDOWS = Platform.getCurrent().is(WINDOWS);
private static String wmic = null;
private static File wbem = null;
private static String taskkill = null;
private static String reg = null;
private static Properties env = null;
/**
* @param args
* @throws Exception
*/
public static void main(String[] args) throws Exception {
if (args.length == 0) {
System.out.println("Kills Windows processes by matching their command lines");
System.out.println("usage: " + WindowsUtils.class.getName() + " command arg1 arg2 ...");
}
kill(args);
}
public static void traceWith(Logger log) {
WindowsUtils.LOG = log;
}
/**
* Kill processes by name
*/
public static void killByName(String name) {
executeCommand("taskkill", "/f", "/t", "/im", name);
}
/**
* Kill processes by name, log and ignore errors
*/
public static void tryToKillByName(String name) {
if (!thisIsWindows()) {
return;
}
try {
killByName(name);
} catch (WindowsRegistryException e) {
LOG.log(Level.WARNING, "Exception thrown", e);
}
}
/**
* Searches the process list for a process with the specified command line and kills it
*
* @param cmdarray the array of command line arguments
* @throws Exception if something goes wrong while reading the process list or searching for your
* command line
*/
public static void kill(String[] cmdarray) throws Exception {
StringBuilder pattern = new StringBuilder();
File executable = new File(cmdarray[0]);
/*
* For the first argument, the executable, Windows may modify the start path in any number of
* ways. Ignore a starting quote if any (\"?), non-greedily look for anything up until the last
* backslash (.*?\\\\), then look for the executable's filename, then finally ignore a final
* quote (\"?)
*/
// TODO We should be careful, in case Windows has ~1-ified the executable name as well
pattern.append("\"?.*?\\\\");
pattern.append(executable.getName());
pattern.append("\"?");
for (String arg : cmdarray) {
/*
* There may be a space, but maybe not (\\s?), may be a quote or maybe not (\"?), but then
* turn on block quotation (as if *everything* had a regex backslash in front of it) with \Q.
* Then look for the next argument (which may have ?s, \s, "s, who knows), turning off block
* quotation. Now ignore a final quote if any (\"?)
*/
pattern.append("\\s?\"?\\Q");
pattern.append(arg);
pattern.append("\\E\"?");
}
pattern.append("\\s*");
Pattern cmd = Pattern.compile(pattern.toString(), Pattern.CASE_INSENSITIVE);
Map<String, String> procMap = procMap();
boolean killedOne = false;
for (String commandLine : procMap.keySet()) {
if (commandLine == null) {
continue;
}
Matcher m = cmd.matcher(commandLine);
if (m.matches()) {
String processID = procMap.get(commandLine);
StringBuilder logMessage = new StringBuilder("Killing PID ");
logMessage.append(processID);
logMessage.append(": ");
logMessage.append(commandLine);
LOG.info(logMessage.toString());
killPID(processID);
LOG.info("Killed");
killedOne = true;
}
}
if (!killedOne) {
StringBuilder errorMessage = new StringBuilder("Didn't find any matches for");
for (String arg : cmdarray) {
errorMessage.append(" '");
errorMessage.append(arg);
errorMessage.append('\'');
}
LOG.warning(errorMessage.toString());
}
}
/**
* Kills the specified process ID
*/
private static void killPID(String processID) {
executeCommand("taskkill", "/f", "/pid", processID);
}
/**
* Returns a map of process IDs to command lines
*
* @return a map of process IDs to command lines
* @throws Exception - if something goes wrong while reading the process list
*/
public static Map<String, String> procMap() throws Exception {
LOG.info("Reading Windows Process List...");
String output = executeCommand(findWMIC(), "process", "list", "full", "/format:rawxml.xsl");
// exec.setFailonerror(true);
LOG.info("Done, searching for processes to kill...");
// WMIC drops an ugly zero-length batch file; clean that up
File tempWmicBatchFile = new File("TempWmicBatchFile.bat");
if (tempWmicBatchFile.exists()) {
tempWmicBatchFile.delete();
}
// TODO This would be faster if it used SAX instead of DOM
Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
.parse(new ByteArrayInputStream(output.getBytes()));
NodeList procList = doc.getElementsByTagName("INSTANCE");
Map<String, String> processes = new HashMap<String, String>();
for (int i = 0; i < procList.getLength(); i++) {
Element process = (Element) procList.item(i);
NodeList propList = process.getElementsByTagName("PROPERTY");
Map<String, String> procProps = new HashMap<String, String>();
for (int j = 0; j < propList.getLength(); j++) {
Element property = (Element) propList.item(j);
String propName = property.getAttribute("NAME");
NodeList valList = property.getElementsByTagName("VALUE");
String value = null;
if (valList.getLength() != 0) {
Element valueElement = (Element) valList.item(0);
Text valueNode = (Text) valueElement.getFirstChild();
value = valueNode.getData();
}
procProps.put(propName, value);
}
String processID = procProps.get("ProcessId");
String commandLine = procProps.get("CommandLine");
processes.put(commandLine, processID);
}
return processes;
}
/**
* Returns the current process environment variables
*
* @return the current process environment variables
*/
public static synchronized Properties loadEnvironment() {
if (env != null) {
return env;
}
env = new Properties();
for (Map.Entry<String, String> entry : System.getenv().entrySet()) {
env.put(entry.getKey(), entry.getValue());
}
return env;
}
/**
* Returns the path to the Windows Program Files. On non-English versions, this is not necessarily
* "C:\Program Files".
*
* @return the path to the Windows Program Files
*/
public static String getProgramFilesPath() {
return getEnvVarPath("ProgramFiles", "C:\\Program Files");
}
public static String getProgramFiles86Path() {
return getEnvVarPath("ProgramFiles(x86)", "C:\\Program Files (x86)");
}
private static String getEnvVarPath(final String envVar, final String defaultValue) {
String pf = getEnvVarIgnoreCase(envVar);
if (pf != null) {
File programFiles = new File(pf);
if (programFiles.exists()) {
return programFiles.getAbsolutePath();
}
}
return new File(defaultValue).getAbsolutePath();
}
public static ImmutableList<String> getPathsInProgramFiles(final String childPath) {
return new ImmutableList.Builder<String>()
.add(getFullPath(WindowsUtils.getProgramFilesPath(), childPath))
.add(getFullPath(WindowsUtils.getProgramFiles86Path(), childPath))
.build();
}
private static String getFullPath(String parent, String child) {
return new File(parent, child).getAbsolutePath();
}
/**
* Returns the path to Local AppData. For different users, this will be different.
*
* @return the path to Local AppData
*/
public static String getLocalAppDataPath() {
final String keyLocalAppData =
"HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\Local AppData";
String localAppDataPath = readStringRegistryValue(keyLocalAppData);
String userProfile = getEnvVarIgnoreCase("USERPROFILE");
if (userProfile != null) {
return localAppDataPath.replace("%USERPROFILE%", userProfile);
}
return localAppDataPath;
}
public static String getEnvVarIgnoreCase(String var) {
Properties p = loadEnvironment();
for (String key : p.stringPropertyNames()) {
if (key.equalsIgnoreCase(var)) {
return env.getProperty(key);
}
}
return null;
}
/**
* Finds the system root directory, e.g. "c:\windows" or "c:\winnt"
*/
public static File findSystemRoot() {
Properties p = loadEnvironment();
String systemRootPath = p.getProperty("SystemRoot");
if (systemRootPath == null) {
systemRootPath = p.getProperty("SYSTEMROOT");
}
if (systemRootPath == null) {
systemRootPath = p.getProperty("systemroot");
}
if (systemRootPath == null) {
throw new RuntimeException("SystemRoot apparently not set!");
}
File systemRoot = new File(systemRootPath);
if (!systemRoot.exists()) {
throw new RuntimeException("SystemRoot doesn't exist: " + systemRootPath);
}
return systemRoot;
}
/**
* Finds WMIC.exe
*
* @return the exact path to wmic.exe, or just the string "wmic" if it couldn't be found (in which
* case you can pass that to exec to try to run it from the path)
*/
public static String findWMIC() {
if (wmic != null) {
return wmic;
}
findWBEM();
if (null != wbem) {
File wmicExe = new File(findWBEM(), "wmic.exe");
if (wmicExe.exists()) {
wmic = wmicExe.getAbsolutePath();
return wmic;
}
}
LOG.warning("Couldn't find wmic! Hope it's on the path...");
wmic = "wmic";
return wmic;
}
/**
* Finds the WBEM directory in the systemRoot directory
*
* @return the WBEM directory, or <code>null</code> if it couldn't be found
*/
public static File findWBEM() {
if (wbem != null) {
return wbem;
}
File systemRoot = findSystemRoot();
wbem = new File(systemRoot, "system32/wbem");
if (!wbem.exists()) {
LOG.severe("Couldn't find wbem!");
return null;
}
return wbem;
}
/**
* Finds taskkill.exe
*
* @return the exact path to taskkill.exe, or just the string "taskkill" if it couldn't be found
* (in which case you can pass that to exec to try to run it from the path)
*/
public static String findTaskKill() {
if (taskkill != null) {
return taskkill;
}
File systemRoot = findSystemRoot();
File taskkillExe = new File(systemRoot, "system32/taskkill.exe");
if (taskkillExe.exists()) {
taskkill = taskkillExe.getAbsolutePath();
return taskkill;
}
LOG.warning("Couldn't find taskkill! Hope it's on the path...");
taskkill = "taskkill";
return taskkill;
}
/**
* Finds reg.exe
*
* @return the exact path to reg.exe, or just the string "reg" if it couldn't be found (in which
* case you can pass that to exec to try to run it from the path)
*/
public static String findReg() {
if (reg != null) {
return reg;
}
File systemRoot = findSystemRoot();
File regExe = new File(systemRoot, "system32/reg.exe");
if (regExe.exists()) {
reg = regExe.getAbsolutePath();
return reg;
}
regExe = new File("c:\\ntreskit\\reg.exe");
if (regExe.exists()) {
reg = regExe.getAbsolutePath();
return reg;
}
reg = new ExecutableFinder().find("reg.exe");
if (reg != null) {
return reg;
}
LOG.severe("OS Version: " + System.getProperty("os.version"));
throw new WindowsRegistryException("Couldn't find reg.exe!\n" +
"Please download it from Microsoft and install it in a standard location.\n"
+
"See here for details: http://wiki.openqa.org/display/SRC/Windows+Registry+Support");
}
public static boolean isRegExeVersion1() {
if (regVersion1 != null) {
return regVersion1.booleanValue();
}
String output = executeCommand(findReg(), "/?");
boolean version1 = output.indexOf("version 1.0") != -1;
regVersion1 = Boolean.valueOf(version1);
return version1;
}
public static Class<?> discoverRegistryKeyType(String key) {
if (!doesRegistryValueExist(key)) {
return null;
}
RegKeyValue r = new RegKeyValue(key);
String output = runRegQuery(key);
Pattern pat;
if (isRegExeVersion1()) {
pat = Pattern.compile("\\s*(REG_\\S+)");
} else {
pat = Pattern.compile("\\Q" + r.value + "\\E\\s+(REG_\\S+)\\s+(.*)");
}
Matcher m = pat.matcher(output);
if (!m.find()) {
throw new WindowsRegistryException("Output didn't look right: " + output);
}
String type = m.group(1);
if ("REG_SZ".equals(type) || "REG_EXPAND_SZ".equals(type)) {
return String.class;
} else if ("REG_DWORD".equals(type)) {
return int.class;
} else {
throw new WindowsRegistryException("Unknown type: " + type);
}
}
public static String readStringRegistryValue(String key) {
RegKeyValue r = new RegKeyValue(key);
String output = runRegQuery(key);
Pattern pat;
if (isRegExeVersion1()) {
pat = Pattern.compile("\\s*(REG_\\S+)\\s+\\Q" + r.value + "\\E\\s+(.*)");
} else {
pat = Pattern.compile("\\Q" + r.value + "\\E\\s+(REG_\\S+)\\s+(.*)");
}
Matcher m = pat.matcher(output);
if (!m.find()) {
throw new WindowsRegistryException("Output didn't look right: " + output);
}
String type = m.group(1);
if (!"REG_SZ".equals(type) && !"REG_EXPAND_SZ".equals(type)) {
throw new WindowsRegistryException(
r.value + " was not a REG_SZ or a REG_EXPAND_SZ (String): " + type);
}
return m.group(2);
}
public static int readIntRegistryValue(String key) {
RegKeyValue r = new RegKeyValue(key);
String output = runRegQuery(key);
Pattern pat;
if (isRegExeVersion1()) {
pat = Pattern.compile("\\s*(REG_\\S+)\\s+\\Q" + r.value + "\\E\\s+(.*)");
} else {
pat = Pattern.compile("\\Q" + r.value + "\\E\\s+(REG_\\S+)\\s+0x(.*)");
}
Matcher m = pat.matcher(output);
if (!m.find()) {
throw new WindowsRegistryException("Output didn't look right: " + output);
}
String type = m.group(1);
if (!"REG_DWORD".equals(type)) {
throw new WindowsRegistryException(r.value + " was not a REG_DWORD (int): " + type);
}
String strValue = m.group(2);
int value;
if (isRegExeVersion1()) {
value = Integer.parseInt(strValue);
} else {
value = Integer.parseInt(strValue, 16);
}
return value;
}
public static boolean readBooleanRegistryValue(String key) {
RegKeyValue r = new RegKeyValue(key);
int value = readIntRegistryValue(key);
if (0 == value) {
return false;
}
if (1 == value) {
return true;
}
throw new WindowsRegistryException(r.value + " was not either 0 or 1: " + value);
}
public static boolean doesRegistryValueExist(String key) {
List<String> args = Lists.newArrayList();
args.add("query");
if (isRegExeVersion1()) {
args.add(key);
} else {
RegKeyValue r = new RegKeyValue(key);
args.add(r.key);
args.add("/v");
args.add(r.value);
}
try {
executeCommand(findReg(), args.toArray(new String[args.size()]));
return true;
} catch (WindowsRegistryException e) {
return false;
}
}
public static void writeStringRegistryValue(String key, String data)
throws WindowsRegistryException {
List<String> args = new ArrayList<String>();
if (isRegExeVersion1()) {
if (doesRegistryValueExist(key)) {
args.add("update");
} else {
args.add("add");
}
args.add(key + "=" + data);
} else {
args.add("add");
RegKeyValue r = new RegKeyValue(key);
args.add(r.key);
args.add("/v");
args.add(r.value);
args.add("/d");
args.add(data);
args.add("/f");
}
executeCommand(findReg(), args.toArray(new String[args.size()]));
}
private static String executeCommand(String commandName, String... args) {
CommandLine cmd = new CommandLine(commandName, args);
cmd.execute();
String output = cmd.getStdOut();
if (!cmd.isSuccessful()) {
throw new WindowsRegistryException("exec return code " + cmd.getExitCode() + ": " + output);
}
return output;
}
public static void writeIntRegistryValue(String key, int data) {
List<String> args = new ArrayList<String>();
if (isRegExeVersion1()) {
if (doesRegistryValueExist(key)) {
args.add("update");
args.add(key + "=" + Integer.toString(data));
} else {
args.add("add");
args.add(key + "=" + Integer.toString(data));
args.add("REG_DWORD");
}
} else {
args.add("add");
RegKeyValue r = new RegKeyValue(key);
args.add(r.key);
args.add("/v");
args.add(r.value);
args.add("/t");
args.add("REG_DWORD");
args.add("/d");
args.add(Integer.toString(data));
args.add("/f");
}
executeCommand(findReg(), args.toArray(new String[args.size()]));
}
public static void writeBooleanRegistryValue(String key, boolean data) {
writeIntRegistryValue(key, data ? 1 : 0);
}
public static void deleteRegistryValue(String key) {
List<String> args = new ArrayList<String>();
if (isRegExeVersion1()) {
args.add("delete");
args.add(key);
args.add("/FORCE");
} else {
RegKeyValue r = new RegKeyValue(key);
args.add("delete");
args.add(r.key);
args.add("/v");
args.add(r.value);
args.add("/f");
}
executeCommand(findReg(), args.toArray(new String[args.size()]));
}
/**
* Executes reg.exe to query the registry
*/
private static String runRegQuery(String key) {
List<String> args = new ArrayList<String>();
args.add("query");
if (isRegExeVersion1()) {
args.add(key);
} else {
RegKeyValue r = new RegKeyValue(key);
args.add(r.key);
args.add("/v");
args.add(r.value);
}
return executeCommand(findReg(), args.toArray(new String[args.size()]));
}
private static class RegKeyValue {
private String key;
private String value;
public RegKeyValue(String path) {
int i = path.lastIndexOf('\\');
key = path.substring(0, i);
value = path.substring(i + 1);
}
}
/**
* Returns true if the current OS is MS Windows; false otherwise
*
* @return true if the current OS is MS Windows; false otherwise
*/
public static boolean thisIsWindows() {
return THIS_IS_WINDOWS;
}
}
| 1 | 10,241 | Why change this from a foreach? I can't see it gaining anything here and code styles shouldn't change just for the sake of it. | SeleniumHQ-selenium | rb |
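On the foreach question raised in this review: the indexed loop in the patch above exists only to skip cmdarray[0] (the executable), which the regex prefix already matches. One way to keep the enhanced-for style while still skipping the executable is sketched below; this is an illustrative sketch only, not code from the Selenium repository, and the helper name is invented here, reusing the pattern and cmdarray names from the kill() method shown above.

// Illustrative sketch (not part of WindowsUtils.java): appends the argument
// portion of the kill-pattern with an enhanced for loop that skips the
// executable at cmdarray[0] instead of switching to an index-based loop.
private static void appendArgumentPattern(StringBuilder pattern, String[] cmdarray) {
  boolean skippedExecutable = false;
  for (String arg : cmdarray) {
    if (!skippedExecutable) {
      // cmdarray[0] is the executable; the regex prefix already covers it.
      skippedExecutable = true;
      continue;
    }
    pattern.append("\\s?\"?\\Q");
    pattern.append(arg);
    pattern.append("\\E\"?");
  }
}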
@@ -610,8 +610,9 @@ class SolrService extends \Apache_Solr_Service
$solrconfigXmlUrl = $this->_scheme . '://'
. $this->_host . ':' . $this->_port
. $this->_path . 'admin/file/?file=solrconfig.xml';
+ $response= $this->_sendRawGet($solrconfigXmlUrl);
- $solrconfigXml = simplexml_load_file($solrconfigXmlUrl);
+ $solrconfigXml = simplexml_load_string($response->getRawResponse());
if ($solrconfigXml === false) {
throw new \InvalidArgumentException('No valid xml response from schema file: ' . $solrconfigXmlUrl);
} | 1 | <?php
namespace ApacheSolrForTypo3\Solr;
/***************************************************************
* Copyright notice
*
* (c) 2009-2015 Ingo Renner <[email protected]>
* All rights reserved
*
* This script is part of the TYPO3 project. The TYPO3 project is
* free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The GNU General Public License can be found at
* http://www.gnu.org/copyleft/gpl.html.
*
* This script is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This copyright notice MUST APPEAR in all copies of the script!
***************************************************************/
use ApacheSolrForTypo3\Solr\System\Configuration\TypoScriptConfiguration;
use ApacheSolrForTypo3\Solr\System\Solr\Service\StopWordParser;
use ApacheSolrForTypo3\Solr\System\Solr\Service\SynonymParser;
use TYPO3\CMS\Core\Utility\GeneralUtility;
/**
* Solr Service Access
*
* @author Ingo Renner <[email protected]>
*/
class SolrService extends \Apache_Solr_Service
{
const LUKE_SERVLET = 'admin/luke';
const SYSTEM_SERVLET = 'admin/system';
const PLUGINS_SERVLET = 'admin/plugins';
const CORES_SERVLET = 'admin/cores';
const SCHEMA_SERVLET = 'schema';
const SYNONYMS_SERVLET = 'schema/analysis/synonyms/';
const STOPWORDS_SERVLET = 'schema/analysis/stopwords/';
const SCHEME_HTTP = 'http';
const SCHEME_HTTPS = 'https';
/**
* Server connection scheme. http or https.
*
* @var string
*/
protected $_scheme = self::SCHEME_HTTP;
/**
* Constructed servlet URL for Luke
*
* @var string
*/
protected $_lukeUrl;
/**
* Constructed servlet URL for plugin information
*
* @var string
*/
protected $_pluginsUrl;
/**
* @var string
*/
protected $_coresUrl;
/**
* @var string
*/
protected $_extractUrl;
/**
* @var string
*/
protected $_synonymsUrl;
/**
* @var string
*/
protected $_stopWordsUrl;
/**
* @var string
*/
protected $_schemaUrl;
/**
* @var bool
*/
protected $debug = false;
/**
* @var \Apache_Solr_Response
*/
protected $responseCache = null;
/**
* @var bool
*/
protected $hasSearched = false;
/**
* @var array
*/
protected $lukeData = [];
protected $systemData = null;
protected $pluginsData = null;
protected $schemaName = null;
protected $solrconfigName = null;
/**
* @var TypoScriptConfiguration
*/
protected $configuration;
/**
* @var array
*/
protected static $pingCache = [];
/**
* @var SynonymParser
*/
protected $synonymParser = null;
/**
* @var StopWordParser
*/
protected $stopWordParser = null;
/**
* @var string
*/
protected $managedLanguage = '';
/**
* Constructor
*
* @param string $host Solr host
* @param string $port Solr port
* @param string $path Solr path
* @param string $scheme Scheme, defaults to http, can be https
* @param TypoScriptConfiguration $typoScriptConfiguration
* @param SynonymParser $synonymParser
* @param StopWordParser $stopWordParser
*/
public function __construct(
$host = '',
$port = '8983',
$path = '/solr/',
$scheme = 'http',
TypoScriptConfiguration $typoScriptConfiguration = null,
SynonymParser $synonymParser = null,
StopWordParser $stopWordParser = null
) {
$this->setScheme($scheme);
$this->configuration = is_null($typoScriptConfiguration) ? Util::getSolrConfiguration() : $typoScriptConfiguration;
$this->synonymParser = is_null($synonymParser) ? GeneralUtility::makeInstance(SynonymParser::class) : $synonymParser;
$this->stopWordParser = is_null($stopWordParser) ? GeneralUtility::makeInstance(StopWordParser::class) : $stopWordParser;
parent::__construct($host, $port, $path);
}
/**
* Creates a string representation of the Solr connection. Specifically
* will return the Solr URL.
*
* @return string The Solr URL.
*/
public function __toString()
{
return $this->_scheme . '://' . $this->_host . ':' . $this->_port . $this->_path;
}
/**
* Returns the current time in milliseconds.
*
* @return double
*/
protected function getMilliseconds()
{
return GeneralUtility::milliseconds();
}
/**
* Performs a search.
*
* @param string $query query string / search term
* @param int $offset result offset for pagination
* @param int $limit number of results to retrieve
* @param array $params additional HTTP GET parameters
* @param string $method The HTTP method (Apache_Solr_Service::METHOD_GET or Apache_Solr_Service::METHOD::POST)
* @return \Apache_Solr_Response Solr response
* @throws \RuntimeException if Solr returns a HTTP status code other than 200
*/
public function search($query, $offset = 0, $limit = 10, $params = array(), $method = self::METHOD_GET)
{
$response = parent::search($query, $offset, $limit, $params, $method);
$this->hasSearched = true;
$this->responseCache = $response;
if ($response->getHttpStatus() != 200) {
throw new \RuntimeException(
'Invalid query. Solr returned an error: '
. $response->getHttpStatus() . ' '
. $response->getHttpStatusMessage(),
1293109870
);
}
return $response;
}
/**
* Call the /admin/ping servlet, can be used to quickly tell if a connection to the
* server is available.
*
* Simply overrides the SolrPhpClient implementation, changing ping from a
* HEAD to a GET request, see http://forge.typo3.org/issues/44167
*
* Also does not report the time, see https://forge.typo3.org/issues/64551
*
* @param float|int $timeout maximum time to wait for ping in seconds, -1 for unlimited (default is 2)
* @param boolean $useCache indicates if the ping result should be cached in the instance or not
* @return bool TRUE if Solr can be reached, FALSE if not
*/
public function ping($timeout = 2, $useCache = true)
{
$httpResponse = $this->performPingRequest($timeout, $useCache);
return ($httpResponse->getStatusCode() === 200);
}
/**
* Call the /admin/ping servlet, can be used to get the runtime of a ping request.
*
* @param float|int $timeout maximum time to wait for ping in seconds, -1 for unlimited (default is 2)
* @param boolean $useCache indicates if the ping result should be cached in the instance or not
* @return double runtime in milliseconds
* @throws \ApacheSolrForTypo3\Solr\PingFailedException
*/
public function getPingRoundTripRuntime($timeout = 2, $useCache = true)
{
$start = $this->getMilliseconds();
$httpResponse = $this->performPingRequest($timeout, $useCache);
$end = $this->getMilliseconds();
if ($httpResponse->getStatusCode() !== 200) {
$message = 'Solr ping failed with unexpected response code: ' . $httpResponse->getStatusCode();
/** @var $exception \ApacheSolrForTypo3\Solr\PingFailedException */
$exception = GeneralUtility::makeInstance('ApacheSolrForTypo3\Solr\PingFailedException', $message);
$exception->setHttpResponse($httpResponse);
throw $exception;
}
return $end - $start;
}
/**
* Performs a ping request and returns the result.
*
* @param int $timeout
* @param boolean $useCache indicates if the ping result should be cached in the instance or not
* @return \Apache_Solr_HttpTransport_Response
*/
protected function performPingRequest($timeout = 2, $useCache = true)
{
$cacheKey = (string) ($this);
if ($useCache && isset(static::$pingCache[$cacheKey])) {
return static::$pingCache[$cacheKey];
}
$pingResult = $this->getHttpTransport()->performGetRequest($this->_pingUrl, $timeout);
if ($useCache) {
static::$pingCache[$cacheKey] = $pingResult;
}
return $pingResult;
}
/**
* Performs a content and meta data extraction request.
*
* @param ExtractingQuery $query An extraction query
* @return array An array containing the extracted content [0] and meta data [1]
*/
public function extractByQuery(ExtractingQuery $query)
{
$headers = array(
'Content-Type' => 'multipart/form-data; boundary=' . $query->getMultiPartPostDataBoundary()
);
try {
$response = $this->requestServlet(
self::EXTRACT_SERVLET,
$query->getQueryParameters(),
'POST',
$headers,
$query->getRawPostFileData()
);
} catch (\Exception $e) {
GeneralUtility::devLog('Extracting text and meta data through Solr Cell over HTTP POST',
'solr', 3, array(
'query' => (array)$query,
'parameters' => $query->getQueryParameters(),
'file' => $query->getFile(),
'headers' => $headers,
'query url' => self::EXTRACT_SERVLET,
'exception' => $e->getMessage()
));
}
return array(
$response->extracted,
(array)$response->extracted_metadata
);
}
/**
* Make a request to a servlet (a path) that's not a standard path.
*
* @param string $servlet Path to be added to the base Solr path.
* @param array $parameters Optional, additional request parameters when constructing the URL.
* @param string $method HTTP method to use, defaults to GET.
* @param array $requestHeaders Key value pairs of header names and values. Should include 'Content-Type' for POST and PUT.
* @param string $rawPost Must be an empty string unless method is POST or PUT.
* @param float|bool $timeout Read timeout in seconds, defaults to FALSE.
* @return \Apache_Solr_Response Response object
* @throws \Apache_Solr_HttpTransportException if returned HTTP status is other than 200
*/
public function requestServlet(
$servlet,
$parameters = array(),
$method = 'GET',
$requestHeaders = array(),
$rawPost = '',
$timeout = false
) {
$httpTransport = $this->getHttpTransport();
if ($method == 'GET' || $method == 'HEAD') {
// Make sure we are not sending a request body.
$rawPost = '';
}
// Add default parameters
$parameters['wt'] = self::SOLR_WRITER;
$parameters['json.nl'] = $this->_namedListTreatment;
$url = $this->_constructUrl($servlet, $parameters);
if ($method == self::METHOD_GET) {
$httpResponse = $httpTransport->performGetRequest($url, $timeout);
} elseif ($method == self::METHOD_POST) {
// FIXME should respect all headers, not only Content-Type
$httpResponse = $httpTransport->performPostRequest($url, $rawPost,
$requestHeaders['Content-Type'], $timeout);
}
if (empty($httpResponse)) {
throw new \InvalidArgumentException('$method should be GET or POST');
}
$solrResponse = new \Apache_Solr_Response($httpResponse,
$this->_createDocuments, $this->_collapseSingleValueArrays);
if ($solrResponse->getHttpStatus() != 200) {
throw new \Apache_Solr_HttpTransportException($solrResponse);
}
return $solrResponse;
}
/**
* Return a valid http URL given this server's scheme, host, port, and path
* and a provided servlet name.
*
* @param string $servlet Servlet name
* @param array $params Additional URL parameters to attach to the end of the URL
* @return string Servlet URL
*/
protected function _constructUrl($servlet, $params = array())
{
$url = parent::_constructUrl($servlet, $params);
if (!GeneralUtility::isFirstPartOfStr($url, $this->_scheme)) {
$parsedUrl = parse_url($url);
            // unfortunately we can't use str_replace as it replaces all
// occurrences of $needle and can't be limited to replace only once
$url = $this->_scheme . substr($url, strlen($parsedUrl['scheme']));
}
return $url;
}
/**
* Returns the set scheme
*
* @return string
*/
public function getScheme()
{
return $this->_scheme;
}
/**
* Set the scheme used. If empty will fallback to constants
*
* @param string $scheme Either http or https
* @throws \UnexpectedValueException
*/
public function setScheme($scheme)
{
// Use the provided scheme or use the default
if (empty($scheme)) {
throw new \UnexpectedValueException('Scheme parameter is empty',
1380756390);
} else {
if (in_array($scheme,
array(self::SCHEME_HTTP, self::SCHEME_HTTPS))) {
$this->_scheme = $scheme;
} else {
throw new \UnexpectedValueException('Unsupported scheme parameter, scheme must be http or https',
1380756442);
}
}
if ($this->_urlsInited) {
$this->_initUrls();
}
}
/**
* get field meta data for the index
*
* @param int $numberOfTerms Number of top terms to fetch for each field
* @return array
*/
public function getFieldsMetaData($numberOfTerms = 0)
{
return $this->getLukeMetaData($numberOfTerms)->fields;
}
/**
* Retrieves meta data about the index from the luke request handler
*
* @param int $numberOfTerms Number of top terms to fetch for each field
* @return \Apache_Solr_Response Index meta data
*/
public function getLukeMetaData($numberOfTerms = 0)
{
if (!isset($this->lukeData[$numberOfTerms])) {
$lukeUrl = $this->_constructUrl(
self::LUKE_SERVLET,
array(
'numTerms' => $numberOfTerms,
'wt' => self::SOLR_WRITER,
'fl' => '*'
)
);
$this->lukeData[$numberOfTerms] = $this->_sendRawGet($lukeUrl);
}
return $this->lukeData[$numberOfTerms];
}
/**
* Central method for making a get operation against this Solr Server
*
* @param string $url
* @param float|bool $timeout Read timeout in seconds
* @return \Apache_Solr_Response
*/
protected function _sendRawGet($url, $timeout = false)
{
$logSeverity = 0; // info
try {
$response = parent::_sendRawGet($url, $timeout);
} catch (\Apache_Solr_HttpTransportException $e) {
$logSeverity = 3; // fatal error
$response = $e->getResponse();
}
if ($this->configuration->getLoggingQueryRawGet() || $response->getHttpStatus() != 200) {
$logData = array(
'query url' => $url,
'response' => (array)$response
);
if (!empty($e)) {
$logData['exception'] = $e->__toString();
} else {
// trigger data parsing
$response->response;
$logData['response data'] = print_r($response, true);
}
GeneralUtility::devLog('Querying Solr using GET', 'solr',
$logSeverity, $logData);
}
return $response;
}
/**
* Returns whether a search has been executed or not.
*
* @return bool TRUE if a search has been executed, FALSE otherwise
*/
public function hasSearched()
{
return $this->hasSearched;
}
/**
* Gets the most recent response (if any)
*
* @return \Apache_Solr_Response Most recent response, or NULL if a search has not been executed yet.
*/
public function getResponse()
{
return $this->responseCache;
}
/**
* Enable/Disable debug mode
*
* @param bool $debug TRUE to enable debug mode, FALSE to turn off, off by default
*/
public function setDebug($debug)
{
$this->debug = (boolean)$debug;
}
/**
* Gets information about the plugins installed in Solr
*
* @return array A nested array of plugin data.
*/
public function getPluginsInformation()
{
if (empty($this->pluginsData)) {
$pluginsInformation = $this->_sendRawGet($this->_pluginsUrl);
// access a random property to trigger response parsing
$pluginsInformation->responseHeader;
$this->pluginsData = $pluginsInformation;
}
return $this->pluginsData;
}
/**
* Gets the name of the schema.xml file installed and in use on the Solr
* server.
*
* @return string Name of the active schema.xml
*/
public function getSchemaName()
{
if (is_null($this->schemaName)) {
$systemInformation = $this->getSystemInformation();
$this->schemaName = $systemInformation->core->schema;
}
return $this->schemaName;
}
/**
* Gets information about the Solr server
*
* @return array A nested array of system data.
*/
public function getSystemInformation()
{
if (empty($this->systemData)) {
$systemInformation = $this->system();
// access a random property to trigger response parsing
$systemInformation->responseHeader;
$this->systemData = $systemInformation;
}
return $this->systemData;
}
/**
* Gets the name of the solrconfig.xml file installed and in use on the Solr
* server.
*
* @return string Name of the active solrconfig.xml
*/
public function getSolrconfigName()
{
if (is_null($this->solrconfigName)) {
$solrconfigXmlUrl = $this->_scheme . '://'
. $this->_host . ':' . $this->_port
. $this->_path . 'admin/file/?file=solrconfig.xml';
$solrconfigXml = simplexml_load_file($solrconfigXmlUrl);
if ($solrconfigXml === false) {
throw new \InvalidArgumentException('No valid xml response from schema file: ' . $solrconfigXmlUrl);
}
$this->solrconfigName = (string)$solrconfigXml->attributes()->name;
}
return $this->solrconfigName;
}
/**
* Gets the Solr server's version number.
*
* @return string Solr version number
*/
public function getSolrServerVersion()
{
$systemInformation = $this->getSystemInformation();
// don't know why $systemInformation->lucene->solr-spec-version won't work
$luceneInformation = (array)$systemInformation->lucene;
return $luceneInformation['solr-spec-version'];
}
/**
* Deletes all index documents of a certain type and does a commit
* afterwards.
*
* @param string $type The type of documents to delete, usually a table name.
* @param bool $commit Will commit immediately after deleting the documents if set, defaults to TRUE
*/
public function deleteByType($type, $commit = true)
{
$this->deleteByQuery('type:' . trim($type));
if ($commit) {
$this->commit(false, false, false);
}
}
/**
* Raw Delete Method. Takes a raw post body and sends it to the update service. Body should be
* a complete and well formed "delete" xml document
*
* @param string $rawPost Expected to be utf-8 encoded xml document
* @param float|int $timeout Maximum expected duration of the delete operation on the server (otherwise, will throw a communication exception)
* @return \Apache_Solr_Response
*/
public function delete($rawPost, $timeout = 3600)
{
$response = $this->_sendRawPost($this->_updateUrl, $rawPost, $timeout);
GeneralUtility::devLog(
'Delete Query sent.',
'solr',
1,
array(
'query' => $rawPost,
'query url' => $this->_updateUrl,
'response' => (array)$response
)
);
return $response;
}
/**
* Central method for making a post operation against this Solr Server
*
* @param string $url
* @param string $rawPost
* @param float|bool $timeout Read timeout in seconds
* @param string $contentType
* @return \Apache_Solr_Response
*/
protected function _sendRawPost(
$url,
$rawPost,
$timeout = false,
$contentType = 'text/xml; charset=UTF-8'
) {
$logSeverity = 0; // info
try {
$response = parent::_sendRawPost($url, $rawPost, $timeout,
$contentType);
} catch (\Apache_Solr_HttpTransportException $e) {
$logSeverity = 3; // fatal error
$response = $e->getResponse();
}
if ($this->configuration->getLoggingQueryRawPost() || $response->getHttpStatus() != 200) {
$logData = array(
'query url' => $url,
'content' => $rawPost,
'response' => (array)$response
);
if (!empty($e)) {
$logData['exception'] = $e->__toString();
}
GeneralUtility::devLog('Querying Solr using POST', 'solr',
$logSeverity, $logData);
}
return $response;
}
/**
* Get currently configured synonyms
*
* @param string $baseWord If given a base word, retrieves the synonyms for that word only
* @return array
*/
public function getSynonyms($baseWord = '')
{
$this->initializeSynonymsUrl();
$synonymsUrl = $this->_synonymsUrl;
if (!empty($baseWord)) {
$synonymsUrl .= '/' . $baseWord;
}
$response = $this->_sendRawGet($synonymsUrl);
return $this->synonymParser->parseJson($baseWord, $response->getRawResponse());
}
/**
* Add list of synonyms for base word to managed synonyms map
*
* @param string $baseWord
* @param array $synonyms
*
* @return \Apache_Solr_Response
*
* @throws \Apache_Solr_InvalidArgumentException If $baseWord or $synonyms are empty
*/
public function addSynonym($baseWord, array $synonyms)
{
$this->initializeSynonymsUrl();
$json = $this->synonymParser->toJson($baseWord, $synonyms);
return $this->_sendRawPost($this->_synonymsUrl, $json,
$this->getHttpTransport()->getDefaultTimeout(), 'application/json');
}
/**
* Remove a synonym from the synonyms map
*
* @param string $baseWord
* @return \Apache_Solr_Response
* @throws \Apache_Solr_InvalidArgumentException
*/
public function deleteSynonym($baseWord)
{
$this->initializeSynonymsUrl();
if (empty($baseWord)) {
throw new \Apache_Solr_InvalidArgumentException('Must provide base word.');
}
return $this->_sendRawDelete($this->_synonymsUrl . '/' . $baseWord);
}
/**
* Central method for making a HTTP DELETE operation against the Solr server
*
* @param string $url
* @param bool|float $timeout Read timeout in seconds
* @return \Apache_Solr_Response
*/
protected function _sendRawDelete($url, $timeout = false)
{
$logSeverity = 0; // info
try {
$httpTransport = $this->getHttpTransport();
$httpResponse = $httpTransport->performDeleteRequest($url,
$timeout);
$solrResponse = new \Apache_Solr_Response($httpResponse,
$this->_createDocuments, $this->_collapseSingleValueArrays);
if ($solrResponse->getHttpStatus() != 200) {
throw new \Apache_Solr_HttpTransportException($solrResponse);
}
} catch (\Apache_Solr_HttpTransportException $e) {
$logSeverity = 3; // fatal error
$solrResponse = $e->getResponse();
}
if ($this->configuration->getLoggingQueryRawDelete() || $solrResponse->getHttpStatus() != 200) {
$logData = array(
'query url' => $url,
'response' => (array)$solrResponse
);
if (!empty($e)) {
$logData['exception'] = $e->__toString();
} else {
// trigger data parsing
$solrResponse->response;
$logData['response data'] = print_r($solrResponse, true);
}
GeneralUtility::devLog('Querying Solr using DELETE', 'solr',
$logSeverity, $logData);
}
return $solrResponse;
}
/**
* Get currently configured stop words
*
* @return array
*/
public function getStopWords()
{
$this->initializeStopWordsUrl();
$response = $this->_sendRawGet($this->_stopWordsUrl);
return $this->stopWordParser->parseJson($response->getRawResponse());
}
/**
* Adds stop words to the managed stop word list
*
* @param array|string $stopWords string for a single word, array for multiple words
* @return \Apache_Solr_Response
* @throws \Apache_Solr_InvalidArgumentException If $stopWords is empty
*/
public function addStopWords($stopWords)
{
$this->initializeStopWordsUrl();
$json = $this->stopWordParser->toJson($stopWords);
return $this->_sendRawPost($this->_stopWordsUrl, $json,
$this->getHttpTransport()->getDefaultTimeout(), 'application/json');
}
/**
     * Deletes a word from the managed stop word list
*
* @param string $stopWord stop word to delete
* @return \Apache_Solr_Response
* @throws \Apache_Solr_InvalidArgumentException If $stopWords is empty
*/
public function deleteStopWord($stopWord)
{
$this->initializeStopWordsUrl();
if (empty($stopWord)) {
throw new \Apache_Solr_InvalidArgumentException('Must provide stop word.');
}
return $this->_sendRawDelete($this->_stopWordsUrl . '/' . $stopWord);
}
/**
* Reloads the current core
*
* @return \Apache_Solr_Response
*/
public function reloadCore()
{
$coreName = array_pop(explode('/', trim($this->_path, '/')));
$coreAdminReloadUrl = $this->_coresUrl . '?action=reload&core=' . $coreName;
return $this->_sendRawGet($coreAdminReloadUrl);
}
/**
* initializes various URLs, including the Luke URL
*
* @return void
*/
protected function _initUrls()
{
parent::_initUrls();
$this->_lukeUrl = $this->_constructUrl(
self::LUKE_SERVLET,
array(
'numTerms' => '0',
'wt' => self::SOLR_WRITER
)
);
$this->_pluginsUrl = $this->_constructUrl(
self::PLUGINS_SERVLET,
array('wt' => self::SOLR_WRITER)
);
$pathElements = explode('/', trim($this->_path, '/'));
$this->_coresUrl =
$this->_scheme . '://' .
$this->_host . ':' .
$this->_port . '/' .
$pathElements[0] . '/' .
self::CORES_SERVLET;
$this->_schemaUrl = $this->_constructUrl(self::SCHEMA_SERVLET);
}
/**
* @return void
*/
protected function initializeSynonymsUrl()
{
if (trim($this->_synonymsUrl) !== '') {
return;
}
$this->_synonymsUrl = $this->_constructUrl(self::SYNONYMS_SERVLET) . $this->getManagedLanguage();
}
/**
* @return void
*/
protected function initializeStopWordsUrl()
{
if (trim($this->_stopWordsUrl) !== '') {
return;
}
$this->_stopWordsUrl = $this->_constructUrl(self::STOPWORDS_SERVLET) . $this->getManagedLanguage();
}
/**
* Get the language map name for the text field.
* This is necessary to select the correct synonym map.
*
* @return string
*/
protected function getManagedLanguage()
{
if ($this->managedLanguage !== '') {
return $this->managedLanguage;
}
$schema = $this->getSchema();
$language = 'english';
if (is_object($schema) && isset($schema->fieldTypes)) {
foreach ($schema->fieldTypes as $fieldType) {
if ($fieldType->name === 'text') {
foreach ($fieldType->queryAnalyzer->filters as $filter) {
if ($filter->class === 'solr.ManagedSynonymFilterFactory') {
$language = $filter->managed;
}
}
}
}
}
$this->managedLanguage = $language;
return $language;
}
/**
* Get the configured schema for the current core
*
* @return \stdClass
*/
protected function getSchema()
{
$response = $this->_sendRawGet($this->_schemaUrl);
return json_decode($response->getRawResponse())->schema;
}
}
| 1 | 5,942 | Is this supposed to be part of this PR? | TYPO3-Solr-ext-solr | php |
@@ -17,6 +17,13 @@ const getHasListItem = (hasListItem, tagName, isListItemRole) => {
return hasListItem || (tagName === 'LI' && isListItemRole) || isListItemRole;
};
+const getIsHidden = actualNode => {
+ return (
+ window.getComputedStyle(actualNode, null).getPropertyValue('display') ===
+ 'none'
+ );
+};
+
let base = {
badNodes: [],
isEmpty: true, | 1 | const ALLOWED_TAGS = [
'STYLE',
'META',
'LINK',
'MAP',
'AREA',
'SCRIPT',
'DATALIST',
'TEMPLATE'
];
const getIsListItemRole = (role, tagName) => {
return role === 'listitem' || (tagName === 'LI' && !role);
};
const getHasListItem = (hasListItem, tagName, isListItemRole) => {
return hasListItem || (tagName === 'LI' && isListItemRole) || isListItemRole;
};
let base = {
badNodes: [],
isEmpty: true,
hasNonEmptyTextNode: false,
hasListItem: false,
liItemsWithRole: 0
};
let out = virtualNode.children.reduce((out, { actualNode }) => {
/*eslint
max-statements: ["error", 20]
complexity: ["error", 11]
*/
const tagName = actualNode.nodeName.toUpperCase();
if (actualNode.nodeType === 1) {
if (!ALLOWED_TAGS.includes(tagName)) {
const role = (actualNode.getAttribute('role') || '').toLowerCase();
const isListItemRole = getIsListItemRole(role, tagName);
out.hasListItem = getHasListItem(
out.hasListItem,
tagName,
isListItemRole
);
if (isListItemRole) {
out.isEmpty = false;
}
if (tagName === 'LI' && !isListItemRole) {
out.liItemsWithRole++;
}
if (tagName !== 'LI' && !isListItemRole) {
out.badNodes.push(actualNode);
}
}
}
if (actualNode.nodeType === 3) {
if (actualNode.nodeValue.trim() !== '') {
out.hasNonEmptyTextNode = true;
}
}
return out;
}, base);
const virtualNodeChildrenOfTypeLi = virtualNode.children.filter(
({ actualNode }) => {
return actualNode.nodeName.toUpperCase() === 'LI';
}
);
const allLiItemsHaveRole =
out.liItemsWithRole > 0 &&
virtualNodeChildrenOfTypeLi.length === out.liItemsWithRole;
if (out.badNodes.length) {
this.relatedNodes(out.badNodes);
}
const isInvalidListItem = !(
out.hasListItem ||
(out.isEmpty && !allLiItemsHaveRole)
);
return isInvalidListItem || !!out.badNodes.length || out.hasNonEmptyTextNode;
| 1 | 13,008 | DRY, worth extracting this method to axe.utils | dequelabs-axe-core | js |
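On the DRY suggestion in this review: the display:none check added by the patch could live in a shared helper instead of being defined inside the check file. The sketch below is hypothetical and not axe-core source; the helper name isHiddenByDisplay is invented here purely for illustration, attached to the existing axe.utils namespace the reviewer refers to.

// Hypothetical sketch, not axe-core source: a shared helper on axe.utils that
// the check could call instead of defining its own local getIsHidden.
axe.utils.isHiddenByDisplay = function isHiddenByDisplay(actualNode) {
	// Same logic as the patch: treat computed display:none as hidden.
	return (
		window.getComputedStyle(actualNode, null).getPropertyValue('display') ===
		'none'
	);
};
// Usage inside the check would then become:
// const getIsHidden = axe.utils.isHiddenByDisplay;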
@@ -35,6 +35,7 @@ public interface CapabilityType {
String PROXY = "proxy";
String SUPPORTS_WEB_STORAGE = "webStorageEnabled";
String ROTATABLE = "rotatable";
+ String APPLICATION_NAME = "applicationName";
// Enable this capability to accept all SSL certs by defaults.
String ACCEPT_SSL_CERTS = "acceptSslCerts";
String HAS_NATIVE_EVENTS = "nativeEvents"; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote;
/**
* Commonly seen remote webdriver capabilities.
*/
public interface CapabilityType {
String BROWSER_NAME = "browserName";
String PLATFORM = "platform";
String SUPPORTS_JAVASCRIPT = "javascriptEnabled";
String TAKES_SCREENSHOT = "takesScreenshot";
String VERSION = "version";
String SUPPORTS_ALERTS = "handlesAlerts";
String SUPPORTS_SQL_DATABASE = "databaseEnabled";
String SUPPORTS_LOCATION_CONTEXT = "locationContextEnabled";
String SUPPORTS_APPLICATION_CACHE = "applicationCacheEnabled";
String SUPPORTS_NETWORK_CONNECTION = "networkConnectionEnabled";
String SUPPORTS_FINDING_BY_CSS = "cssSelectorsEnabled";
String PROXY = "proxy";
String SUPPORTS_WEB_STORAGE = "webStorageEnabled";
String ROTATABLE = "rotatable";
// Enable this capability to accept all SSL certs by defaults.
String ACCEPT_SSL_CERTS = "acceptSslCerts";
String HAS_NATIVE_EVENTS = "nativeEvents";
String UNEXPECTED_ALERT_BEHAVIOUR = "unexpectedAlertBehaviour";
String ELEMENT_SCROLL_BEHAVIOR = "elementScrollBehavior";
String HAS_TOUCHSCREEN = "hasTouchScreen";
String OVERLAPPING_CHECK_DISABLED = "overlappingCheckDisabled";
String LOGGING_PREFS = "loggingPrefs";
String ENABLE_PROFILING_CAPABILITY = "webdriver.logging.profiler.enabled";
/**
* @deprecated Use PAGE_LOAD_STRATEGY instead
*/
@Deprecated
String PAGE_LOADING_STRATEGY = "pageLoadingStrategy";
String PAGE_LOAD_STRATEGY = "pageLoadStrategy";
/**
* Moved InternetExplorer specific CapabilityTypes into InternetExplorerDriver.java for consistency
*/
@Deprecated
String ENABLE_PERSISTENT_HOVERING = "enablePersistentHover";
interface ForSeleniumServer {
String AVOIDING_PROXY = "avoidProxy";
String ONLY_PROXYING_SELENIUM_TRAFFIC = "onlyProxySeleniumTraffic";
String PROXYING_EVERYTHING = "proxyEverything";
String PROXY_PAC = "proxy_pac";
String ENSURING_CLEAN_SESSION = "ensureCleanSession";
}
}
| 1 | 13,114 | I think there's another spot for this in DefaultCapabilityMatcher | SeleniumHQ-selenium | java |
@@ -142,9 +142,14 @@ class KeyConfigParser(QObject):
def save(self):
"""Save the key config file."""
log.destroy.debug("Saving key config to {}".format(self._configfile))
- with qtutils.savefile_open(self._configfile, encoding='utf-8') as f:
- data = str(self)
- f.write(data)
+
+ try:
+ with qtutils.savefile_open(self._configfile,
+ encoding='utf-8') as f:
+ data = str(self)
+ f.write(data)
+ except OSError as e:
+ message.error("Could not save key config: {}".format(e))
@cmdutils.register(instance='key-config', maxsplit=1, no_cmd_split=True,
no_replace_variables=True) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Parser for the key configuration."""
import collections
import os.path
import itertools
from PyQt5.QtCore import pyqtSignal, QObject
from qutebrowser.config import configdata, textwrapper
from qutebrowser.commands import cmdutils, cmdexc
from qutebrowser.utils import log, utils, qtutils, message, usertypes
class KeyConfigError(Exception):
"""Raised on errors with the key config.
Attributes:
lineno: The config line in which the exception occurred.
"""
def __init__(self, msg=None):
super().__init__(msg)
self.lineno = None
class DuplicateKeychainError(KeyConfigError):
"""Error raised when there's a duplicate key binding."""
def __init__(self, keychain):
super().__init__("Duplicate key chain {}!".format(keychain))
self.keychain = keychain
class KeyConfigParser(QObject):
"""Parser for the keybind config.
Attributes:
_configfile: The filename of the config or None.
_cur_section: The section currently being processed by _read().
_cur_command: The command currently being processed by _read().
is_dirty: Whether the config is currently dirty.
Class attributes:
UNBOUND_COMMAND: The special command used for unbound keybindings.
Signals:
changed: Emitted when the internal data has changed.
arg: Name of the mode which was changed.
config_dirty: Emitted when the config should be re-saved.
"""
changed = pyqtSignal(str)
config_dirty = pyqtSignal()
UNBOUND_COMMAND = '<unbound>'
def __init__(self, configdir, fname, relaxed=False, parent=None):
"""Constructor.
Args:
configdir: The directory to save the configs in.
fname: The filename of the config.
relaxed: If given, unknown commands are ignored.
"""
super().__init__(parent)
self.is_dirty = False
self._cur_section = None
self._cur_command = None
# Mapping of section name(s) to key binding -> command dicts.
self.keybindings = collections.OrderedDict()
self._configfile = os.path.join(configdir, fname)
if not os.path.exists(self._configfile):
self._load_default()
else:
self._read(relaxed)
self._load_default(only_new=True)
log.init.debug("Loaded bindings: {}".format(self.keybindings))
def __str__(self):
"""Get the config as string."""
lines = configdata.KEY_FIRST_COMMENT.strip('\n').splitlines()
lines.append('')
for sectname, sect in self.keybindings.items():
lines.append('[{}]'.format(sectname))
lines += self._str_section_desc(sectname)
lines.append('')
data = collections.OrderedDict()
for key, cmd in sect.items():
if cmd in data:
data[cmd].append(key)
else:
data[cmd] = [key]
for cmd, keys in data.items():
lines.append(cmd)
for k in keys:
lines.append(' ' * 4 + k)
lines.append('')
return '\n'.join(lines) + '\n'
def __repr__(self):
return utils.get_repr(self, constructor=True,
configfile=self._configfile)
def _str_section_desc(self, sectname):
"""Get the section description string for sectname."""
wrapper = textwrapper.TextWrapper()
lines = []
try:
seclines = configdata.KEY_SECTION_DESC[sectname].splitlines()
except KeyError:
return []
else:
for secline in seclines:
if 'http://' in secline or 'https://' in secline:
lines.append('# ' + secline)
else:
lines += wrapper.wrap(secline)
return lines
def save(self):
"""Save the key config file."""
log.destroy.debug("Saving key config to {}".format(self._configfile))
with qtutils.savefile_open(self._configfile, encoding='utf-8') as f:
data = str(self)
f.write(data)
@cmdutils.register(instance='key-config', maxsplit=1, no_cmd_split=True,
no_replace_variables=True)
@cmdutils.argument('command', completion=usertypes.Completion.bind)
def bind(self, key, command=None, *, mode='normal', force=False):
"""Bind a key to a command.
Args:
key: The keychain or special key (inside `<...>`) to bind.
command: The command to execute, with optional args, or None to
print the current binding.
mode: A comma-separated list of modes to bind the key in
(default: `normal`).
force: Rebind the key if it is already bound.
"""
if utils.is_special_key(key):
# <Ctrl-t>, <ctrl-T>, and <ctrl-t> should be considered equivalent
key = key.lower()
if command is None:
cmd = self.get_bindings_for(mode).get(key, None)
if cmd is None:
message.info("{} is unbound in {} mode".format(key, mode))
else:
message.info("{} is bound to '{}' in {} mode".format(key, cmd,
mode))
return
modenames = self._normalize_sectname(mode).split(',')
for m in modenames:
if m not in configdata.KEY_DATA:
raise cmdexc.CommandError("Invalid mode {}!".format(m))
try:
modes = [usertypes.KeyMode[m] for m in modenames]
self._validate_command(command, modes)
except KeyConfigError as e:
raise cmdexc.CommandError(str(e))
try:
self._add_binding(mode, key, command, force=force)
except DuplicateKeychainError as e:
raise cmdexc.CommandError("Duplicate keychain {} - use --force to "
"override!".format(str(e.keychain)))
except KeyConfigError as e:
raise cmdexc.CommandError(e)
for m in modenames:
self.changed.emit(m)
self._mark_config_dirty()
@cmdutils.register(instance='key-config')
def unbind(self, key, mode='normal'):
"""Unbind a keychain.
Args:
key: The keychain or special key (inside <...>) to unbind.
mode: A comma-separated list of modes to unbind the key in
(default: `normal`).
"""
if utils.is_special_key(key):
# <Ctrl-t>, <ctrl-T>, and <ctrl-t> should be considered equivalent
key = key.lower()
mode = self._normalize_sectname(mode)
for m in mode.split(','):
if m not in configdata.KEY_DATA:
raise cmdexc.CommandError("Invalid mode {}!".format(m))
try:
sect = self.keybindings[mode]
except KeyError:
raise cmdexc.CommandError("Can't find mode section '{}'!".format(
mode))
try:
del sect[key]
except KeyError:
raise cmdexc.CommandError("Can't find binding '{}' in section "
"'{}'!".format(key, mode))
else:
if key in itertools.chain.from_iterable(
configdata.KEY_DATA[mode].values()):
try:
self._add_binding(mode, key, self.UNBOUND_COMMAND)
except DuplicateKeychainError:
pass
for m in mode.split(','):
self.changed.emit(m)
self._mark_config_dirty()
def _normalize_sectname(self, s):
"""Normalize a section string like 'foo, bar,baz' to 'bar,baz,foo'."""
if s.startswith('!'):
inverted = True
s = s[1:]
else:
inverted = False
sections = ','.join(sorted(s.split(',')))
if inverted:
sections = '!' + sections
return sections
def _load_default(self, *, only_new=False):
"""Load the built-in default key bindings.
Args:
only_new: If set, only keybindings which are completely unused
(same command/key not bound) are added.
"""
# {'sectname': {'keychain1': 'command', 'keychain2': 'command'}, ...}
bindings_to_add = collections.OrderedDict()
for sectname, sect in configdata.KEY_DATA.items():
sectname = self._normalize_sectname(sectname)
bindings_to_add[sectname] = collections.OrderedDict()
for command, keychains in sect.items():
for e in keychains:
if not only_new or self._is_new(sectname, command, e):
assert e not in bindings_to_add[sectname]
bindings_to_add[sectname][e] = command
for sectname, sect in bindings_to_add.items():
if not sect:
if not only_new:
self.keybindings[sectname] = collections.OrderedDict()
else:
for keychain, command in sect.items():
self._add_binding(sectname, keychain, command)
self.changed.emit(sectname)
if bindings_to_add:
self._mark_config_dirty()
def _is_new(self, sectname, command, keychain):
"""Check if a given binding is new.
        A binding is considered new if the command is not bound to any key
        yet and the key isn't used anywhere else in the same section.
"""
if utils.is_special_key(keychain):
keychain = keychain.lower()
try:
bindings = self.keybindings[sectname]
except KeyError:
return True
if keychain in bindings:
return False
else:
return command not in bindings.values()
def _read(self, relaxed=False):
"""Read the config file from disk and parse it.
Args:
relaxed: Ignore unknown commands.
"""
try:
with open(self._configfile, 'r', encoding='utf-8') as f:
for i, line in enumerate(f):
line = line.rstrip()
try:
if not line.strip() or line.startswith('#'):
continue
elif line.startswith('[') and line.endswith(']'):
sectname = line[1:-1]
self._cur_section = self._normalize_sectname(
sectname)
elif line.startswith((' ', '\t')):
line = line.strip()
self._read_keybinding(line)
else:
line = line.strip()
self._read_command(line)
except KeyConfigError as e:
if relaxed:
continue
else:
e.lineno = i
raise
except OSError:
log.keyboard.exception("Failed to read key bindings!")
for sectname in self.keybindings:
self.changed.emit(sectname)
def _mark_config_dirty(self):
"""Mark the config as dirty."""
self.is_dirty = True
self.config_dirty.emit()
def _validate_command(self, line, modes=None):
"""Check if a given command is valid.
Args:
line: The commandline to validate.
modes: A list of modes to validate the commands for, or None.
"""
from qutebrowser.config import config
if line == self.UNBOUND_COMMAND:
return
commands = line.split(';;')
try:
first_cmd = commands[0].split(maxsplit=1)[0].strip()
cmd = cmdutils.cmd_dict[first_cmd]
if cmd.no_cmd_split:
commands = [line]
except (KeyError, IndexError):
pass
for cmd in commands:
if not cmd.strip():
raise KeyConfigError("Got empty command (line: {!r})!".format(
line))
commands = [c.split(maxsplit=1)[0].strip() for c in commands]
for cmd in commands:
aliases = config.section('aliases')
if cmd in cmdutils.cmd_dict:
cmdname = cmd
elif cmd in aliases:
cmdname = aliases[cmd].split(maxsplit=1)[0].strip()
else:
raise KeyConfigError("Invalid command '{}'!".format(cmd))
cmd_obj = cmdutils.cmd_dict[cmdname]
for m in modes or []:
cmd_obj.validate_mode(m)
def _read_command(self, line):
"""Read a command from a line."""
if self._cur_section is None:
raise KeyConfigError("Got command '{}' without getting a "
"section!".format(line))
else:
for rgx, repl in configdata.CHANGED_KEY_COMMANDS:
if rgx.match(line):
line = rgx.sub(repl, line)
self._mark_config_dirty()
break
self._validate_command(line)
self._cur_command = line
def _read_keybinding(self, line):
"""Read a key binding from a line."""
if self._cur_command is None:
raise KeyConfigError("Got key binding '{}' without getting a "
"command!".format(line))
else:
assert self._cur_section is not None
self._add_binding(self._cur_section, line, self._cur_command)
def _add_binding(self, sectname, keychain, command, *, force=False):
"""Add a new binding from keychain to command in section sectname."""
if utils.is_special_key(keychain):
# <Ctrl-t>, <ctrl-T>, and <ctrl-t> should be considered equivalent
keychain = keychain.lower()
log.keyboard.vdebug("Adding binding {} -> {} in mode {}.".format(
keychain, command, sectname))
if sectname not in self.keybindings:
self.keybindings[sectname] = collections.OrderedDict()
if keychain in self.get_bindings_for(sectname):
if force or command == self.UNBOUND_COMMAND:
self.unbind(keychain, mode=sectname)
else:
raise DuplicateKeychainError(keychain)
section = self.keybindings[sectname]
if (command != self.UNBOUND_COMMAND and
section.get(keychain, None) == self.UNBOUND_COMMAND):
# re-binding an unbound keybinding
del section[keychain]
self.keybindings[sectname][keychain] = command
def get_bindings_for(self, section):
"""Get a dict with all merged key bindings for a section."""
bindings = {}
for sectstring, d in self.keybindings.items():
if sectstring.startswith('!'):
inverted = True
sectstring = sectstring[1:]
else:
inverted = False
sects = [s.strip() for s in sectstring.split(',')]
matches = any(s == section for s in sects)
if (not inverted and matches) or (inverted and not matches):
bindings.update(d)
try:
bindings.update(self.keybindings['all'])
except KeyError:
pass
bindings = {k: v for k, v in bindings.items()
if v != self.UNBOUND_COMMAND}
return bindings
def get_reverse_bindings_for(self, section):
"""Get a dict of commands to a list of bindings for the section."""
cmd_to_keys = {}
for key, full_cmd in self.get_bindings_for(section).items():
for cmd in full_cmd.split(';;'):
cmd = cmd.strip()
cmd_to_keys.setdefault(cmd, [])
# put special bindings last
if utils.is_special_key(key):
cmd_to_keys[cmd].append(key)
else:
cmd_to_keys[cmd].insert(0, key)
return cmd_to_keys
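
One note on the try/except added in the patch above: qtutils.savefile_open fails with qutebrowser's QtOSError which, as the accompanying review comment points out, subclasses OSError, so a single `except OSError` clause already covers it. A minimal standalone sketch of that behaviour (hypothetical names, not qutebrowser code):

# Minimal sketch (hypothetical names, not qutebrowser code): an except clause for a
# base class also catches its subclasses, so listing both OSError and a subclass
# such as QtOSError would be redundant.
class FakeQtOSError(OSError):
    """Stand-in for an OSError subclass such as qutebrowser's QtOSError."""

def fake_savefile_open():
    raise FakeQtOSError("could not open save file")

try:
    fake_savefile_open()
except OSError as e:  # also catches FakeQtOSError, no separate clause needed
    print("Could not save key config: {}".format(e))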
| 1 | 17,949 | `QtOSError` inherits `OSError`, so I don't think it's needed to list them both here. | qutebrowser-qutebrowser | py |
@@ -228,7 +228,6 @@ func (t *Transport) startSubscriber(ctx context.Context, sub subscriptionWithTop
}
// Ok, ready to start pulling.
err := conn.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
- logger.Info("got an event!")
msg := &Message{
Attributes: m.Attributes,
Data: m.Data, | 1 | package pubsub
import (
"context"
"errors"
"fmt"
"go.uber.org/zap"
"strings"
"sync"
"cloud.google.com/go/pubsub"
"github.com/cloudevents/sdk-go/pkg/cloudevents"
cecontext "github.com/cloudevents/sdk-go/pkg/cloudevents/context"
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport"
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport/pubsub/internal"
)
// Transport adheres to transport.Transport.
var _ transport.Transport = (*Transport)(nil)
const (
TransportName = "Pub/Sub"
)
type subscriptionWithTopic struct {
topicID string
subscriptionID string
}
// Transport acts as both a pubsub topic and a pubsub subscription.
type Transport struct {
// Encoding
Encoding Encoding
// DefaultEncodingSelectionFn allows for other encoding selection strategies to be injected.
DefaultEncodingSelectionFn EncodingSelector
codec transport.Codec
// Codec Mutex
coMu sync.Mutex
// PubSub
// AllowCreateTopic controls if the transport can create a topic if it does
// not exist.
AllowCreateTopic bool
// AllowCreateSubscription controls if the transport can create a
// subscription if it does not exist.
AllowCreateSubscription bool
projectID string
topicID string
subscriptionID string
gccMux sync.Mutex
subscriptions []subscriptionWithTopic
client *pubsub.Client
connectionsBySubscription map[string]*internal.Connection
connectionsByTopic map[string]*internal.Connection
// Receiver
Receiver transport.Receiver
// Converter is invoked if the incoming transport receives an undecodable
// message.
Converter transport.Converter
}
// New creates a new pubsub transport.
func New(ctx context.Context, opts ...Option) (*Transport, error) {
t := &Transport{}
if err := t.applyOptions(opts...); err != nil {
return nil, err
}
if t.client == nil {
// Auth to pubsub.
client, err := pubsub.NewClient(ctx, t.projectID)
if err != nil {
return nil, err
}
// Success.
t.client = client
}
if t.connectionsBySubscription == nil {
t.connectionsBySubscription = make(map[string]*internal.Connection, 0)
}
if t.connectionsByTopic == nil {
t.connectionsByTopic = make(map[string]*internal.Connection, 0)
}
return t, nil
}
func (t *Transport) applyOptions(opts ...Option) error {
for _, fn := range opts {
if err := fn(t); err != nil {
return err
}
}
return nil
}
func (t *Transport) loadCodec(ctx context.Context) bool {
if t.codec == nil {
t.coMu.Lock()
if t.DefaultEncodingSelectionFn != nil && t.Encoding != Default {
logger := cecontext.LoggerFrom(ctx)
logger.Warn("transport has a DefaultEncodingSelectionFn set but Encoding is not Default. DefaultEncodingSelectionFn will be ignored.")
t.codec = &Codec{
Encoding: t.Encoding,
}
} else {
t.codec = &Codec{
Encoding: t.Encoding,
DefaultEncodingSelectionFn: t.DefaultEncodingSelectionFn,
}
}
t.coMu.Unlock()
}
return true
}
func (t *Transport) getConnection(ctx context.Context, topic, subscription string) *internal.Connection {
if subscription != "" {
if conn, ok := t.connectionsBySubscription[subscription]; ok {
return conn
}
}
if topic != "" {
if conn, ok := t.connectionsByTopic[topic]; ok {
return conn
}
}
return nil
}
func (t *Transport) getOrCreateConnection(ctx context.Context, topic, subscription string) *internal.Connection {
t.gccMux.Lock()
defer t.gccMux.Unlock()
// Get.
if conn := t.getConnection(ctx, topic, subscription); conn != nil {
return conn
}
// Create.
conn := &internal.Connection{
AllowCreateSubscription: t.AllowCreateSubscription,
AllowCreateTopic: t.AllowCreateTopic,
Client: t.client,
ProjectID: t.projectID,
TopicID: topic,
SubscriptionID: subscription,
}
// Save for later.
if subscription != "" {
t.connectionsBySubscription[subscription] = conn
}
if topic != "" {
t.connectionsByTopic[topic] = conn
}
return conn
}
// Send implements Transport.Send
func (t *Transport) Send(ctx context.Context, event cloudevents.Event) (context.Context, *cloudevents.Event, error) {
// TODO populate response context properly.
if ok := t.loadCodec(ctx); !ok {
return ctx, nil, fmt.Errorf("unknown encoding set on transport: %d", t.Encoding)
}
topic := cecontext.TopicFrom(ctx)
if topic == "" {
topic = t.topicID
}
conn := t.getOrCreateConnection(ctx, topic, "")
msg, err := t.codec.Encode(ctx, event)
if err != nil {
return ctx, nil, err
}
if m, ok := msg.(*Message); ok {
respEvent, err := conn.Publish(ctx, &pubsub.Message{
Attributes: m.Attributes,
Data: m.Data,
})
return ctx, respEvent, err
}
return ctx, nil, fmt.Errorf("failed to encode Event into a Message")
}
// SetReceiver implements Transport.SetReceiver
func (t *Transport) SetReceiver(r transport.Receiver) {
t.Receiver = r
}
// SetConverter implements Transport.SetConverter
func (t *Transport) SetConverter(c transport.Converter) {
t.Converter = c
}
// HasConverter implements Transport.HasConverter
func (t *Transport) HasConverter() bool {
return t.Converter != nil
}
func (t *Transport) startSubscriber(ctx context.Context, sub subscriptionWithTopic, done func(error)) {
logger := cecontext.LoggerFrom(ctx)
logger.Infof("starting subscriber for Topic %q, Subscription %q", sub.topicID, sub.subscriptionID)
conn := t.getOrCreateConnection(ctx, sub.topicID, sub.subscriptionID)
logger.Info("conn is", conn)
if conn == nil {
err := fmt.Errorf("failed to find connection for Topic: %q, Subscription: %q", sub.topicID, sub.subscriptionID)
done(err)
return
}
// Ok, ready to start pulling.
err := conn.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
logger.Info("got an event!")
msg := &Message{
Attributes: m.Attributes,
Data: m.Data,
}
event, err := t.codec.Decode(ctx, msg)
// If codec returns and error, try with the converter if it is set.
if err != nil && t.HasConverter() {
event, err = t.Converter.Convert(ctx, msg, err)
}
if err != nil {
logger.Errorw("failed to decode message", zap.Error(err))
m.Nack()
return
}
if err := t.Receiver.Receive(ctx, *event, nil); err != nil {
logger.Warnw("pubsub receiver return err", zap.Error(err))
m.Nack()
return
}
m.Ack()
})
done(err)
}
// StartReceiver implements Transport.StartReceiver
// NOTE: This is a blocking call.
func (t *Transport) StartReceiver(ctx context.Context) error {
// Load the codec.
if ok := t.loadCodec(ctx); !ok {
return fmt.Errorf("unknown encoding set on transport: %d", t.Encoding)
}
cctx, cancel := context.WithCancel(ctx)
defer cancel()
n := len(t.subscriptions)
// Make the channels for quit and errors.
quit := make(chan struct{}, n)
errc := make(chan error, n)
// Start up each subscription.
for _, sub := range t.subscriptions {
go t.startSubscriber(cctx, sub, func(err error) {
if err != nil {
errc <- err
} else {
quit <- struct{}{}
}
})
}
// Collect errors and done calls until we have n of them.
errs := []string(nil)
for success := 0; success < n; success++ {
var err error
select {
case <-ctx.Done(): // Block for parent context to finish.
success--
case err = <-errc: // Collect errors
case <-quit:
}
if cancel != nil {
// Stop all other subscriptions.
cancel()
cancel = nil
}
if err != nil {
errs = append(errs, err.Error())
}
}
close(quit)
close(errc)
return errors.New(strings.Join(errs, "\n"))
}
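
The review note for this diff suggests bumping the pinned cloudevents/sdk-go version rather than hand-editing the vendored file above. Purely as an illustration, and assuming the repository tracks the dependency through Go modules (the version shown is made up for the example), the bump would be a one-line change in go.mod:

module github.com/google/knative-gcp

go 1.12

require (
	// Illustrative only: pick whichever sdk-go tag no longer carries the noisy log line.
	github.com/cloudevents/sdk-go v0.10.2
)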
| 1 | 9,452 | Instead of patching vendor, let's update the version of sdk-go to a more recent one that doesn't have this line. | google-knative-gcp | go |
@@ -185,6 +185,7 @@ func DefaultConfiguration() *Configuration {
config.Please.DownloadLocation = "https://get.please.build"
config.Please.NumOldVersions = 10
config.Parse.BuiltinPleasings = true
+ config.Parse.BuildFileName = []string{"BUILD"}
config.Build.Arch = cli.NewArch(runtime.GOOS, runtime.GOARCH)
config.Build.Lang = "en_GB.UTF-8" // Not the language of the UI, the language passed to rules.
config.Build.Nonce = "1402" // Arbitrary nonce to invalidate config when needed. | 1 | // Utilities for reading the Please config files.
package core
import (
"crypto/sha1"
"encoding/gob"
"fmt"
"io"
"os"
"path"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/google/shlex"
"github.com/jessevdk/go-flags"
"gopkg.in/gcfg.v1"
"cli"
)
// OsArch is the os/arch pair, like linux_amd64 etc.
const OsArch = runtime.GOOS + "_" + runtime.GOARCH
// ConfigFileName is the file name for the typical repo config - this is normally checked in
const ConfigFileName string = ".plzconfig"
// ArchConfigFileName is the architecture-specific config file which overrides the repo one.
// Also normally checked in if needed.
const ArchConfigFileName string = ".plzconfig_" + OsArch
// LocalConfigFileName is the file name for the local repo config - this is not normally checked
// in and used to override settings on the local machine.
const LocalConfigFileName string = ".plzconfig.local"
// MachineConfigFileName is the file name for the machine-level config - can use this to override
// things for a particular machine (eg. build machine with different caching behaviour).
const MachineConfigFileName = "/etc/plzconfig"
// UserConfigFileName is the file name for user-specific config (for all their repos).
const UserConfigFileName = "~/.please/plzconfig"
// The available container implementations that we support.
const (
ContainerImplementationNone = "none"
ContainerImplementationDocker = "docker"
)
// GithubDownloadLocation is plz's Github repo, which will become the default download location in future.
const GithubDownloadLocation = "https://github.com/thought-machine/please"
// GithubAPILocation is as above, but for the API endpoints.
const GithubAPILocation = "https://api.github.com/repos/thought-machine/please"
func readConfigFile(config *Configuration, filename string) error {
log.Debug("Reading config from %s...", filename)
if err := gcfg.ReadFileInto(config, filename); err != nil && os.IsNotExist(err) {
return nil // It's not an error to not have the file at all.
} else if gcfg.FatalOnly(err) != nil {
return err
} else if err != nil {
log.Warning("Error in config file: %s", err)
}
return nil
}
// ReadDefaultConfigFiles reads all the config files from the default locations and
// merges them into a config object.
// The repo root must have already have been set before calling this.
func ReadDefaultConfigFiles(profile string) (*Configuration, error) {
return ReadConfigFiles([]string{
MachineConfigFileName,
ExpandHomePath(UserConfigFileName),
path.Join(RepoRoot, ConfigFileName),
path.Join(RepoRoot, ArchConfigFileName),
path.Join(RepoRoot, LocalConfigFileName),
}, profile)
}
// ReadConfigFiles reads all the config locations, in order, and merges them into a config object.
// Values are filled in by defaults initially and then overridden by each file in turn.
func ReadConfigFiles(filenames []string, profile string) (*Configuration, error) {
config := DefaultConfiguration()
for _, filename := range filenames {
if err := readConfigFile(config, filename); err != nil {
return config, err
}
if profile != "" {
if err := readConfigFile(config, filename+"."+profile); err != nil {
return config, err
}
}
}
	// Set default values for slices. These add rather than overwrite, so we can't set
// them upfront as we would with other config values.
if usingBazelWorkspace {
setDefault(&config.Parse.BuildFileName, []string{"BUILD.bazel", "BUILD"})
} else {
setDefault(&config.Parse.BuildFileName, []string{"BUILD"})
}
setBuildPath(&config.Build.Path, config.Build.PassEnv)
setDefault(&config.Build.PassEnv, []string{})
setDefault(&config.Cover.FileExtension, []string{".go", ".py", ".java", ".js", ".cc", ".h", ".c"})
setDefault(&config.Cover.ExcludeExtension, []string{".pb.go", "_pb2.py", ".pb.cc", ".pb.h", "_test.py", "_test.go", "_pb.go", "_bindata.go", "_test_main.cc"})
setDefault(&config.Proto.Language, []string{"cc", "py", "java", "go", "js"})
// Default values for these guys depend on config.Please.Location.
defaultPath(&config.Go.BuildIDTool, config.Please.Location, "go_buildid_replacer")
defaultPath(&config.Go.TestTool, config.Please.Location, "please_go_test")
defaultPath(&config.Go.FilterTool, config.Please.Location, "please_go_filter")
defaultPath(&config.Python.PexTool, config.Please.Location, "please_pex")
defaultPath(&config.Java.JavacWorker, config.Please.Location, "javac_worker")
defaultPath(&config.Java.JarCatTool, config.Please.Location, "jarcat")
defaultPath(&config.Java.PleaseMavenTool, config.Please.Location, "please_maven")
defaultPath(&config.Java.JUnitRunner, config.Please.Location, "junit_runner.jar")
// Default values for these guys depend on config.Java.JavaHome if that's been set.
if config.Java.JavaHome != "" {
defaultPathIfExists(&config.Java.JlinkTool, config.Java.JavaHome, "bin/jlink")
}
if (config.Cache.RPCPrivateKey == "") != (config.Cache.RPCPublicKey == "") {
return config, fmt.Errorf("Must pass both rpcprivatekey and rpcpublickey properties for cache")
}
if len(config.Aliases) > 0 {
log.Warning("The [aliases] section of .plzconfig is deprecated in favour of [alias]. See https://please.build/config.html for more information.")
}
// We can only verify options by reflection (we need struct tags) so run them quickly through this.
return config, config.ApplyOverrides(map[string]string{
"test.defaultcontainer": config.Test.DefaultContainer,
"python.testrunner": config.Python.TestRunner,
})
}
// setDefault sets a slice of strings in the config if the existing one is empty.
func setDefault(conf *[]string, def []string) {
if len(*conf) == 0 {
*conf = def
}
}
// setBuildPath checks if "PATH" is in passEnv; if it is, config.Build.Path is set from the PATH environment variable.
func setBuildPath(conf *[]string, passEnv []string) {
pathVal := []string{"/usr/local/bin", "/usr/bin", "/bin"}
for _, i := range passEnv {
if i == "PATH" {
pathVal = strings.Split(os.Getenv("PATH"), ":")
}
}
setDefault(conf, pathVal)
}
// defaultPath sets a variable to a location in a directory if it's not already set.
func defaultPath(conf *string, dir, file string) {
if *conf == "" {
*conf = path.Join(dir, file)
}
}
// defaultPathIfExists sets a variable to a location in a directory if it's not already set and if the location exists.
func defaultPathIfExists(conf *string, dir, file string) {
if *conf == "" {
location := path.Join(dir, file)
// check that the location is valid
if _, err := os.Stat(location); err == nil {
*conf = location
}
}
}
// DefaultConfiguration returns the default configuration object with no overrides.
func DefaultConfiguration() *Configuration {
config := Configuration{buildEnvStored: &storedBuildEnv{}}
config.Please.Location = "~/.please"
config.Please.SelfUpdate = true
config.Please.Autoclean = true
config.Please.DownloadLocation = "https://get.please.build"
config.Please.NumOldVersions = 10
config.Parse.BuiltinPleasings = true
config.Build.Arch = cli.NewArch(runtime.GOOS, runtime.GOARCH)
config.Build.Lang = "en_GB.UTF-8" // Not the language of the UI, the language passed to rules.
config.Build.Nonce = "1402" // Arbitrary nonce to invalidate config when needed.
config.Build.Timeout = cli.Duration(10 * time.Minute)
config.Build.Config = "opt" // Optimised builds by default
config.Build.FallbackConfig = "opt" // Optimised builds as a fallback on any target that doesn't have a matching one set
config.Build.PleaseSandboxTool = "please_sandbox"
config.BuildConfig = map[string]string{}
config.BuildEnv = map[string]string{}
config.Aliases = map[string]string{}
config.Cache.HTTPTimeout = cli.Duration(5 * time.Second)
config.Cache.RPCTimeout = cli.Duration(5 * time.Second)
config.Cache.Dir = ".plz-cache"
config.Cache.DirCacheHighWaterMark = 10 * cli.GiByte
config.Cache.DirCacheLowWaterMark = 8 * cli.GiByte
config.Cache.DirClean = true
config.Cache.Workers = runtime.NumCPU() + 2 // Mirrors the number of workers in please.go.
config.Cache.RPCMaxMsgSize.UnmarshalFlag("200MiB")
config.Metrics.PushFrequency = cli.Duration(400 * time.Millisecond)
config.Metrics.PushTimeout = cli.Duration(500 * time.Millisecond)
config.Metrics.PerUser = true
config.Test.Timeout = cli.Duration(10 * time.Minute)
config.Test.DefaultContainer = ContainerImplementationDocker
config.Docker.DefaultImage = "ubuntu:trusty"
config.Docker.AllowLocalFallback = false
config.Docker.Timeout = cli.Duration(20 * time.Minute)
config.Docker.ResultsTimeout = cli.Duration(20 * time.Second)
config.Docker.RemoveTimeout = cli.Duration(20 * time.Second)
config.Go.GoTool = "go"
config.Go.CgoCCTool = "gcc"
config.Go.GoPath = "$TMP_DIR:$TMP_DIR/src:$TMP_DIR/$PKG_DIR:$TMP_DIR/third_party/go:$TMP_DIR/third_party/"
config.Python.PipTool = "pip3"
config.Python.DefaultInterpreter = "python3"
config.Python.TestRunner = "unittest"
config.Python.UsePyPI = true
// Annoyingly pip on OSX doesn't seem to work with this flag (you get the dreaded
// "must supply either home or prefix/exec-prefix" error). Goodness knows why *adding* this
// flag - which otherwise seems exactly what we want - provokes that error, but the logic
// of pip is rather a mystery to me.
if runtime.GOOS != "darwin" {
config.Python.PipFlags = "--isolated"
}
config.Java.DefaultTestPackage = ""
config.Java.SourceLevel = "8"
config.Java.TargetLevel = "8"
config.Java.ReleaseLevel = ""
config.Java.DefaultMavenRepo = []cli.URL{"https://repo1.maven.org/maven2"}
config.Java.JavacFlags = "-Werror -Xlint:-options" // bootstrap class path warnings are pervasive without this.
config.Java.JlinkTool = "jlink"
config.Java.JavaHome = ""
config.Cpp.CCTool = "gcc"
config.Cpp.CppTool = "g++"
config.Cpp.LdTool = "ld"
config.Cpp.ArTool = "ar"
config.Cpp.AsmTool = "nasm"
config.Cpp.DefaultOptCflags = "--std=c99 -O3 -pipe -DNDEBUG -Wall -Werror"
config.Cpp.DefaultDbgCflags = "--std=c99 -g3 -pipe -DDEBUG -Wall -Werror"
config.Cpp.DefaultOptCppflags = "--std=c++11 -O3 -pipe -DNDEBUG -Wall -Werror"
config.Cpp.DefaultDbgCppflags = "--std=c++11 -g3 -pipe -DDEBUG -Wall -Werror"
config.Cpp.Coverage = true
// At some point in the future it might make sense to remove UnitTest++ as the default
// test runner - but for now it's still the default for compatibility.
config.Cpp.TestMain = BuildLabel{
Subrepo: "pleasings",
PackageName: "cc",
Name: "unittest_main",
}
config.Proto.ProtocTool = "protoc"
// We're using the most common names for these; typically gRPC installs the builtin plugins
// as grpc_python_plugin etc.
config.Proto.ProtocGoPlugin = "protoc-gen-go"
config.Proto.GrpcPythonPlugin = "grpc_python_plugin"
config.Proto.GrpcJavaPlugin = "protoc-gen-grpc-java"
config.Proto.GrpcCCPlugin = "grpc_cpp_plugin"
config.Proto.PythonDep = "//third_party/python:protobuf"
config.Proto.JavaDep = "//third_party/java:protobuf"
config.Proto.GoDep = "//third_party/go:protobuf"
config.Proto.JsDep = ""
config.Proto.PythonGrpcDep = "//third_party/python:grpc"
config.Proto.JavaGrpcDep = "//third_party/java:grpc-all"
config.Proto.GoGrpcDep = "//third_party/go:grpc"
config.Bazel.Compatibility = usingBazelWorkspace
return &config
}
// A Configuration contains all the settings that can be configured about Please.
// This is parsed from .plzconfig etc; we also auto-generate help messages from its tags.
type Configuration struct {
Please struct {
Version cli.Version `help:"Defines the version of plz that this repo is supposed to use currently. If it's not present or the version matches the currently running version no special action is taken; otherwise if SelfUpdate is set Please will attempt to download an appropriate version, otherwise it will issue a warning and continue.\n\nNote that if this is not set, you can run plz update to update to the latest version available on the server." var:"PLZ_VERSION"`
Location string `help:"Defines the directory Please is installed into.\nDefaults to ~/.please but you might want it to be somewhere else if you're installing via another method (e.g. the debs and install script still use /opt/please)."`
SelfUpdate bool `help:"Sets whether plz will attempt to update itself when the version set in the config file is different."`
DownloadLocation cli.URL `help:"Defines the location to download Please from when self-updating. Defaults to the Please web server, but you can point it to some location of your own if you prefer to keep traffic within your network or use home-grown versions."`
NumOldVersions int `help:"Number of old versions to keep from autoupdates."`
Autoclean bool `help:"Automatically clean stale versions without prompting"`
NumThreads int `help:"Number of parallel build operations to run.\nIs overridden by the equivalent command-line flag, if that's passed." example:"6"`
Motd []string `help:"Message of the day; is displayed once at the top during builds. If multiple are given, one is randomly chosen."`
DefaultRepo string `help:"Location of the default repository; this is used if plz is invoked when not inside a repo, it changes to that directory then does its thing."`
} `help:"The [please] section in the config contains non-language-specific settings defining how Please should operate."`
Parse struct {
ExperimentalDir []string `help:"Directory containing experimental code. This is subject to some extra restrictions:\n - Code in the experimental dir can override normal visibility constraints\n - Code outside the experimental dir can never depend on code inside it\n - Tests are excluded from general detection." example:"experimental"`
BuildFileName []string `help:"Sets the names that Please uses instead of BUILD for its build files.\nFor clarity the documentation refers to them simply as BUILD files but you could reconfigure them here to be something else.\nOne case this can be particularly useful is in cases where you have a subdirectory named build on a case-insensitive file system like HFS+." var:"BUILD_FILE_NAMES"`
BlacklistDirs []string `help:"Directories to blacklist when recursively searching for BUILD files (e.g. when using plz build ... or similar).\nThis is generally useful when you have large directories within your repo that don't need to be searched, especially things like node_modules that have come from external package managers."`
PreloadBuildDefs []string `help:"Files to preload by the parser before loading any BUILD files.\nSince this is done before the first package is parsed they must be files in the repository, they cannot be subinclude() paths." example:"build_defs/go_bindata.build_defs"`
BuiltinPleasings bool `help:"Adds github.com/thought-machine/pleasings as a default subrepo named pleasings. This makes some builtin extensions available, but is not fully deterministic (it always uses the latest version). You may prefer to disable this and define your own subrepo for it (or not use it at all, of course)."`
} `help:"The [parse] section in the config contains settings specific to parsing files."`
Display struct {
UpdateTitle bool `help:"Updates the title bar of the shell window Please is running in as the build progresses. This isn't on by default because not everyone's shell is configured to reset it again after and we don't want to alter it forever."`
SystemStats bool `help:"Whether or not to show basic system resource usage in the interactive display. Has no effect without that configured."`
} `help:"Please has an animated display mode which shows the currently building targets.\nBy default it will autodetect whether it is using an interactive TTY session and choose whether to use it or not, although you can force it on or off via flags.\n\nThe display is heavily inspired by Buck's SuperConsole."`
Events struct {
Port int `help:"Port to start the streaming build event server on."`
} `help:"The [events] section in the config contains settings relating to the internal build event system & streaming them externally."`
Build struct {
Arch cli.Arch `help:"Architecture to compile for. Defaults to the host architecture."`
Timeout cli.Duration `help:"Default timeout for Dockerised tests, in seconds. Default is twenty minutes."`
Path []string `help:"The PATH variable that will be passed to the build processes.\nDefaults to /usr/local/bin:/usr/bin:/bin but of course can be modified if you need to get binaries from other locations." example:"/usr/local/bin:/usr/bin:/bin"`
Config string `help:"The build config to use when one is not chosen on the command line. Defaults to opt." example:"opt | dbg"`
FallbackConfig string `help:"The build config to use when one is chosen and a required target does not have one by the same name. Also defaults to opt." example:"opt | dbg"`
Lang string `help:"Sets the language passed to build rules when building. This can be important for some tools (although hopefully not many) - we've mostly observed it with Sass."`
Sandbox bool `help:"True to sandbox individual build actions, which isolates them using namespaces. Only works on Linux and requires please_sandbox to be installed separately." var:"BUILD_SANDBOX"`
PleaseSandboxTool string `help:"The location of the please_sandbox tool to use."`
Nonce string `help:"This is an arbitrary string that is added to the hash of every build target. It provides a way to force a rebuild of everything when it's changed.\nWe will bump the default of this whenever we think it's required - although it's been a pretty long time now and we hope that'll continue."`
PassEnv []string `help:"A list of environment variables to pass from the current environment to build rules. For example\n\nPassEnv = HTTP_PROXY\n\nwould copy your HTTP_PROXY environment variable to the build env for any rules."`
}
BuildConfig map[string]string `help:"A section of arbitrary key-value properties that are made available in the BUILD language. These are often useful for writing custom rules that need some configurable property.\n\n[buildconfig]\nandroid-tools-version = 23.0.2\n\nFor example, the above can be accessed as CONFIG.ANDROID_TOOLS_VERSION."`
BuildEnv map[string]string `help:"A set of extra environment variables to define for build rules. For example:\n\n[buildenv]\nsecret-passphrase = 12345\n\nThis would become SECRET_PASSPHRASE for any rules. These can be useful for passing secrets into custom rules; any variables containing SECRET or PASSWORD won't be logged.\n\nIt's also useful if you'd like internal tools to honour some external variable."`
Cache struct {
Workers int `help:"Number of workers for uploading artifacts to remote caches, which is done asynchronously."`
Dir string `help:"Sets the directory to use for the dir cache.\nThe default is .plz-cache, if set to the empty string the dir cache will be disabled."`
DirCacheHighWaterMark cli.ByteSize `help:"Starts cleaning the directory cache when it is over this number of bytes.\nCan also be given with human-readable suffixes like 10G, 200MB etc."`
DirCacheLowWaterMark cli.ByteSize `help:"When cleaning the directory cache, it's reduced to at most this size."`
DirClean bool `help:"Controls whether entries in the dir cache are cleaned or not. If disabled the cache will only grow."`
DirCompress bool `help:"Compresses stored artifacts in the dir cache. They are slower to store & retrieve but more compact."`
HTTPURL cli.URL `help:"Base URL of the HTTP cache.\nNot set to anything by default which means the cache will be disabled."`
HTTPWriteable bool `help:"If True this plz instance will write content back to the HTTP cache.\nBy default it runs in read-only mode."`
HTTPTimeout cli.Duration `help:"Timeout for operations contacting the HTTP cache, in seconds."`
RPCURL cli.URL `help:"Base URL of the RPC cache.\nNot set to anything by default which means the cache will be disabled."`
RPCWriteable bool `help:"If True this plz instance will write content back to the RPC cache.\nBy default it runs in read-only mode."`
RPCTimeout cli.Duration `help:"Timeout for operations contacting the RPC cache, in seconds."`
RPCPublicKey string `help:"File containing a PEM-encoded private key which is used to authenticate to the RPC cache." example:"my_key.pem"`
RPCPrivateKey string `help:"File containing a PEM-encoded certificate which is used to authenticate to the RPC cache." example:"my_cert.pem"`
RPCCACert string `help:"File containing a PEM-encoded certificate which is used to validate the RPC cache's certificate." example:"ca.pem"`
RPCSecure bool `help:"Forces SSL on for the RPC cache. It will be activated if any of rpcpublickey, rpcprivatekey or rpccacert are set, but this can be used if none of those are needed and SSL is still in use."`
RPCMaxMsgSize cli.ByteSize `help:"Maximum size of a single message that we'll send to the RPC server.\nThis should agree with the server's limit, if it's higher the artifacts will be rejected.\nThe value is given as a byte size so can be suffixed with M, GB, KiB, etc."`
} `help:"Please has several built-in caches that can be configured in its config file.\n\nThe simplest one is the directory cache which by default is written into the .plz-cache directory. This allows for fast retrieval of code that has been built before (for example, when swapping Git branches).\n\nThere is also a remote RPC cache which allows using a centralised server to store artifacts. A typical pattern here is to have your CI system write artifacts into it and give developers read-only access so they can reuse its work.\n\nFinally there's a HTTP cache which is very similar, but a little obsolete now since the RPC cache outperforms it and has some extra features. Otherwise the two have similar semantics and share quite a bit of implementation.\n\nPlease has server implementations for both the RPC and HTTP caches."`
Metrics struct {
PushGatewayURL cli.URL `help:"The URL of the pushgateway to send metrics to."`
PushFrequency cli.Duration `help:"The frequency, in milliseconds, to push statistics at." example:"400ms"`
PushTimeout cli.Duration `help:"Timeout on pushes to the metrics repository." example:"500ms"`
PerTest bool `help:"Emit per-test duration metrics. Off by default because they generate increased load on Prometheus."`
PerUser bool `help:"Emit per-user metrics. On by default for compatibility, but will generate more load on Prometheus."`
} `help:"A section of options relating to reporting metrics. Currently only pushing metrics to a Prometheus pushgateway is supported, which is enabled by the pushgatewayurl setting."`
CustomMetricLabels map[string]string `help:"Allows defining custom labels to be applied to metrics. The key is the name of the label, and the value is a command to be run, the output of which becomes the label's value. For example, to attach the current Git branch to all metrics:\n\n[custommetriclabels]\nbranch = git rev-parse --abbrev-ref HEAD\n\nBe careful when defining new labels, it is quite possible to overwhelm the metric collector by creating metric sets with too high cardinality."`
Test struct {
Timeout cli.Duration `help:"Default timeout applied to all tests. Can be overridden on a per-rule basis."`
DefaultContainer string `help:"Sets the default type of containerisation to use for tests that are given container = True.\nCurrently the only available option is 'docker', we expect to add support for more engines in future." options:"none,docker"`
Sandbox bool `help:"True to sandbox individual tests, which isolates them using namespaces. Somewhat experimental, only works on Linux and requires please_sandbox to be installed separately." var:"TEST_SANDBOX"`
}
Cover struct {
FileExtension []string `help:"Extensions of files to consider for coverage.\nDefaults to a reasonably obvious set for the builtin rules including .go, .py, .java, etc."`
ExcludeExtension []string `help:"Extensions of files to exclude from coverage.\nTypically this is for generated code; the default is to exclude protobuf extensions like .pb.go, _pb2.py, etc."`
}
Docker struct {
DefaultImage string `help:"The default image used for any test that doesn't specify another."`
AllowLocalFallback bool `help:"If True, will attempt to run the test locally if containerised running fails."`
Timeout cli.Duration `help:"Default timeout for containerised tests. Can be overridden on a per-rule basis."`
ResultsTimeout cli.Duration `help:"Timeout to wait when trying to retrieve results from inside the container. Default is 20 seconds."`
RemoveTimeout cli.Duration `help:"Timeout to wait when trying to remove a container after running a test. Defaults to 20 seconds."`
} `help:"Please supports running individual tests within Docker containers for isolation. This is useful for tests that mutate some global state (such as an embedded database, or open a server on a particular port). To do so, simply mark a test rule with container = True."`
Gc struct {
Keep []BuildLabel `help:"Marks targets that gc should always keep. Can include meta-targets such as //test/... and //docs:all."`
KeepLabel []string `help:"Defines a target label to be kept; for example, if you set this to go, no Go targets would ever be considered for deletion." example:"go"`
} `help:"Please supports a form of 'garbage collection', by which it means identifying targets that are not used for anything. By default binary targets and all their transitive dependencies are always considered non-garbage, as are any tests directly on those. The config options here allow tweaking this behaviour to retain more things.\n\nNote that it's a very good idea that your BUILD files are in the standard format when running this."`
Go struct {
GoTool string `help:"The binary to use to invoke Go & its subtools with." var:"GO_TOOL"`
BuildIDTool string `help:"The binary to use to override Go's BuildIds'." var:"BUILDID_TOOL"`
GoRoot string `help:"If set, will set the GOROOT environment variable appropriately during build actions."`
TestTool string `help:"Sets the location of the please_go_test tool that is used to template the test main for go_test rules." var:"GO_TEST_TOOL"`
GoPath string `help:"If set, will set the GOPATH environment variable appropriately during build actions." var:"GOPATH"`
ImportPath string `help:"Sets the default Go import path at the root of this repository.\nFor example, in the Please repo, we might set it to github.com/thought-machine/please to allow imports from that package within the repo." var:"GO_IMPORT_PATH"`
CgoCCTool string `help:"Sets the location of CC while building cgo_library and cgo_test rules. Defaults to gcc" var:"CGO_CC_TOOL"`
FilterTool string `help:"Sets the location of the please_go_filter tool that is used to filter source files against build constraints." var:"GO_FILTER_TOOL"`
DefaultStatic bool `help:"Sets Go binaries to default to static linking. Note that enabling this may have negative consequences for some code, including Go's DNS lookup code in the net module." var:"GO_DEFAULT_STATIC"`
} `help:"Please has built-in support for compiling Go, and of course is written in Go itself.\nSee the config subfields or the Go rules themselves for more information.\n\nNote that Please is a bit more flexible than Go about directory layout - for example, it is possible to have multiple packages in a directory, but it's not a good idea to push this too far since Go's directory layout is inextricably linked with its import paths."`
Python struct {
PipTool string `help:"The tool that is invoked during pip_library rules." var:"PIP_TOOL"`
PipFlags string `help:"Additional flags to pass to pip invocations in pip_library rules." var:"PIP_FLAGS"`
PexTool string `help:"The tool that's invoked to build pexes. Defaults to please_pex in the install directory." var:"PEX_TOOL"`
DefaultInterpreter string `help:"The interpreter used for python_binary and python_test rules when none is specified on the rule itself. Defaults to python but you could of course set it to, say, pypy." var:"DEFAULT_PYTHON_INTERPRETER"`
TestRunner string `help:"The test runner used to discover & run Python tests; one of unittest, pytest or behave." var:"PYTHON_TEST_RUNNER" options:"unittest,pytest,behave"`
ModuleDir string `help:"Defines a directory containing modules from which they can be imported at the top level.\nBy default this is empty but by convention we define our pip_library rules in third_party/python and set this appropriately. Hence any of those third-party libraries that try something like import six will have it work as they expect, even though it's actually in a different location within the .pex." var:"PYTHON_MODULE_DIR"`
DefaultPipRepo cli.URL `help:"Defines a location for a pip repo to download wheels from.\nBy default pip_library uses PyPI (although see below on that) but you may well want to use this define another location to upload your own wheels to.\nIs overridden by the repo argument to pip_library." var:"PYTHON_DEFAULT_PIP_REPO"`
WheelRepo cli.URL `help:"Defines a location for a remote repo that python_wheel rules will download from. See python_wheel for more information." var:"PYTHON_WHEEL_REPO"`
UsePyPI bool `help:"Whether or not to use PyPI for pip_library rules or not. Defaults to true, if you disable this you will presumably want to set DefaultPipRepo to use one of your own.\nIs overridden by the use_pypi argument to pip_library." var:"USE_PYPI"`
WheelNameScheme string `help:"Defines a custom templatized wheel naming scheme. Templatized variables should be surrounded in curly braces, and the available options are: url_base, package_name, and version. The default search pattern is '{url_base}/{package_name}-{version}-${{OS}}-${{ARCH}}.whl' along with a few common variants." var:"PYTHON_WHEEL_NAME_SCHEME"`
} `help:"Please has built-in support for compiling Python.\nPlease's Python artifacts are pex files, which are essentially self-executable zip files containing all needed dependencies, bar the interpreter itself. This fits our aim of at least semi-static binaries for each language.\nSee https://github.com/pantsbuild/pex for more information.\nNote that due to differences between the environment inside a pex and outside some third-party code may not run unmodified (for example, it cannot simply open() files). It's possible to work around a lot of this, but if it all becomes too much it's possible to mark pexes as not zip-safe which typically resolves most of it at a modest speed penalty."`
Java struct {
JavacTool string `help:"Defines the tool used for the Java compiler. Defaults to javac." var:"JAVAC_TOOL"`
JlinkTool string `help:"Defines the tool used for the Java linker. Defaults to jlink." var:"JLINK_TOOL"`
JavaHome string `help:"Defines the path of the Java Home folder." var:"JAVA_HOME"`
JavacWorker string `help:"Defines the tool used for the Java persistent compiler. This is significantly (approx 4x) faster for large Java trees than invoking javac separately each time. Default to javac_worker in the install directory, but can be switched off to fall back to javactool and separate invocation." var:"JAVAC_WORKER"`
JarCatTool string `help:"Defines the tool used to concatenate .jar files which we use to build the output of java_binary, java_test and various other rules. Defaults to jarcat in the Please install directory." var:"JARCAT_TOOL"`
PleaseMavenTool string `help:"Defines the tool used to fetch information from Maven in maven_jars rules.\nDefaults to please_maven in the Please install directory." var:"PLEASE_MAVEN_TOOL"`
JUnitRunner string `help:"Defines the .jar containing the JUnit runner. This is built into all java_test rules since it's necessary to make JUnit do anything useful.\nDefaults to junit_runner.jar in the Please install directory." var:"JUNIT_RUNNER"`
DefaultTestPackage string `help:"The Java classpath to search for functions annotated with @Test. If not specified the compiled sources will be searched for files named *Test.java." var:"DEFAULT_TEST_PACKAGE"`
ReleaseLevel string `help:"The default Java release level when compiling.\nSourceLevel and TargetLevel are ignored if this is set. Bear in mind that this flag is only supported in Java version 9+." var:"JAVA_RELEASE_LEVEL"`
SourceLevel string `help:"The default Java source level when compiling. Defaults to 8." var:"JAVA_SOURCE_LEVEL"`
TargetLevel string `help:"The default Java bytecode level to target. Defaults to 8." var:"JAVA_TARGET_LEVEL"`
JavacFlags string `help:"Additional flags to pass to javac when compiling libraries." example:"-Xmx1200M" var:"JAVAC_FLAGS"`
JavacTestFlags string `help:"Additional flags to pass to javac when compiling tests." example:"-Xmx1200M" var:"JAVAC_TEST_FLAGS"`
DefaultMavenRepo []cli.URL `help:"Default location to load artifacts from in maven_jar rules. Can be overridden on a per-rule basis." var:"DEFAULT_MAVEN_REPO"`
} `help:"Please has built-in support for compiling Java.\nIt builds uber-jars for binary and test rules which contain all dependencies and can be easily deployed, and with the help of some of Please's additional tools they are deterministic as well.\n\nWe've only tested support for Java 7 and 8, although it's likely newer versions will work with little or no change."`
Cpp struct {
CCTool string `help:"The tool invoked to compile C code. Defaults to gcc but you might want to set it to clang, for example." var:"CC_TOOL"`
CppTool string `help:"The tool invoked to compile C++ code. Defaults to g++ but you might want to set it to clang++, for example." var:"CPP_TOOL"`
LdTool string `help:"The tool invoked to link object files. Defaults to ld but you could also set it to gold, for example." var:"LD_TOOL"`
ArTool string `help:"The tool invoked to archive static libraries. Defaults to ar." var:"AR_TOOL"`
AsmTool string `help:"The tool invoked as an assembler. Currently only used on OSX for cc_embed_binary rules and so defaults to nasm." var:"ASM_TOOL"`
LinkWithLdTool bool `help:"If true, instructs Please to use the tool set earlier in ldtool to link binaries instead of cctool.\nThis is an esoteric setting that most people don't want; a vanilla ld will not perform all steps necessary here (you'll get lots of missing symbol messages from having no libc etc). Generally best to leave this disabled unless you have very specific requirements." var:"LINK_WITH_LD_TOOL"`
DefaultOptCflags string `help:"Compiler flags passed to all C rules during opt builds; these are typically pretty basic things like what language standard you want to target, warning flags, etc.\nDefaults to --std=c99 -O3 -DNDEBUG -Wall -Wextra -Werror" var:"DEFAULT_OPT_CFLAGS"`
DefaultDbgCflags string `help:"Compiler rules passed to all C rules during dbg builds.\nDefaults to --std=c99 -g3 -DDEBUG -Wall -Wextra -Werror." var:"DEFAULT_DBG_CFLAGS"`
DefaultOptCppflags string `help:"Compiler flags passed to all C++ rules during opt builds; these are typically pretty basic things like what language standard you want to target, warning flags, etc.\nDefaults to --std=c++11 -O3 -DNDEBUG -Wall -Wextra -Werror" var:"DEFAULT_OPT_CPPFLAGS"`
DefaultDbgCppflags string `help:"Compiler rules passed to all C++ rules during dbg builds.\nDefaults to --std=c++11 -g3 -DDEBUG -Wall -Wextra -Werror." var:"DEFAULT_DBG_CPPFLAGS"`
DefaultLdflags string `help:"Linker flags passed to all C++ rules.\nBy default this is empty." var:"DEFAULT_LDFLAGS"`
DefaultNamespace string `help:"Namespace passed to all cc_embed_binary rules when not overridden by the namespace argument to that rule.\nNot set by default, if you want to use those rules you'll need to set it or pass it explicitly to each one." var:"DEFAULT_NAMESPACE"`
PkgConfigPath string `help:"Custom PKG_CONFIG_PATH for pkg-config.\nBy default this is empty." var:"PKG_CONFIG_PATH"`
Coverage bool `help:"If true (the default), coverage will be available for C and C++ build rules.\nThis is still a little experimental but should work for GCC. Right now it does not work for Clang (it likely will in Clang 4.0 which will likely support --fprofile-dir) and so this can be useful to disable it.\nIt's also useful in some cases for CI systems etc if you'd prefer to avoid the overhead, since the tests have to be compiled with extra instrumentation and without optimisation." var:"CPP_COVERAGE"`
TestMain BuildLabel `help:"The build target to use for the default main for C++ test rules." example:"@pleasings//cc:unittest_main" var:"CC_TEST_MAIN"`
} `help:"Please has built-in support for compiling C and C++ code. We don't support every possible nuance of compilation for these languages, but aim to provide something fairly straightforward.\nTypically there is little problem compiling & linking against system libraries although Please has no insight into those libraries and when they change, so cannot rebuild targets appropriately.\n\nThe C and C++ rules are very similar and simply take a different set of tools and flags to facilitate side-by-side usage."`
Proto struct {
ProtocTool string `help:"The binary invoked to compile .proto files. Defaults to protoc." var:"PROTOC_TOOL"`
ProtocGoPlugin string `help:"The binary passed to protoc as a plugin to generate Go code. Defaults to protoc-gen-go.\nWe've found this easier to manage with a go_get rule instead though, so you can also pass a build label here. See the Please repo for an example." var:"PROTOC_GO_PLUGIN"`
GrpcPythonPlugin string `help:"The plugin invoked to compile Python code for grpc_library.\nDefaults to protoc-gen-grpc-python." var:"GRPC_PYTHON_PLUGIN"`
GrpcJavaPlugin string `help:"The plugin invoked to compile Java code for grpc_library.\nDefaults to protoc-gen-grpc-java." var:"GRPC_JAVA_PLUGIN"`
GrpcCCPlugin string `help:"The plugin invoked to compile C++ code for grpc_library.\nDefaults to grpc_cpp_plugin." var:"GRPC_CC_PLUGIN"`
Language []string `help:"Sets the default set of languages that proto rules are built for.\nChosen from the set of {cc, java, go, py}.\nDefaults to all of them!" var:"PROTO_LANGUAGES"`
PythonDep string `help:"An in-repo dependency that's applied to any Python proto libraries." var:"PROTO_PYTHON_DEP"`
JavaDep string `help:"An in-repo dependency that's applied to any Java proto libraries." var:"PROTO_JAVA_DEP"`
GoDep string `help:"An in-repo dependency that's applied to any Go proto libraries." var:"PROTO_GO_DEP"`
JsDep string `help:"An in-repo dependency that's applied to any Javascript proto libraries." var:"PROTO_JS_DEP"`
PythonGrpcDep string `help:"An in-repo dependency that's applied to any Python gRPC libraries." var:"GRPC_PYTHON_DEP"`
JavaGrpcDep string `help:"An in-repo dependency that's applied to any Java gRPC libraries." var:"GRPC_JAVA_DEP"`
GoGrpcDep string `help:"An in-repo dependency that's applied to any Go gRPC libraries." var:"GRPC_GO_DEP"`
} `help:"Please has built-in support for compiling protocol buffers, which are a form of codegen to define common data types which can be serialised and communicated between different languages.\nSee https://developers.google.com/protocol-buffers/ for more information.\n\nThere is also support for gRPC, which is an implementation of protobuf's RPC framework. See http://www.grpc.io/ for more information.\n\nNote that you must have the protocol buffers compiler (and gRPC plugins, if needed) installed on your machine to make use of these rules."`
Licences struct {
Accept []string `help:"Licences that are accepted in this repository.\nWhen this is empty licences are ignored. As soon as it's set any licence detected or assigned must be accepted explicitly here.\nThere's no fuzzy matching, so some package managers (especially PyPI and Maven, but shockingly not npm which rather nicely uses SPDX) will generate a lot of slightly different spellings of the same thing, which will all have to be accepted here. We'd rather that than trying to 'cleverly' match them which might result in matching the wrong thing."`
Reject []string `help:"Licences that are explicitly rejected in this repository.\nAn astute observer will notice that this is not very different to just not adding it to the accept section, but it does have the advantage of explicitly documenting things that the team aren't allowed to use."`
} `help:"Please has some limited support for declaring acceptable licences and detecting them from some libraries. You should not rely on this for complete licence compliance, but it can be a useful check to try to ensure that unacceptable licences do not slip in."`
Aliases map[string]string `help:"It is possible to define aliases for new commands in your .plzconfig file. These are essentially string-string replacements of the command line, for example 'deploy = run //tools:deployer --' makes 'plz deploy' run a particular tool."`
Alias map[string]*Alias `help:"Allows defining alias replacements with more detail than the [aliases] section. Otherwise follows the same process, i.e. performs replacements of command strings."`
Provider map[string]*struct {
Target BuildLabel `help:"The in-repo target to build this provider."`
} `help:"Allows configuring BUILD file providers, which are subprocesses that know how to provide the contents of a BUILD file when none exists. For example, a Go provider might infer the contents of a BUILD file from the Go source files directly."`
Bazel struct {
Compatibility bool `help:"Activates limited Bazel compatibility mode. When this is active several rule arguments are available under different names (e.g. compiler_flags -> copts etc), the WORKSPACE file is interpreted, Makefile-style replacements like $< and $@ are made in genrule commands, etc.\nNote that Skylark is not generally supported and many aspects of compatibility are fairly superficial; it's unlikely this will work for complex setups of either tool." var:"BAZEL_COMPATIBILITY"`
} `help:"Bazel is an open-sourced version of Google's internal build tool. Please draws a lot of inspiration from the original tool although the two have now diverged in various ways.\nNonetheless, if you've used Bazel, you will likely find Please familiar."`
// buildEnvStored is a cached form of BuildEnv.
buildEnvStored *storedBuildEnv
}
// An Alias represents a single command alias defined in the config.
type Alias struct {
Cmd string `help:"Command to run for this alias."`
Desc string `help:"Description of this alias"`
Subcommand []string `help:"Known subcommands of this command"`
Flag []string `help:"Known flags of this command"`
PositionalLabels bool `help:"Treats positional arguments after commands as build labels for the purpose of tab completion."`
}
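// As a rough illustration (the section and key names are a sketch of the gcfg layout, not
// authoritative documentation), the Alias struct above might be populated from a .plzconfig
// section like:
//
//   [alias "deploy"]
//   cmd = run //tools:deployer --
//   desc = Deploys the application
//   subcommand = staging
//   subcommand = production
//
// whereas the simpler [aliases] section maps a name straight to a command string:
//
//   [aliases]
//   deploy = run //tools:deployer --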
type storedBuildEnv struct {
Env []string
Once sync.Once
}
// Hash returns a hash of the parts of this configuration that affect building targets in general.
// Most parts are considered not to (e.g. cache settings) or affect specific targets (e.g. changing
// tool paths which get accounted for on the targets that use them).
func (config *Configuration) Hash() []byte {
h := sha1.New()
// These fields are the ones that need to be in the general hash; other things will be
// picked up by relevant rules (particularly tool paths etc).
// Note that container settings are handled separately.
for _, f := range config.Parse.BuildFileName {
h.Write([]byte(f))
}
h.Write([]byte(config.Build.Lang))
h.Write([]byte(config.Build.Nonce))
for _, l := range config.Licences.Reject {
h.Write([]byte(l))
}
for _, env := range config.GetBuildEnv() {
h.Write([]byte(env))
}
return h.Sum(nil)
}
// ContainerisationHash returns the hash of the containerisation part of the config.
func (config *Configuration) ContainerisationHash() []byte {
h := sha1.New()
encoder := gob.NewEncoder(h)
if err := encoder.Encode(config.Docker); err != nil {
panic(err)
}
return h.Sum(nil)
}
// GetBuildEnv returns the build environment configured for this config object.
func (config *Configuration) GetBuildEnv() []string {
config.buildEnvStored.Once.Do(func() {
env := []string{
// Need to know these for certain rules.
"ARCH=" + config.Build.Arch.Arch,
"OS=" + config.Build.Arch.OS,
// These are slightly modified forms that are more convenient for some things.
"XARCH=" + config.Build.Arch.XArch(),
"XOS=" + config.Build.Arch.XOS(),
// It's easier to just make these available for Go-based rules.
"GOARCH=" + config.Build.Arch.GoArch(),
"GOOS=" + config.Build.Arch.OS,
}
// from the BuildEnv config keyword
for k, v := range config.BuildEnv {
pair := strings.Replace(strings.ToUpper(k), "-", "_", -1) + "=" + v
env = append(env, pair)
}
// from the user's environment based on the PassEnv config keyword
for _, k := range config.Build.PassEnv {
if v, isSet := os.LookupEnv(k); isSet {
env = append(env, k+"="+v)
}
}
sort.Strings(env)
config.buildEnvStored.Env = env
})
return config.buildEnvStored.Env
}
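// As a rough illustration (actual values depend on the host and config; no BuildEnv or
// PassEnv entries are assumed), on a linux/amd64 machine GetBuildEnv above would return a
// sorted slice along the lines of:
//
//   []string{"ARCH=amd64", "GOARCH=amd64", "GOOS=linux", "OS=linux", "XARCH=x86_64", "XOS=linux"}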
// TagsToFields returns a map from each field's 'var' tag (the name exposed on the CONFIG object) to that field's reflect.StructField.
func (config *Configuration) TagsToFields() map[string]reflect.StructField {
tags := make(map[string]reflect.StructField)
v := reflect.ValueOf(config).Elem()
for i := 0; i < v.NumField(); i++ {
if field := v.Field(i); field.Kind() == reflect.Struct {
for j := 0; j < field.NumField(); j++ {
if tag := field.Type().Field(j).Tag.Get("var"); tag != "" {
tags[tag] = field.Type().Field(j)
}
}
}
}
return tags
}
// ApplyOverrides applies a set of overrides to the config.
// The keys of the given map are dot notation for the config setting.
func (config *Configuration) ApplyOverrides(overrides map[string]string) error {
match := func(s1 string) func(string) bool {
return func(s2 string) bool {
return strings.ToLower(s2) == s1
}
}
elem := reflect.ValueOf(config).Elem()
for k, v := range overrides {
split := strings.Split(strings.ToLower(k), ".")
if len(split) != 2 {
return fmt.Errorf("Bad option format: %s", k)
}
field := elem.FieldByNameFunc(match(split[0]))
if !field.IsValid() {
return fmt.Errorf("Unknown config field: %s", split[0])
} else if field.Kind() == reflect.Map {
field.SetMapIndex(reflect.ValueOf(split[1]), reflect.ValueOf(v))
continue
} else if field.Kind() != reflect.Struct {
return fmt.Errorf("Unsettable config field: %s", split[0])
}
subfield, ok := field.Type().FieldByNameFunc(match(split[1]))
if !ok {
return fmt.Errorf("Unknown config field: %s", split[1])
}
field = field.FieldByNameFunc(match(split[1]))
switch field.Kind() {
case reflect.String:
// verify this is a legit setting for this field
if options := subfield.Tag.Get("options"); options != "" {
if !cli.ContainsString(v, strings.Split(options, ",")) {
return fmt.Errorf("Invalid value %s for field %s; options are %s", v, k, options)
}
}
if field.Type().Name() == "URL" {
field.Set(reflect.ValueOf(cli.URL(v)))
} else {
field.Set(reflect.ValueOf(v))
}
case reflect.Bool:
v = strings.ToLower(v)
// Mimics the set of truthy things gcfg accepts in our config file.
field.SetBool(v == "true" || v == "yes" || v == "on" || v == "1")
case reflect.Int:
i, err := strconv.Atoi(v)
if err != nil {
return fmt.Errorf("Invalid value for an integer field: %s", v)
}
field.Set(reflect.ValueOf(i))
case reflect.Int64:
var d cli.Duration
if err := d.UnmarshalText([]byte(v)); err != nil {
return fmt.Errorf("Invalid value for a duration field: %s", v)
}
field.Set(reflect.ValueOf(d))
case reflect.Slice:
// Comma-separated values are accepted.
if field.Type().Elem().Kind() == reflect.Struct {
// Assume it must be a slice of BuildLabel.
l := []BuildLabel{}
for _, s := range strings.Split(v, ",") {
l = append(l, ParseBuildLabel(s, ""))
}
field.Set(reflect.ValueOf(l))
} else if field.Type().Elem().Name() == "URL" {
urls := []cli.URL{}
for _, s := range strings.Split(v, ",") {
urls = append(urls, cli.URL(s))
}
field.Set(reflect.ValueOf(urls))
} else {
field.Set(reflect.ValueOf(strings.Split(v, ",")))
}
default:
return fmt.Errorf("Can't override config field %s (is %s)", k, field.Kind())
}
}
return nil
}
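// A minimal usage sketch for ApplyOverrides above (the chosen option names are illustrative,
// not checked against the full field set): keys use "section.field" dot notation, matched
// case-insensitively, with values parsed according to the field's kind:
//
//   err := config.ApplyOverrides(map[string]string{
//       "build.nonce":     "12345",
//       "licences.reject": "GPL-3.0,AGPL-3.0",
//   })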
// Completions returns a list of possible completions for the given option prefix.
func (config *Configuration) Completions(prefix string) []flags.Completion {
ret := []flags.Completion{}
t := reflect.TypeOf(config).Elem()
for i := 0; i < t.NumField(); i++ {
if field := t.Field(i); field.Type.Kind() == reflect.Struct {
for j := 0; j < field.Type.NumField(); j++ {
subfield := field.Type.Field(j)
if name := strings.ToLower(field.Name + "." + subfield.Name); strings.HasPrefix(name, prefix) {
help := subfield.Tag.Get("help")
if options := subfield.Tag.Get("options"); options != "" {
for _, option := range strings.Split(options, ",") {
ret = append(ret, flags.Completion{Item: name + ":" + option, Description: help})
}
} else {
ret = append(ret, flags.Completion{Item: name + ":", Description: help})
}
}
}
}
}
return ret
}
// UpdateArgsWithAliases applies the aliases in this config to the given set of arguments.
func (config *Configuration) UpdateArgsWithAliases(args []string) []string {
for idx, arg := range args[1:] {
// Please should not touch anything that comes after `--`
if arg == "--" {
break
}
for k, v := range config.AllAliases() {
if arg == k {
// We could insert every token in v into os.Args at this point and then we could have
// aliases defined in terms of other aliases but that seems rather like overkill so just
// stick the replacement in wholesale instead.
// Do not ask about the inner append and the empty slice.
cmd, err := shlex.Split(v.Cmd)
if err != nil {
log.Fatalf("Invalid alias replacement for %s: %s", k, err)
}
return append(append(append([]string{}, args[:idx+1]...), cmd...), args[idx+2:]...)
}
}
}
return args
}
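// Worked example (purely illustrative): given the alias 'deploy = run //tools:deployer --'
// from the help text above, UpdateArgsWithAliases rewrites
//
//   plz deploy --force
//
// into
//
//   plz run //tools:deployer -- --force
//
// i.e. the alias token is replaced wholesale and everything after it is preserved.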
// AllAliases returns all the aliases defined in this config
func (config *Configuration) AllAliases() map[string]*Alias {
ret := map[string]*Alias{}
for k, v := range config.Aliases {
ret[k] = &Alias{Cmd: v}
}
for k, v := range config.Alias {
ret[k] = v
}
return ret
}
// PrintAliases prints the set of aliases defined in the config.
func (config *Configuration) PrintAliases(w io.Writer) {
aliases := config.AllAliases()
names := make([]string, 0, len(aliases))
maxlen := 0
for alias := range aliases {
names = append(names, alias)
if len(alias) > maxlen {
maxlen = len(alias)
}
}
sort.Strings(names)
w.Write([]byte("\nAvailable commands for this repository:\n"))
tmpl := fmt.Sprintf(" %%-%ds %%s\n", maxlen)
for _, name := range names {
fmt.Fprintf(w, tmpl, name, aliases[name].Desc)
}
}
| 1 | 8,527 | Don't think this should be here. The default is set somewhere else. | thought-machine-please | go |
@@ -1229,7 +1229,13 @@ class CloudTaurusTest(BaseCloudTest):
def stop_test(self):
if self.master:
self.log.info("Ending cloud test...")
- self.master.stop()
+ if not self._last_status:
+ self.get_master_status()
+
+ if self._last_status["progress"] >= 100:
+ self.master.stop()
+ else:
+ self.master.terminate()
def get_test_status_text(self):
if not self._sessions: | 1 | """
Module for reporting into http://www.blazemeter.com/ service
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import logging
import os
import platform
import re
import sys
import time
import traceback
import zipfile
from abc import abstractmethod
from collections import defaultdict, OrderedDict, Counter, namedtuple
from functools import wraps
from ssl import SSLError
import yaml
from requests.exceptions import ReadTimeout
from urwid import Pile, Text
from bzt import AutomatedShutdown
from bzt import TaurusInternalException, TaurusConfigError, TaurusException, TaurusNetworkError, NormalShutdown
from bzt.bza import User, Session, Test, Workspace, MultiTest
from bzt.engine import Reporter, Provisioning, ScenarioExecutor, Configuration, Service, Singletone
from bzt.modules.aggregator import DataPoint, KPISet, ConsolidatingAggregator, ResultsProvider, AggregatorListener
from bzt.modules.console import WidgetProvider, PrioritizedWidget
from bzt.modules.functional import FunctionalResultsReader, FunctionalAggregator, FunctionalSample
from bzt.modules.monitoring import Monitoring, MonitoringListener
from bzt.modules.services import Unpacker
from bzt.six import BytesIO, iteritems, HTTPError, r_input, URLError, b, string_types, text_type
from bzt.utils import dehumanize_time, BetterDict, ensure_is_dict, ExceptionalDownloader, ProgressBarContext
from bzt.utils import to_json, open_browser, get_full_path, get_files_recursive, replace_in_config, humanize_bytes
TAURUS_TEST_TYPE = "taurus"
FUNC_TEST_TYPE = "functionalApi"
CLOUD_CONFIG_FILTER_RULES = {
"execution": True,
"scenarios": True,
"services": True,
"locations": True,
"locations-weighted": True,
"settings": {
"verbose": True
},
"modules": {
"jmeter": {
"version": True,
"properties": True,
"system-properties": True,
"xml-jtl-flags": True,
},
"gatling": {
"version": True,
"properties": True,
"java-opts": True,
"additional-classpath": True
},
"grinder": {
"properties": True,
"properties-file": True
},
"selenium": {
"additional-classpath": True,
"virtual-display": True,
"compile-target-java": True
},
"junit": {
"compile-target-java": True
},
"testng": {
"compile-target-java": True
},
"local": {
"sequential": True
},
"proxy2jmx": {
"token": True
},
"shellexec": {
"env": True
},
"!blazemeter": {
"class": True,
"request-logging-limit": True,
"token": True,
"address": True,
"data-address": True,
"test": True,
"project": True,
"use-deprecated-api": True,
"default-location": True,
"browser-open": True,
"delete-test-files": True,
"report-name": True,
"timeout": True,
"public-report": True,
"check-interval": True,
"detach": True,
},
"consolidator": {
"rtimes-len": True
},
}
}
CLOUD_CONFIG_FILTER_RULES['modules']['!cloud'] = CLOUD_CONFIG_FILTER_RULES['modules']['!blazemeter']
NETWORK_PROBLEMS = (IOError, URLError, SSLError, ReadTimeout, TaurusNetworkError)
def send_with_retry(method):
@wraps(method)
def _impl(self, *args, **kwargs):
if not isinstance(self, BlazeMeterUploader):
raise TaurusInternalException("send_with_retry should only be applied to BlazeMeterUploader methods")
try:
method(self, *args, **kwargs)
except (IOError, TaurusNetworkError):
self.log.debug("Error sending data: %s", traceback.format_exc())
self.log.warning("Failed to send data, will retry in %s sec...", self._user.timeout)
try:
time.sleep(self._user.timeout)
method(self, *args, **kwargs)
self.log.info("Succeeded with retry")
except NETWORK_PROBLEMS:
self.log.error("Fatal error sending data: %s", traceback.format_exc())
self.log.warning("Will skip failed data and continue running")
return _impl
def get_with_retry(method):
@wraps(method)
def _impl(self, *args, **kwargs):
if not isinstance(self, CloudProvisioning):
raise TaurusInternalException("get_with_retry should only be applied to CloudProvisioning class methods")
while True:
try:
return method(self, *args, **kwargs)
except NETWORK_PROBLEMS:
self.log.debug("Error making request: %s", traceback.format_exc())
self.log.warning("Failed to make request, will retry in %s sec...", self.user.timeout)
time.sleep(self.user.timeout)
return _impl
def parse_blazemeter_test_link(link):
"""
https://a.blazemeter.com/app/#/accounts/97961/workspaces/89846/projects/229969/tests/5823512
:param link:
:return:
"""
if not isinstance(link, (string_types, text_type)):
return None
regex = r'https://a.blazemeter.com/app/#/accounts/(\d+)/workspaces/(\d+)/projects/(\d+)/tests/(\d+)(?:/\w+)?'
match = re.match(regex, link)
if match is None:
return None
TestParams = namedtuple('TestParams', 'account_id,workspace_id,project_id,test_id')
return TestParams(*[int(x) for x in match.groups()])
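# Illustrative example (derived from the URL in the docstring above):
# parse_blazemeter_test_link(
#     "https://a.blazemeter.com/app/#/accounts/97961/workspaces/89846/projects/229969/tests/5823512")
# returns TestParams(account_id=97961, workspace_id=89846, project_id=229969, test_id=5823512)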
class BlazeMeterUploader(Reporter, AggregatorListener, MonitoringListener, Singletone):
"""
Reporter class
:type _test: bzt.bza.Test
:type _master: bzt.bza.Master
"""
def __init__(self):
super(BlazeMeterUploader, self).__init__()
self.browser_open = 'start'
self.kpi_buffer = []
self.send_interval = 30
self._last_status_check = time.time()
self.send_data = True
self.upload_artifacts = True
self.send_monitoring = True
self.monitoring_buffer = None
self.public_report = False
self.last_dispatch = 0
self.results_url = None
self._user = User()
self._test = None
self._master = None
self._session = None
self.first_ts = sys.maxsize
self.last_ts = 0
self.report_name = None
self._dpoint_serializer = DatapointSerializer(self)
def prepare(self):
"""
Read options for uploading, check that they're sane
"""
super(BlazeMeterUploader, self).prepare()
self.send_interval = dehumanize_time(self.settings.get("send-interval", self.send_interval))
self.send_monitoring = self.settings.get("send-monitoring", self.send_monitoring)
monitoring_buffer_limit = self.settings.get("monitoring-buffer-limit", 500)
self.monitoring_buffer = MonitoringBuffer(monitoring_buffer_limit, self.log)
self.browser_open = self.settings.get("browser-open", self.browser_open)
self.public_report = self.settings.get("public-report", self.public_report)
self._dpoint_serializer.multi = self.settings.get("report-times-multiplier", self._dpoint_serializer.multi)
token = self.settings.get("token", "")
if not token:
self.log.warning("No BlazeMeter API key provided, will upload anonymously")
self._user.token = token
# usual fields
self._user.logger_limit = self.settings.get("request-logging-limit", self._user.logger_limit)
self._user.address = self.settings.get("address", self._user.address)
self._user.data_address = self.settings.get("data-address", self._user.data_address)
self._user.timeout = dehumanize_time(self.settings.get("timeout", self._user.timeout))
# direct data feeding case
sess_id = self.parameters.get("session-id")
if sess_id:
self._session = Session(self._user, {'id': sess_id})
self._session['userId'] = self.parameters.get("user-id", None)
self._session['testId'] = self.parameters.get("test-id", None)
self._test = Test(self._user, {'id': self._session['testId']})
exc = TaurusConfigError("Need signature for session")
self._session.data_signature = self.parameters.get("signature", exc)
self._session.kpi_target = self.parameters.get("kpi-target", self._session.kpi_target)
self.send_data = self.parameters.get("send-data", self.send_data)
self.upload_artifacts = self.parameters.get("upload-artifacts", self.upload_artifacts)
else:
try:
self._user.ping() # to check connectivity and auth
except HTTPError:
self.log.error("Cannot reach online results storage, maybe the address/token is wrong")
raise
if token:
wsp = self._user.accounts().workspaces()
if not wsp:
raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support")
finder = ProjectFinder(self.parameters, self.settings, self._user, wsp, self.log)
self._test = finder.resolve_external_test()
else:
self._test = Test(self._user, {'id': None})
self.report_name = self.parameters.get("report-name", self.settings.get("report-name", self.report_name))
if self.report_name == 'ask' and sys.stdin.isatty():
self.report_name = r_input("Please enter report-name: ")
if isinstance(self.engine.aggregator, ResultsProvider):
self.engine.aggregator.add_listener(self)
for service in self.engine.services:
if isinstance(service, Monitoring):
service.add_listener(self)
def startup(self):
"""
Initiate online test
"""
super(BlazeMeterUploader, self).startup()
self._user.log = self.log.getChild(self.__class__.__name__)
if not self._session:
url = self._start_online()
self.log.info("Started data feeding: %s", url)
if self.browser_open in ('start', 'both'):
open_browser(url)
if self._user.token and self.public_report:
report_link = self._master.make_report_public()
self.log.info("Public report link: %s", report_link)
def _start_online(self):
"""
Start online test
"""
self.log.info("Initiating data feeding...")
if self._test['id']:
self._session, self._master = self._test.start_external()
else:
self._session, self._master, self.results_url = self._test.start_anonymous_external_test()
self._test['id'] = self._session['testId']
if self._test.token:
self.results_url = self._master.address + '/app/#/masters/%s' % self._master['id']
if self.report_name:
self._session.set({"name": str(self.report_name)})
return self.results_url
def __get_jtls_and_more(self):
"""
Compress all files in artifacts dir to single zipfile
:rtype: (bzt.six.BytesIO,dict)
"""
mfile = BytesIO()
listing = {}
logs = set()
for handler in self.engine.log.parent.handlers:
if isinstance(handler, logging.FileHandler):
logs.add(handler.baseFilename)
max_file_size = self.settings.get('artifact-upload-size-limit', 10) * 1024 * 1024 # 10MB
with zipfile.ZipFile(mfile, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zfh:
for root, _, files in os.walk(self.engine.artifacts_dir):
for filename in files:
full_path = os.path.join(root, filename)
if full_path in logs:
logs.remove(full_path)
fsize = os.path.getsize(full_path)
if fsize <= max_file_size:
zfh.write(full_path, os.path.join(os.path.relpath(root, self.engine.artifacts_dir), filename))
listing[full_path] = fsize
else:
msg = "File %s exceeds maximum size quota of %s and won't be included into upload"
self.log.warning(msg, filename, max_file_size)
for filename in logs: # upload logs unconditionally
zfh.write(filename, os.path.basename(filename))
listing[filename] = os.path.getsize(filename)
return mfile, listing
def __upload_artifacts(self):
"""
If token provided, upload artifacts folder contents and bzt.log
"""
if not self._session.token:
return
worker_index = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL')
if worker_index:
suffix = '-%s' % worker_index
else:
suffix = ''
artifacts_zip = "artifacts%s.zip" % suffix
mfile, zip_listing = self.__get_jtls_and_more()
self.log.info("Uploading all artifacts as %s ...", artifacts_zip)
self._session.upload_file(artifacts_zip, mfile.getvalue())
self._session.upload_file(artifacts_zip + '.tail.bz', self.__format_listing(zip_listing))
handlers = self.engine.log.parent.handlers
for handler in handlers:
if isinstance(handler, logging.FileHandler):
fname = handler.baseFilename
self.log.info("Uploading %s", fname)
fhead, ftail = os.path.splitext(os.path.split(fname)[-1])
modified_name = fhead + suffix + ftail
with open(fname, 'rb') as _file:
self._session.upload_file(modified_name, _file.read())
_file.seek(-4096, 2)
tail = _file.read()
tail = tail[tail.index(b("\n")) + 1:]
self._session.upload_file(modified_name + ".tail.bz", tail)
def post_process(self):
"""
Upload results if possible
"""
if not self._session:
self.log.debug("No feeding session obtained, nothing to finalize")
return
self.log.debug("KPI bulk buffer len in post-proc: %s", len(self.kpi_buffer))
try:
self.log.info("Sending remaining KPI data to server...")
if self.send_data:
self.__send_data(self.kpi_buffer, False, True)
self.kpi_buffer = []
if self.send_monitoring:
self.__send_monitoring()
finally:
self._postproc_phase2()
if self.results_url:
if self.browser_open in ('end', 'both'):
open_browser(self.results_url)
self.log.info("Online report link: %s", self.results_url)
def _postproc_phase2(self):
try:
if self.upload_artifacts:
self.__upload_artifacts()
except (IOError, TaurusNetworkError):
self.log.warning("Failed artifact upload: %s", traceback.format_exc())
finally:
self._last_status_check = self.parameters.get('forced-last-check', self._last_status_check)
self.log.debug("Set last check time to: %s", self._last_status_check)
tries = self.send_interval # NOTE: you dirty one...
while not self._last_status_check and tries > 0:
self.log.info("Waiting for ping...")
time.sleep(self.send_interval)
tries -= 1
self._postproc_phase3()
def _postproc_phase3(self):
try:
if self.send_data:
self.end_online()
if self._user.token and self.engine.stopping_reason:
exc_class = self.engine.stopping_reason.__class__.__name__
note = "%s: %s" % (exc_class, str(self.engine.stopping_reason))
self.append_note_to_session(note)
if self._master:
self.append_note_to_master(note)
except KeyboardInterrupt:
raise
except BaseException as exc:
self.log.debug("Failed to finish online: %s", traceback.format_exc())
self.log.warning("Failed to finish online: %s", exc)
def end_online(self):
"""
Finish online test
"""
if not self._session:
self.log.debug("Feeding not started, so not stopping")
else:
self.log.info("Ending data feeding...")
if self._user.token:
self._session.stop()
else:
self._session.stop_anonymous()
def append_note_to_session(self, note):
self._session.fetch()
if 'note' in self._session:
note = self._session['note'] + '\n' + note
note = note.strip()
if note:
self._session.set({'note': note})
def append_note_to_master(self, note):
self._master.fetch()
if 'note' in self._master:
note = self._master['note'] + '\n' + note
note = note.strip()
if note:
self._master.set({'note': note})
def check(self):
"""
Send data if any in buffer
"""
self.log.debug("KPI bulk buffer len: %s", len(self.kpi_buffer))
if self.last_dispatch < (time.time() - self.send_interval):
self.last_dispatch = time.time()
if self.send_data and len(self.kpi_buffer):
self.__send_data(self.kpi_buffer)
self.kpi_buffer = []
if self.send_monitoring:
self.__send_monitoring()
return super(BlazeMeterUploader, self).check()
@send_with_retry
def __send_data(self, data, do_check=True, is_final=False):
"""
:type data: list[bzt.modules.aggregator.DataPoint]
"""
if not self._session:
return
serialized = self._dpoint_serializer.get_kpi_body(data, is_final)
self._session.send_kpi_data(serialized, do_check)
def aggregated_second(self, data):
"""
Send online data
:param data: DataPoint
"""
if self.send_data:
self.kpi_buffer.append(data)
def monitoring_data(self, data):
if self.send_monitoring:
self.monitoring_buffer.record_data(data)
@send_with_retry
def __send_monitoring(self):
engine_id = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL', '')
if not engine_id:
engine_id = "0"
data = self.monitoring_buffer.get_monitoring_json(self._session)
self._session.send_monitoring_data(engine_id, data)
def __format_listing(self, zip_listing):
lines = []
for fname in sorted(zip_listing.keys()):
bytestr = humanize_bytes(zip_listing[fname])
if fname.startswith(self.engine.artifacts_dir):
fname = fname[len(self.engine.artifacts_dir) + 1:]
lines.append(bytestr + " " + fname)
return "\n".join(lines)
class MonitoringBuffer(object):
def __init__(self, size_limit, parent_log):
self.size_limit = size_limit
self.data = defaultdict(OrderedDict)
self.log = parent_log.getChild(self.__class__.__name__)
        # data :: dict(datasource -> dict(timestamp -> datapoint))
# datapoint :: dict(metric -> value)
def record_data(self, data):
for monitoring_item in data:
item = copy.deepcopy(monitoring_item)
source = item.pop('source')
timestamp = int(item['ts'])
item['interval'] = 1
buff = self.data[source]
if timestamp in buff:
buff[timestamp].update(item)
else:
buff[timestamp] = item
sources = list(self.data)
for source in sources:
if len(self.data[source]) > self.size_limit:
self._downsample(self.data[source])
self.log.debug("Monitoring buffer size '%s': %s", source, len(self.data[source]))
def _downsample(self, buff):
size = 1
while len(buff) > self.size_limit:
self._merge_small_intervals(buff, size)
size += 1
def _merge_small_intervals(self, buff, size):
timestamps = list(buff)
merged_already = set()
for left, right in zip(timestamps, timestamps[1:]):
if left in merged_already:
continue
if buff[left]['interval'] <= size:
self._merge_datapoints(buff[left], buff[right])
buff.pop(right)
merged_already.add(left)
merged_already.add(right)
@staticmethod
def _merge_datapoints(left, right):
sum_size = float(left['interval'] + right['interval'])
for metric in set(right):
if metric in ('ts', 'interval'):
continue
if metric in left:
left[metric] = (left[metric] * left['interval'] + right[metric] * right['interval']) / sum_size
else:
left[metric] = right[metric]
left['interval'] = sum_size
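    # Worked example with invented numbers: merging right={'interval': 1, 'cpu': 70} into
    # left={'interval': 1, 'cpu': 50} leaves left as {'interval': 2.0, 'cpu': 60.0} --
    # metrics are combined as an interval-weighted average.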
def get_monitoring_json(self, session):
"""
:type session: Session
"""
results = {}
hosts = []
kpis = {}
for source, buff in iteritems(self.data):
for timestamp, item in iteritems(buff):
if source == 'local':
source = platform.node()
if source not in results:
results[source] = {
"name": source,
"intervals": OrderedDict()
}
if source not in hosts:
hosts.append(source)
src = results[source]
tstmp = timestamp * 1000
tstmp_key = '%d' % tstmp
if tstmp_key not in src['intervals']:
src['intervals'][tstmp_key] = {
"start": tstmp,
"duration": item['interval'] * 1000,
"indicators": {}
}
for field, value in iteritems(item):
if field.lower().startswith('conn-all'):
field = 'Connections'
elif field.lower().startswith('cpu'):
field = 'CPU'
elif field.lower().startswith('mem'):
field = 'Memory'
value *= 100
elif field == 'bytes-recv' or field.lower().startswith('net'):
field = 'Network I/O'
else:
continue # maybe one day BZA will accept all other metrics...
if field not in kpis:
kpis[field] = field
src['intervals'][tstmp_key]['indicators'][field] = {
"value": value,
"name": field,
"std": 0,
"mean": 0,
"sum": 0,
"min": 0,
"max": 0,
"sumOfSquares": 0,
"n": 1
}
kpis = {"Network I/O": "Network I/O", "Memory": "Memory", "CPU": "CPU", "Connections": "Connections"}
return {
"reportInfo": {
"sessionId": session['id'],
"timestamp": time.time(),
"userId": session['userId'],
"testId": session['testId'],
"type": "MONITOR",
"testName": ""
},
"kpis": kpis,
"hosts": hosts,
"results": results
}
class DatapointSerializer(object):
def __init__(self, owner):
"""
:type owner: BlazeMeterUploader
"""
super(DatapointSerializer, self).__init__()
self.owner = owner
        self.multi = 1000  # multiplier factor for reporting
def get_kpi_body(self, data_buffer, is_final):
# - reporting format:
# {labels: <data>, # see below
# sourceID: <id of BlazeMeterClient object>,
        #   [final: True]}  # for last report
#
# - elements of 'data' are described in __get_label()
#
# - elements of 'intervals' are described in __get_interval()
        #   every interval contains info about the response codes received during it.
report_items = BetterDict()
if data_buffer:
self.owner.first_ts = min(self.owner.first_ts, data_buffer[0][DataPoint.TIMESTAMP])
self.owner.last_ts = max(self.owner.last_ts, data_buffer[-1][DataPoint.TIMESTAMP])
# following data is received in the cumulative way
for label, kpi_set in iteritems(data_buffer[-1][DataPoint.CUMULATIVE]):
report_item = self.__get_label(label, kpi_set)
self.__add_errors(report_item, kpi_set) # 'Errors' tab
report_items[label] = report_item
# fill 'Timeline Report' tab with intervals data
# intervals are received in the additive way
for dpoint in data_buffer:
time_stamp = dpoint[DataPoint.TIMESTAMP]
for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]):
                    exc = TaurusInternalException('Cumulative KPISet is inconsistent')
report_item = report_items.get(label, exc)
report_item['intervals'].append(self.__get_interval(kpi_set, time_stamp))
report_items = [report_items[key] for key in sorted(report_items.keys())] # convert dict to list
data = {"labels": report_items, "sourceID": id(self.owner)}
if is_final:
data['final'] = True
return to_json(data)
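    # Sketch of the resulting JSON body (field values are invented for illustration):
    #   {"labels": [<per-label dicts built by __get_label()>],
    #    "sourceID": 140234567,
    #    "final": true}    # "final" appears only when is_final is True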
@staticmethod
def __add_errors(report_item, kpi_set):
errors = kpi_set[KPISet.ERRORS]
for error in errors:
if error["type"] == KPISet.ERRTYPE_ERROR:
report_item['errors'].append({
'm': error['msg'],
"rc": error['rc'],
"count": error['cnt'],
})
else:
report_item['assertions'].append({
'failureMessage': error['msg'],
'name': 'All Assertions',
'failures': error['cnt']
                    # TODO: "count", "errors" = ? (according to Udi's format description)
})
def __get_label(self, name, cumul):
return {
"n": cumul[KPISet.SAMPLE_COUNT], # total count of samples
"name": name if name else 'ALL', # label
"interval": 1, # not used
"intervals": [], # list of intervals, fill later
"samplesNotCounted": 0, # not used
"assertionsNotCounted": 0, # not used
"failedEmbeddedResources": [], # not used
"failedEmbeddedResourcesSpilloverCount": 0, # not used
"otherErrorsCount": 0, # not used
"errors": [], # list of errors, fill later
"assertions": [], # list of assertions, fill later
"percentileHistogram": [], # not used
"percentileHistogramLatency": [], # not used
"percentileHistogramBytes": [], # not used
"empty": False, # not used
"summary": self.__get_summary(cumul) # summary info
}
def __get_summary(self, cumul):
return {
"first": self.owner.first_ts,
"last": self.owner.last_ts,
"duration": self.owner.last_ts - self.owner.first_ts,
"failed": cumul[KPISet.FAILURES],
"hits": cumul[KPISet.SAMPLE_COUNT],
"avg": int(self.multi * cumul[KPISet.AVG_RESP_TIME]),
"min": int(self.multi * cumul[KPISet.PERCENTILES]["0.0"]) if "0.0" in cumul[KPISet.PERCENTILES] else 0,
"max": int(self.multi * cumul[KPISet.PERCENTILES]["100.0"]) if "100.0" in cumul[KPISet.PERCENTILES] else 0,
"std": int(self.multi * cumul[KPISet.STDEV_RESP_TIME]),
"tp90": int(self.multi * cumul[KPISet.PERCENTILES]["90.0"]) if "90.0" in cumul[KPISet.PERCENTILES] else 0,
"tp95": int(self.multi * cumul[KPISet.PERCENTILES]["95.0"]) if "95.0" in cumul[KPISet.PERCENTILES] else 0,
"tp99": int(self.multi * cumul[KPISet.PERCENTILES]["99.0"]) if "99.0" in cumul[KPISet.PERCENTILES] else 0,
"latencyAvg": int(self.multi * cumul[KPISet.AVG_LATENCY]),
"latencyMax": 0,
"latencyMin": 0,
"latencySTD": 0,
"bytes": cumul[KPISet.BYTE_COUNT],
"bytesMax": 0,
"bytesMin": 0,
"bytesAvg": int(cumul[KPISet.BYTE_COUNT] / float(cumul[KPISet.SAMPLE_COUNT])),
"bytesSTD": 0,
"otherErrorsSpillcount": 0,
}
def __get_interval(self, item, time_stamp):
# rc_list - list of info about response codes:
# {'n': <number of code encounters>,
        #  'f': <number of failed requests (e.g. important for assertions)>
# 'rc': <string value of response code>}
rc_list = []
for r_code, cnt in iteritems(item[KPISet.RESP_CODES]):
fails = [err['cnt'] for err in item[KPISet.ERRORS] if str(err['rc']) == r_code]
rc_list.append({"n": cnt, 'f': fails, "rc": r_code})
return {
"ec": item[KPISet.FAILURES],
"ts": time_stamp,
"na": item[KPISet.CONCURRENCY],
"n": item[KPISet.SAMPLE_COUNT],
"failed": item[KPISet.FAILURES],
"rc": rc_list,
"t": {
"min": int(self.multi * item[KPISet.PERCENTILES]["0.0"]) if "0.0" in item[KPISet.PERCENTILES] else 0,
"max": int(self.multi * item[KPISet.PERCENTILES]["100.0"]) if "100.0" in item[
KPISet.PERCENTILES] else 0,
"sum": self.multi * item[KPISet.AVG_RESP_TIME] * item[KPISet.SAMPLE_COUNT],
"n": item[KPISet.SAMPLE_COUNT],
"std": self.multi * item[KPISet.STDEV_RESP_TIME],
"avg": self.multi * item[KPISet.AVG_RESP_TIME]
},
"lt": {
"min": 0,
"max": 0,
"sum": self.multi * item[KPISet.AVG_LATENCY] * item[KPISet.SAMPLE_COUNT],
"n": item[KPISet.SAMPLE_COUNT],
"std": 0,
"avg": self.multi * item[KPISet.AVG_LATENCY]
},
"by": {
"min": 0,
"max": 0,
"sum": item[KPISet.BYTE_COUNT],
"n": item[KPISet.SAMPLE_COUNT],
"std": 0,
"avg": item[KPISet.BYTE_COUNT] / float(item[KPISet.SAMPLE_COUNT])
},
}
class ProjectFinder(object):
"""
:type user: User
"""
def __init__(self, parameters, settings, user, workspaces, parent_log):
super(ProjectFinder, self).__init__()
self.default_test_name = "Taurus Test"
self.parameters = parameters
self.settings = settings
self.log = parent_log.getChild(self.__class__.__name__)
self.user = user
self.workspaces = workspaces
self.is_functional = False
def _find_project(self, proj_name):
"""
:rtype: bzt.bza.Project
"""
if isinstance(proj_name, (int, float)): # TODO: what if it's string "123"?
proj_id = int(proj_name)
self.log.debug("Treating project name as ID: %s", proj_id)
project = self.workspaces.projects(ident=proj_id).first()
if not project:
raise TaurusConfigError("BlazeMeter project not found by ID: %s" % proj_id)
return project
elif proj_name is not None:
return self.workspaces.projects(name=proj_name).first()
return None
def _ws_proj_switch(self, project):
if project:
return project
else:
return self.workspaces
def resolve_external_test(self):
proj_name = self.parameters.get("project", self.settings.get("project", None))
test_name = self.parameters.get("test", self.settings.get("test", self.default_test_name))
project = self._find_project(proj_name)
if not project and proj_name:
project = self._default_or_create_project(proj_name)
test = self._ws_proj_switch(project).tests(name=test_name, test_type='external').first()
if not test:
if not project:
project = self._default_or_create_project(proj_name)
test = project.create_test(test_name, {"type": "external"})
return test
def resolve_account(self, account_name):
account = None
if isinstance(account_name, (int, float)): # TODO: what if it's string "123"?
acc_id = int(account_name)
self.log.debug("Treating account name as ID: %s", acc_id)
account = self.user.accounts(ident=acc_id).first()
if not account:
raise TaurusConfigError("BlazeMeter account not found by ID: %s" % acc_id)
elif account_name:
account = self.user.accounts(name=account_name).first()
if not account:
raise TaurusConfigError("BlazeMeter account not found by name: %s" % account_name)
if account:
return account
self.user.fetch()
account = self.user.accounts(ident=self.user['defaultProject']['accountId']).first()
self.log.debug("Using default account: %s", account)
return account
def resolve_workspace(self, account, workspace_name):
workspace = None
if isinstance(workspace_name, (int, float)): # TODO: what if it's string "123"?
workspace_id = int(workspace_name)
self.log.debug("Treating workspace name as ID: %s", workspace_id)
workspace = account.workspaces(ident=workspace_id).first()
if not workspace:
raise TaurusConfigError("BlazeMeter workspace not found by ID: %s" % workspace_id)
elif workspace_name is not None:
workspace = account.workspaces(name=workspace_name).first()
if not workspace:
raise TaurusConfigError("BlazeMeter workspace not found: %s" % workspace_name)
if workspace is None:
workspace = account.workspaces(ident=self.user['defaultProject']['workspaceId']).first()
self.log.debug("Using first workspace: %s" % workspace)
return workspace
def resolve_project(self, workspace, project_name):
project = None
if isinstance(project_name, (int, float)): # TODO: what if it's string "123"?
proj_id = int(project_name)
self.log.debug("Treating project name as ID: %s", proj_id)
project = workspace.projects(ident=proj_id).first()
if not project:
raise TaurusConfigError("BlazeMeter project not found by ID: %s" % proj_id)
elif project_name is not None:
project = workspace.projects(name=project_name).first()
if project is None:
project = self._create_project_or_use_default(workspace, project_name)
return project
def resolve_test(self, project, test_name, taurus_only=False):
test = None
is_int = isinstance(test_name, (int, float))
is_digit = isinstance(test_name, (string_types, text_type)) and test_name.isdigit()
if self.is_functional:
test_type = FUNC_TEST_TYPE
elif taurus_only:
test_type = TAURUS_TEST_TYPE
else:
test_type = None
if is_int or is_digit:
test_id = int(test_name)
self.log.debug("Treating project name as ID: %s", test_id)
test = project.multi_tests(ident=test_id).first()
if not test:
test = project.tests(ident=test_id, test_type=test_type).first()
if not test:
raise TaurusConfigError("BlazeMeter test not found by ID: %s" % test_id)
elif test_name is not None:
test = project.multi_tests(name=test_name).first()
if not test:
test = project.tests(name=test_name, test_type=test_type).first()
return test
def resolve_test_type(self):
use_deprecated = self.settings.get("use-deprecated-api", True)
default_location = self.settings.get("default-location", None)
account_name = self.parameters.get("account", self.settings.get("account", None))
workspace_name = self.parameters.get("workspace", self.settings.get("workspace", None))
project_name = self.parameters.get("project", self.settings.get("project", None))
test_name = self.parameters.get("test", self.settings.get("test", self.default_test_name))
launch_existing_test = self.settings.get("launch-existing-test", False)
send_report_email = self.settings.get("send-report-email", False)
test_spec = parse_blazemeter_test_link(test_name)
self.log.debug("Parsed test link: %s", test_spec)
look_for_taurus_only = not launch_existing_test
if test_spec is not None:
# if we're to launch existing test - look for any type, otherwise - taurus only
account, workspace, project, test = self.user.test_by_ids(test_spec.account_id, test_spec.workspace_id,
test_spec.project_id, test_spec.test_id,
taurus_only=look_for_taurus_only)
if test is None:
raise TaurusConfigError("Test not found: %s", test_name)
self.log.info("Found test by link: %s", test_name)
else:
account = self.resolve_account(account_name)
workspace = self.resolve_workspace(account, workspace_name)
project = self.resolve_project(workspace, project_name)
test = self.resolve_test(project, test_name, taurus_only=look_for_taurus_only)
if isinstance(test, MultiTest):
self.log.debug("Detected test type: multi")
test_class = CloudCollectionTest
elif isinstance(test, Test):
self.log.debug("Detected test type: standard")
test_class = CloudTaurusTest
else:
if launch_existing_test:
raise TaurusConfigError("Can't find test for launching: %r" % test_name)
if use_deprecated or self.settings.get("cloud-mode") == 'taurusCloud':
self.log.debug("Will create standard test")
test_class = CloudTaurusTest
else:
self.log.debug("Will create a multi test")
test_class = CloudCollectionTest
assert test_class is not None
router = test_class(self.user, test, project, test_name, default_location, launch_existing_test,
self.log)
router._workspaces = self.workspaces
router.cloud_mode = self.settings.get("cloud-mode", None)
router.dedicated_ips = self.settings.get("dedicated-ips", False)
router.is_functional = self.is_functional
router.send_report_email = send_report_email
return router
def _create_project_or_use_default(self, workspace, proj_name):
if proj_name:
return workspace.create_project(proj_name)
else:
info = self.user.fetch()
self.log.debug("Looking for default project: %s", info['defaultProject']['id'])
project = self.workspaces.projects(ident=info['defaultProject']['id']).first()
if not project:
project = workspace.create_project("Taurus Tests Project")
return project
def _default_or_create_project(self, proj_name):
if proj_name:
return self.workspaces.first().create_project(proj_name)
else:
info = self.user.fetch()
project = self.workspaces.projects(ident=info['defaultProject']['id']).first()
if not project:
project = self.workspaces.first().create_project("Taurus Tests Project")
return project
class BaseCloudTest(object):
"""
:type _user: bzt.bza.User
:type _project: bzt.bza.Project
:type _test: bzt.bza.Test
:type master: bzt.bza.Master
:type cloud_mode: str
"""
def __init__(self, user, test, project, test_name, default_location, launch_existing_test, parent_log):
self.default_test_name = "Taurus Test"
self.log = parent_log.getChild(self.__class__.__name__)
self.default_location = default_location
self._test_name = test_name
self._last_status = None
self._sessions = None
self._started = False
self._user = user
self._project = project
self._test = test
self.launch_existing_test = launch_existing_test
self.master = None
self._workspaces = None
self.cloud_mode = None
self.dedicated_ips = False
self.is_functional = False
self.send_report_email = False
@abstractmethod
def prepare_locations(self, executors, engine_config):
pass
def prepare_cloud_config(self, engine_config):
config = copy.deepcopy(engine_config)
if not isinstance(config[ScenarioExecutor.EXEC], list):
config[ScenarioExecutor.EXEC] = [config[ScenarioExecutor.EXEC]]
provisioning = config.get(Provisioning.PROV)
for execution in config[ScenarioExecutor.EXEC]:
execution[ScenarioExecutor.CONCURR] = execution.get(ScenarioExecutor.CONCURR).get(provisioning, None)
execution[ScenarioExecutor.THRPT] = execution.get(ScenarioExecutor.THRPT).get(provisioning, None)
config.filter(CLOUD_CONFIG_FILTER_RULES)
config['local-bzt-version'] = engine_config.get('version', 'N/A')
for key in list(config.keys()):
if not config[key]:
config.pop(key)
self.cleanup_defaults(config)
if self.dedicated_ips:
config[CloudProvisioning.DEDICATED_IPS] = True
assert isinstance(config, Configuration)
return config
@abstractmethod
def resolve_test(self, taurus_config, rfiles, delete_old_files=False):
pass
@abstractmethod
def launch_test(self):
"""launch cloud test"""
pass
@abstractmethod
def start_if_ready(self):
"""start cloud test if all engines are ready"""
pass
@abstractmethod
def get_test_status_text(self):
pass
@abstractmethod
def stop_test(self):
pass
def get_master_status(self):
self._last_status = self.master.get_status()
return self._last_status
@staticmethod
def cleanup_defaults(config):
# cleanup configuration from empty values
default_values = {
'concurrency': None,
'iterations': None,
'ramp-up': None,
'steps': None,
'throughput': None,
'hold-for': 0,
'files': []
}
for execution in config[ScenarioExecutor.EXEC]:
if isinstance(execution['concurrency'], dict):
execution['concurrency'] = {k: v for k, v in iteritems(execution['concurrency']) if v is not None}
if not execution['concurrency']:
execution['concurrency'] = None
for key, value in iteritems(default_values):
if key in execution and execution[key] == value:
execution.pop(key)
return config
class CloudTaurusTest(BaseCloudTest):
def prepare_locations(self, executors, engine_config):
available_locations = {}
is_taurus4 = self.cloud_mode == 'taurusCloud'
workspace = Workspace(self._project, {'id': self._project['workspaceId']})
for loc in workspace.locations(include_private=is_taurus4):
available_locations[loc['id']] = loc
if CloudProvisioning.LOC in engine_config and not is_taurus4:
self.log.warning("Deprecated test API doesn't support global locations")
for executor in executors:
if CloudProvisioning.LOC in executor.execution \
and isinstance(executor.execution[CloudProvisioning.LOC], dict):
exec_locations = executor.execution[CloudProvisioning.LOC]
self._check_locations(exec_locations, available_locations)
elif CloudProvisioning.LOC in engine_config and is_taurus4:
self._check_locations(engine_config[CloudProvisioning.LOC], available_locations)
else:
default_loc = self._get_default_location(available_locations)
executor.execution[CloudProvisioning.LOC] = BetterDict()
executor.execution[CloudProvisioning.LOC].merge({default_loc: 1})
executor.get_load() # we need it to resolve load settings into full form
def _get_default_location(self, available_locations):
if self.default_location and self.default_location in available_locations:
return self.default_location
self.log.debug("Default location %s not found", self.default_location)
for location_id in sorted(available_locations):
location = available_locations[location_id]
if not location_id.startswith('harbor-') and location['sandbox']:
return location_id
self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys()))
raise TaurusConfigError("No sandbox or default location available, please specify locations manually")
def _check_locations(self, locations, available_locations):
for location in locations:
if location not in available_locations:
self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys()))
raise TaurusConfigError("Invalid location requested: %s" % location)
def resolve_test(self, taurus_config, rfiles, delete_old_files=False):
if self.launch_existing_test:
return
if self._test is not None:
test_type = self._test.get("configuration").get("type")
should_be_func = (self.is_functional and test_type != FUNC_TEST_TYPE)
should_be_taurus = (not self.is_functional and test_type != TAURUS_TEST_TYPE)
if should_be_func or should_be_taurus:
self.log.debug("Can't reuse test type %r as Taurus test, will create new one", test_type)
self._test = None
if self._test is None:
test_config = {
"type": FUNC_TEST_TYPE if self.is_functional else TAURUS_TEST_TYPE,
"plugins": {
"taurus": {
"filename": "" # without this line it does not work
}
}
}
self._test = self._project.create_test(self._test_name, test_config)
if delete_old_files:
self._test.delete_files()
taurus_config = yaml.dump(taurus_config, default_flow_style=False, explicit_start=True, canonical=False)
self._test.upload_files(taurus_config, rfiles)
self._test.update_props({'configuration': {'executionType': self.cloud_mode}})
self._test.update_props({
'configuration': {'plugins': {'reportEmail': {"enabled": self.send_report_email}}}
})
def launch_test(self):
self.log.info("Initiating cloud test with %s ...", self._test.address)
self.master = self._test.start(as_functional=self.is_functional)
return self.master.address + '/app/#/masters/%s' % self.master['id']
def start_if_ready(self):
self._started = True
def stop_test(self):
if self.master:
self.log.info("Ending cloud test...")
self.master.stop()
def get_test_status_text(self):
if not self._sessions:
self._sessions = self.master.sessions()
if not self._sessions:
return
mapping = BetterDict() # dict(executor -> dict(scenario -> dict(location -> servers count)))
for session in self._sessions:
try:
name_split = [part.strip() for part in session['name'].split('/')]
location = session['configuration']['location']
count = session['configuration']['serversCount']
ex_item = mapping.get(name_split[0], force_set=True)
if len(name_split) > 1:
name = name_split[1]
else:
name = "N/A"
ex_item.get(name, force_set=True)[location] = count
except KeyError:
self._sessions = None
txt = "%s #%s\n" % (self._test['name'], self.master['id'])
for executor, scenarios in iteritems(mapping):
txt += " %s" % executor
for scenario, locations in iteritems(scenarios):
txt += " %s:\n" % scenario
for location, count in iteritems(locations):
txt += " Agents in %s: %s\n" % (location, count)
return txt
class CloudCollectionTest(BaseCloudTest):
def prepare_locations(self, executors, engine_config):
available_locations = {}
for loc in self._workspaces.locations(include_private=True):
available_locations[loc['id']] = loc
global_locations = engine_config.get(CloudProvisioning.LOC)
self._check_locations(global_locations, available_locations)
for executor in executors:
if CloudProvisioning.LOC in executor.execution:
exec_locations = executor.execution[CloudProvisioning.LOC]
self._check_locations(exec_locations, available_locations)
else:
if not global_locations:
default_loc = self._get_default_location(available_locations)
executor.execution[CloudProvisioning.LOC] = BetterDict()
executor.execution[CloudProvisioning.LOC].merge({default_loc: 1})
executor.get_load() # we need it to resolve load settings into full form
if global_locations and all(CloudProvisioning.LOC in executor.execution for executor in executors):
self.log.warning("Each execution has locations specified, global locations won't have any effect")
engine_config.pop(CloudProvisioning.LOC)
def _get_default_location(self, available_locations):
for location_id in sorted(available_locations):
location = available_locations[location_id]
if location['sandbox']:
return location_id
self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys()))
raise TaurusConfigError("No sandbox or default location available, please specify locations manually")
def _check_locations(self, locations, available_locations):
for location in locations:
if location not in available_locations:
self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys()))
raise TaurusConfigError("Invalid location requested: %s" % location)
def resolve_test(self, taurus_config, rfiles, delete_old_files=False):
if self.launch_existing_test:
return
# TODO: handle delete_old_files ?
if not self._project:
raise TaurusInternalException() # TODO: build unit test to catch this situation
collection_draft = self._user.collection_draft(self._test_name, taurus_config, rfiles)
if self._test is None:
self.log.debug("Creating cloud collection test")
self._test = self._project.create_multi_test(collection_draft)
else:
self.log.debug("Overriding cloud collection test")
collection_draft['projectId'] = self._project['id']
self._test.update_collection(collection_draft)
def launch_test(self):
self.log.info("Initiating cloud test with %s ...", self._test.address)
self.master = self._test.start()
return self.master.address + '/app/#/masters/%s' % self.master['id']
def start_if_ready(self):
if self._started:
return
if self._last_status is None:
return
sessions = self._last_status.get("sessions")
if sessions and all(session["status"] == "JMETER_CONSOLE_INIT" for session in sessions):
self.log.info("All servers are ready, starting cloud test")
self.master.force_start()
self._started = True
def await_test_end(self):
iterations = 0
while True:
if iterations > 100:
self.log.debug("Await: iteration limit reached")
return
status = self.master.get_status()
if status.get("status") == "ENDED":
return
iterations += 1
time.sleep(1.0)
def stop_test(self):
if self._started and self._test:
self.log.info("Shutting down cloud test...")
self._test.stop()
self.await_test_end()
elif self.master:
self.log.info("Shutting down cloud test...")
self.master.stop()
def get_test_status_text(self):
if not self._sessions:
sessions = self.master.sessions()
if not sessions:
return
self._sessions = {session["id"]: session for session in sessions}
if not self._last_status:
return
mapping = BetterDict() # dict(scenario -> dict(location -> servers count))
for session_status in self._last_status["sessions"]:
try:
session_id = session_status["id"]
session = self._sessions[session_id]
location = session_status["locationId"]
servers_count = len(session_status["readyStatus"]["servers"])
name_split = [part.strip() for part in session['name'].split('/')]
if len(name_split) > 1:
scenario = name_split[1]
else:
scenario = "N/A"
scenario_item = mapping.get(scenario, force_set=True)
scenario_item.get(location, 0, force_set=True)
scenario_item[location] += servers_count
except (KeyError, TypeError):
self._sessions = None
txt = "%s #%s\n" % (self._test['name'], self.master['id'])
for scenario, locations in iteritems(mapping):
txt += " %s:\n" % scenario
for location, count in iteritems(locations):
txt += " Agents in %s: %s\n" % (location, count)
return txt
class MasterProvisioning(Provisioning):
def get_rfiles(self):
rfiles = []
additional_files = []
for executor in self.executors:
executor_rfiles = executor.get_resource_files()
config = to_json(self.engine.config.get('execution'))
config += to_json(self.engine.config.get('scenarios'))
config += to_json(executor.settings)
for rfile in executor_rfiles:
if not os.path.exists(self.engine.find_file(rfile)): # TODO: what about files starting with 'http://'?
raise TaurusConfigError("%s: resource file '%s' not found" % (executor, rfile))
if to_json(rfile) not in config: # TODO: this check might need to be improved
additional_files.append(rfile)
rfiles += executor_rfiles
if additional_files:
raise TaurusConfigError("Following files can't be handled in cloud: %s" % additional_files)
rfiles = list(set(rfiles))
self.log.debug("All resource files are: %s", rfiles)
return rfiles
def _fix_filenames(self, old_names):
# check for conflicting base names
old_full_names = [get_full_path(self.engine.find_file(x)) for x in old_names]
rbases = [os.path.basename(get_full_path(rfile)) for rfile in old_full_names]
rpaths = [get_full_path(rfile, step_up=1) for rfile in old_full_names]
while rbases:
base, path = rbases.pop(), rpaths.pop()
if base in rbases:
index = rbases.index(base)
if path != rpaths[index]:
msg = 'Resource "%s" occurs more than once, rename it to avoid data loss'
raise TaurusConfigError(msg % base)
old_full_names = self.__pack_dirs(old_full_names)
new_base_names = [os.path.basename(f) for f in old_full_names]
self.log.debug('Replace file names in config: %s with %s', old_names, new_base_names)
replace_in_config(self.engine.config, old_names, new_base_names, log=self.log)
old_full_names = list(set(old_full_names))
return old_full_names
def __pack_dirs(self, source_list):
result_list = [] # files for upload
packed_list = [] # files for unpacking
for source in source_list:
source = get_full_path(source)
if os.path.isfile(source):
result_list.append(source)
else: # source is dir
self.log.debug("Compress directory '%s'", source)
base_dir_name = os.path.basename(source)
zip_name = self.engine.create_artifact(base_dir_name, '.zip')
relative_prefix_len = len(os.path.dirname(source))
with zipfile.ZipFile(zip_name, 'w') as zip_file:
for _file in get_files_recursive(source):
zip_file.write(_file, _file[relative_prefix_len:])
result_list.append(zip_name)
packed_list.append(base_dir_name + '.zip')
if packed_list:
services = self.engine.config.get(Service.SERV, [], force_set=True)
services.append({'module': Unpacker.UNPACK, Unpacker.FILES: packed_list, 'run-at': 'local'})
return result_list
class CloudProvisioning(MasterProvisioning, WidgetProvider):
"""
:type user: bzt.bza.User
:type results_reader: ResultsFromBZA
:type router: BaseCloudTest
:type _workspaces: bzt.bza.BZAObjectsList[bzt.bza.Workspace]
"""
LOC = "locations"
LOC_WEIGHTED = "locations-weighted"
DEDICATED_IPS = "dedicated-ips"
def __init__(self):
super(CloudProvisioning, self).__init__()
self.results_url = None
self.results_reader = None
self.user = User()
self.__last_master_status = None
self.browser_open = 'start'
self.widget = None
self.detach = False
self.router = None
self.test_ended = False
self.check_interval = 5.0
self._last_check_time = None
self.public_report = False
self.report_name = None
self._workspaces = []
self.launch_existing_test = None
self.disallow_empty_execution = False
def _merge_with_blazemeter_config(self):
if 'blazemeter' not in self.engine.config.get('modules'):
self.log.debug("Module 'blazemeter' wasn't found in base config")
return
bm_mod = self.engine.instantiate_module('blazemeter')
bm_settings = copy.deepcopy(bm_mod.settings)
bm_settings.update(self.settings)
self.settings = bm_settings
def prepare(self):
self._merge_with_blazemeter_config()
self._configure_client()
self._workspaces = self.user.accounts().workspaces()
if not self._workspaces:
raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support")
self.__dump_locations_if_needed()
super(CloudProvisioning, self).prepare()
self.browser_open = self.settings.get("browser-open", self.browser_open)
self.detach = self.settings.get("detach", self.detach)
self.check_interval = dehumanize_time(self.settings.get("check-interval", self.check_interval))
self.public_report = self.settings.get("public-report", self.public_report)
is_execution_empty = not self.engine.config.get("execution")
self.launch_existing_test = self.settings.get("launch-existing-test", is_execution_empty, force_set=True)
if not self.launch_existing_test:
self._filter_reporting()
finder = ProjectFinder(self.parameters, self.settings, self.user, self._workspaces, self.log)
finder.default_test_name = "Taurus Cloud Test"
finder.is_functional = self.engine.is_functional_mode()
self.router = finder.resolve_test_type()
if not self.launch_existing_test:
self.router.prepare_locations(self.executors, self.engine.config)
res_files = self.get_rfiles()
files_for_cloud = self._fix_filenames(res_files)
config_for_cloud = self.router.prepare_cloud_config(self.engine.config)
config_for_cloud.dump(self.engine.create_artifact("cloud", ""))
del_files = self.settings.get("delete-test-files", True)
self.router.resolve_test(config_for_cloud, files_for_cloud, del_files)
self.report_name = self.settings.get("report-name", self.report_name)
if self.report_name == 'ask' and sys.stdin.isatty():
self.report_name = r_input("Please enter report-name: ")
self.widget = self.get_widget()
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.results_reader = ResultsFromBZA()
self.results_reader.log = self.log
self.engine.aggregator.add_underling(self.results_reader)
elif isinstance(self.engine.aggregator, FunctionalAggregator):
self.results_reader = FunctionalBZAReader(self.log)
self.engine.aggregator.add_underling(self.results_reader)
def __dump_locations_if_needed(self):
if self.settings.get("dump-locations", False):
self.log.warning("Dumping available locations instead of running the test")
locations = {}
for loc in self._workspaces.locations(include_private=True):
locations[loc['id']] = loc
for location_id in sorted(locations):
location = locations[location_id]
self.log.info("Location: %s\t%s", location_id, location['title'])
raise NormalShutdown("Done listing locations")
def _filter_reporting(self):
reporting = self.engine.config.get(Reporter.REP, [])
new_reporting = []
for index, reporter in enumerate(reporting):
reporter = ensure_is_dict(reporting, index, "module")
exc = TaurusConfigError("'module' attribute not found in %s" % reporter)
cls = reporter.get('module', exc)
if cls == 'blazemeter':
self.log.warning("Explicit blazemeter reporting is skipped for cloud")
else:
new_reporting.append(reporter)
self.engine.config[Reporter.REP] = new_reporting
def _configure_client(self):
self.user.log = self.log
self.user.logger_limit = self.settings.get("request-logging-limit", self.user.logger_limit)
self.user.address = self.settings.get("address", self.user.address)
self.user.token = self.settings.get("token", self.user.token)
self.user.timeout = dehumanize_time(self.settings.get("timeout", self.user.timeout))
if not self.user.token:
raise TaurusConfigError("You must provide API token to use cloud provisioning")
def startup(self):
super(CloudProvisioning, self).startup()
self.results_url = self.router.launch_test()
self.log.info("Started cloud test: %s", self.results_url)
if self.results_url:
if self.browser_open in ('start', 'both'):
open_browser(self.results_url)
if self.user.token and self.public_report:
public_link = self.router.master.make_report_public()
self.log.info("Public report link: %s", public_link)
if self.report_name:
self.router.master.set({"name": str(self.report_name)})
def _should_skip_check(self):
now = time.time()
if self._last_check_time is None:
return False
elif now >= self._last_check_time + self.check_interval:
return False
else:
return True
def check(self):
if self.detach:
self.log.warning('Detaching Taurus from started test...')
return True
if self._should_skip_check():
self.log.debug("Skipping cloud status check")
return False
self._last_check_time = time.time()
master = self._check_master_status()
if "status" in master and master['status'] != self.__last_master_status:
self.__last_master_status = master['status']
self.log.info("Cloud test status: %s", self.__last_master_status)
if self.results_reader is not None and 'progress' in master and master['progress'] >= 100:
self.results_reader.master = self.router.master
if 'progress' in master and master['progress'] > 100:
self.log.info("Test was stopped in the cloud: %s", master['status'])
self.test_ended = True
return True
self.router.start_if_ready()
self.widget.update()
return super(CloudProvisioning, self).check()
@get_with_retry
def _check_master_status(self):
return self.router.get_master_status()
def post_process(self):
if not self.detach and self.router and not self.test_ended:
self.router.stop_test()
if self.results_url:
if self.browser_open in ('end', 'both'):
open_browser(self.results_url)
if self.router and self.router.master:
full = self.router.master.get_full()
if 'note' in full and full['note']:
self.log.warning("Cloud test has probably failed with message: %s", full['note'])
for session in full.get('sessions', ()):
for error in session.get("errors", ()):
raise TaurusException(to_json(error))
# if we have captured HARs, let's download them
for service in self.engine.config.get(Service.SERV, []):
# it is not ideal to reproduce what is done inside the engine,
# but there is no good way to learn about the service from the config
if not isinstance(service, dict):
service = {"module": service}
mod = service.get('module', TaurusConfigError("No 'module' specified for service"))
assert isinstance(mod, str)
module = self.engine.instantiate_module(mod)
if isinstance(module, ServiceStubCaptureHAR):
self._download_logs()
break
if "functionalSummary" in full:
summary = full["functionalSummary"]
if summary is None or summary.get("isFailed", False):
raise AutomatedShutdown("Cloud tests failed")
def _download_logs(self):
for session in self.router.master.sessions():
assert isinstance(session, Session)
for log in session.get_logs():
self.log.info("Downloading %s from the cloud", log['filename'])
cloud_dir = os.path.join(self.engine.artifacts_dir, 'cloud-artifacts')
if not os.path.exists(cloud_dir):
os.makedirs(cloud_dir)
dest = os.path.join(cloud_dir, log['filename'])
dwn = ExceptionalDownloader()
with ProgressBarContext() as pbar:
try:
dwn.get(log['dataUrl'], dest, reporthook=pbar.download_callback)
except BaseException:
self.log.debug("Error is: %s", traceback.format_exc())
self.log.warning("Failed to download from %s", log['dataUrl'])
continue
if log['filename'].startswith('artifacts') and log['filename'].endswith('.zip'):
with zipfile.ZipFile(dest) as zipf:
for name in zipf.namelist():
ext = name.split('.')[-1].lower()
if ext in ('har', 'jpg', 'js', 'html', 'css'):
self.log.debug("Extracting %s to %s", name, cloud_dir)
zipf.extract(name, cloud_dir)
def get_widget(self):
if not self.widget:
self.widget = CloudProvWidget(self.router)
return self.widget
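# --- illustrative sketch, not part of the original module ---
# A review note on this change asks whether BlazeMeter documents the 'progress' values
# that check() above compares against the magic number 100, and suggests named constants
# in the BZA client. The constant and helper below are assumptions for illustration only;
# only the >= 100 / > 100 comparisons are taken from check() above, the name and the
# helper are hypothetical, not documented BlazeMeter API values.
PROGRESS_RESULTS_AVAILABLE = 100  # grounded in check(): progress >= 100 attaches the results reader
def cloud_test_has_ended(master_status):
    # assumption: any progress value above 100 means the test was stopped in the cloud
    return master_status.get("progress", 0) > PROGRESS_RESULTS_AVAILABLE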
class ResultsFromBZA(ResultsProvider):
"""
:type master: bzt.bza.Master
"""
def __init__(self, master=None):
super(ResultsFromBZA, self).__init__()
self.master = master
self.min_ts = 0
self.log = logging.getLogger('')
self.prev_errors = BetterDict()
self.cur_errors = BetterDict()
self.handle_errors = True
def _get_err_diff(self):
# find diff of self.prev_errors and self.cur_errors
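# Example (illustrative values): if prev_errors has {'': {'Timed out': {'count': 2, 'rc': '504'}}}
# and cur_errors reports count 5 for the same label and message, the diff is
# {'': {'Timed out': {'count': 3, 'rc': '504'}}}; entries whose count did not grow are omitted.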
diff = {}
for label in self.cur_errors:
if label not in self.prev_errors:
diff[label] = self.cur_errors[label]
continue
for msg in self.cur_errors[label]:
if msg not in self.prev_errors[label]:
prev_count = 0
else:
prev_count = self.prev_errors[label][msg]['count']
delta = self.cur_errors[label][msg]['count'] - prev_count
if delta > 0:
if label not in diff:
diff[label] = {}
diff[label][msg] = {'count': delta, 'rc': self.cur_errors[label][msg]['rc']}
return diff
def _calculate_datapoints(self, final_pass=False):
if self.master is None:
return
data, aggr_raw = self.query_data()
aggr = {}
for label in aggr_raw:
aggr[label['labelName']] = label
for label in data:
if label.get('kpis') and not final_pass:
label['kpis'].pop(-1) # never take last second since it could be incomplete
timestamps = []
for label in data:
if label.get('label') == 'ALL':
timestamps.extend([kpi['ts'] for kpi in label.get('kpis', [])])
self.handle_errors = True
for tstmp in timestamps:
point = DataPoint(tstmp)
for label in data:
for kpi in label.get('kpis', []):
if kpi['ts'] != tstmp:
continue
label_str = label.get('label')
if label_str is None or label_str not in aggr:
self.log.warning("Skipping inconsistent data from API for label: %s", label_str)
continue
kpiset = self.__get_kpiset(aggr, kpi, label_str)
point[DataPoint.CURRENT]['' if label_str == 'ALL' else label_str] = kpiset
if self.handle_errors:
self.handle_errors = False
self.cur_errors = self.__get_errors_from_bza()
err_diff = self._get_err_diff()
if err_diff:
for label in err_diff:
point_label = '' if label == 'ALL' else label
kpiset = point[DataPoint.CURRENT].get(point_label, KPISet(), force_set=True)
kpiset[KPISet.ERRORS] = self.__get_kpi_errors(err_diff[label])
self.prev_errors = self.cur_errors
point.recalculate()
self.min_ts = point[DataPoint.TIMESTAMP] + 1
yield point
def __get_errors_from_bza(self):
#
# This method reads error report from BZA
#
# internal errors format:
# <request_label>:
# <error_message>:
# 'count': <count of errors>
# 'rc': <response code>
#
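# example of the resulting structure (illustrative values only, not real API output):
# {
# "": {"Connection timed out": {"count": 3, "rc": "504"}},
# "home-page": {"Assertion failed": {"count": 1, "rc": "assertion name"}}
# }
# the "ALL" label returned by BZA is stored under the empty-string key, see below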
result = {}
try:
errors = self.master.get_errors()
except (URLError, TaurusNetworkError):
self.log.warning("Failed to get errors, will retry in %s seconds...", self.master.timeout)
self.log.debug("Full exception: %s", traceback.format_exc())
time.sleep(self.master.timeout)
errors = self.master.get_errors()
self.log.info("Succeeded with retry")
for e_record in errors:
_id = e_record["_id"]
if _id == "ALL":
_id = ""
result[_id] = {}
for error in e_record['errors']:
result[_id][error['m']] = {'count': error['count'], 'rc': error['rc']}
for assertion in e_record['assertions']:
result[_id][assertion['failureMessage']] = {'count': assertion['failures'], 'rc': assertion['name']}
return result
def __get_kpi_errors(self, errors):
result = []
for msg in errors:
kpi_error = KPISet.error_item_skel(
error=msg,
ret_c=errors[msg]['rc'],
cnt=errors[msg]['count'],
errtype=KPISet.ERRTYPE_ERROR, # TODO: what about asserts?
urls=Counter())
result.append(kpi_error)
return result
def __get_kpiset(self, aggr, kpi, label):
kpiset = KPISet()
kpiset[KPISet.FAILURES] = kpi['ec']
kpiset[KPISet.CONCURRENCY] = kpi['na']
kpiset[KPISet.SAMPLE_COUNT] = kpi['n']
kpiset.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0
kpiset.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0
perc_map = {'90line': 90.0, "95line": 95.0, "99line": 99.0}
for field, level in iteritems(perc_map):
kpiset[KPISet.PERCENTILES][str(level)] = aggr[label][field] / 1000.0
return kpiset
def query_data(self):
try:
data = self.master.get_kpis(self.min_ts)
except (URLError, TaurusNetworkError):
self.log.warning("Failed to get result KPIs, will retry in %s seconds...", self.master.timeout)
self.log.debug("Full exception: %s", traceback.format_exc())
time.sleep(self.master.timeout)
data = self.master.get_kpis(self.min_ts)
self.log.info("Succeeded with retry")
try:
aggr = self.master.get_aggregate_report()
except (URLError, TaurusNetworkError):
self.log.warning("Failed to get aggregate results, will retry in %s seconds...", self.master.timeout)
self.log.debug("Full exception: %s", traceback.format_exc())
time.sleep(self.master.timeout)
aggr = self.master.get_aggregate_report()
self.log.info("Succeeded with retry")
return data, aggr
class FunctionalBZAReader(FunctionalResultsReader):
def __init__(self, parent_log, master=None):
super(FunctionalBZAReader, self).__init__()
self.master = master
self.log = parent_log.getChild(self.__class__.__name__)
@staticmethod
def extract_samples_from_group(group, group_summary):
group_name = group_summary.get("name") or "Tests"
for sample in group["samples"]:
status = "PASSED"
if sample["error"]:
status = "FAILED"
error_msg = ""
error_trace = ""
assertions = sample.get("assertions")
if assertions:
for assertion in assertions:
if assertion.get("isFailed"):
error_msg = assertion.get("errorMessage")
status = "BROKEN"
rtm = sample.get("responseTime") or 0.0
yield FunctionalSample(
test_case=sample["label"],
test_suite=group_name,
status=status,
start_time=int(sample["created"]),
duration=rtm / 1000.0,
error_msg=error_msg,
error_trace=error_trace,
extras={},
subsamples=[],
)
def read(self, last_pass=False):
if self.master is None:
return
if last_pass:
try:
groups = self.master.get_functional_report_groups()
except (URLError, TaurusNetworkError):
self.log.warning("Failed to get test groups, will retry in %s seconds...", self.master.timeout)
self.log.debug("Full exception: %s", traceback.format_exc())
time.sleep(self.master.timeout)
groups = self.master.get_functional_report_groups()
self.log.info("Succeeded with retry")
for group_summary in groups:
group_id = group_summary['groupId']
try:
group = self.master.get_functional_report_group(group_id)
except (URLError, TaurusNetworkError):
self.log.warning("Failed to get test group, will retry in %s seconds...", self.master.timeout)
self.log.debug("Full exception: %s", traceback.format_exc())
time.sleep(self.master.timeout)
group = self.master.get_functional_report_group(group_id)
self.log.info("Succeeded with retry")
for sample in self.extract_samples_from_group(group, group_summary):
yield sample
class CloudProvWidget(Pile, PrioritizedWidget):
def __init__(self, test):
"""
:type test: BaseCloudTest
"""
self.test = test
self.text = Text("")
super(CloudProvWidget, self).__init__([self.text])
PrioritizedWidget.__init__(self)
def update(self):
txt = self.test.get_test_status_text()
if txt:
self.text.set_text(txt)
class ServiceStubScreenshoter(Service):
def startup(self):
if not isinstance(self.engine.provisioning, CloudProvisioning):
self.log.warning("Stub for service 'screenshoter', use cloud provisioning to have it working")
class ServiceStubCaptureHAR(Service):
def startup(self):
if not isinstance(self.engine.provisioning, CloudProvisioning):
self.log.warning("Stub for service 'capturehar', use cloud provisioning to have it working")
| 1 | 14,809 | Not really important in this PR, but does BlazeMeter describe "progress" values anywhere? It might be nice to have a set of constants like `PROGRESS_DOWNLOADING_IMAGE`, `PROGRESS_BOOTING`, `PROGRESS_RUNNING_TEST`, etc in our BZA client. | Blazemeter-taurus | py |
@@ -124,6 +124,14 @@ public interface ContentFile<F> {
*/
List<Integer> equalityFieldIds();
+ /**
+ * Returns the sort order id of this file, which describes how the file is ordered.
+ * This information will be useful for merging data and equality delete files more efficiently
+ * when they share the same sort order id.
+ */
+ default Integer sortOrderId() {
+ return null;
+ }
/**
* Copies this file. Manifest readers can reuse file instances; use
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
/**
* Superinterface of {@link DataFile} and {@link DeleteFile} that exposes common methods.
*
* @param <F> the concrete Java class of a ContentFile instance.
*/
public interface ContentFile<F> {
/**
* Returns the ordinal position of the file in a manifest, or null if it was not read from a manifest.
*/
Long pos();
/**
* Returns id of the partition spec used for partition metadata.
*/
int specId();
/**
* Returns type of content stored in the file; one of DATA, POSITION_DELETES, or EQUALITY_DELETES.
*/
FileContent content();
/**
* Returns fully qualified path to the file, suitable for constructing a Hadoop Path.
*/
CharSequence path();
/**
* Returns format of the file.
*/
FileFormat format();
/**
* Returns partition for this file as a {@link StructLike}.
*/
StructLike partition();
/**
* Returns the number of top-level records in the file.
*/
long recordCount();
/**
* Returns the file size in bytes.
*/
long fileSizeInBytes();
/**
* Returns if collected, map from column ID to the size of the column in bytes, null otherwise.
*/
Map<Integer, Long> columnSizes();
/**
* Returns if collected, map from column ID to the count of its non-null values, null otherwise.
*/
Map<Integer, Long> valueCounts();
/**
* Returns if collected, map from column ID to its null value count, null otherwise.
*/
Map<Integer, Long> nullValueCounts();
/**
* Returns if collected, map from column ID to its NaN value count, null otherwise.
*/
Map<Integer, Long> nanValueCounts();
/**
* Returns if collected, map from column ID to value lower bounds, null otherwise.
*/
Map<Integer, ByteBuffer> lowerBounds();
/**
* Returns if collected, map from column ID to value upper bounds, null otherwise.
*/
Map<Integer, ByteBuffer> upperBounds();
/**
* Returns metadata about how this file is encrypted, or null if the file is stored in plain text.
*/
ByteBuffer keyMetadata();
/**
* Returns list of recommended split locations, if applicable, null otherwise.
* <p>
* When available, this information is used for planning scan tasks whose boundaries
* are determined by these offsets. The returned list must be sorted in ascending order.
*/
List<Long> splitOffsets();
/**
* Returns the set of field IDs used for equality comparison, in equality delete files.
* <p>
* An equality delete file may contain additional data fields that are not used by equality
* comparison. The subset of columns in a delete file to be used in equality comparison are
* tracked by ID. Extra columns can be used to reconstruct changes and metrics from extra
* columns are used during job planning.
*
* @return IDs of the fields used in equality comparison with the records in this delete file
*/
List<Integer> equalityFieldIds();
/**
* Copies this file. Manifest readers can reuse file instances; use
* this method to copy data when collecting files from tasks.
*
* @return a copy of this data file
*/
F copy();
/**
* Copies this file without file stats. Manifest readers can reuse file instances; use
* this method to copy data without stats when collecting files.
*
* @return a copy of this data file, without lower bounds, upper bounds, value counts,
* null value counts, or nan value counts
*/
F copyWithoutStats();
}
| 1 | 30,850 | nit: `<p>` after the line | apache-iceberg | java |
@@ -228,6 +228,7 @@ setup(
("libArm64/%s"%version, glob("libArm64/*.dll") + glob("libArm64/*.exe")),
("waves", glob("waves/*.wav")),
("images", glob("images/*.ico")),
+ ("fonts", glob("fonts/*.ttf")),
("louis/tables",glob("louis/tables/*")),
("COMRegistrationFixes", glob("COMRegistrationFixes/*.reg")),
(".", glob("../miscDeps/python/*.dll")),
| 1 | # -*- coding: UTF-8 -*-
#setup.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2018 NV Access Limited, Peter Vágner, Joseph Lee
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import os
import copy
import gettext
gettext.install("nvda")
from setuptools import setup
import py2exe as py2exeModule
from glob import glob
import fnmatch
from versionInfo import *
from py2exe import distutils_buildexe
from py2exe.dllfinder import DllFinder
import wx
import importlib.machinery
RT_MANIFEST = 24
manifest_template = """\
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="asInvoker"
uiAccess="%(uiAccess)s"
/>
</requestedPrivileges>
</security>
</trustInfo>
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows 7 -->
<supportedOS
Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"
/>
<!-- Windows 8 -->
<supportedOS
Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"
/>
<!-- Windows 8.1 -->
<supportedOS
Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"
/>
<!-- Windows 10 -->
<supportedOS
Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"
/>
</application>
</compatibility>
</assembly>
"""
# py2exe's idea of whether a dll is a system dll appears to be wrong sometimes, so monkey patch it.
orig_determine_dll_type = DllFinder.determine_dll_type
def determine_dll_type(self, imagename):
dll = os.path.basename(imagename).lower()
if dll.startswith("api-ms-win-") or dll in ("powrprof.dll", "mpr.dll", "crypt32.dll"):
# These are definitely system dlls available on all systems and must be excluded.
# Including them can cause serious problems when a binary build is run on a different version of Windows.
return None
return orig_determine_dll_type(self, imagename)
DllFinder.determine_dll_type = determine_dll_type
class py2exe(distutils_buildexe.py2exe):
"""Overridden py2exe command to:
* Add a command line option --enable-uiAccess to enable uiAccess for the main executable and EOA proxy
* Add a manifest to the executables
"""
user_options = distutils_buildexe.py2exe.user_options + [
("enable-uiAccess", "u", "enable uiAccess for the main executable"),
]
def initialize_options(self):
super(py2exe, self).initialize_options()
self.enable_uiAccess = False
def run(self):
dist = self.distribution
if self.enable_uiAccess:
# Add a target for nvda_uiAccess, using nvda_noUIAccess as a base.
target = copy.deepcopy(dist.windows[0])
target["dest_base"] = "nvda_uiAccess"
target['uiAccess'] = True
dist.windows.insert(1, target)
# nvda_eoaProxy should have uiAccess.
target = dist.windows[3]
target['uiAccess'] = True
# Add a manifest resource to every target at runtime.
for target in dist.windows:
target["other_resources"] = [
(
RT_MANIFEST,
1,
(manifest_template % dict(uiAccess=target['uiAccess'])).encode("utf-8")
),
]
super(py2exe, self).run()
def getLocaleDataFiles():
wxDir=wx.__path__[0]
localeMoFiles=set()
for f in glob("locale/*/LC_MESSAGES"):
localeMoFiles.add((f, (os.path.join(f,"nvda.mo"),)))
wxMoFile=os.path.join(wxDir,f,"wxstd.mo")
if os.path.isfile(wxMoFile):
localeMoFiles.add((f,(wxMoFile,)))
lang=os.path.split(os.path.split(f)[0])[1]
if '_' in lang:
lang=lang.split('_')[0]
f=os.path.join('locale',lang,'lc_messages')
wxMoFile=os.path.join(wxDir,f,"wxstd.mo")
if os.path.isfile(wxMoFile):
localeMoFiles.add((f,(wxMoFile,)))
localeDicFiles=[(os.path.dirname(f), (f,)) for f in glob("locale/*/*.dic")]
NVDALocaleGestureMaps=[(os.path.dirname(f), (f,)) for f in glob("locale/*/gestures.ini")]
return list(localeMoFiles)+localeDicFiles+NVDALocaleGestureMaps
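# Recursively collects (dest, [files]) tuples for the py2exe data_files option,
# mirroring the layout of `source` under `dest`; files matching any pattern in
# `excludes` are skipped, as are directories whose names start with a dot.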
def getRecursiveDataFiles(dest,source,excludes=()):
rulesList=[]
rulesList.append((dest,
[f for f in glob("%s/*"%source) if not any(fnmatch.fnmatch(f,exclude) for exclude in excludes) and os.path.isfile(f)]))
[rulesList.extend(getRecursiveDataFiles(os.path.join(dest,dirName),os.path.join(source,dirName),excludes=excludes)) for dirName in os.listdir(source) if os.path.isdir(os.path.join(source,dirName)) and not dirName.startswith('.')]
return rulesList
setup(
name = name,
version=version,
description=description,
url=url,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Win32 (MS Windows)',
'Topic :: Adaptive Technologies',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Programming Language :: Python',
'Operating System :: Microsoft :: Windows',
],
cmdclass={"py2exe": py2exe},
windows=[
{
"script":"nvda.pyw",
"dest_base":"nvda_noUIAccess",
"uiAccess": False,
"icon_resources":[(1,"images/nvda.ico")],
"other_resources": [], # Populated at run time
"version":formatBuildVersionString(),
"description":"NVDA application",
"product_name":name,
"product_version":version,
"copyright":copyright,
"company_name":publisher,
},
# The nvda_uiAccess target will be added at runtime if required.
{
"script": "nvda_slave.pyw",
"uiAccess": False,
"icon_resources": [(1,"images/nvda.ico")],
"other_resources": [], # Populated at run time
"version":formatBuildVersionString(),
"description": name,
"product_name":name,
"product_version": version,
"copyright": copyright,
"company_name": publisher,
},
{
"script": "nvda_eoaProxy.pyw",
# uiAccess will be enabled at runtime if appropriate.
"uiAccess": False,
"icon_resources": [(1,"images/nvda.ico")],
"other_resources": [], # Populated at run time
"version":formatBuildVersionString(),
"description": "NVDA Ease of Access proxy",
"product_name":name,
"product_version": version,
"copyright": copyright,
"company_name": publisher,
},
],
options = {"py2exe": {
"bundle_files": 3,
"excludes": ["tkinter",
"serial.loopback_connection",
"serial.rfc2217",
"serial.serialcli",
"serial.serialjava",
"serial.serialposix",
"serial.socket_connection",
# netbios (from pywin32) is optionally used by Python3's uuid module.
# This is not needed.
# We also need to exclude win32wnet explicitly.
"netbios",
"win32wnet",
# winxptheme is optionally used by wx.lib.agw.aui.
# We don't need this.
"winxptheme",
],
"packages": [
"NVDAObjects",
"virtualBuffers",
"appModules",
"comInterfaces",
"brailleDisplayDrivers",
"synthDrivers",
"visionEnhancementProviders",
],
"includes": [
"nvdaBuiltin",
# #3368: bisect was implicitly included with Python 2.7.3, but isn't with 2.7.5.
"bisect",
],
}},
data_files=[
(".",glob("*.dll")+glob("*.manifest")+["builtin.dic"]),
("documentation", ['../copying.txt', '../contributors.txt']),
("lib/%s"%version, glob("lib/*.dll")),
("lib64/%s"%version, glob("lib64/*.dll") + glob("lib64/*.exe")),
("libArm64/%s"%version, glob("libArm64/*.dll") + glob("libArm64/*.exe")),
("waves", glob("waves/*.wav")),
("images", glob("images/*.ico")),
("louis/tables",glob("louis/tables/*")),
("COMRegistrationFixes", glob("COMRegistrationFixes/*.reg")),
(".", glob("../miscDeps/python/*.dll")),
(".", ['message.html' ])
] + (
getLocaleDataFiles()
+ getRecursiveDataFiles("synthDrivers", "synthDrivers",
excludes=tuple(
"*%s" % ext
for ext in importlib.machinery.SOURCE_SUFFIXES + importlib.machinery.BYTECODE_SUFFIXES
) + (
"*.exp",
"*.lib",
"*.pdb",
"__pycache__"
))
+ getRecursiveDataFiles("brailleDisplayDrivers", "brailleDisplayDrivers",
excludes=tuple(
"*%s" % ext
for ext in importlib.machinery.SOURCE_SUFFIXES + importlib.machinery.BYTECODE_SUFFIXES
) + (
"__pycache__",
))
+ getRecursiveDataFiles('documentation', '../user_docs', excludes=('*.t2t', '*.t2tconf', '*/developerGuide.*'))
),
)
| 1 | 28,270 | Should we include the files with otf extension here, too? If not, I wonder why we do allow them in the source but we don't include them as per the setup | nvaccess-nvda | py |
@@ -33,9 +33,6 @@ func (uq *uniQueue) enque(blk *peerBlock) {
}
func (uq *uniQueue) dequeAll() []*peerBlock {
- if len(uq.blocks) == 0 {
- return nil
- }
blks := uq.blocks
uq.blocks = []*peerBlock{}
uq.hashes = map[hash.Hash256]bool{}
| 1 | // Copyright (c) 2021 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blocksync
import (
"github.com/iotexproject/go-pkgs/hash"
)
// uniQueue is not threadsafe
type uniQueue struct {
blocks []*peerBlock
hashes map[hash.Hash256]bool
}
func newUniQueue() *uniQueue {
return &uniQueue{
blocks: []*peerBlock{},
hashes: map[hash.Hash256]bool{},
}
}
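// enque appends the block only if its hash has not been seen before,
// using the hashes set for de-duplication.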
func (uq *uniQueue) enque(blk *peerBlock) {
h := blk.block.HashBlock()
if _, ok := uq.hashes[h]; ok {
return
}
uq.hashes[h] = true
uq.blocks = append(uq.blocks, blk)
}
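// dequeAll returns the buffered blocks in insertion order and resets the queue,
// clearing both the block slice and the de-duplication hash set.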
func (uq *uniQueue) dequeAll() []*peerBlock {
if len(uq.blocks) == 0 {
return nil
}
blks := uq.blocks
uq.blocks = []*peerBlock{}
uq.hashes = map[hash.Hash256]bool{}
return blks
}
| 1 | 24,711 | i think it's OK to leave this? | iotexproject-iotex-core | go |
@@ -30,7 +30,7 @@ import org.apache.flink.table.types.logical.SymbolType;
import org.apache.flink.table.types.logical.YearMonthIntervalType;
import org.apache.flink.table.types.logical.ZonedTimestampType;
-abstract class FlinkTypeVisitor<T> implements LogicalTypeVisitor<T> {
+public abstract class FlinkTypeVisitor<T> implements LogicalTypeVisitor<T> {
// ------------------------- Unsupported types ------------------------------
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink;
import org.apache.flink.table.types.logical.DayTimeIntervalType;
import org.apache.flink.table.types.logical.DistinctType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeVisitor;
import org.apache.flink.table.types.logical.NullType;
import org.apache.flink.table.types.logical.RawType;
import org.apache.flink.table.types.logical.StructuredType;
import org.apache.flink.table.types.logical.SymbolType;
import org.apache.flink.table.types.logical.YearMonthIntervalType;
import org.apache.flink.table.types.logical.ZonedTimestampType;
abstract class FlinkTypeVisitor<T> implements LogicalTypeVisitor<T> {
// ------------------------- Unsupported types ------------------------------
@Override
public T visit(ZonedTimestampType zonedTimestampType) {
throw new UnsupportedOperationException("Unsupported ZonedTimestampType.");
}
@Override
public T visit(YearMonthIntervalType yearMonthIntervalType) {
throw new UnsupportedOperationException("Unsupported YearMonthIntervalType.");
}
@Override
public T visit(DayTimeIntervalType dayTimeIntervalType) {
throw new UnsupportedOperationException("Unsupported DayTimeIntervalType.");
}
@Override
public T visit(DistinctType distinctType) {
throw new UnsupportedOperationException("Unsupported DistinctType.");
}
@Override
public T visit(StructuredType structuredType) {
throw new UnsupportedOperationException("Unsupported StructuredType.");
}
@Override
public T visit(NullType nullType) {
throw new UnsupportedOperationException("Unsupported NullType.");
}
@Override
public T visit(RawType<?> rawType) {
throw new UnsupportedOperationException("Unsupported RawType.");
}
@Override
public T visit(SymbolType<?> symbolType) {
throw new UnsupportedOperationException("Unsupported SymbolType.");
}
@Override
public T visit(LogicalType other) {
throw new UnsupportedOperationException("Unsupported type: " + other);
}
}
| 1 | 22,024 | Does this need to be public? The only reference to `FlinkTypeVisitor` that I see in this PR is here, so I'm not sure why this is needed. | apache-iceberg | java |
@@ -3,11 +3,11 @@ WELCOME_DIALOG_TEXT = (
"down the NVDA key while pressing other keys. By default, the numpad Insert and main Insert keys "
"may both be used as the NVDA key. You can also configure NVDA to use the Caps Lock as the NVDA "
"key. Press NVDA plus n at any time to activate the NVDA menu. From this menu, you can configure "
- "NVDA, get help and access other NVDA functions.\n"
- "Options grouping\n"
+ "NVDA, get help and access other NVDA functions. \n"
+ "Options grouping \n"
"Keyboard layout: combo box desktop collapsed Alt plus k"
)
QUIT_DIALOG_TEXT = (
- "Exit NVDA dialog\n"
+ "Exit NVDA dialog \n"
"What would you like to do? combo box Exit collapsed Alt plus d"
)
| 1 | WELCOME_DIALOG_TEXT = (
"Welcome to NVDA dialog Welcome to NVDA! Most commands for controlling NVDA require you to hold "
"down the NVDA key while pressing other keys. By default, the numpad Insert and main Insert keys "
"may both be used as the NVDA key. You can also configure NVDA to use the Caps Lock as the NVDA "
"key. Press NVDA plus n at any time to activate the NVDA menu. From this menu, you can configure "
"NVDA, get help and access other NVDA functions.\n"
"Options grouping\n"
"Keyboard layout: combo box desktop collapsed Alt plus k"
)
QUIT_DIALOG_TEXT = (
"Exit NVDA dialog\n"
"What would you like to do? combo box Exit collapsed Alt plus d"
) | 1 | 27,746 | No line at end of file warning | nvaccess-nvda | py |
@@ -24,6 +24,8 @@ namespace Nethermind.Blockchain.Processing
{
public class OneTimeChainProcessor : IBlockchainProcessor
{
+ public CompositeBlockTracerFactory BlockTracerFactory { get; } = new();
+
private readonly IBlockchainProcessor _processor;
private readonly IReadOnlyDbProvider _readOnlyDbProvider;
| 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Threading.Tasks;
using Nethermind.Core;
using Nethermind.Db;
using Nethermind.Evm.Tracing;
namespace Nethermind.Blockchain.Processing
{
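// Wraps an inner IBlockchainProcessor together with a read-only DB provider;
// each Process call is serialized and any temporary DB changes are cleared
// afterwards (see the finally block in Process below).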
public class OneTimeChainProcessor : IBlockchainProcessor
{
private readonly IBlockchainProcessor _processor;
private readonly IReadOnlyDbProvider _readOnlyDbProvider;
private object _lock = new();
public OneTimeChainProcessor(IReadOnlyDbProvider readOnlyDbProvider, IBlockchainProcessor processor)
{
_readOnlyDbProvider = readOnlyDbProvider ?? throw new ArgumentNullException(nameof(readOnlyDbProvider));
_processor = processor ?? throw new ArgumentNullException(nameof(processor));
}
public void Start()
{
_processor.Start();
}
public Task StopAsync(bool processRemainingBlocks = false)
{
return _processor.StopAsync(processRemainingBlocks);
}
public Block Process(Block block, ProcessingOptions options, IBlockTracer tracer)
{
lock (_lock)
{
Block result;
try
{
result = _processor.Process(block, options, tracer);
}
finally
{
_readOnlyDbProvider.ClearTempChanges();
}
return result;
}
}
public bool IsProcessingBlocks(ulong? maxProcessingInterval)
{
return _processor.IsProcessingBlocks(maxProcessingInterval);
}
public void Dispose()
{
_processor?.Dispose();
_readOnlyDbProvider?.Dispose();
}
}
}
| 1 | 25,270 | This looks unused, shouldn't it point to inner processor in any way? | NethermindEth-nethermind | .cs |
@@ -6995,11 +6995,6 @@ NABoolean RelRoot::isUpdatableBasic(NABoolean isView,
// QSTUFF
{
- // if child is a FirstN node, skip it.
- if ((child(0)->castToRelExpr()->getOperatorType() == REL_FIRST_N) &&
- (child(0)->child(0)))
- scan = (Scan *)child(0)->child(0)->castToRelExpr();
- else
scan = (Scan *)child(0)->castToRelExpr();
}
| 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
******************************************************************************
*
* File: NormRelExpr.C
* Description: Relational expressions (both physical and logical operators)
* Methods related to the normalizer
*
* Created: 5/17/94
* Language: C++
*
*
*
*
******************************************************************************
*/
#include "Debug.h"
#include "Sqlcomp.h"
#include "GroupAttr.h"
#include "opt.h"
#include "NormWA.h"
#include "AllRelExpr.h"
#include "AllItemExpr.h"
#include "ValueDesc.h"
#include "Triggers.h"
#include "Cost.h"
#include "CostMethod.h"
#include "opt.h"
#include "RelGrby.h"
#include "ItemFunc.h"
#include "ControlDB.h"
#include "Analyzer.h"
#include "MultiJoin.h"
#include "CompException.h"
#include "ExpPCodeOptimizations.h"
#include <math.h>
#include "OptRange.h"
#include "ItemOther.h"
#include "ItemExpr.h"
#include "QRDescGenerator.h"
#ifndef TRANSFORM_DEBUG_DECL // artifact of NSK's OptAll.cpp ...
#define TRANSFORM_DEBUG_DECL
DBGDECLDBG( dbg; )
DBGDECL( static NAString unp; )
#endif
//----------------------------------------------------------------------
// static helper function: this is used by Join::transformNode() to decide
// whether to put the two vids in a VEG; currently this is checked for TSJs.
//----------------------------------------------------------------------
static NABoolean doTwoVidsReferToSameColumn(ValueId &vid, ValueId &vid1)
{
NAColumn *col = ((IndexColumn *) vid.getItemExpr())->getNAColumn();
NAColumn *col1 = ((IndexColumn *) vid1.getItemExpr())->getNAColumn();
if (col == NULL || col1 == NULL ) return FALSE;
if (col->getColName() != col1->getColName()) return FALSE;
if (col->getTableName(TRUE) == col1->getTableName(TRUE))
{
if (col->getTableName() == NULL)
return FALSE;
else
return TRUE;
}
return FALSE;
} // doTwoVidsReferToSameColumn()
static void tryToConvertFullOuterJoin(Join *fullOuterJoin, NormWA &normWARef)
{
NABoolean leftChildMerged, rightChildMerged = FALSE;
// Check if the left child's VEGRegion is merged
leftChildMerged = normWARef.locateVEGRegionAndCheckIfMerged(fullOuterJoin, 0 /* left child*/ );
// Check if the left child's VEGRegion is merged
rightChildMerged = normWARef.locateVEGRegionAndCheckIfMerged(fullOuterJoin, 1/* right child*/ );
// should not get here since we disable FOJ to inner in Join::bindNode() and
// should be removed when we support FOJ to inner
CMPASSERT(!leftChildMerged && !rightChildMerged);
if (leftChildMerged && !rightChildMerged)
fullOuterJoin->setOperatorType(REL_LEFT_JOIN);
if (rightChildMerged && !leftChildMerged)
fullOuterJoin->setOperatorType(REL_RIGHT_JOIN);
if (leftChildMerged && rightChildMerged)
fullOuterJoin->setOperatorType(REL_JOIN); // inner Join
switch (fullOuterJoin->getOperatorType())
{
case REL_LEFT_JOIN:
{
// This means the left child region (subtreeId = 0) is merged
// with the parent.
// Now merge the right child region (subtreeId = 1)
// and the join predicate region (subtreeId = 2)
VEGRegion * rightChildVEGRegion =
normWARef.locateVEGRegion(fullOuterJoin, 1/* right child*/ );
VEGRegion * joinPredVEGRegion =
normWARef.locateVEGRegion(fullOuterJoin, 2/* join predicate */ );
rightChildVEGRegion->mergeVEGRegion(joinPredVEGRegion);
VEGRegion *parentVEGRegion = rightChildVEGRegion->getParentVEGRegion();
CMPASSERT(parentVEGRegion); // MUST have a parent
parentVEGRegion->fixupZonesAfterFullToLeftConversion();
// We don't need to null instantiate the left rows anymore
fullOuterJoin->nullInstantiatedForRightJoinOutput().clear();
}
break;
case REL_RIGHT_JOIN:
break; // Hema TBD - need to flip
case REL_JOIN: //Inner Join
{
// TBD - Hema. Need some sort of assert that
// all the three regions (SubtreeId 0,1 & 2) are merged.
fullOuterJoin->selectionPred() += fullOuterJoin->joinPred();
fullOuterJoin->joinPred().clear();
}
break;
case REL_FULL_JOIN:
break; // do nothing. has not been transformed.
default:
ABORT("Internal error: tryToConvertFullOuterJoin()");
break;
}
}
// ***********************************************************************
// $$$$ RelExpr
// member functions for class RelExpr
// ***********************************************************************
// -----------------------------------------------------------------------
// RelExpr::transformNode()
// -----------------------------------------------------------------------
void RelExpr::transformNode(NormWA & normWARef,
ExprGroupId & locationOfPointerToMe)
{
CMPASSERT( locationOfPointerToMe.getPtr() == this );
if (nodeIsTransformed())
return;
markAsTransformed();
// ---------------------------------------------------------------------
// transformNode takes a bound tree and turns it into a transformed
// tree. For a RelExpr that means the following.
// + expressions are transformed. If the expressions contain
// subqueries then new RelExpr are created for them and
// they are usually added above (as an ancestor) of the node
// that contained them.
// + predicates are pulled up from the children and their
// required inputs are modified
// + the required inputs of the node the node itself are changed
// from being a sufficient set to being a sufficient minimal
// set.
// ---------------------------------------------------------------------
Int32 arity = getArity();
// ---------------------------------------------------------------------
// Transform each child.
// Pull up their transformed predicates
// recompute their required inputs.
// ---------------------------------------------------------------------
for (Int32 i = 0; i < arity; i++)
{
// ---------------------------------------------------------------------
// Make values available to child
// ---------------------------------------------------------------------
child(i)->getGroupAttr()->addCharacteristicInputs
(getGroupAttr()->getCharacteristicInputs());
child(i)->transformNode(normWARef, child(i));
// My child has now been transformed.
// A new semiJoin may now be my direct descendant and my original
// child a descendant of it.
// In either case my child has now been transformed.
};
// Pull up the predicates and recompute the required inputs
// of whoever my children are now.
pullUpPreds();
// transform the selection predicates
transformSelectPred(normWARef, locationOfPointerToMe);
} // RelExpr::transformNode()
// QSTUFF
// ***********************************************************************
// $$$$ RelExpr
// member functions for class RelExpr
// ***********************************************************************
// -----------------------------------------------------------------------
// RelExpr::checkReadWriteConflicts()
// -----------------------------------------------------------------------
RelExpr::rwErrorStatus RelExpr::checkReadWriteConflicts(NormWA & normWARef)
{
rwErrorStatus rc;
Int32 arity = getArity();
for (Int32 i = 0; i < arity; i++)
{
if ( (rc = child(i)->checkReadWriteConflicts(normWARef) ) != RWOKAY)
return rc;
}
return RWOKAY;
} // RelExpr::checkReadWriteConflicts()
// Build the MapValueIds node to sit on top of a GroupBy.
// This function is used by the SemanticQueryOptimization phase
// to insert a MapValueIds node on top of a GroupBy and a LeftJoin
// when a Join is converted to a LeftJoin during unnesting
// Also update the outputs of the MapValueIds node accordingly.
//------------------------------------------------------------------------------
MapValueIds * GroupByAgg::buildMapValueIdNode(ValueIdMap *map)
{
CollHeap* stmtHeap = CmpCommon::statementHeap() ;
// Create the MapValueIds node with the mapping.
MapValueIds *mapNode = new (stmtHeap) MapValueIds(this, *map);
// The inputs are same as for the child
mapNode->getGroupAttr()->addCharacteristicInputs(
getGroupAttr()->getCharacteristicInputs());
mapNode->primeGroupAttributes();
return mapNode;
} // GroupByAgg::buildMapValueIdNode()
// -----------------------------------------------------------------------
// RelRoot::checkReadWriteConflicts()
// -----------------------------------------------------------------------
RelExpr::rwErrorStatus RelRoot::checkReadWriteConflicts(NormWA & normWARef)
{
// checking is only done in the presence of embedded deletes and updates
if (!(getGroupAttr()->isEmbeddedUpdateOrDelete()) && isTrueRoot())
return RWOKAY;
rwErrorStatus rc;
Int32 arity = getArity();
for (Int32 i = 0; i < arity; i++)
{
if ( (rc = child(i)->checkReadWriteConflicts(normWARef) ) != RWOKAY)
return rc;
}
return RWOKAY;
} // RelRoot::checkReadWriteConflicts()
// -----------------------------------------------------------------------
// RelScan::checkReadWriteConflicts()
// -----------------------------------------------------------------------
RelExpr::rwErrorStatus Scan::checkReadWriteConflicts(NormWA & normWARef)
{
NAString fileName (
getTableDesc()->getNATable()->
getClusteringIndex()->getFileSetName().getQualifiedNameAsString(),
CmpCommon::statementHeap());
CollIndex i = 0;
for (i=0; i < normWARef.getWriteList().entries(); i++)
if (strcmp(normWARef.getWriteList()[i], fileName) == 0) {
*CmpCommon::diags() << DgSqlCode(-4152)
<< DgTableName(getTableDesc()->getNATable()->getTableName().getQualifiedNameAsAnsiString());
return RWERROR;
}
for (i=0; i < normWARef.getReadList().entries(); i++)
if (strcmp(normWARef.getReadList()[i], fileName) == 0) {
return RWOKAY;
}
normWARef.getReadList().insert(fileName);
return RWOKAY;
} // Scan::checkReadWriteConflicts()
// -----------------------------------------------------------------------
// GenericUpdate::checkReadWriteConflicts()
// -----------------------------------------------------------------------
RelExpr::rwErrorStatus GenericUpdate::checkReadWriteConflicts(NormWA & normWARef)
{
// ---------------------------------------------------------------------
// This routine checks whether the same table is both read from and
// updated in the same query. This is done after transformation and binding
// to ensure that all inlining of operations has already happened, as well as
// the removal of structural nodes.
// ---------------------------------------------------------------------
NAString fileName(
getTableDesc()->getNATable()->
getClusteringIndex()->getFileSetName().getQualifiedNameAsString(),
CmpCommon::statementHeap());
CollIndex i = 0;
for ( i=0; i < normWARef.getReadList().entries(); i++)
if (strcmp(normWARef.getReadList()[i], fileName) == 0) {
*CmpCommon::diags() << DgSqlCode(-4152)
<< DgTableName(getTableDesc()->getNATable()->getTableName().getQualifiedNameAsAnsiString());
return RWERROR;
}
for ( i=0; i < normWARef.getWriteList().entries(); i++)
if (strcmp(normWARef.getWriteList()[i], fileName) == 0) {
return RWOKAY;
}
normWARef.getWriteList().insert(fileName);
return RWOKAY;
} // GenericUpdate::checkReadWriteConflicts()
// QSTUFF
// -----------------------------------------------------------------------
// Could/should be a ValueIdSet:: method.
// NEED A WAY TO GUARANTEE THAT THIS SET REPRESENTS AN *AND*ed LOGICAL SET,
// not an ITM_ITEM_LIST or other backboned set.
// This guarantee is true for the two RelExpr::transformSelectPred() callers
// in this file.
// -----------------------------------------------------------------------
static void applyTruthTable(ValueIdSet & vs)
{
// If this ValueIdSet is an ANDed set of value-items, i1 AND i2 AND ..., then:
// Remove any item which is a simple TRUE:
// il..AND TRUE AND..ir => il AND ir
// If any item is a simple FALSE, ignore all other items:
// il..AND FALSE AND..ir => FALSE
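// e.g. {a > 1, TRUE, b = 2} becomes {a > 1, b = 2}, while {a > 1, FALSE, b = 2}
// collapses to just {FALSE}.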
for (ValueId vid = vs.init(); vs.next(vid); vs.advance(vid))
{
OperatorTypeEnum op = vid.getItemExpr()->getOperatorType();
if (op == ITM_RETURN_TRUE)
vs -= vid;
else if (op == ITM_RETURN_FALSE)
{
vs.clear();
vs += vid;
break;
}
}
}
// Breadth First Traversal to print the transformed and source tree.
Int32 printTree(ItemExpr *ptrToTree,ItemExpr *parent, Int32 depth, Int32 l1)
{
if (ptrToTree != NULL)
{
Int32 left, right;
if (depth == 0)
{
if(l1 == 0)
cout << "root ";
if(l1 == 1)
cout << "left child ";
if(l1 == 2)
cout << "right child ";
cout << " ValueId: " << ptrToTree->getValueId() << " Value:" << ptrToTree->getText() << " Parent ValueId: " << parent->getValueId() << " Parent Value:" << parent->getText() << endl;
return 1;
}
left = printTree(ptrToTree->child(0),ptrToTree, depth - 1, 1);
right = printTree(ptrToTree->child(1),ptrToTree, depth - 1, 2);
return left || right;
}
return 0;
}
static ItemExpr* transformUnSupportedNotEqualTo(CollHeap *heap, ItemExpr* itemNotEqual)
{
if(itemNotEqual->getOperatorType() == ITM_NOT_EQUAL)
{
ItemExpr* newLeftNode = new (heap) BiRelat(ITM_LESS,itemNotEqual->child(0),itemNotEqual->child(1));
ItemExpr* newRightNode = new (heap) BiRelat(ITM_GREATER,itemNotEqual->child(0),itemNotEqual->child(1));
ItemExpr* result = new (heap) BiLogic(ITM_OR,newLeftNode,newRightNode);
result->synthTypeAndValueId();
return result;
}
return NULL;
}
// shrinkNewTree(argv[]):
// This function in turn calls the union() and intersection() functions on the RangeSpec object, through
// the wrapper object RangeSpecRef->getRangeObject(), to merge the values together.
// getRangeObject() call gives RangeSpec object.
// This gets called while performing
// (1) an "OR" operation between an RangeSpecRef ItemExpression
// (operator type =ITM_RANGE_SPEC_FUNC) and an OR'ed Set (operator type =ITM_OR)
// (2) an "AND" operation between an RangeSpecRef ItemExpression
// (operator type =ITM_RANGE_SPEC_FUNC) and an OR'ed Set (operator type =ITM_AND)
// (3) an "AND/OR" operation between an RangeSpecRef ItemExpression
// (operator type =ITM_RANGE_SPEC_FUNC) and an RangeSpecRef ItemExpression (operator type =ITM_RANGE_SPEC_FUNC)
//
// Simple usage example: where a = 10 or b = 20 or a=30;
//
// Step 1:
// Or'ed Set = (RangeSpecRef(a=10),RangeSpecRef(b=20))
// RangeSpecRef = (a=30)
// Step 2:
// Or'ed Set = (RangeSpecRef(a=10,a=30),RangeSpecRef(b=20))
// Step 3: returns true;
//
//
// argv[] ={ OperatorTypeEnum op, ItemExpr *ptrToNewTree, RangeSpecRef* xref}
// argv[0] -> needed since union and intersection on a RangeSpec object are determined by this parameter:
// -> { ITM_OR, ITM_AND }
// argv[1] -> { OR'ed set, AND'ed Set} where OR'ed set = { RangeSpecRef(a), RangeSpecRef(b), ..}
// OR'ed set and AND'ed set can only exist if it is more than one column, the name "set" represents that
// argv[2] -> xref = Xternal, which is the RangeSpecRef object that needs to be merged
// argv[3] -> normWARef, the Normalizer work area, passed on to getRangeItemExpr()
// so the range ItemExpr can be normalized.
// shrinkNewTree() returns (true/false) -> Boolean instead of void is needed to optimize the traversal of the Tree.
NABoolean Scan::shrinkNewTree(OperatorTypeEnum op, ItemExpr *ptrToNewTree,
RangeSpecRef* xref, NormWA& normWARef)
{
NABoolean status = false;
// Need to work on the leaf's of ptrToNewTree tree,
// which is of RangeSpecRef(ITM_RANGE_SPEC_FUNC) type object
if (ptrToNewTree->getOperatorType() == ITM_RANGE_SPEC_FUNC)
{
RangeSpecRef* rangeIE = (RangeSpecRef *)ptrToNewTree;
OptNormRangeSpec* destObj = rangeIE->getRangeObject();
CMPASSERT(destObj != NULL);
if (op == ITM_OR)
{
if (destObj->getRangeExpr()->getValueId() ==
xref->getRangeObject()->getRangeExpr()->getValueId())
{
destObj->unionRange(xref->getRangeObject());
rangeIE->setChild(1, const_cast<ItemExpr*>
(destObj->getRangeItemExpr(&normWARef)));
status = true;
}
}
else if( op == ITM_AND)
{
if (destObj->getRangeExpr()->getValueId() ==
xref->getRangeObject()->getRangeExpr()->getValueId())
{
destObj->intersectRange(xref->getRangeObject());
rangeIE->setChild(1, const_cast<ItemExpr*>
(destObj->getRangeItemExpr(&normWARef)));
status = true;
}
}
}
else
{
// Internal AND'ed sets under an OR root can't be merged into it (and vice versa).
// For the tree above, op = ITM_OR, which is the root.
// Whenever we first hit an "AND'ed set" while walking under the OR root
// we don't traverse into that AND'ed set,
// since the OR'ed set and the AND'ed set are disjoint.
// For a tree like the following, we dont traverse the cut(s) while
// merging the RangeSpecRef with New ItemExpression:
// Or
// / \
// / RangeSpecRef(a(4))
// or'ed set
// / \/
// / / \
// / / and'ed set
// / cut / \
// RangeSpecRef RangeSpecRef
// / (b,BiRel(=1)) (c,BiRel(=3))
// /
// RangeSpecRef
// (leftchild=a,
// rightchild=Reconstructed ItemExpression ( for values{1,2}) i.e. or
// \ cut(we don't traverse) /\
// \ / / \
// / \ a=1 a=2
// / \
// and'ed set
// / \
// / \
// RangeSpecRef(a,BiRel(=4))RangeSpecRef(b,BiRel(=6))
if (op == ptrToNewTree->getOperatorType() )
{
// Traverse Left of OR'ed set or AND'ed set
status = shrinkNewTree(op,ptrToNewTree->child(0),xref,normWARef);
// Optimization: if(!status)
// There is no need to traverse the right child of the tree
// when the match was already found in the left child.
if(!status)
status = shrinkNewTree(op,ptrToNewTree->child(1),xref,normWARef);
}
}
return status;
}
#define AVR_STATE0 0
#define AVR_STATE1 1
#define AVR_STATE2 2
ItemExpr * Scan::applyAssociativityAndCommutativity(
QRDescGenerator *descGenerator,
CollHeap *heap,
ItemExpr *origPtrToOldTree,
NormWA& normWARef,
NABoolean& transformationStatus)
{
if( CmpCommon::getDefault(SUBSTRING_TRANSFORMATION) != DF_OFF )
return origPtrToOldTree;
ItemExpr * newLeftNode = NULL ;
ItemExpr * newRightNode = NULL ;
ItemExpr * newNode = NULL ;
ItemExpr * ptrToOldTree = NULL ;
//
// applyAssociativityAndCommutativity() used to be called recursively not just
// for all the items in an expression but for all the items in the node
// tree for an entire query. Consequently, we must eliminate the recursive
// calls to applyAssociativityAndCommutativity() by keeping the
// information needed by each "recursive" level in the HEAP and using
// a "while" loop to look at each node in the tree in the same order as
// the old recursive technique would have done.
// The information needed by each "recursive" level is basically just
// * a pointer to what node (ItemExpr *) to look at next,
// * a "state" value that tells us where we are in the
// applyAssociativityAndCommutativity() code for the ItemExpr node
// that we are currently working on, and
// * a pointer to the new left node (from the "recursive" call on child(0))
// which we need to have available *after* recursing down the child(1) tree.
// NOTE: We don't have to keep the ptr to the new right node in a similar
// fashion because the code does not assign to 'newRightNode' until *after*
// all recursion is finished.
//
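// A minimal sketch of the control flow implemented below (illustrative
// pseudo-code only; the real code uses IEarray/state/leftNodeArray and the
// AVR_STATE0/1/2 values):
//
//   push(root, STATE0)
//   while (stack not empty)
//     node = top()
//     if node is not an AND/OR that must be recursed into:
//       newNode = rangespec conversion of node
//     else, depending on node's state:
//       STATE0: state = STATE1; push(child(0), STATE0); continue
//       STATE1: save newNode as node's left result;
//               state = STATE2; push(child(1), STATE0); continue
//       STATE2: combine the saved left result with newNode (the right
//               result) into newNode; mark node as done
//     if node is done: pop()
//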
ARRAY( ItemExpr * ) IEarray(heap, 10) ; //Initially 10 elements (no particular reason to choose 10)
ARRAY( Int16 ) state(heap, 10) ; //These ARRAYs will grow automatically as needed.
ARRAY( ItemExpr *) leftNodeArray(heap, 10);
Int32 currIdx = 0;
IEarray.insertAt( currIdx, origPtrToOldTree );
state.insertAt( currIdx, AVR_STATE0 );
// if(ptrToOldTree->getOperatorType() == ITM_NOT_EQUAL)
// ptrToOldTree = transformUnSupportedNotEqualTo(heap,ptrToOldTree);
while ( currIdx >= 0 )
{
ptrToOldTree = IEarray[currIdx] ;
// Convert the expression to a rangespec immediately under any of the following
// conditions:
// 1) The expression is a leaf predicate (not an AND or OR).
// 2) The expression is rooted by an OR node that is derived from an in-list.
// This is guaranteed to be an OR backbone of conditions on the same
// column/expr, and can be handled by createRangeSpec() without the overhead
// of recursing through applyAssociativityAndCommutativity(), which incurs
// a massive usage of memory for a large in-list. See bug #3248.
// 3) The expression has already undergone rangespec conversion.
if((ptrToOldTree->getOperatorType() != ITM_AND &&
ptrToOldTree->getOperatorType() != ITM_OR)
||
(ptrToOldTree->getOperatorType() == ITM_OR &&
static_cast<BiLogic*>(ptrToOldTree)->createdFromINlist())
||
ptrToOldTree->isRangespecItemExpr())
{
OptNormRangeSpec* range = static_cast<OptNormRangeSpec*>(
OptRangeSpec::createRangeSpec(descGenerator,
ptrToOldTree,
heap,
TRUE));
// Transforms all BiRel ItemExpressions into RangeSpecRef ItemExpressions
if( range != NULL)
{
RangeSpecRef *refrange = new (heap)
RangeSpecRef(ITM_RANGE_SPEC_FUNC,
range,
range->getRangeExpr(),
range->getRangeItemExpr(&normWARef));
transformationStatus = TRUE;
// Ensure that base column value ids are replaced by vegrefs (Bugzilla 2808).
refrange->getReplacementExpr()->normalizeNode(normWARef);
newNode = refrange ;
}
else
newNode = ptrToOldTree ;
}
else
{
// Recurse through for ITM_AND/ITM_OR
// depth first traversal
if ( state[currIdx] == AVR_STATE0 )
{
state.insertAt( currIdx, AVR_STATE1 ) ;
currIdx++ ; //"Recurse" down to child 0
state.insertAt( currIdx, AVR_STATE0 ) ; // and start that child's state at 0
IEarray.insertAt( currIdx, ptrToOldTree->child(0) ) ;
continue ;
}
else if ( state[currIdx] == AVR_STATE1 )
{
leftNodeArray.insertAt( currIdx, newNode ); //Save the "return value" from recursion
state.insertAt( currIdx, AVR_STATE2 ) ;
currIdx++ ; //"Recurse" down to child 1
state.insertAt( currIdx, AVR_STATE0 ) ; // and start that child's state at 0
IEarray.insertAt( currIdx, ptrToOldTree->child(1) ) ;
continue ;
}
else
{
newLeftNode = leftNodeArray[currIdx] ; //Restore 'newLeftNode'
state.insertAt( currIdx, AVR_STATE0 ) ; //Mark us as done with this IE
newRightNode = newNode ; // Set newRightNode = "return value" from recursion
}
// case OR:
if ((newLeftNode->getOperatorType() == ITM_RANGE_SPEC_FUNC) &&
(newRightNode->getOperatorType() == ITM_RANGE_SPEC_FUNC))
{
// where a = 10 or b =20
// where a = 10 or a =20
if(shrinkNewTree(ptrToOldTree->getOperatorType(),
newLeftNode, (RangeSpecRef *)newRightNode, normWARef))
{
newNode = (ItemExpr *)newLeftNode;
}
else
{
// where a = 10 or b =20
newNode = new (heap) BiLogic(ptrToOldTree->getOperatorType(),
(RangeSpecRef *)newLeftNode,
(RangeSpecRef *)newRightNode);
}
}
else if((newLeftNode->getOperatorType() == ptrToOldTree->getOperatorType())
&& (newRightNode->getOperatorType() == ITM_RANGE_SPEC_FUNC))
{
// where a = 10 or b = 20 or a = 30
// ored set = ((a=10),(b=20))
// we are merging the ored set with the rangespec (a=30)
// if shrinkNewTree() returns true then the intervals have already been merged in shrinkNewTree(),
// since matching columns were
// found in the ored set.
// else we add the rangespec into the ored set.
if(!shrinkNewTree(ptrToOldTree->getOperatorType(),
newLeftNode,(RangeSpecRef *)newRightNode,normWARef))
{
newNode = new (heap) BiLogic(ptrToOldTree->getOperatorType(),
newLeftNode,
(RangeSpecRef *)newRightNode);
}
else
newNode = (ItemExpr *)newLeftNode;
}
// This condition appears to be redundant; we were not able to formulate any query that produces it,
// since we can't generate a tree like
// Or
// / \
// OrSet OrSet
else if((newLeftNode->getOperatorType() ==
ptrToOldTree->getOperatorType()) &&
(newRightNode->getOperatorType() ==
ptrToOldTree->getOperatorType()))
{
newNode = new (heap) BiLogic(ptrToOldTree->getOperatorType(),
newLeftNode,newRightNode);
}
else if ((newLeftNode->getOperatorType() == ITM_RANGE_SPEC_FUNC)
&& (newRightNode->getOperatorType() == ptrToOldTree->getOperatorType()))
{
// where a = 10 or b = 20 or a = 30
// ored set = ((a=10),(b=20))
// we are merging the ored set with the rangespec (a=30)
// if shrinkNewTree() returns true then the intervals have already been merged in shrinkNewTree(),
// since matching columns were
// found in the ored set.
// else we add the rangespec into the ored set.
if(!shrinkNewTree(ptrToOldTree->getOperatorType(),
newRightNode,(RangeSpecRef *)newLeftNode,normWARef))
{
newNode = new (heap) BiLogic(ptrToOldTree->getOperatorType(),
(RangeSpecRef *)newLeftNode,
newRightNode);
}
else
newNode = (ItemExpr *)newRightNode;
}
else
{
newNode = new (heap) BiLogic(ptrToOldTree->getOperatorType(),
newLeftNode,newRightNode);
}
// If user had specified selectivity for original predicate,
// then apply the same to the new predicate as well.
if(ptrToOldTree->isSelectivitySetUsingHint())
{
if(newNode->getOperatorType() == ITM_RANGE_SPEC_FUNC)
{
newNode->child(1)->setSelectivitySetUsingHint();
newNode->child(1)->setSelectivityFactor(ptrToOldTree->getSelectivityFactor());
}
else
{
newNode->setSelectivitySetUsingHint();
newNode->setSelectivityFactor(ptrToOldTree->getSelectivityFactor());
}
}
CMPASSERT(newNode != NULL);
}
if ( state[currIdx] == AVR_STATE0 ) // if done with current ItemExpr
currIdx-- ; // then return to parent
}
return newNode;
}
// -----------------------------------------------------------------------
// RelExpr::transformSelectPred()
// Do the common steps in processing selection predicates
// -----------------------------------------------------------------------
void RelExpr::transformSelectPred(NormWA & normWARef,
ExprGroupId & locationOfPointerToMe)
{
// ---------------------------------------------------------------------
// This is a common procedure for relExprs to process the subquery
// or Isolated UDFunction predicates in its select list. It sets up the
// required inputs and outputs of the RelExpr before adding new Join
// nodes above it. The subquery/UDFunction transformation logic needs the
// required inputs of the node to properly reflect the final
// (transformed) inputs and outputs.
// ---------------------------------------------------------------------
ValueIdSet subqueryOrIsolatedUDFunctionPredicates;
const NABoolean movePredicates = TRUE;
const NABoolean postJoinPredicates = TRUE;
// ---------------------------------------------------------------------
// Compute the potential inputs and outputs for the node before
// transforming the selectionPred so that the characteristic outputs
// are correct (they are needed by the subquery transformation).
// ---------------------------------------------------------------------
primeGroupAttributes();
// remove the subquery predicates from the select list
selectionPred().removeSubqueryOrIsolatedUDFunctionPredicates (
subqueryOrIsolatedUDFunctionPredicates);
// ---------------------------------------------------------------------
// Save the original inputs to use when the subquery predicates get
// transformed.
// ---------------------------------------------------------------------
ValueIdSet externalInputs = getGroupAttr()->getCharacteristicInputs();
// ---------------------------------------------------------------------
// Transform the remaining selection predicates.
// ---------------------------------------------------------------------
if (selectionPred().transformNode(normWARef, locationOfPointerToMe,
externalInputs, movePredicates,
postJoinPredicates))
{
// -----------------------------------------------------------------
// No subqueries should have been left here.
// -----------------------------------------------------------------
CMPASSERT(0);
}
applyTruthTable(selectionPred());
// ---------------------------------------------------------------------
// Transform the subquery predicates.
// ---------------------------------------------------------------------
// semiJoin's that are added should be added between me and my parent.
if (subqueryOrIsolatedUDFunctionPredicates.transformNode(normWARef,
locationOfPointerToMe,
externalInputs, movePredicates,
postJoinPredicates))
{
// -----------------------------------------------------------------
// The transformed subquery predicate requires values that are
// produced by the semiJoin above me.
// The transform predicate was moved there.
// -----------------------------------------------------------------
}
applyTruthTable(subqueryOrIsolatedUDFunctionPredicates);
// ---------------------------------------------------------------------
// Add the transformed subquery predicates back to the selection predicates.
// Some subquery predicates transform into regular predicates
// e.g. EXISTS (SELECT MAX(t.a) FROM t) ==> TRUE
// ---------------------------------------------------------------------
selectionPred() += subqueryOrIsolatedUDFunctionPredicates;
// ---------------------------------------------------------------------
// If I am no longer the direct descendant of my parent then transform
// the usurper. During its transformation it may get a taste of its
// own medicine and stop becoming the direct descendant of my parent.
// ---------------------------------------------------------------------
if (locationOfPointerToMe != (const RelExpr *)this)
{
locationOfPointerToMe->transformNode(normWARef,
locationOfPointerToMe);
// ---------------------------------------------------------------------
// If this whole subquery is under an OR or inside a complicated expr,
// this flag has been set while the expr is transformed. This is done
// so that when the newly introduced join and its right child are being
// transformed, we won't incorrectly use the selection predicates in
// the subquery to convert left join elsewhere into inner join. This
// has been achieved now, so resetting the flag.
// ---------------------------------------------------------------------
if (normWARef.subqUnderExprTree())
normWARef.restoreSubqUnderExprTreeFlag();
// We are on our way back from a number of transformNode()s.
// Let's just make sure that the final usurper got transformed
CMPASSERT( locationOfPointerToMe->nodeIsTransformed());
}
// If there is a selection predicate, we check to see if there are
// constant expressions in it, and we compute them, i.e. this is
// Constant Folding
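// For example (an illustrative sketch only): a predicate such as 1 = 1 can
// be folded to TRUE; if every predicate folds to TRUE, foldPreds is cleared
// below and the selection predicate list becomes empty.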
ValueIdList foldPreds;
foldPreds = getSelectionPred();
if (!foldPreds.isEmpty()) {
NABoolean allTrue = foldPreds.constantFolding();
if (!foldPreds.isEmpty()) {
CMPASSERT(selPredTree() == NULL);
if (allTrue) foldPreds.clear();
setSelectionPredicates(foldPreds);
}
}
} // RelExpr::transformSelectPred()
// -----------------------------------------------------------------------
// RelExpr::pullUpPreds()
// Most operators transmit predicates to their parents as-is.
// -----------------------------------------------------------------------
void RelExpr::pullUpPreds()
{
// ---------------------------------------------------------------------
// This method is called on a RelExpr so that it can gather the
// predicates of its immediate children unto itself.
// It is a virtual function.
// PullUpPreds gets from all the children the predicates they
// can surrender is adds them to the local selectionPred()
// ---------------------------------------------------------------------
Int32 arity = getArity();
for (Int32 i = 0; i < arity; i++)
{
selectionPred() += child(i)->getSelectionPred();
child(i)->selectionPred().clear();
child(i)->recomputeOuterReferences();
};
// ---------------------------------------------------------------------
// WARNING: One rule that this procedure must follow is
// that recomputeOuterReferences() must be called on the children even
// if no predicates are pulled up from them. This is to correct
// the outer references that are added to a right child of a
// semi or outer join when processing subqueries in the ON clause.
// ---------------------------------------------------------------------
} // RelExpr::pullUpPreds()
// -----------------------------------------------------------------------
// RelExpr::recomputeOuterReferences()
// -----------------------------------------------------------------------
void RelExpr::recomputeOuterReferences()
{
// ---------------------------------------------------------------------
// This is virtual method on RelExpr.
// When this is called it is assumed that the children have already
// been transformed.
// The required inputs of the child are therefore already minimal
// and sufficient.
// It is also assumed that the RelExpr itself has been bound.
// That implies that the group attributes have already been allocated
// and the required inputs are a sufficient (but not necessarily minimal)
// set of external values needed to evaluate all expressions in this subtree.
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// Delete all those input values that are no longer referenced on
// this operator because the predicates that reference them have
// been pulled up.
// ---------------------------------------------------------------------
ValueIdSet outerRefs = getGroupAttr()->getCharacteristicInputs();
// Remove from outerRefs those valueIds that are not needed
// by my selection predicate
selectionPred().weedOutUnreferenced(outerRefs);
// Add to outerRefs those that my children need.
Int32 arity = getArity();
for (Int32 i = 0; i < arity; i++)
{
outerRefs += child(i).getPtr()->getGroupAttr()->getCharacteristicInputs();
}
// set my Character Inputs to this new minimal set.
getGroupAttr()->setCharacteristicInputs(outerRefs);
} // RelExpr::recomputeOuterReferences()
// -----------------------------------------------------------------------
// RelExpr::rewriteNode()
// -----------------------------------------------------------------------
void RelExpr::rewriteNode(NormWA & normWARef)
{
// ---------------------------------------------------------------------
// Rewrite the expressions of each child.
// ---------------------------------------------------------------------
Int32 nc = getArity();
for (Int32 i = 0; i < nc; i++)
child(i)->rewriteNode(normWARef);
// ---------------------------------------------------------------------
// Rewrite the expressions in the selection predicates.
// ---------------------------------------------------------------------
if (selectionPred().normalizeNode(normWARef))
{
}
// ++MV
if (getUniqueColumns().normalizeNode(normWARef))
{
}
// --MV
// ---------------------------------------------------------------------
// Rewrite the expressions in the Group Attributes.
// ---------------------------------------------------------------------
getGroupAttr()->normalizeInputsAndOutputs(normWARef);
} // RelExpr::rewriteNode()
// -----------------------------------------------------------------------
// RelExpr::normalizeNode()
// -----------------------------------------------------------------------
RelExpr * RelExpr::normalizeNode(NormWA & normWARef)
{
if (nodeIsNormalized())
return this;
markAsNormalized();
Int32 arity = getArity();
// --------------------------------------------------------------------
// Check which expressions can be evaluated by my child.
// Modify the Group Attributes of those children who
// inherit some of these expressions.
// ---------------------------------------------------------------------
pushdownCoveredExpr(getGroupAttr()->getCharacteristicOutputs(),
getGroupAttr()->getCharacteristicInputs(),
selectionPred());
// ---------------------------------------------------------------------
// Transform each child.
// ---------------------------------------------------------------------
for (Int32 i = 0; i < arity; i++)
child(i) = child(i)->normalizeNode(normWARef);
// The essential char. outputs of my child can be fully computed only
// when the essential char. outputs of my grandchildren are fully computed
// This is because one of the rules for computing essential char. outputs
// is: An output that is essential in my child will stay essential in me.
// This rule can be enforced in the bottom-up part of the tree walk, while
// pushDownCoveredExpr which computes outputs is performed in the top-down
// part. Therefore we need to call this method here to set up the essential
// outputs correctly. PushDownCoveredExpr also has this logic; in phases
// beyond the normalizer this method need not be called again. It's only that
// for the first time the logic in pushDownCoveredExpr is not sufficient, as
// the grandchildren don't have any essential outputs yet.
fixEssentialCharacteristicOutputs();
return this;
} // RelExpr::normalizeNode()
// -----------------------------------------------------------------------
// RelExpr::semanticQueryOptimizeNode()
// -----------------------------------------------------------------------
RelExpr * RelExpr::semanticQueryOptimizeNode(NormWA & normWARef)
{
Int32 arity = getArity();
// ---------------------------------------------------------------------
// SemanticQueryOptimize each child.
// ---------------------------------------------------------------------
for (Int32 i = 0; i < arity; i++)
child(i) = child(i)->semanticQueryOptimizeNode(normWARef);
return this;
} // RelExpr::semanticQueryOptimizeNode()
// -----------------------------------------------------------------------
// RelExpr::getMoreOutputsIfPossible()
// This method is recursive. It is capable of making a tree walk down from
// the RelExpr pointed to by "this" and promoting the outputs
// of the children of each node so that the "this" node has all columns
// required to produce the valueids in the parameter outputsNeeded.
// If all members of outputsNeeded cannot be produced due to the presence
// of some operator that does not allow outputs from children to flow through
// (like groupby or sequence) then this method returns FALSE.
// Currently, this method is used towards the end of unnesting a tsj node.
// As part of the unnesting process, the left child of the join is
// required to produce additional columns which have been identified
// as a unique set for the left sub-tree. Sometimes it is possible that
// the children of the Join's left child are not producing one or more
// members of this unique set.
// -----------------------------------------------------------------------
NABoolean RelExpr::getMoreOutputsIfPossible(ValueIdSet& outputsNeeded)
{
// no additional outputs are needed.
if (outputsNeeded.isEmpty())
return TRUE;
Int32 i, nc ;
ValueIdSet tempSet, potentialOutputsFromChildren, newOutputsNeeded ;
ValueIdSet emptySet,coveredExprs, coveredSubExprs;
GroupAttributes fakeGA;
NABoolean gotOutputsNeeded = FALSE ;
// In the top-down part of the tree walk, check if the children of the
// current node can produce the required outputs; if so, the tree
// walk need not proceed any further.
nc = getArity();
for(i = 0; i < nc ; i++)
{
child(i).getPtr()->getPotentialOutputValuesAsVEGs(tempSet);
potentialOutputsFromChildren += tempSet ;
fakeGA.addCharacteristicInputs(child(i).getGroupAttr()->getCharacteristicInputs());
}
fakeGA.addCharacteristicOutputs(potentialOutputsFromChildren);
fakeGA.coverTest(outputsNeeded,
emptySet, // additional inputs not provided
coveredExprs,
emptySet, // additional inputs not provided
&coveredSubExprs,
&newOutputsNeeded);
if (NOT newOutputsNeeded.isEmpty())
{
// children of current node could not produce all needed outputs
// proceed further down the tree, looking for needed outputs.
for(i = 0; i < nc ; i++)
{
if (NOT gotOutputsNeeded)
gotOutputsNeeded = child(i).getPtr()->
getMoreOutputsIfPossible(newOutputsNeeded) ;
}
}
// In the bottom-up part of the tree walk, add to the outputs of
// the children whatever values will cover any part of outputsNeeded.
// If outputsNeeded cannot be entirely satisfied, return FALSE.
// check what the children are capable of producing now that their
// outputs have been possibly increased. If the children still cannot
// produce all outputs needed (even after the recursive
// call returned TRUE), then that means that the child is an operator
// like SEQUENCE that does not allow outputs to flow through. Return
// FALSE in this case.
potentialOutputsFromChildren.clear();
newOutputsNeeded.clear();
for(i = 0; i < nc ; i++)
{
child(i).getPtr()->getPotentialOutputValuesAsVEGs(tempSet);
potentialOutputsFromChildren += tempSet ;
}
fakeGA.addCharacteristicOutputs(potentialOutputsFromChildren);
fakeGA.coverTest(outputsNeeded,
emptySet, // additional inputs not provided
coveredExprs,
emptySet, // additional inputs not provided
&coveredSubExprs,
&newOutputsNeeded);
// increase outputs for children if all is well.
ValueIdSet outputsToAdd, maxOutputs ;
for(i = 0; i < nc ; i++)
{
outputsToAdd.clear();
maxOutputs.clear();
child(i).getPtr()->getPotentialOutputValuesAsVEGs(maxOutputs);
outputsToAdd.accumulateReferencedValues(
maxOutputs,
outputsNeeded);
child(i)->getGroupAttr()->addCharacteristicOutputs(outputsToAdd);
if (getOperatorType() == REL_MAP_VALUEIDS)
{
((MapValueIds *)this)->addSameMapEntries(outputsToAdd);
}
// child(i).getGroupAttr()->computeCharacteristicIO
// (emptySet, // no additional inputs
// outputsNeeded);
}
if (NOT newOutputsNeeded.isEmpty())
{
outputsNeeded = newOutputsNeeded ;
return FALSE ;
}
else
{
outputsNeeded.clear();
return TRUE ;
}
}
// RelExpr::getMoreOutputsIfPossible()
// ***********************************************************************
// $$$$ Join
// member functions for class Join
// ***********************************************************************
// -----------------------------------------------------------------------
// Join::transformNode()
// -----------------------------------------------------------------------
void Join::transformNode(NormWA & normWARef,
ExprGroupId & locationOfPointerToMe)
{
CMPASSERT( this == locationOfPointerToMe );
if (nodeIsTransformed())
return;
markAsTransformed();
// ---------------------------------------------------------------------
// Rewrite a Right Join as a Left Join
// ---------------------------------------------------------------------
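// For example (illustrative only): T1 RIGHT JOIN T2 ON p is equivalent to
// T2 LEFT JOIN T1 ON p, so the operator type is flipped and the two
// children are swapped below.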
if (getOperatorType() == REL_RIGHT_JOIN)
{
setOperatorType(REL_LEFT_JOIN);
// switch the left and right subtrees
RelExpr * temp = child(0).getPtr();
child(0) = child(1).getPtr();
child(1) = temp;
}
if (isInnerNonSemiJoin())
{
// -----------------------------------------------------------------
// If not a SEMI or OUTER join then move the predicates in joinPred_
// to the selection predicates.
// -----------------------------------------------------------------
selectionPred() += joinPred_;
joinPred_.clear();
}
// before triggers need special handling for subqueries
if (child(0)->getOperatorType() == REL_BEFORE_TRIGGER)
{
normWARef.setInBeforeTrigger(TRUE);
}
// Make values available to the children
ValueIdSet availableValues = getGroupAttr()->getCharacteristicInputs();
child(1)->getGroupAttr()->addCharacteristicInputs(availableValues);
if (isTSJForMergeUpsert())
{
ValueIdSet subqVids;
for (ValueId vid = availableValues.init();
availableValues.next(vid); availableValues.advance(vid)) {
if (vid.getItemExpr()->getOperatorType() == ITM_ROW_SUBQUERY)
subqVids.insert(vid);
}
availableValues -= subqVids;
//remove subqueries
}
child(0)->getGroupAttr()->addCharacteristicInputs(availableValues);
// ---------------------------------------------------------------------
// Allocate a new VegRegion for the left subtree for
// full outer join.
// This is needed so as to convert the Full Outer to
// (a) Left Join - if there is a selection predicate on a column that
// suffers from null-instantiation and that column is
// part of the join column and that column is covered by
// left subtree.
// (b) Right Join - if there is a selection predicate on a column that
// suffers from null-instantiation and that column is
// part of the join column and that column is covered by
// right subtree.
// (c) Inner Join - if both (a) and (b) are true. That is, there is a predicate
// that satisfies (a) and there is a predicate that
// satisfies (b).
// ---------------------------------------------------------------------
if (isFullOuterJoin())
normWARef.allocateAndSetVEGRegion(IMPORT_AND_EXPORT,
this, // owner
0 // first child
);
// ---------------------------------------------------------------------
// Transform the left child.
// Put any semijoins between the child and myself
// ---------------------------------------------------------------------
child(0)->transformNode(normWARef, child(0));
// Return to my own VEGRegion.
if (isFullOuterJoin())
normWARef.restoreOriginalVEGRegion();
// ---------------------------------------------------------------------
// Initialize a new VEGRegion when entering the right subtree of a
// Left Join. The new VEGRegion should be capable of importing
// any outer references and exporting any value that suffers null
// instantiation.
// We don't really need the right subtree to be in a different VEGRegion
// from the left subtree since it cannot reference any values produced
// from there anyway. But the on clause needs to be in a different VEGRegion
// and since the on clause may introduce semiJoin's in the right subtree
// it is more convenient to put both the subtree and the ON clause in the
// same VEGRegion.
// ---------------------------------------------------------------------
if (isLeftJoin() OR isFullOuterJoin() OR isAntiSemiJoin())
{
// Create a new VEGRegion for the right child for full outer Join.
if (isFullOuterJoin())
normWARef.allocateAndSetVEGRegion(IMPORT_AND_EXPORT,
this, // owner
1 //second child
);
else
normWARef.allocateAndSetVEGRegion(IMPORT_AND_EXPORT,
this); // default to first child.
}
// ---------------------------------------------------------------------
// Transform the right child.
// Put any semijoins between the child and myself
// ---------------------------------------------------------------------
child(1)->transformNode(normWARef, child(1));
if (isFullOuterJoin())
normWARef.restoreOriginalVEGRegion();
// done transforming before triggers subqueries
if (normWARef.isInBeforeTrigger())
{
normWARef.setInBeforeTrigger(FALSE);
}
// ---------------------------------------------------------------------
// If there is a joinPred transform them. Put any new Joins between this
// join and my current transformed child. The predicates may reference
// values from the left child.
// ---------------------------------------------------------------------
// Create a new VEGRegion for the Full Outer Region.
// The Join Predicate will reside in this VEGRegion.
// The selection predicate will remain in the parent's
// VEGRegion.
if (isFullOuterJoin())
normWARef.allocateAndSetVEGRegion(IMPORT_ONLY,
this, // owner
2 // third child
);
// TBD - Hema.
// Disallow subqueries in Join Predicate in Full Outer Join.
const NABoolean movePredicates = TRUE;
ValueIdSet externalInputs(getGroupAttr()->getCharacteristicInputs());
externalInputs += child(0)->getGroupAttr()->getCharacteristicOutputs();
if (joinPred().transformNode(normWARef,child(1),
externalInputs,movePredicates ))
{
// Transform the new right child
child(1)->transformNode(normWARef, child(1));
// -----------------------------------------------------------------
// The transformed subquery predicate requires values that are
// produced by the semiJoin that is now my right child.
// The transformed predicates were moved there and the required
// inputs for my child are now correct (sufficient and minimal).
// -----------------------------------------------------------------
// Check to see if we need to turn this into a TSJ.
ValueIdSet neededInputs;
neededInputs = child(1).getPtr()->getGroupAttr()->getCharacteristicInputs();
neededInputs -= getGroupAttr()->getCharacteristicInputs();
ValueIdSet crossReferences;
crossReferences = child(0)->getGroupAttr()->getCharacteristicOutputs();
// --------------------------------------------------------------------
// At this point of transformation, the vid's in the different parts
// of the query tree might be inconsistent due to replacement expr
// being set. This will be corrected during normalization. Here, we
// need to explicitly compare the vid's of the replacement expr's.
// --------------------------------------------------------------------
ValueIdSet neededInputs2;
ValueId vid;
for (vid = neededInputs.init();
neededInputs.next(vid);
neededInputs.advance(vid))
neededInputs2.insert(vid.getItemExpr()->
getReplacementExpr()->getValueId());
ValueIdSet crossReferences2;
for (vid = crossReferences.init();
crossReferences.next(vid);
crossReferences.advance(vid))
crossReferences2.insert(vid.getItemExpr()->
getReplacementExpr()->getValueId());
crossReferences2.intersectSet(neededInputs2);
// If the right child needs values from the left child, turn this
// Join into a TSJ
if(NOT crossReferences2.isEmpty() && NOT isTSJ())
{
convertToTsj();
// After we transform the right child and we pullup predicates
// we may turn back to a non TSJ if we were able to pull-up
// all those predicates that needed values from the left child.
}
// Verify that we can produce every value the right child needs
neededInputs2 -= crossReferences2;
// neededInputs is now what the right child needs and is
// neither an input to this join nor an output of the left child
CMPASSERT(neededInputs2.isEmpty());
}
// ---------------------------------------------------------------------
// Restore the original VEGRegion.
// ---------------------------------------------------------------------
if (isLeftJoin() OR isFullOuterJoin() OR isAntiSemiJoin())
normWARef.restoreOriginalVEGRegion();
#if 0
// ---------------------------------------------------------------------
// Try to create "singleton" VEG with an null-inst value to emulate
// what's happening with base columns and index columns. This might not
// really be necessary and is therefore commented out for now pending
// a more detailed study.
// ---------------------------------------------------------------------
// Go through null-instantiated outputs and add columns to VEG.
if (isLeftJoin())
{
for (CollIndex x = 0; x < nullInstantiatedOutput().entries(); x++)
{
ValueId vid = nullInstantiatedOutput().at(x);
normWARef.addVEG(vid,vid);
}
}
#endif
DBGSETDBG( "TRANSFORM_DEBUG" )
DBGIF(
unp = "";
selectionPred().unparse(unp);
cerr << "Join selpred: " << unp << endl;
)
// Pull up the predicates and recompute the required inputs
// of whoever my children are now.
pullUpPreds();
DBGIF(
unp = "";
selectionPred().unparse(unp);
cerr << "Join selpred: " << unp << endl;
)
if (CmpCommon::getDefault(NOT_IN_OPTIMIZATION) == DF_ON)
{
if (isAntiSemiJoin())
{
// if there is a NotIn(A,B) predicate, try transforming it to A=B if possible
ValueIdSet origSet;
ValueIdSet newSet;
rewriteNotInPredicate(origSet, newSet);
if (newSet.entries()>0)
{
normWARef.locateAndSetVEGRegion(this);
newSet.transformNode(normWARef,child(1),
externalInputs,movePredicates );
normWARef.restoreOriginalVEGRegion();
joinPred() -= origSet;
joinPred() += newSet;
}
}
}
// ---------------------------------------------------------------------
// Convert a tsj to a join when a value that is produced by the left
// subtree is not referenced in the right subtree.
// If the tsj right child contains triggers - don't convert to join.
// Triggers need to be activated for each left row even if they don't
// reference the left subtree data.
//
// For RoutineJoins/Udfs we also want to convert it to a join if the UDF
// does not need any inputs from the left.
// ---------------------------------------------------------------------
if (isTSJ())
{
/*--- old code (see comment below why it's replaced)
ValueIdSet outerRefs =
child(0)->getGroupAttr()->getCharacteristicOutputs();
outerRefs.intersectSet
(child(1)->getGroupAttr()->getCharacteristicInputs());
---*/
// Check to see if we need to turn this into a TSJ.
ValueIdSet neededInputs;
neededInputs = child(1).getPtr()->getGroupAttr()->getCharacteristicInputs();
// is this ok? Our set of char. inputs may not yet be minimal,
// and could contain char. outputs from the left child.
neededInputs -= getGroupAttr()->getCharacteristicInputs();
ValueIdSet crossReferences;
crossReferences = child(0)->getGroupAttr()->getCharacteristicOutputs();
// --------------------------------------------------------------------
// At this point of transformation, the vid's in the different parts
// of the query tree might be inconsistent due to replacement expr
// being set. This will be corrected during normalization. Here, we
// need to explicitly compare the vid's of the replacement expr's.
// --------------------------------------------------------------------
ValueIdSet neededInputs2;
ValueId vid;
for (vid = neededInputs.init();
neededInputs.next(vid);
neededInputs.advance(vid))
neededInputs2.insert(vid.getItemExpr()->
getReplacementExpr()->getValueId());
ValueIdSet crossReferences2;
for (vid = crossReferences.init();
crossReferences.next(vid);
crossReferences.advance(vid))
crossReferences2.insert(vid.getItemExpr()->
getReplacementExpr()->getValueId());
crossReferences2.intersectSet(neededInputs2);
// The above logic looks at intersection of ValueIdSet of outputs
// of child(0) and inputs of child(1) to decide if a join is a TSJ.
// Sometimes, an expression (such as a base column of child(0))
// may have two distinct valueids depending on where it is appearing;
// i.e., it may have v1 as valueid on left side (child(0)) and
// v2 on right side
// (child(1)). In these cases, the simple intersection test is not
// complete. So, we look at physical index columns and names
// to deduce whether the two valueids are in fact the same or not.
if (crossReferences2.isEmpty())
{
ValueIdSet eis; // for storing equivalent index set
for (vid = crossReferences.init();
crossReferences.next(vid);
crossReferences.advance(vid))
{
if (vid.getItemExpr()->getOperatorType() != ITM_BASECOLUMN)
continue;
eis += ((BaseColumn *)vid.getItemExpr())->getEIC();
} // for populate eis set
ValueId vid1;
ValueIdSet eis1; // for storing equivalent index set
for (vid1 = neededInputs.init();
neededInputs.next(vid1);
neededInputs.advance(vid1))
{
if (vid1.getItemExpr()->getOperatorType() != ITM_BASECOLUMN)
continue;
eis1+= ((BaseColumn *)vid1.getItemExpr())->getEIC();
} // populate eis1 set
// now compare physical fileset name and column names, to see
// if any two columns from sets eis and eis1 are the same
ValueIdSet rightChildInputs =
child(1).getPtr()->getGroupAttr()->getCharacteristicInputs();
for (vid = eis.init();
eis.next(vid);
eis.advance(vid))
{
if (vid.getItemExpr()->getOperatorType() != ITM_INDEXCOLUMN)
continue;
for (vid1 = eis1.init();
eis1.next(vid1);
eis1.advance(vid1))
{
if (vid1.getItemExpr()->getOperatorType() != ITM_INDEXCOLUMN)
continue;
if ( doTwoVidsReferToSameColumn(vid, vid1) )
{
normWARef.addVEG(
((IndexColumn *)vid.getItemExpr())->getDefinition(),
((IndexColumn *)vid1.getItemExpr())->getDefinition()
);
//-------------------------------------------------------
// Genesis Case: 10-000626-1151:
// This is a TSJ: if the right child is asking for a valueid
// that is in the same VEG as one the left child is producing, then
// the right child may ask for what the left child is producing
//--------------------------------------------------------
rightChildInputs -=
((IndexColumn *)vid1.getItemExpr())->getDefinition();
rightChildInputs +=
((IndexColumn *)vid.getItemExpr())->getDefinition();
crossReferences2.insert(vid);
}
} // inner for
} // outer for
child(1).getPtr()->getGroupAttr()->
setCharacteristicInputs(rightChildInputs);
} // isEmpty()
if (crossReferences2.isEmpty() &&
!isTSJForWrite() &&
!getInliningInfo().isDrivingPipelinedActions() &&
!getInliningInfo().isDrivingTempInsert() )// Triggers -
{
// Remember we used to be a RoutineJoin. This is used to determine
// what type of contexts for partitioning we will try in OptPhysRel.
if (isRoutineJoin()) setDerivedFromRoutineJoin();
convertToNotTsj();
}
else
{
// We have a TSJ that will be changed to a Nested join, so it is
// safe to change NotIn here to the non-equi-predicate form (NE).
// At this point only the single-column NotIn case can reach here,
// and either the outer or inner column (or both) is nullable
// and may contain null values.
resolveSingleColNotInPredicate();
}
}
transformSelectPred(normWARef, locationOfPointerToMe);
} // Join::transformNode()
// -----------------------------------------------------------------------
// Join::pullUpPreds()
// -----------------------------------------------------------------------
void Join::pullUpPreds()
{
// We don't pull up predicates for Full Outer Join (FOJ).
// That is because we don't try to push them down during
// normalization.
// Just recomputeOuterReferences() on both children.
if (getOperatorType() == REL_FULL_JOIN)
{
child(0)->recomputeOuterReferences();
child(1)->recomputeOuterReferences();
return;
}
// ---------------------------------------------------------------------
// Pull up predicates from each child.
// Accumulate the predicates from each of my child subtrees.
// ---------------------------------------------------------------------
// Pull up the predicates from the left child
// ---------------------------------------------------------------------
selectionPred() += child(0)->getSelectionPred();
child(0)->selectionPred().clear();
child(0)->recomputeOuterReferences();
// ---------------------------------------------------------------------
// If outer/semi join then predicates from the right child go to
// joinPred otherwise they go to the selectionPred.
// ---------------------------------------------------------------------
if (isInnerNonSemiJoin() || getOperatorType() == REL_TSJ_FLOW)
{
selectionPred() += child(1)->getSelectionPred();
}
else
{
joinPred() += child(1)->getSelectionPred();
}
child(1)->selectionPred().clear();
child(1)->recomputeOuterReferences();
//----------------------------------------------------------------------
// If I am a SemiJoin and any of my joinPred is covered by my inputs
// and my first child's output, then move that to the selectionPred.
//----------------------------------------------------------------------
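// (An illustrative example: a join predicate that references only values
// from the left child's outputs and my inputs can be evaluated as a
// selection predicate on the semijoin instead.)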
ValueIdSet predicatesToMove, boringSet, predicatesThatStay;
if (isSemiJoin()) // anti-joins, left-joins shouldn't do this!
{
getGroupAttr()->coverTest(joinPred(),
child(0)->getGroupAttr()->getCharacteristicOutputs(),
predicatesToMove,
boringSet,
&predicatesThatStay);
if (NOT predicatesToMove.isEmpty())
{
joinPred() -= predicatesToMove;
selectionPred() += predicatesToMove;
}
}
} // Join::pullUpPreds()
// -----------------------------------------------------------------------
// Join::recomputeOuterReferences()
// -----------------------------------------------------------------------
void Join::recomputeOuterReferences()
{
// ---------------------------------------------------------------------
// Delete all those input values that are no longer referenced on
// this operator because the predicates that reference them have
// been pulled up.
// ---------------------------------------------------------------------
if (NOT getGroupAttr()->getCharacteristicInputs().isEmpty())
{
ValueIdSet outerRefs = getGroupAttr()->getCharacteristicInputs();
// Weed out those expressions not needed by my selectionPred and joinPred
ValueIdSet exprSet = getSelectionPred();
exprSet += joinPred();
exprSet.insertList(nullInstantiatedOutput());
exprSet.insertList(nullInstantiatedForRightJoinOutput());
exprSet.weedOutUnreferenced(outerRefs);
// Add back those expressions needed by my left child
outerRefs += child(0).getPtr()->getGroupAttr()->getCharacteristicInputs();
// If it is a TSJ don't add the outputs of the left child to
// the needed inputs.
exprSet = child(1).getPtr()->getGroupAttr()->getCharacteristicInputs();
if (isTSJForMergeUpsert())
{
ValueIdSet exprSet2;
ValueId vid;
for (vid = exprSet.init();
exprSet.next(vid);
exprSet.advance(vid))
exprSet2.insert(vid.getItemExpr()->
getReplacementExpr()->getValueId());
exprSet2.removeCoveredExprs(child(0).getPtr()->getGroupAttr()->getCharacteristicOutputs());
outerRefs += exprSet2;
}
else
{
if (isTSJ())
{
exprSet.removeCoveredExprs(child(0).getPtr()->getGroupAttr()->getCharacteristicOutputs());
}
outerRefs += exprSet;
}
getGroupAttr()->setCharacteristicInputs(outerRefs);
}
} // Join::recomputeOuterReferences()
// ----------------------------------------------------------------------
// Fix genesis case 10-061010-8731, solution 10-061010-9689 RFE in which
// queries like
// select ... from t1
// where t1.c not in (select t2.c from t2 where t2.c is not null ...)
// and t1.c is not null ...
// is compiled into a horribly inefficient but correct plan like
// nested_anti_semi_join(pa(t1), pa(t2))
// which generates a cross product of t1 with t2 and applies the predicate
// not((t1.c <> t2.c) is true)
// A joke is that the Bank of America query reported in the case would have
// taken 5 years to run. A much better plan would be
// hash_anti_semi_join(pa(t1), pa(t2))
// which generates a join of t1 with t2 and applies the predicate
// t1.c = t2.c
// Using this plan, the Bank of America query completes in under 2 minutes.
// ----------------------------------------------------------------------
void Join::tryToRewriteJoinPredicate(NormWA & normWARef)
{
// applies only to anti_semi_joins
if (!isAntiSemiJoin()) {
return;
}
// look for "not((t1.c <> t2.c) is true)"
for (ValueId exprId = joinPred().init();
joinPred().next(exprId);
joinPred().advance(exprId)) {
ItemExpr *iePtr = exprId.getItemExpr();
if (iePtr->getOperatorType() == ITM_NOT) {
ItemExpr *grandkid, *kid = iePtr->child(0);
if (kid && kid->getOperatorType() == ITM_IS_TRUE &&
(grandkid=kid->child(0)) != NULL &&
grandkid->getOperatorType() == ITM_NOT_EQUAL) {
// look for conditions that can guarantee the operands' non-nullability, eg,
// look for "t1.c is not null && t2.c is not null"
ValueIdSet preds = joinPred(); // start with join predicate
preds -= exprId; // remove "not((t1.c <> t2.c) is true)" from set
preds += selectionPred(); // add any selection predicate
if (preds.isNotNullable(grandkid->child(0)) &&
preds.isNotNullable(grandkid->child(1))) {
#ifndef NDEBUG
FILE *logF = NULL;
NABoolean logRewrites =
CmpCommon::getDefault(COMP_BOOL_138) == DF_OFF &&
CmpCommon::getDefault(COMP_BOOL_137) == DF_ON;
if (logRewrites &&
(logF = fopen("rewriteJoinPredicateLog.txt", "a")) != NULL) {
preds.print(logF, "", "ASJ predicates:");
iePtr->print(logF);
}
#endif
// both operands are guaranteed to be non-null. replace
// "not((t1.c<>t2.c) is true) && t1.c is not null && t2.c is not null"
// with "t1.c=t2.c".
ItemExpr *eqpred = new(normWARef.wHeap())
BiRelat(ITM_EQUAL, grandkid->child(0).getPtr(),
grandkid->child(1).getPtr());
((BiRelat *)eqpred)->
specialMultiValuePredicateTransformation() = TRUE;
exprId.replaceItemExpr(eqpred);
eqpred->synthTypeAndValueId(TRUE);
#ifndef NDEBUG
if (logRewrites && logF) {
exprId.getItemExpr()->print(logF);
fclose(logF);
}
#endif
}
}
}
}
}
// -----------------------------------------------------------------------
// Join::rewriteNode()
// -----------------------------------------------------------------------
void Join::rewriteNode(NormWA & normWARef)
{
NABoolean isALeftJoin = isLeftJoin();
NABoolean isAFullOuterJoin = isFullOuterJoin();
NABoolean isASemiJoin = isSemiJoin();
// ---------------------------------------------------------------------
// Check if this is a Left Join.
// ---------------------------------------------------------------------
if (isALeftJoin && !isAFullOuterJoin)
{
if (canConvertLeftJoinToInnerJoin(normWARef))
{
// -------------------------------------------------------------
// Convert the operator so that it is no longer an outer join.
// -------------------------------------------------------------
convertToNotOuterJoin();
isALeftJoin = FALSE; // no longer a LEFT JOIN
// -------------------------------------------------------------
// Combine all the predicates together.
// -------------------------------------------------------------
if (isASemiJoin || isAntiSemiJoin())
{
CMPASSERT (FALSE) ; // left joins can't be semi!
joinPred() += getSelectionPred();
selectionPred().clear();
}
else
{
selectionPred() += joinPred();
joinPred().clear();
}
}
}
// Check if it's a full outer join
if (isAFullOuterJoin)
{
tryToConvertFullOuterJoin(this, normWARef);
// that means Full Outer Join has been converted.
if (getOperatorType() != REL_FULL_JOIN)
isAFullOuterJoin = FALSE; // no longer a FULL OUTER
}
// try to rewrite join predicate from
// not((t1.c <> t2.c) is true) and t1.c is not null and t2.c is not null
// to
// t1.c = t2.c and t1.c is not null and t2.c is not null
// -----------------------------------------------------------
// When NOT_IN_OPTIMIZATION is ON we don't need to call
// tryToRewriteJoinPredicate method anymore.
// We may need to remove this call and the method in the future
if (CmpCommon::getDefault(COMP_BOOL_138) == DF_OFF &&
CmpCommon::getDefault(NOT_IN_OPTIMIZATION) == DF_OFF) {
tryToRewriteJoinPredicate(normWARef);
}
// ---------------------------------------------------------------------
// Rewrite the expressions of the left child.
// ---------------------------------------------------------------------
if (isAFullOuterJoin)
normWARef.locateAndSetVEGRegion(this, 0 /* first child */);
child(0)->rewriteNode(normWARef);
// -----------------------------------------------------------------
// Normalize the values that will be subject to null-instantiation
// with values in the Child(0) region.
// -----------------------------------------------------------------
normalizeNullInstantiatedForRightJoinOutput(normWARef);
// -----------------------------------------------------------------
// Restore the original VEGRegion.
// -----------------------------------------------------------------
if(isAFullOuterJoin)
normWARef.restoreOriginalVEGRegion();
// ---------------------------------------------------------------------
// Rewrite the expressions of the right child.
// ---------------------------------------------------------------------
if (isALeftJoin OR isAFullOuterJoin OR isAntiSemiJoin())
{
// -----------------------------------------------------------------
// Locate and set the VEGRegion for the ON clause.
// This is done in order to rewrite "=" predicates in terms of
// the VEGs that are valid within its VEGRegion.
// -----------------------------------------------------------------
if (isAFullOuterJoin)
normWARef.locateAndSetVEGRegion(this, 1 /* second child */);
else
normWARef.locateAndSetVEGRegion(this);
child(1)->rewriteNode(normWARef);
// -----------------------------------------------------------------
// Normalize the values that will be subject to null-instantiation
// with values in the Child(1) region.
// -----------------------------------------------------------------
normalizeNullInstantiatedOutput(normWARef);
// -----------------------------------------------------------------
// Restore the original VEGRegion.
// -----------------------------------------------------------------
if (isAFullOuterJoin)
normWARef.restoreOriginalVEGRegion();
// -----------------------------------------------------------------
// Rewrite expressions in the ON clause predicate.
// -----------------------------------------------------------------
if (isAFullOuterJoin)
normWARef.locateAndSetVEGRegion(this, 2 /* third child */);
normWARef.setInJoinPredicate(TRUE) ;
if (joinPred().normalizeNode(normWARef))
{
}
normWARef.setInJoinPredicate(FALSE) ;
// -----------------------------------------------------------------
// Restore the original VEGRegion.
// -----------------------------------------------------------------
normWARef.restoreOriginalVEGRegion();
} // normalize the ON clause of the LEFT Join
else
{ // normalize the ON clause of the INNER Join
child(1)->rewriteNode(normWARef);
// -----------------------------------------------------------------
// Rewrite expressions in the ON clause predicate.
// -----------------------------------------------------------------
if (joinPred().normalizeNode(normWARef))
{
}
} // normalize the ON clause of the INNER Join
// ---------------------------------------------------------------------
// Rewrite expressions in the WHERE clause predicate tree.
// ---------------------------------------------------------------------
if (selectionPred().normalizeNode(normWARef))
{
}
// ---------------------------------------------------------------------
// Rewrite the ValueIdMap between the select and the update part so
// it has VEGReferences in it (note that we avoided VEGs that span
// both the select and the update part; this is (probably?) one
// reason why we only normalized one half of the key preds above).
// ---------------------------------------------------------------------
if( getInliningInfo().isDrivingMvLogInsert()
&&
NULL != updateSelectValueIdMap_ )
{
updateSelectValueIdMap_->normalizeNode(normWARef);
// If a VID in the bottom is of the form ValueIdUnion(x,x),
// then replace it with x. This is necessary to push down UPDATEs
// with MV attached tables into DP2.
const ValueIdList& originalBottomValues =
updateSelectValueIdMap_->getBottomValues();
ValueIdList newBottomValues(originalBottomValues);
for(CollIndex i = 0; i < originalBottomValues.entries(); i++) {
ItemExpr* x = originalBottomValues[i].getItemExpr();
if (x && x->getOperatorType() == ITM_VALUEIDUNION &&
((ValueIdUnion*)x) -> getLeftSource() ==
((ValueIdUnion*)x) -> getRightSource()
)
{
newBottomValues[i] = ((ValueIdUnion*)x)->getRightSource();
} else
newBottomValues[i] = originalBottomValues[i];
}
updateSelectValueIdMap_ = new (CmpCommon::statementHeap())
ValueIdMap(updateSelectValueIdMap_->getTopValues(), newBottomValues);
}
// ---------------------------------------------------------------------
// Rewrite expressions in the Group Attributes.
// ---------------------------------------------------------------------
if (isALeftJoin)
normWARef.saveLeftJoinChildVEGRegion(this,0);
((ValueIdSet &)getGroupAttr()->getCharacteristicInputs()).normalizeNode(normWARef);
if (isALeftJoin)
normWARef.resetLeftJoinChildVEGRegion();
((ValueIdSet &)getGroupAttr()->getCharacteristicOutputs()).normalizeNode(normWARef);
// getGroupAttr()->normalizeInputsAndOutputs(normWARef);
} // Join::rewriteNode()
// -----------------------------------------------------------------------
// Join::canConvertLeftJoinToInnerJoin()
// Currently handles LEFT JOIN only.
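// An illustrative example of the usual intuition (the actual test below is
// whether the relevant VEGRegions have been merged): in
// SELECT ... FROM t1 LEFT JOIN t2 ON t1.a = t2.a WHERE t2.b = 5
// the WHERE predicate rejects null-instantiated t2 rows, so the left join
// behaves like an inner join.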
// -----------------------------------------------------------------------
NABoolean Join::canConvertLeftJoinToInnerJoin(NormWA & normWARef)
{
return normWARef.locateVEGRegionAndCheckIfMerged(this);
} // Join::canConvertLeftJoinToInnerJoin()
// -----------------------------------------------------------------------
// Join::normalizeNullInstantiatedOutput()
//
// A method for normalizing the operands of an InstantiateNull operator
// that appears in the nullInstantiatedOutput(). A special method is
// necessary to prevent an InstantiateNull that appears in this list
// from being replaced with a VEGReference for the VEG to which it
// belongs.
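// For example (illustrative only): for T1 LEFT JOIN T2, a select-list
// column of T2 appears as InstantiateNull(T2.col); only the operand T2.col
// is normalized here, so the InstantiateNull wrapper itself is preserved
// instead of being collapsed into a VEGReference.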
// -----------------------------------------------------------------------
void Join::normalizeNullInstantiatedOutput(NormWA & normWARef)
{
ItemExpr * instNull;
for (CollIndex index = 0;
index < nullInstantiatedOutput().entries(); index++)
{
instNull = nullInstantiatedOutput()[index].getItemExpr();
CMPASSERT(instNull->getOperatorType() == ITM_INSTANTIATE_NULL);
// Replace the existing child of the InstantiateNull with
// its normalized form.
instNull->child(0) = instNull->child(0)->normalizeNode(normWARef);
} // endfor
//++MV
// Used for translating the required sort key to the right
// child sort key and backwards
BuildRightChildMapForLeftJoin();
//--MV
} // Join::normalizeNullInstantiatedOutput()
// -----------------------------------------------------------------------
// Join::normalizeNullInstantiatedForRightJoinOutput()
//
// A method for normalizing the operands of an InstantiateNull operator
// that appears in the nullInstantiatedForRightJoinOutput(). A
// special method is necessary to prevent an InstantiateNull that
// appears in this list from being replaced with a VEGReference for the
// VEG to which it belongs.
// -----------------------------------------------------------------------
void Join::normalizeNullInstantiatedForRightJoinOutput(NormWA & normWARef)
{
ItemExpr * instNull;
for (CollIndex index = 0;
index < nullInstantiatedForRightJoinOutput().entries(); index++)
{
instNull = nullInstantiatedForRightJoinOutput()[index].getItemExpr();
CMPASSERT(instNull->getOperatorType() == ITM_INSTANTIATE_NULL);
// Replace the existing child of the InstantiateNull with
// its normalized form.
instNull->child(0) = instNull->child(0)->normalizeNode(normWARef);
} // endfor
//++MV
// Used for translating the required sort key to the right
// child sort key and backwards
BuildLeftChildMapForRightJoin();
//--MV
} // Join::normalizeNullInstantiatedForRightJoinOutput()
// -----------------------------------------------------------------------
// Join::leftLinearizeJoinTree()
//
// A left-linear tree of Inner Joins is one in which no Inner Join
// has another Inner Join as its right child. This method implements
// a transformation rule that produces a left-linear tree of Inner Joins.
// It replaces, if possible, T1 IJ (T2 IJ T3) with a left-linear sequence
// T1 IJ T2 IJ T3.
//
// The figure below assumes that subtree L2 and subtree R2 do not have
// an Inner Join as the topmost node.
// Pattern before the transformation:
//
// Inner Join #1 : p1
// / \
// subtree L1 : p2 Inner Join #2 : p5
// / \
// subtree L2 : p3 subtree R2 : p4
//
// NOTE: p1,p2,p3,p4,p5 are predicates
//
// Left linear tree produced by this transformation:
//
// Inner Join #2 : p1 & p5 (we attempt to push down these predicates, so they may end up in the children)
// / \
// Inner Join #1 subtree R2 : p4
// / \
// subtree L1 : p2 subtree L2 : p3
//
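// For illustration: a query such as
//   SELECT * FROM T1 JOIN (T2 JOIN T3 ON p5) ON p1;
// matches the pattern above and is rewritten so that T1 is joined with T2
// first, and T3 is joined on top of that result.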
// -----------------------------------------------------------------------
Join * Join::leftLinearizeJoinTree(NormWA & normWARef,
TransformationType transformationType)
{
// Don't do this transformation if the user said they want the
// join order to be completely determined by the order the
// tables are specified in the query.
if (CURRSTMT_OPTDEFAULTS->joinOrderByUser())
return this;
// Condition for applying this rule:
// I must be an Inner Join and my right child must be an Inner Join,
// a Routine Join, or a Left Join.
if ( (getOperatorType() != REL_JOIN) OR
( (child(1)->getOperatorType() != REL_JOIN) AND
(child(1)->getOperatorType() != REL_ROUTINE_JOIN) AND
(child(1)->getOperatorType() != REL_LEFT_JOIN) ) )
return this;
// R1 is my current right child
Join * R1 = (Join *)(child(1).getPtr());
// Left linearize R1
R1->leftLinearizeJoinTree(normWARef, transformationType);
// Assign the left child of R1 to become my new right child
child(1) = R1->child(0);
// If we pulled anything up or R1 has a join predicate, we need to
// run recursive pushdown at the RelRoot to make sure we don't end up
// with predicates on unions and TSJs. This will happen at the end
// of the SQO phase so we don't do any unnecessary tree walks.
if ((!selectionPred().isEmpty() || !R1->joinPred().isEmpty() ) &&
(R1->child(0)->getOperatorType() == REL_UNION ||
R1->child(1)->getOperatorType() == REL_UNION))
normWARef.setRequiresRecursivePushdown(TRUE);
// Pull up predicates so that VEGPredicates and predicates that contain
// VEGReferences can potentially be distributed more extensively amongst
// my subtrees.
R1->selectionPred() += getSelectionPred();
selectionPred().clear();
// R1 inherits all the values that I received as inputs and should
// produce all the values that I was producing as output.
R1->getGroupAttr()->setCharacteristicInputs
(getGroupAttr()->getCharacteristicInputs());
R1->getGroupAttr()->setCharacteristicOutputs
(getGroupAttr()->getCharacteristicOutputs());
// Recompute my own Inputs and Outputs.
primeGroupAttributes();
// Temporarily set the left child after the rotation so that I could push
// predicate down to it before starting left linearization again (due to
// the new right child).
//
R1->child(0) = this;
ValueIdSet availableInputs;
availableInputs = R1->getGroupAttr()->getCharacteristicInputs();
// If this method is being called during subquery unnesting
// then logical properties need to be resynthesized and
// pushdown rules are slightly different
if (transformationType == UNNESTING)
{
ValueIdSet outerReferences ;
availableInputs.getOuterReferences(outerReferences);
availableInputs -= outerReferences ;
ValueIdSet nonPredExpr;
if (R1->getOperatorType() == REL_ROUTINE_JOIN)
nonPredExpr += R1->child(1)->getGroupAttr()->getCharacteristicInputs() ;
R1->pushdownCoveredExprSQO(R1->getGroupAttr()->getCharacteristicOutputs(),
availableInputs,
R1->selectionPred(),
nonPredExpr,
TRUE, // keepPredsNotCoveredByLeftChild
TRUE); // keepPredsNotCoveredByRightChild
R1->getGroupAttr()->clearLogProperties();
getGroupAttr()->clearLogProperties();
R1->synthLogProp();
}
else if (transformationType == SEMI_JOIN_TO_INNER_JOIN)
{
R1->pushdownCoveredExpr(R1->getGroupAttr()->getCharacteristicOutputs(),
availableInputs,
R1->selectionPred());
R1->getGroupAttr()->clearLogProperties();
getGroupAttr()->clearLogProperties();
R1->synthLogProp();
}
else
{
// Pushdown predicates that were pulled up
R1->pushdownCoveredExpr(R1->getGroupAttr()->getCharacteristicOutputs(),
availableInputs,
R1->selectionPred());
}
// I must left-linearize myself once again because I have acquired a new
// right child.
//
R1->child(0) = leftLinearizeJoinTree(normWARef, transformationType);
return R1; // the tree was indeed left linearized
} // Join::leftLinearizeJoinTree()
// -----------------------------------------------------------------------
// Join::normalizeNode()
// -----------------------------------------------------------------------
RelExpr * Join::normalizeNode(NormWA & normWARef)
{
if (nodeIsNormalized())
return this;
markAsNormalized();
NABoolean isATSJ = isTSJ();
RelExpr * normalizedExpr = this; // default return value
//--------------------------------------------------------------------------------
// Create filternode on top of grandchild of a subquery TSJ to prevent pushdown
// of predicates. This is needed if the correlated subquery will be unnested.
//--------------------------------------------------------------------------------
if (candidateForSubqueryUnnest() &&
(child(1)->getOperatorType() == REL_GROUPBY))
{
createAFilterGrandChildIfNeeded(normWARef);
}
// -----------------------------------------------------------------
// Perform predicate pushdown.
// -----------------------------------------------------------------
pushdownCoveredExpr(getGroupAttr()->getCharacteristicOutputs(),
getGroupAttr()->getCharacteristicInputs(),
selectionPred());
if (CmpCommon::getDefault(NOT_IN_OUTER_OPTIMIZATION) == DF_ON)
{
//rewrite notin predicate
rewriteNotInPredicate();
}
// -----------------------------------------------------------------
// Normalize the left subtrees. Store pointers to the
// roots of the subtrees after normalization.
// -----------------------------------------------------------------
if (isFullOuterJoin())
normWARef.locateAndSetVEGRegion(this, 0 /* first child */);
child(0) = child(0)->normalizeNode(normWARef);
if (isFullOuterJoin())
normWARef.restoreOriginalVEGRegion();
// -----------------------------------------------------------------
// Normalize the right subtree in the proper VEGRegion
// -----------------------------------------------------------------
if (isLeftJoin() OR isAntiSemiJoin() OR isFullOuterJoin())
{
// -------------------------------------------------------------
// Locate and set the VEGRegion for the right subtree.
// -------------------------------------------------------------
if (isFullOuterJoin())
normWARef.locateAndSetVEGRegion(this, 1 /* second child */);
else
normWARef.locateAndSetVEGRegion(this);
child(1) = child(1)->normalizeNode(normWARef);
normWARef.restoreOriginalVEGRegion();
}
else
{
child(1) = child(1)->normalizeNode(normWARef);
}
fixEssentialCharacteristicOutputs();
// -----------------------------------------------------------------
// Transform a bushy tree of inner joins or a subtree in which
// a left join is the right child of an inner join into a
// left associative linear sequence of joins. Note that TSJs are
// not transformed.
// -----------------------------------------------------------------
normalizedExpr = leftLinearizeJoinTree(normWARef);
// ---------------------------------------------------------------------
// Convert a tsj to a join if the tsj is not for a write operation
// and if a value that is produced by the left subtree is not
// referenced in the right subtree.
// ---------------------------------------------------------------------
if (isATSJ AND NOT isTSJForWrite() AND //NOT isRoutineJoin() AND
NOT child(1)->getGroupAttr()->
getCharacteristicInputs().referencesOneValueFromTheSet
(child(0)->getGroupAttr()->getCharacteristicOutputs())
&& !getInliningInfo().isDrivingPipelinedActions()
&& !getInliningInfo().isDrivingTempInsert() // Triggers -
)
{
// Remember we used to be a RoutineJoin. This is used to determine
// what type of contexts for partitioning we will try in OptPhysRel.
if (isRoutineJoin()) setDerivedFromRoutineJoin();
convertToNotTsj();
// ---------------------------------------------------------------
// Transform a bushy tree of inner joins or a subtree in which
// a left join is the right child of an inner join into a
// left associative linear sequence of joins.
// ---------------------------------------------------------------
normalizedExpr = leftLinearizeJoinTree(normWARef);
}
normWARef.setExtraHubVertex(normalizedExpr);
return normalizedExpr;
} // Join::normalizeNode()
//--------------------------------------------------------------------------
// Join::createAFilterGrandChildIfNeeded()
// This filter node is created (if necessary) after transform but before
// normalization. Therefore inputs are minimal but outputs are maximal. Any
// predicates with outer references will be as high up in the tree as possible.
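// For illustration: in a query such as
//   SELECT * FROM T1 WHERE T1.a > (SELECT MAX(T2.b) FROM T2 WHERE T2.c = T1.c);
// the correlated predicate T2.c = T1.c is held in the Filter node above the
// scan of T2, keeping it available to the unnesting transformations instead of
// letting it be pushed all the way down into the scan.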
//---------------------------------------------------------------------------
void Join::createAFilterGrandChildIfNeeded(NormWA & normWARef)
{
// caller has already verified that child(1) is a groupby
CMPASSERT(child(1)->getOperatorType() == REL_GROUPBY) ;
Filter *predFilterNode = NULL;
NABoolean doNotUnnest = FALSE ;
GroupByAgg * gbyNode = (GroupByAgg *) child(1)->castToRelExpr();
RelExpr * oldRightGrandChild = child(1)->child(0)->castToRelExpr();
NABoolean candidateForLeftJoin = candidateForSubqueryLeftJoinConversion();
NABoolean nestedAggInSubQ = FALSE;
GroupByAgg * subQGby = NULL ;
if (oldRightGrandChild->getOperator().match(REL_GROUPBY))
{
subQGby = (GroupByAgg *) oldRightGrandChild ;
oldRightGrandChild = oldRightGrandChild->child(0)->castToRelExpr();
nestedAggInSubQ = TRUE ;
}
if (oldRightGrandChild->getOperator().match(REL_ANY_SEMIJOIN) ||
oldRightGrandChild->getOperator().match(REL_ANY_ANTI_SEMIJOIN) ||
oldRightGrandChild->getOperator().match(REL_GROUPBY))
{
// we do not want to unnest queries that have a semijoin or a group by
// as a child of the groupby.
doNotUnnest = TRUE;
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
{
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: Right grandchild of TSJ is a semijoin or has more than one group by");
}
}
// -----------------------------------------------------------------------
// Check to see if we have any Outer References in our selection predicate
// If we do we want to create a Filter Node on top of ourselves to hold
// the Outer Reference predicate.
// ------------------------------------------------------------------------
ValueIdSet outerReferences, nonLocalPreds;
gbyNode->getGroupAttr()->getCharacteristicInputs().
getOuterReferences(outerReferences);
// We found that for left joins, we don't want to pull up correlated
// predicates from the selection predicate if there are also correlated
// predicates in the join predicate. This is a fix for solution
// 10-090206-8977.
if ( (doNotUnnest == FALSE) &&
oldRightGrandChild->getOperator().match(REL_ANY_LEFT_JOIN))
{
Join *myself = (Join *) oldRightGrandChild;
if (((Join *) oldRightGrandChild)->joinPred().
getReferencedPredicates(outerReferences, nonLocalPreds))
{
doNotUnnest = TRUE;
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
{
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: Filter child is leftJoin with outerreferences in joinPred ");
}
}
}
if (doNotUnnest == FALSE)
{
nonLocalPreds.clear();
oldRightGrandChild->selectionPred().getReferencedPredicates
(outerReferences, nonLocalPreds) ;
if (nestedAggInSubQ)
subQGby->selectionPred().getReferencedPredicates
(outerReferences, nonLocalPreds);
if (!nonLocalPreds.isEmpty())
{
// Right grandchild selection pred has outer references
// Like the case for the join predicates above, we need to
// make sure we don't create a filter with aggregates in it.
// The problem we run into is if the groupBy that produced the
// aggregate gets moved above the join.
if (candidateForLeftJoin ||
oldRightGrandChild->getOperator().match(REL_ANY_LEFT_JOIN))
{
for ( ValueId filterVid = nonLocalPreds.init();
nonLocalPreds.next(filterVid) ;
nonLocalPreds.advance(filterVid))
{
// Check to see if the filter predicates contains any
// aggregates, if so do not create filter
if (filterVid.getItemExpr()->containsAnAggregate())
{
doNotUnnest = TRUE;
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
{
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: Filter preds would have contained aggregates ");
}
}
}
}
if ((doNotUnnest == FALSE) && candidateForLeftJoin &&
(CmpCommon::getDefault(SUBQUERY_UNNESTING_P2) != DF_INTERNAL) &&
((normWARef.getLeftJoinConversionCount() >= 2)||nestedAggInSubQ))
{
doNotUnnest = TRUE;
// For phase 2 we only unnest 2 subqueries
// containing NonNullRejecting Predicates. Later we will ensure
// that these 2 subqueries are not nested.
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
{
if (!nestedAggInSubQ)
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Skipping unnesting of Subquery due to NonNullRejecting Predicates in more than two subqueries");
else
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Skipping unnesting of Subquery since we have both NonNullRejecting predicate and nested aggregate in subquery.");
}
}
// create the filter node
if (doNotUnnest == FALSE)
{
predFilterNode = new (CmpCommon::statementHeap())
Filter(oldRightGrandChild);
predFilterNode->selectionPred() += nonLocalPreds;
oldRightGrandChild->selectionPred() -= nonLocalPreds;
if (nestedAggInSubQ)
{
subQGby->selectionPred() -= nonLocalPreds;
predFilterNode->getGroupAttr()->setCharacteristicInputs
(subQGby->getGroupAttr()->getCharacteristicInputs());
subQGby->recomputeOuterReferences();
}
else
{
predFilterNode->getGroupAttr()->setCharacteristicInputs
(oldRightGrandChild->getGroupAttr()->getCharacteristicInputs());
}
oldRightGrandChild->recomputeOuterReferences();
// If the nodes below us require the same outer references as inputs
// as before we don't want to do the unnesting
if (oldRightGrandChild->getGroupAttr()->getCharacteristicInputs() ==
predFilterNode->getGroupAttr()->getCharacteristicInputs())
{
// disassociate the oldGrandChild from the Filter
predFilterNode->child(0) = (RelExpr *) NULL;
// put the predicate back.
oldRightGrandChild->selectionPred() += nonLocalPreds;
// remember that we decided not to unnest.
doNotUnnest = TRUE;
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Skipping unnesting of Subquery due to subtree below filter requires same outer references as filter");
}
else
{
// Recompute inputs/outputs
oldRightGrandChild->primeGroupAttributes();
predFilterNode->primeGroupAttributes();
if (candidateForLeftJoin)
normWARef.incrementLeftJoinConversionCount();
if (nestedAggInSubQ)
gbyNode->child(0)->child(0) = predFilterNode;
else
gbyNode->child(0) = predFilterNode;
}
}
}
else
{
// right grandchild has no outer refs in selection pred.
// Look in the groupby node now
if (gbyNode->selectionPred().getReferencedPredicates
(outerReferences, nonLocalPreds) ||
gbyNode->aggregateExpr().getReferencedPredicates
(outerReferences, nonLocalPreds))
{
// we know group expr is empty as this is a scalar grby.
// do nothing as we have something to unnest (i.e. do not set the doNotUnnest flag)
// unless we need Phase2 and we have already marked one level.
if (candidateForLeftJoin &&
(CmpCommon::getDefault(SUBQUERY_UNNESTING_P2) != DF_INTERNAL) &&
(normWARef.getLeftJoinConversionCount() >= 1))
{
doNotUnnest = TRUE;
// For phase 2 we only unnest 1 level of subqueries
// containing NonNullRejecting Predicates
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Skipping unnesting of Subquery due to NonNullRejecting Predicates in more than one subquery");
}
}
else
{
// no outer ref in grandchild's selection pred and in grby's (child) selection pred or
// aggregate expr.
doNotUnnest = TRUE;
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: No Correlation found");
}
}
}
if (doNotUnnest)
{
setCandidateForSubqueryUnnest(FALSE);
normWARef.decrementCorrelatedSubqCount();
}
return ;
}
// Join::createAFilterGrandChildIfNeeded()
/* --------------------------------------------------------------------------
Join::eliminateRedundantJoin()
-----------------------------------------------------------------------------
Performs one of the following transformations, if this node is suitably marked
1) If predicates have been marked for removal
Join {selection_pred : p1,p2,p3,...pn} ----> Join {selection_pred : p3,...pn}
where p1 and p2 are equi join predicates that are known to be true due to a
foreign_key-unique_key relationship
2) If the children of the join are marked for removal
parent
| parent
Join |
/ \ ------> X
X Y
where the node Y has been marked for elimination by the synthLogPhase. Note that
instead of node Y, node X may also be marked for elimination and a similar
transformation is performed in that case too.
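For illustration (cases 1 and 2): with a referential constraint from
ORDERS.custkey to the unique key CUSTOMER.custkey, a query such as
  SELECT O.* FROM ORDERS O JOIN CUSTOMER C ON O.custkey = C.custkey;
needs neither the equi join predicate nor the CUSTOMER child, so the join
can be replaced by its ORDERS child.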
3) If it's a left join and has been markedForElimination by the normalize phase
then
parent
| parent
LeftJoin |
/ \ ------> X
X Y
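For illustration (case 3): in
  SELECT T1.* FROM T1 LEFT JOIN T2 ON T1.a = T2.a;
where T2.a is unique and no values from T2 are needed above the join, the
LeftJoin can be replaced by its left child X.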
Note that in this case, it is only possible to eliminate the right child. */
RelExpr* Join::eliminateRedundantJoin(NormWA &normWARef)
{
if (getOperatorType() == REL_JOIN)
{
RelExpr *result = NULL;
GroupAttributes *ga = NULL;
selectionPred() -= getPredicatesToBeRemoved();
equiJoinPredicates_ -= getPredicatesToBeRemoved();
clearPredicatesToBeRemoved();
if ((child(1).getPtr())->markedForElimination())
{
result = child(0);
ga = child(1)->getGroupAttr();
}
else if ((child(0).getPtr())->markedForElimination())
{
result = child(1);
ga = child(0)->getGroupAttr();
}
if (result)
{
CMPASSERT(selectionPred().isEmpty() && joinPred().isEmpty());
NABoolean found = FALSE;
TableDesc *tabDesc = NULL;
const ValueIdSet &constraints = ga->getConstraints();
for (ValueId id = constraints.init();
constraints.next(id) && NOT found;
constraints.advance(id) )
{
if (id.getItemExpr()->getOperatorType() == ITM_COMP_REF_OPT_CONSTRAINT)
{
ComplementaryRefOptConstraint * compRIConstraint =
(ComplementaryRefOptConstraint *) id.getItemExpr();
if (compRIConstraint->getIsMatchedForElimination())
{
tabDesc = compRIConstraint->getTableDesc();
found = TRUE;
}
}
}
CMPASSERT(found);
const ValueIdList &allCols = tabDesc->getColumnList();
for (CollIndex i = 0; i < allCols.entries(); i++)
{
ItemExpr *ie = allCols[i].getItemExpr();
CMPASSERT(ie->getOperatorType() == ITM_BASECOLUMN)
const ValueIdSet &eic = ((BaseColumn *)ie)->getEIC();
normWARef.deleteVEGMember(((BaseColumn *)ie)->getValueId());
for (ValueId eqVid = eic.init(); eic.next(eqVid); eic.advance(eqVid))
normWARef.deleteVEGMember(eqVid);
}
return result;
}
}
else if (markedForElimination() && (getOperatorType() == REL_LEFT_JOIN))
{
TableDescList tableDescs(CmpCommon::statementHeap());
child(1)->getAllTableDescs(tableDescs);
CMPASSERT(tableDescs.entries() != 0);
normWARef.locateAndSetVEGRegion(this);
for (CollIndex j = 0; j < tableDescs.entries(); j++)
{
const ValueIdList &allCols = tableDescs[j]->getColumnList();
for (CollIndex i = 0; i < allCols.entries(); i++)
{
ItemExpr *ie = allCols[i].getItemExpr();
CMPASSERT(ie->getOperatorType() == ITM_BASECOLUMN)
const ValueIdSet &eic = ((BaseColumn *)ie)->getEIC();
normWARef.deleteVEGMember(((BaseColumn *)ie)->getValueId());
for (ValueId eqVid = eic.init(); eic.next(eqVid); eic.advance(eqVid))
normWARef.deleteVEGMember(eqVid);
}
}
normWARef.restoreOriginalVEGRegion();
return child(0) ; // outer joins
}
return this;
} // Join::eliminateRedundantJoin()
void RelExpr::getAllTableDescs(TableDescList &tableDescs)
{
Int32 arity = getArity();
if (arity == 0)
{
switch (getOperatorType())
{
case REL_SCAN:
tableDescs.insert(((Scan *)this)->getTableDesc());
break;
case REL_STORED_PROC:
tableDescs.insert(((TableValuedFunction *)this)->getTableDesc());
break;
default:
break;
}
}
else
{
for (Int32 i = 0; i < arity; i++)
{
child(i)->getAllTableDescs(tableDescs);
}
}
}
/*-------------------------------------------------------------------------
Join::transformSemiJoin()
---------------------------------------------------------------------------
This method transforms a semi join to an inner join.
a) In the simplest case, which is enabled by default the right child is
unique in the joining column and the semi join can be simply translated
into a join. An example query is
select t1.a
from t1
where t1.b in (select t2.a
from t2) ;
Here t2.a is a unique key of table t2.
The following transformation is made
Semi Join {pred : t1.b = t2.a} Join {pred : t1.b = t2.a}
/ \ -------> / \
/ \ / \
Scan t1 Scan t2 Scan t1 Scan t2
b) If the right child is not unique in the joining column then
we transform the semijoin into an inner join followed by a groupby
as the join's right child. This transformation is enabled by default
only if the right side is an IN list, otherwise a CQD has to be used.
select t1.a
from t1
where t1.b in (1,2,3,4,...,101) ;
Semi Join {pred : t1.b = t2.a} Join {pred : t1.b = InList.col}
/ \ -------> / \
/ \ / \
Scan t1 Scan t2 Scan t1 GroupBy {group cols: InList.col}
|
|
TupleList
*/
RelExpr* Join::transformSemiJoin(NormWA& normWARef)
{
// SQO is called in a loop sometimes.
// We do not wish to apply this transformation more than once.
setCandidateForSemiJoinTransform(FALSE);
// If this is still a semi TSJ (i.e. there is correlation) or if there
// are no equijoin predicates, this transformation cannot be applied.
if ((getOperatorType() == REL_SEMITSJ) ||
getEquiJoinPredicates().isEmpty())
{
return this ;
}
// apply the transformation described in item a) above
ValueIdSet equiJoinCols1 = getEquiJoinExprFromChild1();
if ((NOT equiJoinCols1.isEmpty()) &&
child(1)->getGroupAttr()->isUnique(equiJoinCols1))
{
RelExpr * linearizedExpr = this ;
// in this case no additional groupBy is necessary,
// simply changing semijoin --> join
// will suffice.
setOperatorType(REL_JOIN) ;
// move preds from joinPred to selection pred.
selectionPred() += joinPred();
joinPred().clear() ;
linearizedExpr = leftLinearizeJoinTree(normWARef,
SEMI_JOIN_TO_INNER_JOIN);
return linearizedExpr ;
}
/* Apply the transformation described in item b) above.
The transformation below is done if there are no non-equijoin preds either
and the inner side has no base tables (i.e. is an IN LIST) or if we have
used a CQD to turn this transformation on for a specific user. For the general
case we are not certain if this transformation is always beneficial, so it is
not on by default */
ValueIdSet preds ;
preds += joinPred();
preds += selectionPred();
preds -= getEquiJoinPredicates() ;
if (preds.isEmpty() &&
((child(1)->getGroupAttr()->getNumBaseTables() == 0) ||
(CmpCommon::getDefault(SEMIJOIN_TO_INNERJOIN_TRANSFORMATION) == DF_ON)))
{
CollHeap *stmtHeap = CmpCommon::statementHeap() ;
setOperatorType(REL_JOIN) ;
// we need a group by below the transformed join
GroupByAgg *newGrby = new (stmtHeap) GroupByAgg(
child(1)->castToRelExpr()) ;
newGrby->setGroupAttr(new (stmtHeap)
GroupAttributes(*(child(1)->getGroupAttr())));
newGrby->getGroupAttr()->clearLogProperties();
newGrby->setGroupExpr(equiJoinCols1);
child(1) = newGrby ;
newGrby->synthLogProp(&normWARef);
// move preds from joinPred to selection pred.
selectionPred() += joinPred();
joinPred().clear() ;
//synthesize logical props for the new nodes.
return this ;
}
return this ; // semijoin has non-equijoin predicates or this
// transformation is OFF
} // Join::transformSemiJoin()
// -----------------------------------------------------------------------
// copyNode()
// This method creates a copy of the original RelExpr.
// Sideffects: no change to the old Node.
// newNode will have a new GroupAttributes structure allocated
// and initialized with the information from the old one.
// Similarly the newNode will initialize its RETDesc to the
// same as the oldNode.
// -----------------------------------------------------------------------
static RelExpr * copyNode(RelExpr* oldNode, CollHeap* heap)
{
RelExpr* newNode = oldNode->copyTopNode(NULL, heap);
newNode->setGroupAttr(new (heap)
GroupAttributes(*(oldNode->getGroupAttr())));
newNode->setRETDesc(oldNode->getRETDesc());
newNode->getGroupAttr()->setLogExprForSynthesis(newNode);
return newNode;
} // copyNode()
// -----------------------------------------------------------------------
// copyNodeAndSetChildren()
// This method creates a copy of the original RelExpr and also initializes
// the copy's children to be identical to that of the original.
// Sideffects: no change to the old Node.
// see sideffects of copyNode.
// newNode will have its children initialized to the same
// as that of the original node.
// -----------------------------------------------------------------------
static RelExpr * copyNodeAndSetChildren(RelExpr* oldNode, CollHeap* heap)
{
RelExpr* newNode = copyNode(oldNode,heap);
for(Int32 i = 0; i < oldNode->getArity(); i++)
{
newNode->child(i) = oldNode->child(i) ;
}
return newNode;
} // copyNodeAndSetChildren()
// -----------------------------------------------------------------------
// Join::pullUpPredsWithAggrs()
//
// For certain PullUpGroupBy and all MoveUpGroupby transformations
// a GroupBy node moves over a Join node (i.e. the GroupBy which
// used to be a child of the Join, now becomes its parent). For
// such a tree transformation to work, any predicate in the Join
// that references aggregates in the GroupBy must now be moved into
// the GroupBy node. This method performs this task.
//
//
// Sideffects: Will move join selection predicates that contain
// aggregates into the grbyNode.
//
// Returns: TRUE: if we can move the aggregates from the join's selection
// predicate to the groupBy's selection predicate.
// FALSE: If the join contains join predicates and the join
// predicates contain aggregate expressions from the groupBy.
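// For illustration: a predicate such as MAX(T2.b) > T1.c sitting on the Join
// cannot be evaluated there once the GroupBy that computes MAX(T2.b) becomes
// the Join's parent, so it is moved into the GroupBy's selection predicate.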
// -----------------------------------------------------------------------
NABoolean Join::pullUpPredsWithAggrs(GroupByAgg* grbyNode, MapValueIds * mapNode)
{
// We need to check Left Joins too, but we cannot pull any predicates
// up from the join preds, so if we find aggregates in the join preds
// indicate a failure so we do not put the groupBy on top of the join.
if (NOT joinPred().isEmpty())
{
ValueIdSet predicatesThatNeedsToBePulled;
if (joinPred().getReferencedPredicates
(grbyNode->aggregateExpr(), predicatesThatNeedsToBePulled))
{
// Skip this subquery.
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
{
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: Join has aggregates in its predicates.");
}
return FALSE;
}
}
if (NOT selectionPred().isEmpty())
{
if (mapNode == NULL)
{
ValueIdSet predicatesToPullUp;
if (selectionPred().getReferencedPredicates
(grbyNode->aggregateExpr(), predicatesToPullUp))
{
selectionPred() -= predicatesToPullUp ;
grbyNode->selectionPred() += predicatesToPullUp ;
}
}
else
{
ValueIdMap *copyOfMap = new (CmpCommon::statementHeap())
ValueIdMap(mapNode->getMap());
for (ValueId vid = selectionPred().init();
selectionPred().next(vid);
selectionPred().advance(vid))
{
ValueId bottomMapId;
copyOfMap->rewriteValueIdDown(vid, bottomMapId);
// Only if our outputs will actually be different, do we want to
// create a map.
if ( vid != bottomMapId )
{
ValueId ignoreVid;
ValueIdSet mapPullUpPred( bottomMapId);
ValueIdSet mapPredicatesToPullUp;
if (mapPullUpPred.getReferencedPredicates
(grbyNode->aggregateExpr(), mapPredicatesToPullUp))
{
selectionPred() -= vid;
grbyNode->selectionPred() += mapPredicatesToPullUp ;
if (getGroupAttr()->getCharacteristicOutputs().
referencesTheGivenValue(vid, ignoreVid, FALSE,FALSE))
{
// Need to add a Map Entry in the MapNode
mapNode->addMapEntry(vid, bottomMapId);
}
}
}
}
}
}
return TRUE;
} // Join::pullUpPredsWithAggrs()
// -----------------------------------------------------------------------
// GroupByAgg::computeGroupExpr()
//
// The group expression for a pulledUp or movedUp GroupBy node
// is computed from a seed ValueIdSet and a superSet ValueIdSet.
// The seed valueId set is that starting set of values that need
// to be in the groupExpr. For the pullUpGroupBy transformation this
// is the set of uniqueCols from the left child. The superSet is then
// used to add more values to the groupExpr. Items in the output or
// having clause that are referenced by items in the superset are also
// added to the groupExpr.
//
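// For illustration: in the pullUpGroupBy case the seed might be the unique
// columns of the left child, e.g. {T1.pk}; a left-child column such as T1.b
// (from the superSet) that is referenced by the having predicate or by the
// characteristic outputs is then added to the group expression as well.
//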
// Sideffects: Changes the groupBy's group expression
// -----------------------------------------------------------------------
void GroupByAgg::computeGroupExpr(const ValueIdSet& seed,
ValueIdSet& superSet,
NormWA& normWARef)
{
ValueIdSet duplicates;
ValueIdSet reqGrpValues = seed ;
reqGrpValues += leftUniqueExpr() ;
reqGrpValues.accumulateReferencedValues(
superSet, selectionPred());
reqGrpValues.accumulateReferencedValues(
superSet, getGroupAttr()->getCharacteristicOutputs());
// Need to make sure we retain original groupExpr() for
// cases where we have a semijoin->GroupBy->Filter
// In this case the group Expression will not be empty
// initially like it is for ScalarAggs, and so we have to
// make sure we keep it. However due to moveUpGroupByTransformation
// we have to make sure we remove duplicates.
duplicates = reqGrpValues.intersect(groupExpr());
if (duplicates.isEmpty())
reqGrpValues += groupExpr();
else
reqGrpValues -= duplicates;
addGroupExpr(reqGrpValues);
groupExpr().normalizeNode(normWARef) ;
} // GroupByAgg::computeGroupExpr()
/*-----------------------------------------------------------------------
Join::pullUpGroupByTransformation()
// The PullUpGroupBy transformation is one of the two main transformations
// applied while unnesting a subquery. For a single level subquery this is
// the only transformation required for subquery unnesting.
// X and Y denote arbitrary RelExprs.
// The TSJ has to be one introduced while flattening out a subquery
// in the Transform phase. Under some circumstances the TSJ can be transformed
// into a Join by the time it gets to this method. The Filter node is
// introduced during Normalization to prevent pushdown of predicates with
// outerReferences
//
// TSJ GroupBy {pred3}(grouping cols:
// / \ | cluster_key of X (leftUniqueCols)+
// / \ | other necessary columns of X)
// X ScalarAgg {pred3} --> Join {pred2}
// | / \
// | / \
// Filter {pred2} X Y {pred1}
// |
// |
// Y {pred1}
//
//The same tree as above but in terms of the local variables used in the code below
//
// this newGrby {pred3}(grouping cols:
// / \ | cluster_key of newLeftChild (leftUniqueCols)+
// / \ | other necessary columns of newLeftChild)
//oldLeft oldGB {pred3}
//Child --> newJoin {pred2}
// | / \
// | / \
// Filter {pred2} newLeft newRight {pred1}
// | Child Child
// |
// oldGBGrandChild {pred1}
//
//
// Expects: RelExpr tree as seen above to the left.
// Sideffects: if successful, returns a new groupBy with the
// a copy of join as the child. The original tree has not changed.
// The predicates in the new groupBy and the new Join will have
// changed according to the comments above.
//
// If there is an explicit groupby in the subquery the transformation above is extended as
// TSJ GroupBy {pred3,agg2(agg1)}(grouping cols:
// / \ | cluster_key of X (leftUniqueCols)+
// / \ | other necessary columns of X)
// X ScalarAgg {pred3} --> SubQ_GroupBy {agg1} (grouping cols: g1 +
// | {agg2(agg1)} | cluster_key of X (leftUniqueCols) +
// | | other necessary columns of X)
// SubQ_GroupBy {agg1} newJoin {pred2}
// | {grouping cols: g1} / \
// | / \
// Filter {pred2} newLeft newRight {pred1}
// | Child Child
// |
// Y {pred1}
//
// If there is an explicit groupby in the subquery then the flag nestedAggInSubQ is set.
------------------------------------------------------------------------------*/
GroupByAgg* Join::pullUpGroupByTransformation(NormWA& normWARef)
{
CollHeap *stmtHeap = CmpCommon::statementHeap() ;
RelExpr *oldGB = child(1)->castToRelExpr();
// note that typically child of oldGB is actually a Filter node, here
// oldGBgrandchild is the child of oldGB before the Filter was added.
RelExpr *oldGBgrandchild ;
NABoolean nestedAggInSubQ = FALSE;
if ((oldGB->child(0)->getOperatorType() == REL_GROUPBY) &&
(oldGB->child(0)->child(0)->getOperatorType() == REL_FILTER))
{
oldGBgrandchild = oldGB->child(0)->child(0)->child(0)->castToRelExpr();
nestedAggInSubQ = TRUE;
}
else if (oldGB->child(0)->getOperatorType() == REL_FILTER)
oldGBgrandchild = oldGB->child(0)->child(0)->castToRelExpr();
else
oldGBgrandchild = oldGB->child(0)->castToRelExpr();
RelExpr *filterParent = nestedAggInSubQ ?
oldGB->child(0)->castToRelExpr() : oldGB;
RelExpr *oldLeftChild = child(0)->castToRelExpr();
// Determine a set of unique columns for the left sub-tree.
// Note: Scans and joins synthesize uniqueness constraints even for
// columns that are not in the characteristic outputs. Other
// operators such as groupby or union don't. We make use of these
// extra uniqueness constraints here. Any needed columns not yet
// added to the characteristic outputs will be added later, in
// method getMoreOutputsIfPossible().
ValueIdSet leftUniqueCols ;
if (NOT (child(0)->getGroupAttr()->findUniqueCols(leftUniqueCols)))
{
// Could not find a set of unique cols.
// If the left sub-tree contains a UNION/TRANSPOSE/SEQUENCE or SAMPLE
// then we will fail to unnest the subquery for this reason.
filterParent->eliminateFilterChild();
// left child does not have a unique constraint
// cannot unnest this subquery
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: Left child does not have a unique constraint");
// Things to consider (referring to the picture above): If all of the
// following are true:
// * {pred2} has only equals/VEG predicates of the form X.col = Y.col
// * {aggr} does not have any outer references
// * {pred3} does not have any outer references
//
// then we could do an alternative transformation, not yet implemented:
//
// TSJ Join {pred2: X.a=Y.b, ...}
// / \ / \
// / \ / \
// X ScalarAgg {pred3} --> X grby {Y.b, ...} {pred3}
// | {aggr} \ {aggr}
// | \
// Filter {pred2: X.a=Y.b, ...} Y {pred1}
// |
// |
// Y {pred1}
//
// Pros: - The groupby is already at a place where it will likely
// end up in the optimal plan
// Cons: - We don't get a nice join backbone with all base tables
//
// Cases where we could attempt this transformation:
// - We fail to find a unique key for X (i.e. we reach here)
// - pred2 has a very high selectivity, making newJoin (in the picture
// at the top of this method) similar to a cartesian product
return NULL ;
}
// If the subquery needs left joins, some additional checks are done here to
// see if the pull up groupby transformation can be done while preserving
// semantic correctness. No changes for left joins or for preserving nulls
// are made in this method.
if (candidateForSubqueryLeftJoinConversion())
{
if (NOT selectionPred().isEmpty())
{
// Selection predicates in a Join that needs to be converted to a Left Join
// can be tricky, particularly if they contain aggregates.
// We skip such a subquery for now.
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
{
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: Join with selectionPreds cannot be converted to LeftJoin.");
}
filterParent->eliminateFilterChild();
return NULL ;
}
}
// make copies of GroupBy, Join, Joins left and right children before
// making any changes. All changes will be made on the copied nodes.
// If for some reason unnesting cannot be completed, the original node
// is returned.
// copy the left child of Join
RelExpr * newLeftChild = copyNodeAndSetChildren(oldLeftChild, stmtHeap);
// copy the right child of Join
RelExpr * newRightChild = copyNodeAndSetChildren(oldGBgrandchild, stmtHeap);
// copy the Join
Join * newJoin = (Join *) copyNode(this, stmtHeap);
newJoin->getGroupAttr()->clearLogProperties(); //logical prop. must be resynthesized
// New GroupBy is a copy of the old Scalar Aggregate
GroupByAgg *newGrby = (GroupByAgg *) copyNode(oldGB, stmtHeap);
newGrby->setRETDesc(getRETDesc());
newGrby->getGroupAttr()->clearLogProperties(); //logical prop. must be resynthesized
GroupByAgg *newSubQGrby = NULL;
if (nestedAggInSubQ)
{
newSubQGrby = (GroupByAgg *) copyNode(oldGB->child(0)->castToRelExpr(),
stmtHeap);
newSubQGrby->getGroupAttr()->clearLogProperties();
}
// For multi-level subqueries it is possible that this Join is
// not a TSJ, but still contains outer references. This happens
// when right child does not need any values from the left child,
// but it does need values from a parent subquery. If the
// selection predicate (or join predicate)
// of this Join needs any aggregate outputs
// from its old groupBy child, then those predicates need
// to move up to the new parent GroupBy node.
NABoolean safeToPullUpGrby;
safeToPullUpGrby = newJoin->pullUpPredsWithAggrs(newGrby);
if (NOT safeToPullUpGrby )
{
// The join contains aggregates
// Skip this subquery.
filterParent->eliminateFilterChild();
return NULL ;
}
if (nestedAggInSubQ)
{
safeToPullUpGrby = newJoin->pullUpPredsWithAggrs(newSubQGrby);
if (NOT safeToPullUpGrby )
{
filterParent->eliminateFilterChild();
return NULL ;
}
// inputs of newSubQGrby are the same as the old
// TSJ/Join that we are replacing. Outputs are join's + aggregates
newSubQGrby->getGroupAttr()->addCharacteristicOutputs
(getGroupAttr()->getCharacteristicOutputs());
newSubQGrby->getGroupAttr()->setCharacteristicInputs
(getGroupAttr()->getCharacteristicInputs());
newSubQGrby->child(0) = newJoin ;
}
// inputs and outputs of new GroupBy are same as the old
// TSJ/Join that we are replacing
newGrby->getGroupAttr()->setCharacteristicOutputs
(getGroupAttr()->getCharacteristicOutputs());
newGrby->getGroupAttr()->setCharacteristicInputs
(getGroupAttr()->getCharacteristicInputs());
if (nestedAggInSubQ)
newGrby->child(0) = newSubQGrby;
else
newGrby->child(0) = newJoin ;
// set the grouping cols for new GroupBy
// grouping cols for new GroupBy are
// unique cols of X +
// cols of X that are needed to evaluate its selection pred. +
// cols of X that part of the characteristic outputs
newGrby->setLeftUniqueExpr(leftUniqueCols);
ValueIdSet oldLeftChildOutputs
(oldLeftChild->getGroupAttr()->getCharacteristicOutputs());
newGrby->computeGroupExpr(
leftUniqueCols,
oldLeftChildOutputs,
normWARef
);
if (nestedAggInSubQ)
{
newSubQGrby->getGroupAttr()->
addCharacteristicOutputs(newGrby->groupExpr());
newSubQGrby->computeGroupExpr(newGrby->groupExpr(),
oldLeftChildOutputs,
normWARef);
}
// The newGrby cannot be a scalar groupby under any circumstance
// So if the group expression is empty, add a constant to the
// list of the grouping columns, so that this groupby is not scalar
// i.e. does not produce a NULL value for empty groups.
if (newGrby->groupExpr().isEmpty())
{
ItemExpr *tf = new (stmtHeap) ConstValue(0);
tf->synthTypeAndValueId(TRUE);
newGrby->groupExpr() += tf->getValueId();
}
// connect newJoin to newX and newY
newJoin->child(0) = newLeftChild ;
newJoin->child(1) = newRightChild ;
newJoin->setOperatorType(REL_JOIN) ;
// pull up predicates in filter to newJoin
// do not change the filter itself in case we
// decide to not unnest.
if (oldGB->child(0)->getOperatorType() == REL_FILTER)
newJoin->selectionPred() += oldGB->child(0)->castToRelExpr()->selectionPred();
else if (nestedAggInSubQ &&
oldGB->child(0)->child(0)->getOperatorType() == REL_FILTER)
newJoin->selectionPred() +=
oldGB->child(0)->child(0)->castToRelExpr()->selectionPred();
// If the new GroupBy contains any outer references (i.e. requiredInputs
// that are not provided by the user) then mark it as needing
// the MoveUpGroupBy transformation.
ValueIdSet outerReferences;
newGrby->getGroupAttr()->getCharacteristicInputs().
getOuterReferences(outerReferences);
if (NOT(outerReferences.isEmpty()))
{
if (!nestedAggInSubQ)
newGrby->setRequiresMoveUp(TRUE) ;
else
{
filterParent->eliminateFilterChild();
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: More than 1 level of nested subquery and nested aggregate are both present");
return NULL;
}
}
return newGrby ;
} // Join::pullUpGroupByTransformation()
/*-----------------------------------------------------------------------
GroupByAgg::nullPreservingTransformation()
// The Null preserving transformation is applied to the output of the
// PullUpGroupBy transformation, if the subquery has null preserving
// predicates. According to the Dayal-Murali algorithm such subqueries
// require a Left Join instead of a Join. The effect of this transformation
// is shown below
//
//
// GroupBy {pred3} MapValueId {topMap:original outputs of GroupBy
// | | bottomMap:new NullInstantiated outputs of GroupBy}
// | |
// Join {SP:pred2} ----------> GroupBy {pred3, aggregateExpr and groupExpr
// / \ | expressed in terms of nullInstantiated output of LeftJoin}
// / \ |
// X Y {pred1} LeftJoin{JP:pred2}
// / \
// / \
// X Y{pred1}
//
// The MapValueId node shown here is present only if the GroupBy has outputs
// from the right side of the Join. The aggregateExpr in the transformed GroupBy
// has new aggregates if the original aggregate contains count or oneTrue
// aggregates.
//
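// A typical case needing this transformation, for illustration:
//   SELECT * FROM T1 WHERE (SELECT COUNT(*) FROM T2 WHERE T2.x = T1.x) = 0;
// The COUNT(*) = 0 predicate is satisfied exactly for T1 rows with no match
// in T2. An inner join would drop those rows, so a LeftJoin is needed to
// preserve them, with the count taken over a null-instantiated value
// (see nullPreserveMyExprs()).
//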
// This method is split in two halves: this one, which does the LeftJoin
// conversion and error checking, and nullPreserveMyExprs(), which does the
// aggregate rewriting and nullInstantiation.
//
// Expects: Child of groupBy to be a Join.
// Sideffects: If successful, will convert the join child into a LeftJoin
// with its output from the join's right child nullInstantiated
// and the groupBy's aggregates rewritten in terms of the
// nullInstantiated outputs of the LeftJoin and, in the case
// of the OneTrue aggregate, rewritten as a count(1).
//
// Another sideffect is that the LeftJoin now will own the
// the VEGregion of the old groupBy. Thus we have now
// changed the original query tree. We remember this in the SqoWA
// (part of the NormWA) so that we can reassign the VEGregion back
// if we have to give up on unnesting this subquery further down
// the road.
------------------------------------------------------------------------------*/
RelExpr* GroupByAgg::nullPreservingTransformation(GroupByAgg* oldGB,
NormWA& normWARef)
{
GroupByAgg * newGrby = this;
Join * newJoin = (Join*) child(0)->castToRelExpr();
RelExpr * newRightChild = newJoin->child(1)->castToRelExpr();
// oldGBgrandchild is going to be the child of the filter from the
// original tree. We use that references because we know that its
// outputs are correct and consistent at this point.
RelExpr *oldGBgrandchild;
if (oldGB->child(0)->getOperatorType() == REL_FILTER)
oldGBgrandchild = oldGB->child(0)->child(0)->castToRelExpr();
else
oldGBgrandchild = oldGB->child(0)->castToRelExpr();
// two checks are performed below to see if this subquery can be unnested
// using left joins. If one of the following is true we do not unnest
// (a) Filter preds without reference to anything from the inner side
// - This typically only happens in multilevel queries
// where the filter predicates are correlated on the outer tables
// but do not refer to anything on the inner. What we have observed
// with these types of queries is that the predicate ends up being
// pulled up, then pushed down again, but when it gets pushed down,
// it will end up on the left hand side instead of the right - where
// it came from.
//
// (b) Aggregate Expr and outputs of newGrby contains oneTrue
// - we do not want to unnest these as we have no way of
// fixing up the oneTrue replaced by (count(1)>0) predicate
// upwards as the groupby can only output what is part of its
// aggregate or grouping expression. We could solve this by adding
// the expression to the group expression, but at the moment there
// isn't enough time to adequately test the semantic effects of
// such a change. This should be looked at for phase 3.
ValueIdSet emptySet, coveredSubs, newOutput;
const ValueIdSet& filterPreds = newJoin->selectionPred();
// If the filter preds do not reference anything from the inner side
// we do not unnest this subquery with left joins.
if ( (NOT filterPreds.isEmpty()) &&
filterPreds.referencesOneValueFromTheSet
(oldGBgrandchild->getGroupAttr()->getCharacteristicOutputs()) == FALSE )
{
// We are not referencing anything from the inner side...
// Predicate does not reference the Inner Table
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
{
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: Join predicate has no references to inner table.");
}
oldGB->eliminateFilterChild();
return NULL ;
}
// Check to see if the newGrby contains a oneTrue
// which is used to represent an EXIST.
// if we find one we want to replace it with a
// count(1)>0 predicate , and count(1) will then replace the
// oneTrue as the aggregate.
// This is done so to get a nullinstantiated version of
// the constant, thus preserving the semantic of the
// query. The actual transformation of this happens in
// nullPreserveMyExprs(), but we need to do some checking before we get
// that far.
ValueId ignoreReturnedVid;
ValueId oneTrueVid;
// change newJoin into a left join and move preds into the join predicate
// we have already guaranteed that all selection preds in the newJoin are
// from the filter node and they do not contain any aggregates.
CMPASSERT((newJoin->getOperatorType() == REL_JOIN) ||
(newJoin->getOperatorType() == REL_TSJ));
newJoin->setOperatorType(REL_LEFT_JOIN) ;
newJoin->joinPred() = newJoin->selectionPred();
newJoin->selectionPred().clear();
// Want the left join to take over the VEG region from
// the old scalar-agg and use it as its
// right child region (subtreeId = 1)
VEGRegion* oldGBRegion = normWARef.locateVEGRegion(oldGB,0);
CMPASSERT (oldGBRegion != NULL);
normWARef.getSqoWA()->insertChangedRelExpr(oldGB, newJoin,
SQO_REASSIGNED_VREGION, oldGBRegion->getRegionId(), 0, 1);
normWARef.reassignVEGRegion(oldGB, 0, newJoin, 1);
return( nullPreserveMyExprs(normWARef));
} // GroupByAgg::nullPreservingTransformation()
/*-----------------------------------------------------------------------
GroupByAgg::nullPreserveMyExprs()
// This method takes care of nullInstantiate any of the outputs from
// the right child of the Left join.
//
// It also rewrites the GroupBy's expressions in terms of the newly
// nullinstantiated values.
//
// It then creates a MapValueId on top of the groupBy if the groupBy outputs
// any of those values that got NullInstantiated. This facilitates translating
// the NullInstantiated values back to their original form before we introduced
// the LeftJoin. The top part of the map has the original ValueIds in it, so
// that we do not need to rewrite the tree above us.
//
// A before and after picture is shown below:
//
//
// GroupBy {pred3} MapValueId {topMap:original outputs of GroupBy
// | | bottomMap:new NullInstantiated outputs of GroupBy}
// | |
// Join {SP:pred2} ----------> GroupBy {pred3, aggregateExpr and groupExpr
// / \ | expressed in terms of nullInstantiated output of LeftJoin}
// / \ |
// X Y {pred1} LeftJoin{JP:pred2}
// / \
// / \
// X Y{pred1}
//
// The MapValueId node shown here is present only if the GroupBy has outputs
// from the right side of the Join. The aggregateExpr in the transformed GroupBy
// has new aggregates if the original aggregate contains count or oneTrue
// aggregates.
//
// Note: It is assumed that this function is called after the GroupBy has moved
// on top of the LeftJoin!
//
// Expects: A LeftJoin as the groupBy's child
//
// Sideffects: 1) NullInstantiates outputs of the LeftJoin stemming from
// the LeftJoin's right child.
//
// 2) count(*) and count(keyCol) have already been translated into
// a count(1). Add the 1 to the LeftJoin's output and
// nullInstantiate it. For a count(col), col is already part
// of the LeftJoin's output. Then change the opType of the
// count() to be of type ITM_COUNT_NONULL to take care of the
// count bug. Since we now changed the itemExpr that is
// common to the old relExpr tree, we need to remember this
// in the SqoWA(member of NormWA) so we can undo it if we
// need to give up.
// Note we can have several counts in here..
//
// 3) EXIST is translated earlier in the compiler to an ITM_ONE_TRUE.
// Replace the ONETRUE with a count(1) similarly to what we did
// in 2) above. Again we need to remember this change as we
// are changing an itemExpr common to the old tree.
// There will be only ONE special aggregate like ONE_TRUE.
//
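// For illustration: WHERE EXISTS (SELECT 1 FROM T2 WHERE T2.x = T1.x)
// becomes, after unnesting, a GroupBy predicate of the form count(1) > 0,
// where the constant 1 is null-instantiated by the LeftJoin so that
// unmatched T1 rows yield a count of zero.
//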
// 4) The selection predicate, aggregate expression,
// grouping expression,leftUnique expressions and output
// expression are rewritten in terms of the nullInstantiated
// outputs from the LeftJoin.
//
// 5) if the rewritten outputs of the groupBy contains any
// nullInstantiated values from the LeftJoin, we need to
// insert a mapValueId node on top of the groupBy to translate
// between the old and the new groupBys. The tree above us
// expects the old ones.
------------------------------------------------------------------------------*/
RelExpr* GroupByAgg::nullPreserveMyExprs( NormWA& normWARef)
{
GroupByAgg * newGrby = this;
ValueId oneTrueVid;
ValueId anyTrueVid;
Join * newJoin = (Join*) child(0)->castToRelExpr();
RelExpr * joinRightChild = newJoin->child(1)->castToRelExpr();
CollHeap *stmtHeap = CmpCommon::statementHeap() ;
// For safety, in case someone calls this method out of context.
if (newJoin->getOperatorType() != REL_LEFT_JOIN)
return this;
// Get the outputs of the Left Joins right child
// we need to add a constant to it to get it nullinstantiated
// if we have a count or a oneTrue aggregate in the newGrby
ValueIdSet currentOutputs = joinRightChild->getGroupAttr()->getCharacteristicOutputs();
// Handle the count case
if (newGrby->aggregateExpr().containsCount())
{
// count(*) gets translated earlier in the compiler
// to count(1).
// If we have a count(*) situation, add the constant
// to the join's right child's output so that it can
// get nullInstantiated
// Doing so takes care of the infamous count() bug.
// We also need to make sure the count operator is of type
// ITM_COUNT_NONULL.
// count(col) works as is as long as we make sure that
// the count operator is of type ITM_COUNT_NONULL.
// In the case where col is non Nullable count(col) also
// gets translated into a count(1)
// Need to nullInstantiate any outPuts from the right side..
for ( ValueId vid = newGrby->aggregateExpr().init();
newGrby->aggregateExpr().next(vid);
newGrby->aggregateExpr().advance(vid))
{
if ((vid.getItemExpr()->origOpType() == ITM_COUNT_STAR__ORIGINALLY) ||
(vid.getItemExpr()->origOpType() == ITM_COUNT))
{
// Found a count(*) or a count(col)
// a count(*) is represented as a count(1)
// Make sure we add the constant as a fake output
// of the leftJoin so that it will be nullInstantiated.
// In the case of count(col), col is already an output from the
// leftJoin.
//
// Add the const used in count(*) expression to the joins
// output so it can be nullinstantiated.
if (vid.getItemExpr()->child(0)->getOperatorType() == ITM_CONSTANT)
{
currentOutputs += vid.getItemExpr()->child(0)->getValueId();
}
normWARef.getSqoWA()->insertChangedItemExpr(vid, SQO_NEWOPTYPE, vid.getItemExpr(),
vid.getItemExpr()->getOperatorType());
// unconditionally change the COUNT to ITM_COUNT_NONULL
// This constant will be nullinstantiated below
vid.getItemExpr()->setOperatorType(ITM_COUNT_NONULL);
}
}
}
if ( aggregateExpr().containsOneTrue(oneTrueVid) )
{
ItemExpr *constVal = new (stmtHeap) SystemLiteral(1);
// replace the OneTrue aggreate in the newGrby selection
// predicate with the manufactured count(1) > 0 in
// the groupby selection predicate.
// Also replace the OneTrue aggregate with the
// count(1) aggregate in the newGrby's aggregate.
// create the new count(1) aggregate.
Aggregate * dummyAgg = new (stmtHeap)
Aggregate(ITM_COUNT_NONULL, constVal);
// Create the count(1)>0 predicate.
BiRelat *fakeCountPred = new (stmtHeap)
BiRelat(ITM_GREATER,
dummyAgg,
new (stmtHeap) SystemLiteral(0));
fakeCountPred->synthTypeAndValueId();
// Need to nullInstantiate any outPuts from the right side..
// Add the fake column to the output of the join's right child.
// By having the LeftJoin output the fake constant we can tell
// if the row gets nullInstantiated. If the fake constant comes
// back as NULL, we have a nullInstantiated row!
// This can only happen after we do the synthTypeAndValueId() above...
// if this line needs to move above, then you have to call
// synthTypeAndValueId on the constVal...
currentOutputs += constVal->getValueId();
// Retain the old itemExpr so that we can restore it
// if we bail from unnesting..
normWARef.getSqoWA()->insertChangedItemExpr(oneTrueVid, SQO_REPLACED,
oneTrueVid.getItemExpr());
// By using replaceItemExpr, we immediately
// fix up the newGrby's selection predicate if it
// contained the oneTrue.
oneTrueVid.replaceItemExpr(fakeCountPred);
// Fix up the aggregate.
newGrby->aggregateExpr() -= oneTrueVid;
newGrby->aggregateExpr() += dummyAgg->getValueId();
}
if (newGrby->aggregateExpr().containsAnyTrue(anyTrueVid))
{
// For the cases where the groupBy's selection predicate
// contains a AnyTrue(), we need to add in an additional
// check to also allow nullInstantiated rows to pass or to
// transform its result to be of equivalent value to that of
// the aggregate in its nested form.
// For example
//
// The following query:
// SELECT A FROM T1 WHERE B = ALL
// (SELECT T2.D FROM T2 WHERE T2.D = T1.B) OR EXISTS
// (SELECT T3.F FROM T3 WHERE T3.H > T1. B AND T3.H < T1.A)
// order by 1;
//
// Root Root
// | |
// Tsj -> Tsj
// / \ / \
// Tsj ScalAgg2 MapVid ScalAgg2
// / \ \ / \
// T1 ScalAgg T3 GroupBy T3
// \ |
// T2 LeftJoin
// / \
// T1 T2
//
// In this example, ScalAgg in the nested case produces the following
// AnyTrue() aggregate: AnyTrue(T2.D <> T1.B), which is an input to ScalAgg2
// If we have a row in T1, where T1.B is NULL, the nested ScalAgg will
// get a NO_DATA from the scan of T2 for that row, which means an empty
// group, in which case ANY_TRUE will evaluate to FALSE.
//
// In the unnested case, the same row from T1 will produce a NullInstantiated
// row when joined with T2 due to the leftJoin, thus the group passed up
// to the GroupBy (which for the unnested case also produces the same
// AnyTrue() aggregate), contains 1 row, and the AnyTrue() aggregate will
// evaluate to UNKNOWN due to the NULL value for T1.B and the NULL value
// for T2.D.
//
// To solve this problem for the unnested case, we add a fake constant
// to the output of the LeftJoin, and augment the anyTrue predicate to be
// AnyTrue(T2.D <> T1.B AND NOT IsNull(fakeConst)).
//
//
// We have a similar problem when the groupBy contains a Not AnyTrue()
// selection predicate as a result of a translation of an ALL expression to
// a NOT ANY.
// Create the fake constant
ItemExpr *constVal = new (stmtHeap) SystemLiteral(1);
constVal->synthTypeAndValueId();
// Need to add the fake constant to the group Expression
// since the expression we are ANDing in is not part of the
// aggregate expression .
newGrby->groupExpr() += constVal->getValueId();
// Create the IS NULL predicate
UnLogic *newIsNullPred = new (stmtHeap)
UnLogic(ITM_IS_NULL, constVal);
newIsNullPred->synthTypeAndValueId();
ItemExpr *anyTrueExpr = anyTrueVid.getItemExpr()->child(0);
// Create the Not IS NULL predicate
UnLogic *newNotPred = new (stmtHeap) UnLogic(ITM_NOT, newIsNullPred);
newNotPred->synthTypeAndValueId();
// AND it with the existing AnyTrue predicate..
BiLogic *newPred = new (stmtHeap) BiLogic(ITM_AND,
anyTrueExpr,
newNotPred);
newPred->synthTypeAndValueId(TRUE);
// Remember what we changed, so it can be restored if we need to back out..
normWARef.getSqoWA()->insertChangedItemExpr(
anyTrueVid,
SQO_NEWCHILD,
anyTrueExpr,
0);
// assign the new predicate to the AnyTrue node.
anyTrueVid.getItemExpr()->child(0) = newPred;
// Need to nullInstantiate any outputs from the right side..
// Add the fake column to the output of the left join.
// By having the LeftJoin output the fake constant we can tell
// if the row gets nullInstantiated. If the fake constant comes
// back as NULL, we have a nullInstantiated row!
// We facilitate this by adding the constant to the LeftJoins
// nullInstantiatedOutput list.
currentOutputs += constVal->getValueId();
}
// NullInstantiate the output from the newJoin's right child.
ValueIdList &nullOutputList = newJoin->nullInstantiatedOutput();
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context());
for (ValueId exprId = currentOutputs.init();
currentOutputs.next(exprId);
currentOutputs.advance(exprId))
{
ValueId nullId = exprId.nullInstantiate(&bindWA,TRUE);
nullOutputList.insert(nullId);
}
newJoin->normalizeNullInstantiatedOutput(normWARef);
ValueIdSet aggExprRewritten, selPredsRewritten;
ValueIdSet leftUniqueExprRewritten, grpExprRewritten;
const ValueIdSet &selPreds = newGrby->getSelectionPred();
const ValueIdSet &grpExpr = newGrby->groupExpr();
const ValueIdSet &aggExpr = newGrby->aggregateExpr();
const ValueIdSet &leftUniqueExpr = newGrby->leftUniqueExpr();
// Create a copy of the newJoins map so that our rewrites
// do not have unwanted sideffects where the join might
// put any of the newGrby aggregates in its output.
ValueIdMap *rightChildMap = new (stmtHeap)
ValueIdMap(newJoin->rightChildMapForLeftJoin());
// This is kind of counterintuitive.
// The top part of the map contains the nullInstantiated
// predicates, which we want to use in the groupBy's
// aggregate expression, thus the use of rewriteValueIdSetUp,
// and the reversal of the arguments...
rightChildMap->rewriteValueIdSetUp(aggExprRewritten,aggExpr);
// Now we need to look for count(1) (same as count(*))
// and substitute that with the null instantiation of the
// const column.
newGrby->aggregateExpr() = aggExprRewritten;
// Remap the selection predicate as well.
rightChildMap->rewriteValueIdSetUp(selPredsRewritten,selPreds);
newGrby->setSelectionPredicates(selPredsRewritten);
// Remap the group expression as well.
rightChildMap->rewriteValueIdSetUp(grpExprRewritten,grpExpr);
newGrby->groupExpr() = grpExprRewritten;
// Remap the leftUnique expression as well. Needed for when we move
// above a Left Join
rightChildMap->rewriteValueIdSetUp(leftUniqueExprRewritten,leftUniqueExpr);
newGrby->leftUniqueExpr() = leftUniqueExprRewritten;
newGrby->getGroupAttr()->normalizeInputsAndOutputs(normWARef);
// Create a MapValueIds node on top of the
// GroupBy that maps between the old join
// outputs used above and the new GroupBy outputs.
NABoolean mapNeeded = FALSE;
ValueIdSet rewrittenGbyOutputs;
ValueIdSet gbyOutputs = newGrby->getGroupAttr()->getCharacteristicOutputs();
ValueIdMap *map = new (stmtHeap) ValueIdMap;
// Cannot use the rewriteValueIdSetUp routine for the outputs,
// as we need to construct a map for the outputs and the ValueIdSets
// used in rewriteValueIdSetUp() do not retain the order of the Vids.
// Using a ValueIdList doesn't help either, as the order is reversed,
// and it seemed like a bad idea to rely on that never changing.
for (ValueId topMapId = gbyOutputs.init();
gbyOutputs.next(topMapId); gbyOutputs.advance(topMapId))
{
ValueId bottomMapId;
rightChildMap->rewriteValueIdUp(bottomMapId,topMapId);
// Only if our outputs will actually be different do we want to
// create a map.
// One would think that it should be ok to add elements to the map
// that have the same value in both the upper and lower part,
// but that ends up producing incorrect output, so we only
// add elements that are different.
if ( topMapId != bottomMapId )
{
mapNeeded = TRUE;
}
// Add a new mapping entry for the MapValueIds node.
map->addMapEntry(topMapId, bottomMapId);
rewrittenGbyOutputs += bottomMapId;
}
newGrby->getGroupAttr()->setCharacteristicOutputs(rewrittenGbyOutputs);
// For phase 3 we need to remember that we created a map:
// if we are moving a GroupBy on top of a LeftJoin (that we have already
// converted), we don't want to create an additional map,
// since the first map already maps any of the outputs from this left
// join.
if ( mapNeeded )
{
MapValueIds * newMap = newGrby->buildMapValueIdNode(map);
return newMap;
}
return newGrby;
} // GroupByAgg::nullPreserveMyExprs()
/*-----------------------------------------------------------------------------
// Join::moveUpGroupByTransformation()
// MoveUp GroupBy transformation. Relevant only for subqueries with
// two or more levels of nesting. For a two level subquery, at this stage
// the new tree looks like
// MovedUpGroupByTail(newJoin(X2,moveUpGroupBy(Y2))).
// If the selection pred. of moveUpGroupBy and/or Y2 contain outer references
// those predicates will have to be pulled up so that newJoin does not have
// to be a TSJ. The first step in this process is to apply the MoveUpGroupBy
// transformation which will change the new tree to
// MovedUpGroupByTail(moveUpGroupBy(newJoin(X2,Y2))).
// movedUpGrbyTail(newGrby) movedUpGrbyTail(newGrby)
// | |
// | |
// newJoin moveUpGroupBy
// / \ |
// / \ ------> |
// X2 moveUpGroupBy newJoin
// | / \
// | / \
// Y2 X2 Y2
// If Y2 is a GroupBy that has been marked for moveUp (which can happen if we have
// more than 2 levels of nesting), then in the next iteration through the while
// loop below the old Y2 will become the new moveUpGrby and the old moveUpGroupBy
// will become the new movedUpGrbyTail.
// If the query has N levels of nesting, we may have to move N-1 GroupBy
// nodes over the newly introduced Join.
// If subquery unnesting has introduced Left Joins and MapValueId nodes
// through the NullPreservingTransformation, then the moveUpGroupTransformation
// is slightly different from the figure shown above. If MapValueId nodes are
// present the transformation will be as shown below. Note that newJoin can be
// a regular Join or a LeftJoin. Note that in phase 2 we allow at most one
// LeftJoin to be introduced by subquery unnesting per query, thus there can
// be at most one MapValueId node introduced by subquery unnesting.
// The transformation shown below will occur at most once per query.
// In phase 3 this restriction will go away as we will then be able to unnest
// multiple subqueries that require a Left Join.
// movedUpGrbyTail(topGrby) movedUpGrbyTail(topGrby)
// | |
// | |
// newJoin moveUpMap
// / \ |
// / \ ------> |
// X2 moveUpMap moveUpGroupBy
// | |
// | |
// moveUpGroupBy newJoin
// | / \
// | / \
// Y2 X2 Y2
//
//
// Expects: child(1) to be a GroupBy with the moveUp flag set, or a mapValueId
// with a GroupBy child that has the moveUp flag set.
//
// Side effects: If successful, will return a pointer to a groupBy that
// is a copy of the groupBy marked for moveUp which now
// has this join as its child. This new groupby will also
// have its grouping expression altered, as well as its inputs
// and outputs. The groupBy's selection predicate will also
// contain any of the join's selection predicates that contained
// an aggregate from the original groupBy.
-------------------------------------------------------------------------------*/
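// For illustration only (hypothetical tables and columns), a two-level
// nested query of roughly this shape can leave a GroupBy that still has
// outer references below the newly introduced join:
//
//   select X2.a
//   from   X2
//   where  X2.b > (select max(Y2.c)
//                  from   Y2
//                  where  Y2.d = X2.a
//                    and  Y2.e > (select sum(Z.f)
//                                 from   Z
//                                 where  Z.g = Y2.d));
//
// After the pullUpGroupBy transformation is applied at both levels, the
// correlation predicate Y2.d = X2.a remains below the inner GroupBy (an
// outer reference to X2), so that GroupBy is marked for moveUp and this
// routine moves it above the join introduced at the outer level, allowing
// the predicate to be pulled up.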
GroupByAgg* Join::moveUpGroupByTransformation(GroupByAgg* topGrby,
NormWA & normWARef)
{
GroupByAgg *moveUpGrby;
ValueIdSet emptySet ;
GroupByAgg *movedUpGrbyTail = topGrby;
RelExpr * joinRightChild = child(1)->castToRelExpr();
CollHeap *stmtHeap = CmpCommon::statementHeap() ;
MapValueIds *moveUpMap = NULL;
while ((joinRightChild->getOperatorType() == REL_GROUPBY) &&
(((GroupByAgg*) joinRightChild)->requiresMoveUp()) ||
((joinRightChild->getOperatorType() == REL_MAP_VALUEIDS) &&
((( (RelExpr*) joinRightChild->child(0))->getOperatorType() == REL_GROUPBY) &&
(((GroupByAgg*) joinRightChild->child(0)->castToRelExpr())->requiresMoveUp()))))
{
if (isLeftJoin())
{
// We do not want to pull the groupBy above the left join
// as it will restrict tuples from the left side of the left
// join to flow up. This needs to be revisited for phase 3
// when we transform to a Left Leaning tree when we have
// LeftJoins.
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: Can not move groupBy above LeftJoin.");
return NULL;
}
if (joinRightChild->getOperatorType() == REL_MAP_VALUEIDS)
{
moveUpMap = (MapValueIds*) copyNodeAndSetChildren(
joinRightChild, stmtHeap);
moveUpGrby = (GroupByAgg*) copyNodeAndSetChildren
(
joinRightChild->child(0)->castToRelExpr(),
stmtHeap
) ;
moveUpMap->child(0) = moveUpGrby;
moveUpMap->getGroupAttr()->clearLogProperties();
}
else
{
moveUpMap = NULL;
moveUpGrby = (GroupByAgg*) copyNodeAndSetChildren(
joinRightChild,stmtHeap) ;
}
moveUpGrby->getGroupAttr()->clearLogProperties();
joinRightChild = copyNodeAndSetChildren
(
moveUpGrby->child(0)->castToRelExpr(),
stmtHeap
);
//Join may have predicates that reference aggregates in moveUpGrby
// If so pull these preds into moveUpGrby
NABoolean safeToMoveGrby;
safeToMoveGrby = pullUpPredsWithAggrs(moveUpGrby, moveUpMap);
if (NOT safeToMoveGrby )
{
// The join contains aggregates, skip this subquery.
return NULL ;
}
child(1) = joinRightChild ;
moveUpGrby->child(0) = this ;
if (moveUpMap != NULL)
movedUpGrbyTail->child(0) = moveUpMap ;
else
movedUpGrbyTail->child(0) = moveUpGrby ;
// set up inputs and outputs of moveUpGroupBy taking into account
// its new location in the query tree.
moveUpGrby->addGroupExpr(movedUpGrbyTail->groupExpr());
moveUpGrby->getGroupAttr()->setCharacteristicInputs(emptySet);
moveUpGrby->primeGroupAttributes();
moveUpGrby->getGroupAttr()->normalizeInputsAndOutputs(normWARef);
// Need to make sure we nullInstantiate anything that we need from
// the right hand side, as we may have just moved over a Left Join.
// This function is a no-op if our child is not a LeftJoin.
RelExpr * result = moveUpGrby->nullPreserveMyExprs(normWARef);
if (result == NULL)
return NULL;
if (moveUpMap)
{
moveUpMap->getGroupAttr()->setCharacteristicInputs(emptySet);
moveUpMap->primeGroupAttributes();
moveUpMap->getGroupAttr()->normalizeInputsAndOutputs(normWARef);
moveUpMap->pushdownCoveredExpr(
moveUpMap->getGroupAttr()->getCharacteristicOutputs(),
moveUpMap->getGroupAttr()->getCharacteristicInputs(),
emptySet);
}
movedUpGrbyTail->pushdownCoveredExpr(
movedUpGrbyTail->getGroupAttr()->getCharacteristicOutputs(),
movedUpGrbyTail->getGroupAttr()->getCharacteristicInputs(),
movedUpGrbyTail->selectionPred()
);
// Sometimes the moveUpMap ends up being empty after being moved
// on top of a Join. Eliminate it if we don't need it, otherwise
// it will impede output flow.
if ( moveUpMap != NULL &&
moveUpMap->getGroupAttr()->getCharacteristicOutputs().isEmpty())
{
movedUpGrbyTail->child(0) = moveUpMap->child(0);
// set up inputs and outputs of moveUpGroupBy taking into account
// its new location in the query tree.
moveUpGrby->addGroupExpr(movedUpGrbyTail->groupExpr());
moveUpGrby->getGroupAttr()->setCharacteristicInputs(emptySet);
moveUpGrby->primeGroupAttributes();
moveUpGrby->getGroupAttr()->normalizeInputsAndOutputs(normWARef);
// Repush
movedUpGrbyTail->pushdownCoveredExpr(
movedUpGrbyTail->getGroupAttr()->getCharacteristicOutputs(),
movedUpGrbyTail->getGroupAttr()->getCharacteristicInputs(),
movedUpGrbyTail->selectionPred()
);
}
// does moveUpGroupBy still have outer references? If NO then it need
// not "move over" any more Join nodes.
ValueIdSet outerReferences;
moveUpGrby->getGroupAttr()->getCharacteristicInputs().
getOuterReferences(outerReferences);
if (outerReferences.isEmpty())
moveUpGrby->setRequiresMoveUp(FALSE) ;
// moveUpGroupBy will have grouping cols of movedUpGroupByTail +
// left unique cols that were computed for it previously +
// cols needed to provide its outputs +
// cols needed to compute its selection pred.
// the superSet (second param) for this call is the current
// value for the groupExpr. In other words the aim of the call below
// is to see if the groupExpr for the moveUpGrby can be reduced
// from the setting that was done a few lines earlier.
moveUpGrby->computeGroupExpr(movedUpGrbyTail->groupExpr(),
moveUpGrby->groupExpr(), normWARef);
// movedUpGrbyTail is set to moveUpGroupBy in case there are more
// GroupBys that need to be moved up.
movedUpGrbyTail = moveUpGrby ;
}
// end of MoveUpGroupBy transformation
// note that if the specific pattern of GroupBy and Joins shown here
// is not present then this transformation will not be applied and the
// right subtree of the new Join will contain outer references. This
// will cause unnesting this TSJ/Join to fail below, and we will revert
// to the original nested tree for this subquery level.
return movedUpGrbyTail;
} // Join::moveUpGroupByTransformation()
/*----------------------------------------------------------------------------
// GroupByAgg::subqueryUnnestFinalize()
// set up inputs/outputs of the new Join, its children
// and the newJoin's parent GroupBy.
// move selection predicates to the appropriate nodes.
// return FALSE if any outer references remain or if
// sufficient outputs cannot be produced to compute left side's
// unique columns
//
// Expects: Child(0) to be a Join or a subQ groupby.
// Side effects: recomputed inputs and outputs of the join child's children
// recomputed inputs and outputs of the join.
// pushes any of the groupBy's predicates down that can go down.
// pushes any of the join's predicates down that can go down.
-------------------------------------------------------------------------------*/
NABoolean GroupByAgg::subqueryUnnestFinalize(ValueIdSet& newGrbyGroupExpr,
NormWA& normWARef)
{
Join * newJoin = NULL ;
if (child(0)->getOperatorType() == REL_GROUPBY)
newJoin = (Join*) child(0)->child(0)->castToRelExpr();
else
newJoin = (Join*) child(0)->castToRelExpr();
RelExpr * newLeftChild = newJoin->child(0)->castToRelExpr();
RelExpr * newRightChild = newJoin->child(1)->castToRelExpr();
newLeftChild->primeGroupAttributes();
newRightChild->primeGroupAttributes();
newLeftChild->getGroupAttr()->normalizeInputsAndOutputs(normWARef);
newRightChild->getGroupAttr()->normalizeInputsAndOutputs(normWARef);
ValueIdSet nonLocalPreds,valuesReqdByParent,availableInputs,outerReferences;
// availableInputs is the requiredInputs of the newJoin minus
// any outer references. These outer references are not really
// available as our intention is to apply this transformation
// at successive levels and unnest all subqueries.
availableInputs = newJoin->getGroupAttr()->getCharacteristicInputs();
availableInputs += newRightChild->getGroupAttr()->getCharacteristicInputs();
availableInputs.getOuterReferences(outerReferences);
availableInputs -= outerReferences ;
for (Int32 i = 0; i < 2; i++) {
// --------------------------------------------------------------------
// Check to see if we have any Outer References in our child's selection
// predicate
// If we do we want to pull it up .
// ---------------------------------------------------------------------
if (newJoin->child(i)->selectionPred().getReferencedPredicates
(outerReferences, nonLocalPreds))
{
if ((i == 1)&&newJoin->isLeftJoin())
newJoin->joinPred() += nonLocalPreds ;
else
newJoin->selectionPred() += nonLocalPreds ;
newJoin->child(i)->selectionPred() -= nonLocalPreds ;
newJoin->child(i)->recomputeOuterReferences();
nonLocalPreds.clear();
}
}
//computing Join's inputs/outputs
newJoin->primeGroupAttributes();
newJoin->getGroupAttr()->normalizeInputsAndOutputs(normWARef);
//push down any of the groupBy's predicates that we can.
pushdownCoveredExpr( getGroupAttr()->getCharacteristicOutputs(),
getGroupAttr()->getCharacteristicInputs(),
selectionPred() );
// Rules for pushdown from the Join during this transformation differ
// from the usual rules.
// 1) If left child does not cover any part of a
// VEGPred it will still be retained in the Join, so that it can be pulled
// further up the query tree as we apply this transformation at other levels
// In the usual rules, the VEGPred will be pushed down to the right child
// without being retained at the Join
ValueIdSet emptySet;
valuesReqdByParent = newJoin->getGroupAttr()->getCharacteristicOutputs() ;
newJoin->pushdownCoveredExprSQO(valuesReqdByParent,
availableInputs,
newJoin->selectionPred(),
emptySet,
TRUE, // keepPredsNotCoveredByChild0
TRUE); // keepPredsNotCoveredByChild1
// check if right child still contains outer references. If so
// this subquery level cannot be unnested. Give up and return the
// old TSJ. Note that other subquery levels may still be
// successfully unnested.
outerReferences.clear();
newRightChild->getGroupAttr()->getCharacteristicInputs().
getOuterReferences(outerReferences);
if (NOT(outerReferences.isEmpty()))
{
// right child still has outer references
// cannot unnest this subquery
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: Right child has outer references that cannot be removed by current unnesting.");
return FALSE ;
}
// Is the Join producing all the outputs needed for the new grouping
// columns of the GroupBy? If not make a tree walk down the left subtree,
// increase outputs as needed at various child levels so that this Join
// can produce the needed values. If we fail unnesting is not possible
// at this level.
ValueIdSet additionalOutputsNeeded = newGrbyGroupExpr;
additionalOutputsNeeded -= newJoin->getGroupAttr()->getCharacteristicOutputs();
ValueIdSet savedOutputsNeeded = additionalOutputsNeeded ;
if (newJoin->getMoreOutputsIfPossible(additionalOutputsNeeded))
{
newJoin->getGroupAttr()->addCharacteristicOutputs(savedOutputsNeeded);
}
else
{
// left sub-tree cannot produce additional columns required to group
// by the left unique cols. Cannot unnest this subquery.
// Can occur if left-subtree contains UNION, TRANSPOSE, SEQUENCE or SAMPLE
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: Left subtree cannot produce output values required for grouping.");
return FALSE ;
}
return TRUE;
} // GroupByAgg::subqueryUnnestFinalize()
/*----------------------------------------------------------------------------
// Join::applyInnerKeyedAccessHeuristic()
//
// Checks to see if the join predicate is on a key column of the inner table
// and the key column is the leading key column.
//
// Expects: a child chain like this:
// GroupBy->Filter->Scan
//
// Side effects: Doesn't change anything.
-------------------------------------------------------------------------------*/
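// For illustration only (hypothetical table T2 whose leading key column
// is K):
//
//   select T1.a
//   from   T1
//   where  T1.b in (select T2.c from T2 where T2.K = T1.a);
//
// The correlated join predicate T2.K = T1.a is on the leading key column
// of an access path on T2, so the nested plan can probe T2 efficiently
// and this heuristic keeps the subquery nested (unnesting is skipped).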
NABoolean Join::applyInnerKeyedAccessHeuristic(const GroupByAgg* newGrby,
NormWA & normWARef)
{
RelExpr *oldGBgrandchild;
// note that the child of oldGB is actually a Filter node, here
// oldGBgrandchild is the child of oldGB before the Filter was added.
if (child(1)->child(0)->getOperatorType() == REL_FILTER)
oldGBgrandchild = child(1)->child(0)->child(0)->castToRelExpr();
else
oldGBgrandchild = child(1)->child(0)->castToRelExpr();
// Apply inner table keyed scan heuristic. This heuristic turns off subquery
// unnesting for this tsj if the join predicate is on a key column of the inner table.
// The heuristic applies only if the key column is the leading key column
// of a base table or an index. No consideration is made for the selectivity
// of the index. This heuristic applies only if
// 1. comp_bool_168 is OFF
// 2. Inner side of tsj is a scan (not another subquery)
// 3. There is only one level of nesting or this is a tree subquery
// 4. The number of tables below this join is LEQ COMP_INT_46 (default value is 10)
// If there are multiple levels of nesting the benefit of this heuristic is
// doubtful as unnesting the lowest level will allow higher levels to be unnested.
if((CmpCommon::getDefault(COMP_BOOL_168) == DF_OFF) &&
(oldGBgrandchild->getOperatorType() == REL_SCAN) &&
((normWARef.getCorrelatedSubqCount() == 1) ||
(NOT (((GroupByAgg*)newGrby)->requiresMoveUp()))) &&
(getGroupAttr()->getNumJoinedTables() <=
ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_46)))
{
RelExpr *oldGB = child(1)->castToRelExpr();
const TableDesc * tableDesc =
((Scan *)oldGBgrandchild)->getTableDesc();
const LIST(IndexDesc *) & ixlist = tableDesc->getIndexes();
ValueIdSet preds = oldGB->child(0)->castToRelExpr()->selectionPred();
ValueIdSet leadingKeyCols ;
for (CollIndex j = 0; j < ixlist.entries(); j++)
{
// get only the leading key column from every access path.
ValueId vid = ixlist[j]->getOrderOfKeyValues()[0];
ItemExpr *colIE = vid.getItemExpr();
if (colIE->getOperatorType() == ITM_VEG_REFERENCE)
{
// get the valueid of the VEG
leadingKeyCols +=
((VEGReference *)colIE)->getVEG()->getValueId();
}
}
for (ValueId x = preds.init();
preds.next(x);
preds.advance(x))
{
ItemExpr *ie = x.getItemExpr();
if (ie->getOperatorType() == ITM_VEG_PREDICATE)
{
ValueId id = ((VEGPredicate *)ie)->getVEG()->getValueId();
if (leadingKeyCols.contains(id))
{
child(1)->eliminateFilterChild();
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: Join predicate is on a leading key column of inner table.");
return TRUE ;
}
}
}
}
return FALSE;
} // Join::applyInnerKeyedAccessHeuristic()
// -----------------------------------------------------------------------
// Join::semanticQueryOptimizeNode(). This method provides the entry
// point for Semantic Query Optimization. It will attempt the following types
// of optimization:
//
// a) join elimination
// b) transform semi joins to inner joins
// c) subquery unnesting
//
// For the time being, only one of these transformations happens on a
// single query. Phase 3 may look at allowing multiple transformation
// on the same query.
//
// The prework for checking if a particular transformation is possible
// occurred in the transformer, where we set flags to indicate what kind of
// transformation a particular query is a candidate for. The other thing that
// may happen in the transformer, if we decide a query is a candidate for SQO,
// is that we may create a filter node to hold predicates with outer references.
// One of the main functions of the filter is to prevent pushdown of predicates
// with outer references.
//
// JOIN ELIMINATION:
// For join elimination we apply the following rules (a hypothetical
// example follows rule 3):
// 1) If predicates have been marked for removal
// Join {selection_pred: p1,p2,p3,...pn} --> Join {selection_pred: p3,...pn}
// where p1 and p2 are equi join predicates that are known to be true due
// to a foreign_key-unique_key relationship
//
// 2) If the children of the join are marked for removal
//
// parent
// | parent
// Join |
// / \ ------> X
// X Y
//
// where the node Y has been marked for elimination by the synthLogPhase.
//
// 3) If its a left join and has been markedForElimination by the normalize
// phase then
//
// parent
// | parent
// LeftJoin |
// / \ ------> X
// X Y
//
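//    As a hypothetical illustration of rule 1 (names made up): for
//
//      select F.*
//      from   Fact F join Dim D on F.dim_id = D.dim_id;
//
//    where F.dim_id is a non-null foreign key referencing Dim's unique
//    key and no columns of Dim are otherwise needed, the equi-join
//    predicate is known to hold for every Fact row, so the predicate and
//    eventually the whole Dim leg of the join can be removed.
//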
// SEMI-JOIN TRANSFORMATION
//
// a) If the right child is unique in the joining column and the semi join
// can be simply translated into a join.
//
// An example query is
//
// select t1.a
// from t1
// where t1.b in (select t2.a
// from t2) ;
// Here t2.a is a unique key of table t2.
//
// The following transformation is made
// Semi Join {pred : t1.b = t2.a} ------> Join {pred : t1.b = t2.a}
//
// b) If the right child is not unique in the joining column then
// we transform the semijoin into an inner join followed by a groupby
// as the join's right child. This transformation is enabled by default
// only if the right side is an IN list, otherwise a CQD has to be used.
//
// groupby (X.key)
// SemiJoin |
// / \ ------> Join
// X Y / \
// X Y
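//      For example (illustration only), with an IN list on the right side
//      that is not unique in the joining column:
//
//        select t1.a
//        from t1
//        where t1.b in (10, 20, 20, 30) ;
//
//      Duplicates on the right would multiply t1 rows if the semijoin were
//      turned into a plain join, so the join result is grouped on X's key
//      (t1's unique columns) to restore semijoin semantics.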
//
// SUBQUERY UNNESTING
// The subquery unnesting consist of two main transformations:
// pullUpGroupBy and moveUpGroupBy transformation
// which are based on Dayal and Muralikrishna's algorithm (see below).
//
// a) pullUpGroupBy transformation:
//
// For a single level subquery this is the only transformation required for
// subquery unnesting.
//
// TSJ GroupBy
// / \ |
// X ScalarAgg --> Join (pred)
// | / \
// Filter (pred) X Y
// |
// Y
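//    As a hypothetical illustration, a single-level correlated subquery of
//    roughly this shape produces the tree on the left:
//
//      select X.a
//      from   X
//      where  X.b > (select sum(Y.c) from Y where Y.d = X.a) ;
//
//    The TSJ probes Y once per X row to evaluate the ScalarAgg; after the
//    pullUpGroupBy transformation the correlation predicate Y.d = X.a is
//    applied in the Join, and the aggregate is computed by a GroupBy above
//    it (grouped on X's unique columns).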
//
// For a multilevel query this may happen several times.
// Under certain circumstances, in a multilevel subquery, we may also need
// to apply the moveUpGroupBy transformation.
//
// b) moveUpGroupBy transformation:
//
// When the pullUpGroupBy transformation has to be applied more than once
// on a query tree (for multi-level subqueries), then it is possible that
// a groupBy below still contains outer references. For example with
// a two level query, this is what the tree will look like after applying
// the pullUpGroupBy transformation twice:
//
// TSJ2 GroupBy2
// / \ pullUpGroupBy |
// X ScalarAgg2 transformation Join2
// | (2 times) / \
// Filter2 ----------> X GroupBy1
// | \
// TSJ1 Join1
// / \ / \
// Y ScalarAgg1 Y Z
// \
// Filter1
// \
// Z
//
// If the selection pred. of GroupBy1 and/or Join1 contain outer
// references after the transformation, those predicates will have to
// be pulled up so that Join2 does not have to be a TSJ. See the comment
// in Join::moveUpGroupByTransformation() for how the right side
// of the picture above gets transformed further.
//
// One additional complication occurs when we need to convert any of the
// TSJs into a LeftJoin. This conversion occurs during either or both of
// the pullUpGroupBy and moveUpGroupBy transformations. If we require a LeftJoin
// we manipulate the predicates and null-instantiated outputs of the LeftJoin
// that is from the right subtree in order to preserve correctness. Refer
// to the infamous count bug!
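// (Count bug, illustrated with hypothetical names: for
//    select T1.a, (select count(*) from T2 where T2.b = T1.a) from T1;
// a T1 row with no matching T2 rows must still appear with a count of 0.
// A plain inner join would drop that row, so a LeftJoin is used and
// count(*) is replaced by a count over the null-instantiated fake
// constant, so that the null-instantiated row contributes 0.)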
// For more details, please refer to
// M. Muralikrishna, "Improved Unnesting Algorithms for Join Aggregate SQL Queries",
// Proc. VLDB Conf., pp. 91-102 (1992)
// -----------------------------------------------------------------------
RelExpr * Join::semanticQueryOptimizeNode(NormWA & normWARef)
{
// ---------------------------------------------------------------------
// SemanticQueryOptimize each child.
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// SemanticQueryOptimize the left and right subtrees. Store pointers to
// the roots of the subtrees after SQO.
// ---------------------------------------------------------------------
if (isFullOuterJoin())
normWARef.locateAndSetVEGRegion(this, 0 /* first child */);
child(0) = child(0)->semanticQueryOptimizeNode(normWARef);
if(isFullOuterJoin())
normWARef.restoreOriginalVEGRegion();
if (ownsVEGRegions())
{
// -------------------------------------------------------------
// Locate and set the VEGRegion for the right subtree.
// -------------------------------------------------------------
if (isFullOuterJoin())
normWARef.locateAndSetVEGRegion(this, 1 /* second child */);
else
normWARef.locateAndSetVEGRegion(this);
child(1) = child(1)->semanticQueryOptimizeNode(normWARef);
normWARef.restoreOriginalVEGRegion();
}
else
{
child(1) = child(1)->semanticQueryOptimizeNode(normWARef);
}
// In the bottom up phase of the SQO tree walk
// check if there are
// a) joins to be eliminated or
// b) semi joins to transform to inner joins or
// c) any subqueries to unnest
// a) Join Elimination
/*---------------------------------------------------------------------------------------*/
RelExpr* reducedExpr = eliminateRedundantJoin(normWARef);
if (reducedExpr != this)
return reducedExpr;
// b) SemiJoin Transformation
/*---------------------------------------------------------------------------------------*/
if (candidateForSemiJoinTransform()) // we have a semi join that could be transformed to
// an inner join + group by.
{
reducedExpr = transformSemiJoin(normWARef);
if (reducedExpr != this)
return reducedExpr;
}
// c) Subquery Unnesting
/*---------------------------------------------------------------------------------------*/
if (candidateForSubqueryUnnest())
{
// SQO phase is called in a loop for join elimination
// we do not want to attempt unnesting on the same node twice.
setCandidateForSubqueryUnnest(FALSE);
// Outer references are kept in a filter node, if there are no outer
// references then unnesting is not needed.
// For subqueries that are not correlated the Filter node will be absent
// as the method createAFilterParentIfNecessary() would not have created
// a Filter node at this point in the query tree. Therefore non-correlated
// subqueries will not enter this block.
// If comp_bool_221 is on we will unnest even if there is no filter node.
if ((CmpCommon::getDefault(COMP_BOOL_221) == DF_OFF) &&
((child(1)->getArity() != 1) ||
!(child(1)->castToRelExpr()->hasFilterChild())))
{
if (CmpCommon::getDefault(SUBQUERY_UNNESTING) == DF_DEBUG)
*CmpCommon::diags() << DgSqlCode(2997)
<< DgString1("Subquery was not unnested. Reason: No Correlation found");
return this ; // do nothing, no correlated subquery
}
// Increment the subquery id counter in SqoWA so that we can
// distinguish between things we change for this subquery versus another.
normWARef.getSqoWA()->incrementSubQId();
// Main body of subquery unnesting. The PullUpGroupBy transformation and
// the MoveUpGroupBy transformation are applied here.
// The PullUpGroupByTransformation
GroupByAgg* newGrby = pullUpGroupByTransformation(normWARef);
if (newGrby == NULL)
return this;
// If inner table is accessed on a leading column of any access path
// then do not unnest this subquery. We perform this check after
// the pullUpGroupByTransformation() since we want to cover
// the case of a Tree Query and we need to know if the newGrby requires
// a moveUpGroupBy transformation.
if(applyInnerKeyedAccessHeuristic((const GroupByAgg*)newGrby,normWARef))
return this;
MapValueIds* newMap = NULL ;
if (candidateForSubqueryLeftJoinConversion())
{
RelExpr* result = newGrby->nullPreservingTransformation(
(GroupByAgg*)child(1)->castToRelExpr(),
normWARef);
if (result == NULL)
{
normWARef.getSqoWA()->undoChanges(normWARef);
return this;
}
if (result->getOperatorType() == REL_MAP_VALUEIDS)
newMap = (MapValueIds*) result;
}
// Apply MoveUp GroupBy transformation. Relevant only for subqueries with
// two or more levels of nesting. If moveUpGroupBy is not needed
// movedUpGroupByTail will be set to newGrby.
RelExpr* gbChild = newGrby->child(0)->castToRelExpr();
Join * newJoin = NULL;
GroupByAgg * newJoinParent = newGrby;
if (gbChild->getOperatorType() == REL_GROUPBY)
{
newJoin = (Join*) gbChild->child(0)->castToRelExpr();
newJoinParent = (GroupByAgg*) gbChild;
}
else
newJoin = (Join*) gbChild;
GroupByAgg* movedUpGrbyTail =
newJoin->moveUpGroupByTransformation(newJoinParent, normWARef);
NABoolean hasNoErrors;
if (movedUpGrbyTail != NULL)
{
hasNoErrors = movedUpGrbyTail->subqueryUnnestFinalize(
newGrby->groupExpr(),
normWARef);
}
if ((movedUpGrbyTail == NULL) || (NOT hasNoErrors))
{
normWARef.getSqoWA()->undoChanges(normWARef);
child(1)->eliminateFilterChild();
return this ;
}
// this subquery level has been successfully unnested. Left linearize the
// join backbone. Comp_int_11 can be used to not left linearize as we
// go further up the tree. This is not advised as the analyzer expects
// the tree to be left linear in many situations. This control is kept
// as it provides the possibility to see some interesting plans.
if ((ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_11) < 0) ||
(newJoin->child(1)->getGroupAttr()->getNumJoinedTables() <=
ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_11)))
{
newJoin = newJoin->leftLinearizeJoinTree(normWARef,
UNNESTING); // Unnesting
movedUpGrbyTail->child(0) = newJoin ;
}
//synthesize logical props for the new nodes.
if (newMap == NULL)
{
newGrby->synthLogProp(&normWARef);
return newGrby ;
}
else
{
newMap->synthLogProp(&normWARef);
return newMap ;
}
}
else
{
// this subquery was not unnested, but we could have other transformations
// that would render the tree no longer left linearized
if ((ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_11) < 0) ||
(child(1)->getGroupAttr()->getNumJoinedTables() <=
ActiveSchemaDB()->getDefaults().getAsLong(COMP_INT_11)))
{
return leftLinearizeJoinTree(normWARef, SEMI_JOIN_TO_INNER_JOIN); //
}
}
/*---------------------------------------------------------------------------------------*/
return this;
} // Join::semanticQueryOptimizeNode()
NABoolean Join::prepareMeForCSESharing(
const ValueIdSet &outputsToAdd,
const ValueIdSet &predicatesToRemove,
const ValueIdSet &commonPredicatesToAdd,
const ValueIdSet &inputsToRemove,
ValueIdSet &valuesForVEGRewrite,
ValueIdSet &keyColumns,
CSEInfo *info)
{
if (isTSJForWrite() ||
isTSJForUndo() ||
isTSJForMerge() ||
getIsForTrafLoadPrep())
return FALSE;
// The caller of this methods added "commonPredicatesToAdd" to
// predicates_ (the generic selection predicates stored in the
// RelExpr). That works for both inner and non-inner joins. The
// only thing we have left to do is to recompute the equi-join
// predicates.
findEquiJoinPredicates();
return TRUE;
}
// ***********************************************************************
// $$$$ Union
// member functions for class Union
// ***********************************************************************
// -----------------------------------------------------------------------
// Union::transformNode()
// -----------------------------------------------------------------------
void Union::transformNode(NormWA & normWARef,
ExprGroupId & locationOfPointerToMe)
{
CMPASSERT( this == locationOfPointerToMe );
if (nodeIsTransformed())
return;
markAsTransformed();
//++Triggers -
if (getBlockedUnion())
normWARef.setInBlockedUnionCount();
// ---------------------------------------------------------------------
// Compartmentalize the VEGRegions between the left and the right
// child so that their VEGs get installed in different VEGRegions,
// where they rightfully belong. It prevents unenforceable "="
// relationships from being deduced transitively.
// The VEGRegion for each child of the union is only allowed to
// import outer references. It cannot "export" any "=" relationships.
// ---------------------------------------------------------------------
// Allocate a new VEGRegion within the scope of my own VEGRegion
// for my left child.
normWARef.allocateAndSetVEGRegion(IMPORT_ONLY,this,0);
// Make values available to left child
child(0)->getGroupAttr()->addCharacteristicInputs
(getGroupAttr()->getCharacteristicInputs());
// Transform the left child.
child(0)->transformNode(normWARef, child(0));
// Return to my own VEGRegion.
normWARef.restoreOriginalVEGRegion();
// Allocate another new VEGRegion within the scope of my own VEGRegion
// for my right child.
normWARef.allocateAndSetVEGRegion(IMPORT_ONLY,this,1);
// Make values available to right child
child(1)->getGroupAttr()->addCharacteristicInputs
(getGroupAttr()->getCharacteristicInputs());
// Transform the right child.
child(1)->transformNode(normWARef, child(1));
// Return to my own VEGRegion.
normWARef.restoreOriginalVEGRegion();
// No need to transform colMapExprList because the source and the target
// expressions will be transformed by their own operators.
// Pull up the predicates and recompute the required inputs
// of whoever my children are now.
pullUpPreds();
// transform the selection predicates
transformSelectPred(normWARef, locationOfPointerToMe);
// this Union will be removed during optimization. So send the outputs
// of its left child to the parent. That is what will happen when this
// node is later removed.
if ((getIsTemporary()) &&
(getGroupAttr()->getCharacteristicOutputs().entries() == 0))
{
GroupAttributes * childGAPtr = child(0).getPtr()->getGroupAttr();
getGroupAttr()->setCharacteristicOutputs(childGAPtr->getCharacteristicOutputs());
}
//++Triggers -
if (getBlockedUnion())
normWARef.restoreInBlockedUnionCount();
} // Union::transformNode()
// -----------------------------------------------------------------------
// Union::pullUpPreds()
// -----------------------------------------------------------------------
void Union::pullUpPreds()
{
// For a predicate to be pulled up from the children it has to
// be part of both children. The only predicates we can detect
// as being part of both children are those that only use correlated
// references and are identical (i.e. use the same value id's
// for expressions under the tree).
//
// Other predicates will require a more sophisticated pattern matching.
//
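// For example (hypothetical), a correlated predicate such as
// T.x = <outer value> that appears with the identical ValueId in the
// selection predicates of both children is pulled up into the Union's
// own selection predicate and removed from both children.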
ValueIdSet commonPredicates(child(0)->getSelectionPred());
commonPredicates.intersectSet(child(1)->getSelectionPred());
selectionPred() += commonPredicates;
child(0)->selectionPred() -= commonPredicates;
child(1)->selectionPred() -= commonPredicates;
child(0)->recomputeOuterReferences();
child(1)->recomputeOuterReferences();
} // Union::pullUpPreds()
// -----------------------------------------------------------------------
// Union::recomputeOuterReferences()
// -----------------------------------------------------------------------
void Union::recomputeOuterReferences()
{
// ---------------------------------------------------------------------
// Delete all those input values that are no longer referenced on
// this operator because the predicates that reference them have
// been pulled up.
// ---------------------------------------------------------------------
if (NOT getGroupAttr()->getCharacteristicInputs().isEmpty())
{
ValueIdSet outerRefs = getGroupAttr()->getCharacteristicInputs();
// Weed out those expressions not needed by my selection predicates
// and by my left and right children as input values.
ValueIdSet exprSet = getSelectionPred();
exprSet += child(0)->getGroupAttr()->getCharacteristicInputs();
exprSet += child(1)->getGroupAttr()->getCharacteristicInputs();
// Add conditional expression for conditional union.
exprSet.insertList(condExpr());
exprSet.insertList(alternateRightChildOrderExpr()); //++MV
// Add the output expressions of each child that are cached in the
// UnionMap. If a child references an external input value in its
// output, i.e., the select list of the SELECT, but that external
// input is not referenced elsewhere in the query, then no record
// of such a reference exists but in the UnionMap. This is so
// because the RelRoot for the subselect under the Union, which
// contained the select list is eliminated by transformNode().
// When the RelRoot is eliminated, its characteristic inputs
// are added to its children. However, Union::pullUpPreds()
// calls recomputeOuterReferences on each child. The latter call
// wipes out all such external input values that are not referenced
// elsewhere in the query. In order to ensure that such external
// input values flow down to the Union, add them to exprSet.
exprSet.insertList(getLeftMap().getBottomValues());
exprSet.insertList(getRightMap().getBottomValues());
exprSet.weedOutUnreferenced(outerRefs);
getGroupAttr()->setCharacteristicInputs(outerRefs);
}
} // Union::recomputeOuterReferences()
// -----------------------------------------------------------------------
// UnionMap::normalizeSpecificChild()
// -----------------------------------------------------------------------
void UnionMap::normalizeSpecificChild(NormWA & normWARef, Lng32 childIndex)
{
// Normalize the maps constructed for the union, replacing
// valueIds with VegRef's where appropriate.
ValueIdUnion * viduPtr;
for (CollIndex index = 0; index < colMapTable_.entries(); index++)
{
viduPtr = ((ValueIdUnion *)(colMapTable_[index].getItemExpr()));
CMPASSERT(viduPtr->getOperatorType() == ITM_VALUEIDUNION);
viduPtr->normalizeSpecificChild(normWARef, childIndex);
}
switch (childIndex)
{
case 0:
leftColMap_.normalizeNode(normWARef);
break;
case 1:
rightColMap_.normalizeNode(normWARef);
break;
default:
CMPASSERT(childIndex < 2);
break;
}
} // UnionMap::normalizeSpecificChild()
// -----------------------------------------------------------------------
// Union::rewriteNode()
// -----------------------------------------------------------------------
void Union::rewriteNode(NormWA & normWARef)
{
// Locate the VEGRegion that I had allocated for my left child.
normWARef.locateAndSetVEGRegion(this,0);
// Normalize expressions contributed by child(0)
child(0)->rewriteNode(normWARef);
// Normalize expressions contributed by child(0)
getUnionMap()->normalizeSpecificChild(normWARef, 0);
normWARef.restoreOriginalVEGRegion();
// Locate the VEGRegion that I had allocated for my right child.
normWARef.locateAndSetVEGRegion(this,1);
// Normalize expressions contributed by child(1)
child(1)->rewriteNode(normWARef);
// Normalize expressions contributed by child(1)
getUnionMap()->normalizeSpecificChild(normWARef, 1);
// ++MV
// The alternate right child order expression should be normalized in
// the region of the right child
if (alternateRightChildOrderExpr().normalizeNode(normWARef))
{
}
// --MV
normWARef.restoreOriginalVEGRegion();
// Normalize the predicates.
if (selectionPred().normalizeNode(normWARef))
{
}
if (condExpr().normalizeNode(normWARef))
{
}
// for embedded statements, when a blocked union is introduced by triggers
// that will later be removed, use the VEG region of the left child before
// normalizing outputs.
if (getIsTemporary())
normWARef.locateAndSetVEGRegion(this,0);
// Rewrite my own Group Attributes
getGroupAttr()->normalizeInputsAndOutputs(normWARef);
} // Union::rewriteNode()
// -----------------------------------------------------------------------
// Union::normalizeNode()
// -----------------------------------------------------------------------
RelExpr * Union::normalizeNode(NormWA & normWARef)
{
if (nodeIsNormalized())
return this;
markAsNormalized();
// ---------------------------------------------------------------------
// Check which expressions can be evaluated by the children of the union.
// Modify the Group Attributes of those children who inherit some of
// these expressions.
// ---------------------------------------------------------------------
pushdownCoveredExpr(getGroupAttr()->getCharacteristicOutputs(),
getGroupAttr()->getCharacteristicInputs(),
selectionPred()
);
// ---------------------------------------------------------------------
// Normalize the left and right subtrees. Store pointers to the
// roots of the subtrees after normalization.
// ---------------------------------------------------------------------
// Locate the VEGRegion that I had allocated for my left child.
normWARef.locateAndSetVEGRegion(this, 0);
child(0) = child(0)->normalizeNode(normWARef);
normWARef.restoreOriginalVEGRegion();
// Locate the VEGRegion that I had allocated for my right child.
normWARef.locateAndSetVEGRegion(this, 1);
child(1) = child(1)->normalizeNode(normWARef);
normWARef.restoreOriginalVEGRegion();
fixEssentialCharacteristicOutputs();
normWARef.setExtraHubVertex(this);
return this; // return a -> to self
} // Union::normalizeNode()
// -----------------------------------------------------------------------
// Union::semanticQueryOptimizeNode()
// This instance of the SQO virtual method is the same as the base class
// implementation except that it also keeps track of which
// VEGRegion we are currently in.
// -----------------------------------------------------------------------
RelExpr * Union::semanticQueryOptimizeNode(NormWA & normWARef)
{
// ---------------------------------------------------------------------
// SemanticQueryOptimize the left and right subtrees. Store pointers to
// the roots of the subtrees after SQO.
// ---------------------------------------------------------------------
// Locate the VEGRegion that I had allocated for my left child.
normWARef.locateAndSetVEGRegion(this, 0);
child(0) = child(0)->semanticQueryOptimizeNode(normWARef);
normWARef.restoreOriginalVEGRegion();
// Locate the VEGRegion that I had allocated for my right child.
normWARef.locateAndSetVEGRegion(this, 1);
child(1) = child(1)->semanticQueryOptimizeNode(normWARef);
normWARef.restoreOriginalVEGRegion();
return this;
} // Union::semanticQueryOptimizeNode()
NABoolean Union::prepareTreeForCSESharing(
const ValueIdSet &outputsToAdd,
const ValueIdSet &predicatesToRemove,
const ValueIdSet &commonPredicatesToAdd,
const ValueIdSet &inputsToRemove,
ValueIdSet &valuesForVEGRewrite,
ValueIdSet &keyColumns,
CSEInfo *info)
{
NABoolean result = TRUE;
// we only support UNION nodes without local predicates, which
// should be all cases, since there should not be any predicates on
// a UNION
if (getSelectionPred().entries() > 0)
{
info->getConsumer(0)->emitCSEDiagnostics(
"Selection predicates on union node not supported");
return FALSE;
}
// recursively call this for the children
for (CollIndex i=0; i<2 && result; i++)
{
ValueIdSet locOutputsToAdd(outputsToAdd);
ValueIdSet childOutputsToAdd;
ValueIdSet childPredsToRemove;
ValueIdSet childPredsToAdd;
ValueIdMap *map = (i==0 ? &getLeftMap() : &getRightMap());
ValueIdSet availableValues(map->getTopValues());
ValueIdSet dummyValuesForVEGRewrite;
ValueIdSet mappedKeyColumns;
ValueIdSet childKeyColumns;
// if there are outputs to add, we can only do that for
// outputs that already exist in the ValueIdMap
availableValues += getGroupAttr()->getCharacteristicInputs();
if (locOutputsToAdd.removeUnCoveredExprs(availableValues))
{
info->getConsumer(0)->emitCSEDiagnostics(
"Not able to add output values unknown to union operator");
result = FALSE;
}
map->rewriteValueIdSetDown(outputsToAdd, childOutputsToAdd);
map->rewriteValueIdSetDown(predicatesToRemove, childPredsToRemove);
map->rewriteValueIdSetDown(commonPredicatesToAdd, childPredsToAdd);
result = child(i)->prepareTreeForCSESharing(
childOutputsToAdd,
childPredsToRemove,
childPredsToAdd,
inputsToRemove,
dummyValuesForVEGRewrite,
childKeyColumns,
info);
map->mapValueIdSetUp(mappedKeyColumns, childKeyColumns);
// include only those that actually got mapped
mappedKeyColumns -= childKeyColumns;
keyColumns += mappedKeyColumns;
}
if (result)
{
NABoolean dummy;
CollIndex nu = unionMap_->leftColMap_.getBottomValues().entries();
getGroupAttr()->addCharacteristicOutputs(outputsToAdd);
getGroupAttr()->removeCharacteristicInputs(inputsToRemove);
// add columns that are a constant in at least one of the
// UNION's children to the key columns. Such columns can be used
// to eliminate entire legs of the union and therefore act like
// key or partition key columns.
for (CollIndex u=0; u<nu; u++)
{
if (unionMap_->leftColMap_.getBottomValues()[u].getItemExpr()->
castToConstValue(dummy) ||
unionMap_->rightColMap_.getBottomValues()[u].getItemExpr()->
castToConstValue(dummy))
keyColumns += unionMap_->colMapTable_[u];
}
}
// there is no need to call prepareMeForCSESharing() here
return result;
}
// ***********************************************************************
// $$$$ GroupByAgg
// member functions for class GroupByAgg
// ***********************************************************************
void GroupByAgg::transformNode(NormWA & normWARef,
ExprGroupId &locationOfPointerToMe)
{
CMPASSERT( this == locationOfPointerToMe );
if (nodeIsTransformed())
return;
markAsTransformed();
NABoolean needsNewVEGRegion = FALSE;
// ---------------------------------------------------------------------
// Each scalar aggregate allocates a VEGRegion for "=" predicates that
// do not reject null values.
// It can only import a VEG from another VEGRegion in which an "outer
// reference" is involved.
// select empname
// from employee
// where not exists
// (select branchnum
// from branch
// where ( employee.branchnum = branch.branchnum)
// and (branchnum = 1)
// group by branchnum
// );
// It is legal to deduce that employee.branchnum = 1 within the subquery
// but not in the main query.
// ---------------------------------------------------------------------
if (groupExpr().isEmpty())
{
// -----------------------------------------------------------------
// Fix to "BR0198" (Genesis 10-000303-8476).
// If there's no grouping expression and no aggregation expression,
// then aggregate over a constant, i.e.,
// make one single group (zero or one "row") of the entire table.
// See Ansi 7.8 SR 1 + GR 1 (HAVING clause).
// See /regress/fullstack/test002 cases.
// By adding a constant to the grouping expression we are treating
// this as a nonScalar grby.
// -----------------------------------------------------------------
if (aggregateExpr().isEmpty())
{
ItemExpr *tf = new(normWARef.wHeap()) ConstValue(0);
tf->synthTypeAndValueId(TRUE);
groupExpr() += tf->getValueId();
}
else if (NOT containsNullRejectingPredicates())
{
needsNewVEGRegion = TRUE;
normWARef.allocateAndSetVEGRegion(IMPORT_ONLY,this);
}
}
// ---------------------------------------------------------------------
// Transform child. Pull up its transformed predicates
// recompute their required inputs.
// ---------------------------------------------------------------------
child(0)->transformNode(normWARef, child(0));
// My child has now been transformed.
// A new semiJoin may now be my direct descendant and my original
// child a descendant of it.
// In either case my child has now been transformed.
// ---------------------------------------------------------------------
// A Group By clause can only contain column references.
// An aggregate function cannot contain a subquery according to SQL2.
// However, the group by list and aggregate functions could be columns
// from a derived table and may therefore contain subselects and all
// sorts of nasty things. So we allow anything here.
//
// Subqueries in the group by list and aggregate functions should
// introduce semijoins below the groupby and subqueries in the
// having clause above the groupby
//
// Order of work should be
// process group by
// process aggregate expressions
// pull up predicates
// process having clause
// ---------------------------------------------------------------------
if (groupExpr().transformNode(normWARef, child(0),
getGroupAttr()->getCharacteristicInputs(),
FALSE /* Move predicates */ ) )
{
// The group by list apparently had some subqueries that had not been
// processed before (scratch, scratch..). Normalize the new
// tree that has become our child.
child(0)->transformNode(normWARef, child(0));
}
if (aggregateExpr().transformNode(normWARef, child(0),
getGroupAttr()->getCharacteristicInputs(),
FALSE /* Move predicates */ ) )
{
// The aggregate was on a subquery that had not been
// processed before (scratch, scratch..). Normalize the new
// tree that has become our child.
child(0)->transformNode(normWARef, child(0));
}
// Pull up the predicates into my having clause and recompute the
// required inputs of whoever my children are now.
pullUpPreds();
if (needsNewVEGRegion)
{
// Restore the original VEGRegion.
normWARef.restoreOriginalVEGRegion();
}
// transform the selection predicates
normWARef.setInHavingClause(TRUE) ;
transformSelectPred(normWARef, locationOfPointerToMe);
normWARef.setInHavingClause(FALSE) ;
} // GroupByAgg::transformNode()
// -----------------------------------------------------------------------
// GroupByAgg::pullUpPreds()
// -----------------------------------------------------------------------
void GroupByAgg::pullUpPreds()
{
// ---------------------------------------------------------------------
// Pull up predicates from the child.
// move them to my having clause
// ---------------------------------------------------------------------
// Make inputs available to child
child(0)->getGroupAttr()->addCharacteristicInputs(getGroupAttr()->getCharacteristicInputs());
// Part of the rules for this virtual method is that recomputeOuterReferences()
// should be called on the child even if no predicates are pulled up
// from it.
child(0)->recomputeOuterReferences();
// If this is a scalar groupby that can produce NULL values then predicates
// cannot be moved up.
if (groupExpr().isEmpty() && NOT containsNullRejectingPredicates())
return;
if (child(0)->getSelectionPred().isEmpty())
return;
// Only predicates that reference group by columns or
// other input values can be pulled up.
// We are going to prime group attributes ahead of time here so that
// we can call coverTest() from here.
ValueIdSet saveExternalInputs = getGroupAttr()->getCharacteristicInputs();
primeGroupAttributes();
ValueIdSet predicatesToPullUp, boringSet, predicatesThatStay;
getGroupAttr()->coverTest(child(0)->selectionPred(),
saveExternalInputs, // Like passing empty
predicatesToPullUp,
boringSet,
&predicatesThatStay);
if (NOT predicatesToPullUp.isEmpty())
{
selectionPred() += predicatesToPullUp;
child(0)->selectionPred() -= predicatesToPullUp;
child(0)->recomputeOuterReferences();
}
getGroupAttr()->setCharacteristicInputs(saveExternalInputs);
} // GroupByAgg::pullUpPreds()
// -----------------------------------------------------------------------
// GroupByAgg::recomputeOuterReferences()
// -----------------------------------------------------------------------
void GroupByAgg::recomputeOuterReferences()
{
// ---------------------------------------------------------------------
// Delete all those input values that are no longer referenced on
// this operator because the predicates that reference them have
// been pulled up.
// ---------------------------------------------------------------------
ValueIdSet outerRefs = getGroupAttr()->getCharacteristicInputs();
ValueIdSet allMyExpr(getSelectionPred());
allMyExpr += groupExpr();
allMyExpr += aggregateExpr();
allMyExpr.weedOutUnreferenced(outerRefs);
outerRefs += child(0).getPtr()->getGroupAttr()->getCharacteristicInputs();
getGroupAttr()->setCharacteristicInputs(outerRefs);
} // GroupByAgg::recomputeOuterReferences()
// -----------------------------------------------------------------------
// GroupbyAgg::rewriteNode()
// -----------------------------------------------------------------------
void GroupByAgg::rewriteNode(NormWA & normWARef)
{
// ---------------------------------------------------------------------
// Each scalar aggregate allocates a VEGRegion for "=" predicates that
// do not reject null values.
// It can only import a VEG from another VEGRegion in which an "outer
// reference" is involved.
// select empname
// from employee
// where not exists
// (select branchnum
// from branch
// where ( employee.branchnum = branch.branchnum)
// and (branchnum = 1)
// group by branchnum
// );
// It is legal to deduce that employee.branchnum = 1 within the subquery
// but not in the main query.
// ---------------------------------------------------------------------
NABoolean needsNewVEGRegion = FALSE;
if (groupExpr().isEmpty() && (NOT containsNullRejectingPredicates()))
{
needsNewVEGRegion = TRUE;
normWARef.locateAndSetVEGRegion(this);
}
// ---------------------------------------------------------------------
// Rewrite the expressions of the child.
// ---------------------------------------------------------------------
child(0)->rewriteNode(normWARef);
// ---------------------------------------------------------------------
// Rewrite the expressions that are grouping expressions
// ---------------------------------------------------------------------
if (groupExpr().normalizeNode(normWARef))
{
}
// ---------------------------------------------------------------------
// Rewrite the expressions that are rollup grouping expressions
// ---------------------------------------------------------------------
if (rollupGroupExprList().normalizeNode(normWARef))
{
}
normalizeExtraOrderExpr(normWARef);
// ---------------------------------------------------------------------
// Rewrite the expressions that are aggregate expressions
// ---------------------------------------------------------------------
if (aggregateExpr().normalizeNode(normWARef))
{
}
// 10-050616-8826 -BEGIN
// If transformation has not happened then it is possible that
// the "TYPE" of the ItemExpr can change, for example when
// we transform outer joins to inner joins.
if(NOT aggregateExpr().isEmpty())
{
ValueIdSet postExpr = aggregateExpr();
for(ValueId exprId = postExpr.init(); postExpr.next(exprId); postExpr.advance(exprId))
{
const NAType &type1 = exprId.getType();
const NAType &type2 = exprId.getItemExpr()->child(0).getValueId().getType();
if( NOT(type1 == type2) )
{
exprId.getItemExpr()->synthTypeAndValueId(TRUE);
}
}
}
// 10-050616-8826 -END
// ---------------------------------------------------------------------
// If we're enforcing an ITM_ONE_ROW on (x,y), then we can produce not
// merely the ITM_ONE_ROW, but also x and y, so add them to our outputs.
// For example, if the aggregate is, say,
// ITM_ONE_ROW(VEGRef_10(T.A,ixT.A), VEGRef_15(T.B,ixT.B))
// { example query: select * from S where (select A,B from T) < (100,200) }
// then add value ids 10 and 11 to our characteristic outputs.
// ---------------------------------------------------------------------
ValueIdSet moreOutputs;
getPotentialOutputValues(moreOutputs);
getGroupAttr()->addCharacteristicOutputs(moreOutputs);
// ---------------------------------------------------------------------
// Restore the VEGRegion of my parent.
// ---------------------------------------------------------------------
if (needsNewVEGRegion)
normWARef.restoreOriginalVEGRegion();
// ---------------------------------------------------------------------
// Rewrite the expressions in the HAVING clause predicate.
// ---------------------------------------------------------------------
if (selectionPred().normalizeNode(normWARef))
{
}
// ---------------------------------------------------------------------
// Rewrite my own Group Attributes
// ---------------------------------------------------------------------
getGroupAttr()->normalizeInputsAndOutputs(normWARef);
} // GroupbyAgg::rewriteNode()
// -----------------------------------------------------------------------
// GroupbyAgg::normalizeNode()
// -----------------------------------------------------------------------
RelExpr * GroupByAgg::normalizeNode(NormWA & normWARef)
{
if (nodeIsNormalized())
return this;
markAsNormalized();
// ---------------------------------------------------------------------
// Each scalar aggregate allocates a VEGRegion for "=" predicates that
// do not reject null values.
// It can only import a VEG from another VEGRegion in which an "outer
// reference" is involved.
// select empname
// from employee
// where not exists
// (select branchnum
// from branch
// where ( employee.branchnum = branch.branchnum)
// and (branchnum = 1)
// group by branchnum
// );
// It is legal to deduce that employee.branchnum = 1 within the subquery
// but not in the main query.
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// If one of my HAVING preds is a truth-test that always evaluates to TRUE,
// remove it; in particular, remove IS_NOT_UNKNOWN(IS_NOT_NULL(myAggrExpr))
// (doubtless created by dissectOutSubqueries in NormItemExpr.cpp)
// as redundant, the aggregation already being enforced by this GroupByAgg.
// ---------------------------------------------------------------------
DBGSETDBG( "TRANSFORM_DEBUG" )
DBGIF(
unp = "";
unp += "sel:";
selectionPred().unparse(unp);
unp += "\nagg:";
aggregateExpr().unparse(unp);
)
ItemExpr *bottomOfTest;
ValueIdSet &agg = aggregateExpr();
ValueIdSet &sel = selectionPred();
for (ValueId svid = sel.init(); sel.next(svid); sel.advance(svid))
{
bottomOfTest = UnLogicMayBeAnEliminableTruthTest(svid.getItemExpr(),TRUE);
if (bottomOfTest)
if (bottomOfTest->isAnAggregate())
for (ValueId avid = agg.init(); agg.next(avid); agg.advance(avid))
if (bottomOfTest == avid.getItemExpr())
{
DBGIF(
cerr << unp << endl;
cerr << "Eliminating aggr "<< svid << endl;
)
sel.subtractElement(svid); // svid, not avid!
}
else
{
DBGIF(
cerr << unp << endl;
cerr << "Eliminating having-pred " << svid << endl;
)
sel.subtractElement(svid);
}
}
// ---------------------------------------------------------------------
// Check which expressions can be evaluated by my child.
// Modify the Group Attributes of those children who inherit some of
// these expressions.
// Check if any of the HAVING clause predicates can be pushed down
// (only when a Group By list is given).
// ---------------------------------------------------------------------
// if this is a rollup groupby, then do not push down the having pred to the
// child node. If pushdown is done, then it might incorrectly process rows that
// are generated during rollup groupby processing.
// For ex:
// insert into t values (1);
// select a from t group by rollup(a) having a is not null;
// If the 'having' pred is pushed down to the scan node as a where pred,
// then SortGroupBy will return all rollup groups generated
// and represented as null. They will not be filtered out, which
// they would be if the having pred is applied after rollup group materialization.
// Maybe later we can optimize so this pushdown is done where possible,
// for ex, if there are no 'is null/not null' having preds on grouping cols.
if (NOT isRollup())
{
pushdownCoveredExpr(getGroupAttr()->getCharacteristicOutputs(),
getGroupAttr()->getCharacteristicInputs(),
selectionPred()
);
}
NABoolean needsNewVEGRegion = FALSE;
if (groupExpr().isEmpty() && (NOT containsNullRejectingPredicates()))
{
needsNewVEGRegion = TRUE;
normWARef.locateAndSetVEGRegion(this);
}
// ---------------------------------------------------------------------
// Normalize the child.
// ---------------------------------------------------------------------
child(0) = child(0)->normalizeNode(normWARef);
if (needsNewVEGRegion)
normWARef.restoreOriginalVEGRegion();
fixEssentialCharacteristicOutputs();
if (CmpCommon::getDefault(CASCADED_GROUPBY_TRANSFORMATION) != DF_OFF)
{
checkForCascadedGroupBy(normWARef);
}
return this; // return a -> to self
} // GroupbyAgg::normalizeNode()
// -----------------------------------------------------------------------
// GroupByAgg::semanticQueryOptimizeNode()
// This instance of the SQO virtual method is the same as the base class
// implementation except that it also keeps track of which
// VEGRegion we are currently in.
// -----------------------------------------------------------------------
RelExpr * GroupByAgg::semanticQueryOptimizeNode(NormWA & normWARef)
{
NABoolean needsNewVEGRegion = FALSE;
if (groupExpr().isEmpty() && (NOT containsNullRejectingPredicates()))
{
needsNewVEGRegion = TRUE;
normWARef.locateAndSetVEGRegion(this);
}
// ---------------------------------------------------------------------
// UnNest the child.
// ---------------------------------------------------------------------
child(0) = child(0)->semanticQueryOptimizeNode(normWARef);
if (needsNewVEGRegion)
normWARef.restoreOriginalVEGRegion();
eliminateCascadedGroupBy(normWARef);
return this;
} // GroupByAgg::semanticQueryOptimizeNode()
// This method checks if we can merge multiple group by nodes that are next
// to each other into a single group by, and then marks the group by node
// that can be eliminated so it can be removed during the SQO phase.
// Following are the conditions under which a bottom GB node can be eliminated:
// 1) The grouping columns of the top group by node are a subset of the
//    grouping columns of the bottom group by node.
// 2) All the aggregate expressions of the top group by can be rewritten to
//    use the bottom values in such a way that the output does not change.
// For now this method handles the following aggregate expressions
// to be rolled up.
// SUM(SUM(a))   => SUM(a)
// SUM(COUNT(a)) => COUNT(a)
// SUM(COUNT(*)) => COUNT(*)
// MIN(MIN(a))   => MIN(a)
// MAX(MAX(a))   => MAX(a)
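// Illustrative example (not from the original source): for a query such as
//   select b, sum(s) from (select b, sum(a) as s from t group by a, b) x group by b;
// the grouping columns {b} of the top group by are a subset of {a, b} of the
// bottom group by, and SUM(SUM(a)) rolls up to SUM(a), so the bottom group by
// can be marked for elimination and the two nodes merged into one.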
void GroupByAgg::checkForCascadedGroupBy(NormWA & normWARef)
{
if (child(0)->getOperatorType() == REL_GROUPBY)
{
GroupByAgg *childGB = (GroupByAgg*)(child(0)->castToRelExpr());
if ( childGB->groupExpr().contains(groupExpr()) &&
childGB->selectionPred().isEmpty() )
{
NABoolean allExprsCanBeRolledup = TRUE;
for (ValueId x = aggregateExpr().init();
aggregateExpr().next(x) &&
allExprsCanBeRolledup;
aggregateExpr().advance(x))
{
CMPASSERT(x.getItemExpr()->isAnAggregate());
Aggregate *aggrExpr = (Aggregate *) x.getItemExpr();
if (!aggrExpr->isDistinct() &&
aggrExpr->child(0)->isAnAggregate())
{
Aggregate *childAggrExpr = (Aggregate *) aggrExpr->child(0)->castToItemExpr();
if (!childAggrExpr->isDistinct())
{
switch (aggrExpr->getOperatorType())
{
case ITM_SUM:
if (aggrExpr->child(0)->getOperatorType() != ITM_SUM &&
aggrExpr->child(0)->getOperatorType() != ITM_COUNT)
allExprsCanBeRolledup = FALSE;
break;
case ITM_MIN:
if (aggrExpr->child(0)->getOperatorType() != ITM_MIN)
allExprsCanBeRolledup = FALSE;
break;
case ITM_MAX:
if (aggrExpr->child(0)->getOperatorType() != ITM_MAX)
allExprsCanBeRolledup = FALSE;
break;
case ITM_COUNT_NONULL:
if (!normWARef.compilingMVDescriptor())
{
allExprsCanBeRolledup = FALSE;
}
else
aggrExprsToBeDeleted() += x;
break;
default:
allExprsCanBeRolledup = FALSE;
break;
}
}
else
allExprsCanBeRolledup = FALSE;
}
else
{
if (normWARef.compilingMVDescriptor() &&
(aggrExpr->getOperatorType() == ITM_COUNT &&
aggrExpr->child(0)->getOperatorType() == ITM_CONSTANT))
aggrExprsToBeDeleted() += x;
else
allExprsCanBeRolledup = FALSE;
}
}
if (allExprsCanBeRolledup)
{
childGB->setIsMarkedForElimination(TRUE);
normWARef.setContainsGroupBysToBeEliminated(TRUE);
}
else
aggrExprsToBeDeleted().clear();
}
}
}
void GroupByAgg::eliminateCascadedGroupBy(NormWA & normWARef)
{
if (child(0)->getOperatorType() == REL_GROUPBY)
{
GroupByAgg *childGB = (GroupByAgg*)(child(0)->castToRelExpr());
short value = 1;
if (childGB->isMarkedForElimination())
{
for (ValueId y = aggrExprsToBeDeleted().init();
aggrExprsToBeDeleted().next(y);
aggrExprsToBeDeleted().advance(y))
{
ItemExpr *constValue = new (CmpCommon::statementHeap())
SystemLiteral(&(y.getType()), &value, sizeof(short));
y.replaceItemExpr(constValue);
constValue->synthTypeAndValueId();
}
aggregateExpr() -= aggrExprsToBeDeleted();
aggrExprsToBeDeleted().clear();
for (ValueId x = aggregateExpr().init();
aggregateExpr().next(x);
aggregateExpr().advance(x))
{
CMPASSERT(x.getItemExpr()->isAnAggregate());
Aggregate *aggrExpr = (Aggregate *) x.getItemExpr();
CMPASSERT(aggrExpr->child(0)->isAnAggregate())
if (aggrExpr->getOperatorType() == ITM_SUM &&
aggrExpr->child(0)->getOperatorType() == ITM_COUNT)
{
aggrExpr->setOperatorType(ITM_COUNT);
// Need to update the type as well
const NAType &origSumType = x.getType();
const NAType &origCountType = aggrExpr->child(0)->getValueId().getType();
// here we change the type of the old SUM(), now the new COUNT(), to that
// of the original count. This is to prevent a numeric overflow error.
// See solution: 10-100514-0329.
x.changeType(&origCountType);
// Ideally we should put in a cast node to cast the new count
// type back to the original sum type to maintain the properties
// of the original valueId, but since groupBys only output
// valueIds from the aggregateExpr or groupExpr we can't do this
// here. Cast is not an aggregate function, so it cannot go in the
// aggregate expression, and if we group by it we will change
// the meaning of the groupby.
// So for now we assume we will be ok since Numeric(19) and largeInt
// are roughly equivalent.
// ItemExpr * castNode =
// new(newNormWA.wHeap()) Cast((x.getItemExpr(),
// &(origSumType));
}
aggrExpr->child(0) = aggrExpr->child(0)->child(0);
}
child(0) = child(0)->child(0);
}
}
}
NABoolean GroupByAgg::prepareMeForCSESharing(
const ValueIdSet &outputsToAdd,
const ValueIdSet &predicatesToRemove,
const ValueIdSet &commonPredicatesToAdd,
const ValueIdSet &inputsToRemove,
ValueIdSet &valuesForVEGRewrite,
ValueIdSet &keyColumns,
CSEInfo *info)
{
// The caller of this method took care of most adjustments to
// make. The main thing the groupby node needs to do is to add any
// outputs that are required to its characteristic outputs.
ValueIdSet myAvailableValues(groupExpr_);
ValueIdSet referencedValues;
ValueIdSet myOutputsToAdd;
ValueIdSet unCoveredExpr;
myAvailableValues += aggregateExpr_;
valuesForVEGRewrite += aggregateExpr_;
// The caller may be asking for expressions on columns, maybe
// even an expression involving grouping columns and aggregates
// and multiple tables, therefore use the isCovered method to
// determine those subexpressions that we can produce here.
NABoolean allCovered =
outputsToAdd.isCovered(myAvailableValues,
*(getGroupAttr()),
referencedValues,
myOutputsToAdd,
unCoveredExpr);
if (allCovered)
myOutputsToAdd = outputsToAdd;
getGroupAttr()->addCharacteristicOutputs(myOutputsToAdd);
return TRUE;
}
// ***********************************************************************
// $$$$ Scan
// member functions for class Scan
// ***********************************************************************
// -----------------------------------------------------------------------
// Scan::transformNode()
// -----------------------------------------------------------------------
void Scan::transformNode(NormWA & normWARef,
ExprGroupId & locationOfPointerToMe)
{
CMPASSERT( this == locationOfPointerToMe );
if (nodeIsTransformed())
return;
markAsTransformed();
// ---------------------------------------------------------------------
// Transform the entire column list of the base table to pick up
// equivalences of base table columns and index columns
// ---------------------------------------------------------------------
const ValueIdList &allCols = getTableDesc()->getColumnList();
ItemExpr *oldPtr;
ExprValueId newPtr;
for (CollIndex i = 0; i < allCols.entries(); i++)
{
oldPtr = allCols[i].getItemExpr();
newPtr = oldPtr;
oldPtr->transformNode(normWARef, newPtr, locationOfPointerToMe,
getGroupAttr()->getCharacteristicInputs());
// the column list shouldn't be changed by the transformation
CMPASSERT(oldPtr == newPtr.getPtr());
// ---------------------------------------------------------------------
// Create a VEG with all equivalent index columns and equivalent columns
// ---------------------------------------------------------------------
if (oldPtr->getOperatorType() == ITM_BASECOLUMN)
{
const ValueIdSet &eic = ((BaseColumn *)oldPtr)->getEIC();
for (ValueId eqVid = eic.init(); eic.next(eqVid); eic.advance(eqVid))
{
normWARef.addVEG(((BaseColumn *)oldPtr)->getValueId(),eqVid);
}
// check if this is a clustering key column
NABoolean isClusteringKeyColumn = FALSE;
ValueIdList ckColumns = getTableDesc()->getClusteringIndex()
->getIndexKey();
for (CollIndex j=0; j < ckColumns.entries(); j++)
{
if (allCols[i].getNAColumn()->getPosition() ==
ckColumns[j].getNAColumn()->getPosition())
{
isClusteringKeyColumn = TRUE;
break;
}
}
// If it is a nullable clustering key column and there are indexes
// then set the special nulls flag to TRUE so that during an index
// join the equality predicate between the clustering key
// of the base and the index does return NULL equals NULL
// as TRUE and so finds the base table row in the index table.
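// Illustrative example (not from the original source): if nullable clustering
// key column c is NULL in some base row, the index-join predicate
// base.c = index.c would normally evaluate to unknown for that row and the
// base row would never be found via the index; with special nulls,
// NULL = NULL evaluates to TRUE and the lookup succeeds.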
if ( isClusteringKeyColumn &&
allCols[i].getType().supportsSQLnull() &&
eic.entries() > 0 )
{
ItemExpr * vegrefPtr = normWARef.getVEGReference(allCols[i]);
if (vegrefPtr)
((VEGReference *)vegrefPtr)->getVEG()->setSpecialNulls(TRUE);
}
}
else
CMPASSERT(oldPtr->getOperatorType() == ITM_BASECOLUMN);
}
// transform the selection predicates
transformSelectPred(normWARef, locationOfPointerToMe);
} // Scan::transformNode()
// -----------------------------------------------------------------------
// Scan::rewriteNode()
// -----------------------------------------------------------------------
void Scan::rewriteNode(NormWA & normWARef)
{
const ValueIdList &allCols = getTableDesc()->getColumnList();
ItemExpr *newPtr = NULL;
// ---------------------------------------------------------------------
// walk through all the columns of the table, normalizing them
// and adding the result into the ColumnVEGList of the table descriptor
// ---------------------------------------------------------------------
CollIndex i = 0;
for (i = 0; i < allCols.entries(); i++)
{
// ---------------------------------------------------------------------
// Create a VEG with all equivalent index columns
// ---------------------------------------------------------------------
newPtr = allCols[i].getItemExpr()->normalizeNode(normWARef);
getTableDesc()->addToColumnVEGList(newPtr->getValueId());
}
// -------------------------------------------------------------------------
// Normalize the indexes.
// -------------------------------------------------------------------------
for (i = 0;
i < (Int32)getTableDesc()->getIndexes().entries();
i++)
{
IndexDesc *idesc = getTableDesc()->getIndexes()[i];
ValueIdList indexOrder(idesc->getOrderOfKeyValues());
// ---------------------------------------------------------------------
// Normalize the asc/desc order of the index.
// ---------------------------------------------------------------------
indexOrder.normalizeNode(normWARef);
idesc->setOrderOfKeyValues(indexOrder);
// ---------------------------------------------------------------------
// Normalize the partitioning keys in the partitioning function.
// ---------------------------------------------------------------------
if (idesc->isPartitioned())
idesc->getPartitioningFunction()->normalizePartitioningKeys(normWARef);
}
// -------------------------------------------------------------------------
// Normalize the Vertical Partitions.
// -------------------------------------------------------------------------
for (i = 0;
i < (Int32)getTableDesc()->getVerticalPartitions().entries();
i++)
{
IndexDesc *idesc = getTableDesc()->getVerticalPartitions()[i];
ValueIdList indexOrder(idesc->getOrderOfKeyValues());
// ---------------------------------------------------------------------
// Normalize the asc/desc order of the index.
// ---------------------------------------------------------------------
indexOrder.normalizeNode(normWARef);
idesc->setOrderOfKeyValues(indexOrder);
// ---------------------------------------------------------------------
// Normalize the partitioning keys in the partitioning function.
// ---------------------------------------------------------------------
// Vertically partitioned tables always have a partitioning
// function, even if there is only one horizontal partition.
//
idesc->getPartitioningFunction()->normalizePartitioningKeys(normWARef);
}
// QSTUFF
// we need to normalize the potential outputs here to avoid problems
// during code generation
potentialOutputs_.normalizeNode(normWARef);
// QSTUFF
// ---------------------------------------------------------------------
// Rewrite the expressions in the selection predicates and
// in the Group Attributes.
// ---------------------------------------------------------------------
RelExpr::rewriteNode(normWARef);
} // Scan::rewriteNode()
// -----------------------------------------------------------------------
// Scan::recomputeOuterReferences()
// -----------------------------------------------------------------------
// void Scan::recomputeOuterReferences()
//
// No virtual method needed
//
// Scan::recomputeOuterReferences()
// -----------------------------------------------------------------------
// Scan::normalizeNode()
// -----------------------------------------------------------------------
RelExpr * Scan::normalizeNode
( NormWA & normWARef )
{
if (nodeIsNormalized())
return this;
RelExpr::normalizeNode(normWARef);
if(CmpCommon::getDefault(RANGESPEC_TRANSFORMATION) != DF_OFF &&
!normWARef.inMVQueryRewrite())
{
ValueIdSet vs,vs1;
ValueId exprId;
ItemExpr *inputItemExprTree = NULL;
ValueIdList selectionPredList(selectionPred());
inputItemExprTree = selectionPredList.rebuildExprTree(ITM_AND,FALSE,FALSE);
CollHeap *heap = normWARef.wHeap();
QRDescGenerator* descGenerator = new (heap) QRDescGenerator(false, heap);
if (CmpCommon::getDefault(MVQR_LOG_QUERY_DESCRIPTORS) == DF_DUMP_MV)
// Used for generating MV descriptors for queries in workload analysis mode.
descGenerator->setDumpMvMode();
// Desc generator needs equality sets or mvqr won't set range bitmap
// correctly for equijoin operands with additional range predicates.
descGenerator->createEqualitySets(selectionPred());
ItemExpr *result = NULL;
ItemExpr *ie = NULL ;
if( inputItemExprTree != NULL )
{
NABoolean transStatus = FALSE;
result = applyAssociativityAndCommutativity(descGenerator,heap,
inputItemExprTree, normWARef,
transStatus);
if(transStatus)
{
// result->synthTypeAndValueId(); // You cannot remove it; it causes a regression (as noted in code review) for case core/test029
// delete from T29xv2 where j like 'f%'; -- ok -> ValueId of AND node is not available.
result->convertToValueIdSet(vs, NULL, ITM_AND);
if(CmpCommon::getDefault(RANGESPEC_TRANSFORMATION) == DF_MINIMUM )
{
for (exprId = vs.init(); vs.next(exprId); vs.advance(exprId))
{
ie = exprId.getItemExpr()->removeRangeSpecItems(&normWARef);
if (ie->getOperatorType() == ITM_AND)
{
OperatorTypeEnum op = ie->child(0)->getOperatorType();
if ( (op == ITM_GREATER_EQ) ||(op == ITM_GREATER) ||
(op == ITM_LESS) ||(op == ITM_LESS_EQ))
{
if(!((BiRelat*)ie->child(0).getPtr())->derivativeOfLike())
{
vs1.insert(ie->child(0)->getValueId());
vs1.insert(ie->child(1)->getValueId());
continue ;
}
}
}
vs1.insert(ie->getValueId());
}
vs.clear();
vs += vs1 ;
}
//doNotReplaceAnItemExpressionForLikePredicates(result,vs,result);
vs.normalizeNode(normWARef);
setSelectionPredicates(vs);
// For testing purpose:
// ValueIdList selectionPredList1(vs);
// ItemExpr * inputItemExprTree0 = selectionPredList1.rebuildExprTree(ITM_AND,FALSE,FALSE);
// oldTree = revertBackToOldTree(heap,inputItemExprTree0);
// oldTree->convertToValueIdSet(leafs, NULL, ITM_AND);
// doNotReplaceAnItemExpression(oldTree,leafs,oldTree);
}
}
}
// The following block of code can transform an OR predicate into
// semijoin(Scan, TupleList)
// where the Scan is this scan node.
// The transformation is in general guarded by tight heuristics,
// since OR preds can otherwise be evaluated using a hash table (code in the generator).
// The selection preds of this scan node can be affected by this code block.
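// Illustrative example (a sketch, not from the original source): a predicate like
//   WHERE a = 1 OR a = 7 OR ... OR a = 99        -- a long IN-list style OR chain
// can be rewritten as a semijoin between this Scan and a TupleList holding the
// literals, roughly equivalent to  a = ANY (VALUES (1),(7),...,(99)),
// so the values can drive key lookups instead of being evaluated row by row.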
ValueIdSet & preds = selectionPred();
ValueId exprId;
ItemExprList valuesListIE(normWARef.wHeap());
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context());
ExprGroupId newJoin = this;
ItemExpr *retItemExpr = NULL;
ValueId colVid;
Lng32 numParams;
if (normWARef.getMergeUpdDelCount() == 0)
for (exprId = preds.init(); preds.next(exprId); preds.advance(exprId))
{
if (exprId.getItemExpr()->canTransformToSemiJoin(valuesListIE,
getTableDesc(), numParams, colVid, normWARef.wHeap()))
{
// it is an OR pred. that meets the basic correctness conditions
if (!passSemiJoinHeuristicCheck(exprId, valuesListIE.entries(), numParams, colVid))
{
continue; // did not satisfy heuristics
}
TupleList * tl = new(normWARef.wHeap())
TupleList(valuesListIE.convertToItemExpr(RIGHT_LINEAR_TREE));
tl->setCreatedForInList(TRUE);
RelRoot * rr = new (normWARef.wHeap()) RelRoot(tl);
retItemExpr = new (normWARef.wHeap())
QuantifiedComp(ITM_EQUAL_ANY, colVid.getItemExpr(), rr, FALSE);
((QuantifiedComp*)retItemExpr)->setCreatedFromINlist(TRUE);
retItemExpr->bindNode(&bindWA);
if(bindWA.errStatus())
{
CmpCommon::diags()->clear();
bindWA.resetErrStatus();
continue ;
}
ExprValueId nePtr(retItemExpr);
retItemExpr->transformNode(normWARef, nePtr,
newJoin, getGroupAttr()->getCharacteristicInputs());
if(!(newJoin->getOperator().match(REL_SEMITSJ)))
continue ;
// is an OR pred that passed the heuristics check
preds.remove(exprId);
}
}
// we have changed the tree and introduced at least one semijoin.
if ((RelExpr *)newJoin != this)
{
((RelExpr *)newJoin)->getGroupAttr()->setCharacteristicOutputs
(getGroupAttr()->getCharacteristicOutputs());
((RelExpr *)newJoin)->getGroupAttr()->setCharacteristicInputs
(getGroupAttr()->getCharacteristicInputs());
primeGroupAttributes();
getGroupAttr()->normalizeInputsAndOutputs(normWARef);
ExprGroupId eg(newJoin);
newJoin->transformNode(normWARef,eg);
newJoin = newJoin->normalizeNode(normWARef);
}
TableDesc * tableDesc = getTableDesc();
// Make sure we rewrite the computedColumn Expressions
const ValueIdList &allCols = tableDesc->getColumnList();
ItemExpr *iePtr;
CollIndex i = 0;
for (i = 0; i < allCols.entries(); i++)
{
iePtr = allCols[i].getItemExpr();
if (((BaseColumn *) iePtr)->getNAColumn()->isComputedColumn())
{
BaseColumn *bc = ((BaseColumn *) iePtr);
ItemExpr *ccExpr = bc->getComputedColumnExpr().getItemExpr();
ccExpr = ccExpr->normalizeNode(normWARef);
bc->setComputedColumnExpr(ccExpr->getValueId());
}
}
SelectivityHint * selHint = tableDesc->selectivityHint();
if (selHint)
{
selHint->setLocalPreds(getSelectionPredicates());
}
CardinalityHint * cardHint = tableDesc->cardinalityHint();
if (cardHint)
{
cardHint->setLocalPreds(getSelectionPredicates());
}
return ((RelExpr *)newJoin);
} // Scan::normalizeNode()
NABoolean Scan::prepareMeForCSESharing(
const ValueIdSet &outputsToAdd,
const ValueIdSet &predicatesToRemove,
const ValueIdSet &commonPredicatesToAdd,
const ValueIdSet &inputsToRemove,
ValueIdSet &valuesForVEGRewrite,
ValueIdSet &keyColumns,
CSEInfo *info)
{
// The caller of this method took care of most adjustments to
// make. The main thing the scan node needs to do is to add any
// outputs that are required to its characteristic outputs.
ValueIdSet myColSet(getTableDesc()->getColumnVEGList());
ValueIdSet referencedCols;
ValueIdSet myOutputsToAdd;
ValueIdSet unCoveredExpr;
// The caller may be asking for expressions on columns, maybe
// even an expression involving multiple tables, therefore use
// the isCovered method to determine those subexpressions that we
// can produce here.
outputsToAdd.isCovered(myColSet,
*(getGroupAttr()),
referencedCols,
myOutputsToAdd,
unCoveredExpr);
getGroupAttr()->addCharacteristicOutputs(myOutputsToAdd);
valuesForVEGRewrite.insertList(getTableDesc()->getColumnList());
keyColumns.insertList(getTableDesc()->getClusteringIndex()->getIndexKey());
return TRUE;
}
/* This method applies a long list of heuristics to determine whether
it is better to use a semijoin to evaluate the OR pred or whether we should
wait till the generator and use the hash table implementation.
OR_PRED_TO_SEMIJOIN = 0      ==> semijoin trans is turned OFF
OR_PRED_TO_SEMIJOIN = <val1> ==> semijoin trans kicks in if
a. the hash table transformation does not apply for some reason and
b. the number of literals in the OR pred > <val1>
The default is 25.
OR_PRED_TO_JUMPTABLE = 0      ==> hash table trans is turned OFF in the generator
OR_PRED_TO_JUMPTABLE = <val2> ==> hash table implementation shuts OFF for IN lists
larger than this size. The default value is 5,000.
OR_PRED_TO_SEMIJOIN_TABLE_MIN_SIZE : The key column heuristic applies only if the table
has more rows than this setting. The default is 10000. The key column heuristic says that
the semijoin transformation can give a good plan only if the number of rows read by the
incoming probes is less than a small fraction of a big table.
OR_PRED_TO_SEMIJOIN_PROBES_MAX_RATIO : Relevant only to the key column heuristic.
This default specifies the ratio described in the previous sentence.
The default value is 0.10. Currently join preds on key columns and multiple IN
lists on key columns are not handled well by the key col heuristic.
The other heuristic checked here relates to the partitioning key. If the IN list size
is less than half the number of partitions and the partitioning key is covered by
equality preds, then we figure that it is better to do the semijoin transformation and
open only a few partitions. Opening a few partitions and sending on avg. one probe to each
one (the total number of probes is guaranteed to be less than half the number of partitions)
is better than opening all the partitions and scanning the entire table once.
The first argument vid is the giant OR predicate that we already know meets all
logical criteria for transformation to semijoin.
*/
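// Illustrative usage (an assumption, not from the original source): these
// defaults are expected to be tunable as CQDs of the same name, e.g.
//   CONTROL QUERY DEFAULT OR_PRED_TO_SEMIJOIN '50';
//   CONTROL QUERY DEFAULT OR_PRED_TO_SEMIJOIN_PROBES_MAX_RATIO '0.05';
// which would require at least 50 literals before the semijoin rewrite is
// attempted and would tighten the key-column probe-ratio check.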
NABoolean Scan::passSemiJoinHeuristicCheck(ValueId vid, Lng32 numValues,
Lng32 numParams, ValueId colVid) const
{
Lng32 orPredToSemiJoin =
ActiveSchemaDB()->getDefaults().getAsLong(OR_PRED_TO_SEMIJOIN);
Lng32 orPredToJumpTable =
ActiveSchemaDB()->getDefaults().getAsLong(OR_PRED_TO_JUMPTABLE);
Lng32 orPredToSemiJoinTableMinSize =
ActiveSchemaDB()->getDefaults().getAsLong(OR_PRED_TO_SEMIJOIN_TABLE_MIN_SIZE);
float orPredToSemiJoinMaxRatio ;
ActiveSchemaDB()->getDefaults().getFloat(OR_PRED_TO_SEMIJOIN_PROBES_MAX_RATIO,
orPredToSemiJoinMaxRatio);
if (orPredToSemiJoin == 0) // feature is turned OFF
return FALSE;
// if pcode is not available then the hash table implementation does not
// apply. Be more aggressive with the semijoin trans.
DefaultToken pcodeOptLevel = CmpCommon::getDefault(PCODE_OPT_LEVEL);
NABoolean unSupportedType = FALSE;
NABoolean noPCodeSupport = FALSE;
UInt32 optFlags = (UInt32)CmpCommon::getDefaultLong(PCODE_OPT_FLAGS);
if (((optFlags & PCodeCfg::INDIRECT_BRANCH) == 0) ||
(pcodeOptLevel == DF_OFF) || (pcodeOptLevel == DF_MINIMUM))
{
noPCodeSupport = TRUE;
}
if (colVid.getType().getTypeQualifier() == NA_NUMERIC_TYPE)
{
const NumericType &ntype = (NumericType &)colVid.getType() ;
if (ntype.isBigNum() || ntype.isDecimal() || (ntype.getScale() > 0))
unSupportedType = TRUE;
}
if (numValues > orPredToSemiJoin) // num of in list values still has to be
{ // greater than OR_PRED_TO_SEMIJOIN
if ( noPCodeSupport ||
(orPredToJumpTable == 0) || (orPredToJumpTable < numValues)|| // hash table imp. is OFF or In list VERY large
(numParams > orPredToSemiJoin) || // params not supported hash table imp.
unSupportedType )
return TRUE;
}
NABoolean isBigTable = FALSE;
CostScalar totalRowCount = getTableDesc()->getTableColStats()[0]->getColStats()->getRowcount();
if (totalRowCount > orPredToSemiJoinTableMinSize)
isBigTable = TRUE;
// We do cycle through all indexes of the base table though
// there is no guarantee that the index we base our decision upon here
// will be chosen by the optimizer.
const LIST(IndexDesc *) & ixlist = getTableDesc()->getIndexes();
for (CollIndex ix =0; ix < ixlist.entries(); ix++)
{
IndexDesc* idesc = ixlist[ix];
ValueIdList keyCols, partKeyCols;
getTableDesc()->getEquivVEGCols(idesc->getIndexKey(), keyCols);
getTableDesc()->getEquivVEGCols(idesc->getPartitioningKey(), partKeyCols);
CollIndex keyColIndex = keyCols.index(colVid);
CollIndex partKeyColIndex = partKeyCols.index(colVid);
if (partKeyColIndex != NULL_COLL_INDEX) // 'a' is a partitioning key column
{
NABoolean applyPartKeyHeuristic = FALSE;
if ((numValues < 0.5*(idesc->getNAFileSet()->getCountOfPartitions())) &&
isBigTable && (numValues > orPredToSemiJoin))
{
// number of clauses in IN List is less than half the number of partitions
applyPartKeyHeuristic = TRUE;
}
for (CollIndex i =0;
(applyPartKeyHeuristic && (i < partKeyCols.entries())); i++)
{
if (i == partKeyColIndex)
continue ;
if (!partKeyCols[i].getItemExpr()->doesExprEvaluateToConstant(FALSE,TRUE)) // equality preds on all part key columns, except 'a'
{
applyPartKeyHeuristic = FALSE;
}
}
if (applyPartKeyHeuristic)
return TRUE;
}
ItemExpr* ie;
if ((keyColIndex != NULL_COLL_INDEX)&& isBigTable) // 'a' is a key column of this index
{
NABoolean fullKeyConstant = TRUE;
NABoolean keyConstantUptoCol = TRUE;
for (CollIndex i =0; i < keyCols.entries(); i++)
{
if (i == keyColIndex)
continue ;
ie = keyCols[i].getItemExpr();
if (!(ie->doesExprEvaluateToConstant(FALSE,TRUE))) // equality preds on all key columns
{
if (i < keyColIndex)
{
fullKeyConstant = FALSE;
keyConstantUptoCol = FALSE;
}
else
{
fullKeyConstant = FALSE;
}
break;
}
}
if (fullKeyConstant)
return TRUE;
if (keyConstantUptoCol && (numValues > orPredToSemiJoin))
return TRUE;
// the following block separates out the key predicates from the selection
// preds of this scan. Then we estimate the number of rows that will
// result after applying these key predicates. Only local predicates are
// considered. OR preds on key columns, join preds on key columns, etc.
// are not included in this computation. Hopefully these pred types can also be
// considered eventually. Code is mostly a copy of AppliedStatMan::getStatsForCANodeId()
ValueIdSet nonKeyPredicates (getSelectionPred());
ValueIdSet externalInputs = getGroupAttr()->getCharacteristicInputs();
ValueIdSet nonKeyColumnSet;
idesc->getNonKeyColumnSet(nonKeyColumnSet);
SearchKey * skey = new(CmpCommon::statementHeap())
SearchKey (idesc->getIndexKey(),
idesc->getOrderOfKeyValues(),
externalInputs, TRUE,
nonKeyPredicates,
nonKeyColumnSet,
idesc);
const CorrName& name = getTableDesc()->getNATable()->getTableName();
Scan *scanExpr = new STMTHEAP Scan(name, getTableDesc(), REL_SCAN, STMTHEAP);
scanExpr->setBaseCardinality((Cardinality)totalRowCount.getValue()) ;
GroupAttributes * gaExpr = new STMTHEAP GroupAttributes();
scanExpr->setSelectionPredicates(skey->keyPredicates());
gaExpr->setCharacteristicOutputs(getGroupAttr()->getCharacteristicOutputs());
scanExpr->setGroupAttr(gaExpr);
gaExpr->setLogExprForSynthesis(scanExpr);
EstLogPropSharedPtr outputEstLogProp = scanExpr->getGroupAttr()->outputLogProp((*GLOBAL_EMPTY_INPUT_LOGPROP));
CostScalar keyPredRowCount = outputEstLogProp->getResultCardinality() ;
delete skey;
delete scanExpr; // gaExpr is deleted here too
if (( keyPredRowCount < ((CostScalar)orPredToSemiJoinMaxRatio)*totalRowCount)&&
(numValues > orPredToSemiJoin))
return TRUE; // ratio of rows chosen by keypreds is less than specified
// by the default OR_PRED_TO_SEMIJOIN_PROBES_MAX_RATIO
} // end of isBigTable IF block
} // end of loop over all index paths
// part key and key column heuristic did not apply
return FALSE;
}
// ***********************************************************************
// $$$$ Tuple
// methods for class Tuple
// ***********************************************************************
// ***********************************************************************
// $$$$ GenericUpdate
// member functions for class GenericUpdate
// ***********************************************************************
void GenericUpdate::transformNode(NormWA & normWARef,
ExprGroupId & locationOfPointerToMe)
{
CMPASSERT( this == locationOfPointerToMe );
if (nodeIsTransformed())
return;
markAsTransformed();
// ---------------------------------------------------------------------
// Transform the child,
// unless it's a leaf op introduced by Binder Index Maintenance.
// ---------------------------------------------------------------------
ValueId val_id;
if (child(0)) {
// Make values available to child
child(0)->getGroupAttr()->addCharacteristicInputs
(getGroupAttr()->getCharacteristicInputs());
child(0)->transformNode(normWARef, child(0));
} else
CMPASSERT(getOperator().match(REL_ANY_LEAF_GEN_UPDATE));
// only if update and scan on the same table,
// i.e. no temp tables are involved
if (((getOperatorType() == REL_UNARY_UPDATE ||
getOperatorType() == REL_UNARY_DELETE))){
if(child(0)->getOperatorType() == REL_SCAN) {
Scan * scanNode = (Scan *)(child(0)->castToRelExpr());
const NATable *scanTable = scanNode->getTableDesc()->getNATable();
if(scanTable->getSpecialType() != ExtendedQualName::TRIGTEMP_TABLE){
ValueIdList topValueIds = oldToNewMap().getTopValues();
ValueIdList bottomValueIds = oldToNewMap().getBottomValues();
for (CollIndex v = 0; v < topValueIds.entries();v++){
normWARef.addVEG(topValueIds[v],bottomValueIds[v]);
}
}
}
}
// ---------------------------------------------------------------------
// Transform the computable expressions associated with me.
// If a subquery appears in the compute list, then let the subquery
// transformation cause a semijoin to be performed between the
// child of the GenericUpdate and the GenericUpdate.
// ---------------------------------------------------------------------
NABoolean origInGenericUpdateAssignFlag(normWARef.inGenericUpdateAssign());
normWARef.setInGenericUpdateAssign(TRUE);
if (newRecExpr().transformNode(normWARef, child(0),
getGroupAttr()->getCharacteristicInputs(),
FALSE /* Move predicates */) )
{
normWARef.setInGenericUpdateAssign(origInGenericUpdateAssignFlag) ;
// -----------------------------------------------------------------
// Transform my new child.
// -----------------------------------------------------------------
child(0)->transformNode(normWARef, child(0));
}
normWARef.setInGenericUpdateAssign(origInGenericUpdateAssignFlag) ;
normWARef.setInGenericUpdateAssign(TRUE);
// QSTUFF
if (newRecBeforeExpr().transformNode(normWARef, child(0),
getGroupAttr()->getCharacteristicInputs(),
FALSE /* Move predicates */) )
{
normWARef.setInGenericUpdateAssign(origInGenericUpdateAssignFlag) ;
// -----------------------------------------------------------------
// Transform my new child.
// -----------------------------------------------------------------
child(0)->transformNode(normWARef, child(0));
}
normWARef.setInGenericUpdateAssign(origInGenericUpdateAssignFlag) ;
// QSTUFF
if (isMerge())
{
normWARef.setInGenericUpdateAssign(TRUE) ;
if (mergeInsertRecExpr().transformNode(normWARef, child(0),
getGroupAttr()->getCharacteristicInputs(),
FALSE /* Move predicates */) )
{
normWARef.setInGenericUpdateAssign(origInGenericUpdateAssignFlag) ;
// -----------------------------------------------------------------
// Transform my new child.
// -----------------------------------------------------------------
child(0)->transformNode(normWARef, child(0));
}
normWARef.setInGenericUpdateAssign(origInGenericUpdateAssignFlag) ;
// remember previous "are we in mergeUpdateWhere?" flag
NABoolean origInMergeUpdWhere(normWARef.inMergeUpdWhere());
normWARef.setInMergeUpdWhere(TRUE); // we're in a mergeUpdateWhere
if (mergeUpdatePred().transformNode
(normWARef, child(0), getGroupAttr()->getCharacteristicInputs(),
FALSE /* Move predicates */) )
{ // restore previous "are we in mergeUpdateWhere?" flag
normWARef.setInMergeUpdWhere(origInMergeUpdWhere) ;
// Transform my new child.
child(0)->transformNode(normWARef, child(0));
}
// restore previous "are we in mergeUpdateWhere?" flag
normWARef.setInMergeUpdWhere(origInMergeUpdWhere) ;
}
ValueId exprId;
for (exprId = newRecExpr().init(); newRecExpr().next(exprId); newRecExpr().advance(exprId))
{
ItemExpr *thisIE = exprId.getItemExpr();
thisIE = thisIE->removeOneRowAggregate( thisIE, normWARef );
}
// QSTUFF
for (exprId = newRecBeforeExpr().init(); newRecBeforeExpr().next(exprId); newRecBeforeExpr().advance(exprId))
{
ItemExpr *thisIE = exprId.getItemExpr();
thisIE = thisIE->removeOneRowAggregate( thisIE, normWARef );
}
// QSTUFF
for (exprId = mergeInsertRecExpr().init(); mergeInsertRecExpr().next(exprId); mergeInsertRecExpr().advance(exprId))
{
ItemExpr *thisIE = exprId.getItemExpr();
thisIE = thisIE->removeOneRowAggregate( thisIE, normWARef );
}
for (exprId = mergeUpdatePred().init();
mergeUpdatePred().next(exprId);
mergeUpdatePred().advance(exprId))
{
ItemExpr *thisIE = exprId.getItemExpr();
thisIE = thisIE->removeOneRowAggregate( thisIE, normWARef );
}
// ---------------------------------------------------------------------
// For key expressions, only normalize the right-hand side of the '='.
// The left side should have been a different valueId from the one below.
// ---------------------------------------------------------------------
ValueIdList keyList = beginKeyPred();
if (keyList.entries() > 0)
{
for (CollIndex i = 0; i < keyList.entries(); i++)
{
ItemExpr * eqPtr = ((keyList[i]).getValueDesc())->getItemExpr();
(*eqPtr)[1]->transformNode(normWARef, eqPtr->child(1), child(0),
getGroupAttr()->getCharacteristicInputs());
(*eqPtr)[0]->markAsTransformed();
eqPtr->markAsTransformed();
}
}
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
beginKeyPred().transformNode(normWARef, child(0),
getGroupAttr()->getCharacteristicInputs());
// ---------------------------------------------------------------------
// Transform the check constraint expressions.
// Indicate that we are processing a complex scalar expression to
// suppress the performance of transitive closure.
// ---------------------------------------------------------------------
normWARef.setComplexScalarExprFlag();
normWARef.setInConstraintsFlag();
checkConstraints().transformNode(normWARef, child(0),
getGroupAttr()->getCharacteristicInputs());
normWARef.restoreComplexScalarExprFlag();
normWARef.restoreInConstraintsFlag();
// There should be no select predicates here,
// except if it's an embedded insert.
if (!getGroupAttr()->isEmbeddedInsert())
{
CMPASSERT(selectionPred().isEmpty()
// QSTUFF
OR getGroupAttr()->isGenericUpdateRoot()
// QSTUFF
);
}
// fix CR: message bytes increase with rowsets (CR 10-010720-4032)
if ( child(0) )
child(0)->recomputeOuterReferences();
// QSTUFF
if (!selectionPred().isEmpty()){
transformSelectPred(normWARef, locationOfPointerToMe);
}
// QSTUFF
// ---------------------------------------------------------------------
// Transform the entire column list of the base table to pick up
// equivalences of base table columns and index columns
// ---------------------------------------------------------------------
const ValueIdList &allCols = getTableDesc()->getColumnList();
ItemExpr *oldPtr;
ExprValueId newPtr;
ValueId eqVid;
CollIndex i = 0;
for (i = 0; i < allCols.entries(); i++) {
oldPtr = allCols[i].getItemExpr();
newPtr = oldPtr;
oldPtr->transformNode(normWARef, newPtr, locationOfPointerToMe,
getGroupAttr()->getCharacteristicInputs());
// the column list shouldn't be changed by the transformation
CMPASSERT(oldPtr == newPtr.getPtr());
// ---------------------------------------------------------------------
// Create a VEG with all equivalent index columns
// ---------------------------------------------------------------------
if (oldPtr->getOperatorType() == ITM_BASECOLUMN) {
const ValueIdSet &eic = ((BaseColumn *)oldPtr)->getEIC();
for (eqVid = eic.init(); eic.next(eqVid); eic.advance(eqVid)) {
// for trigger temp tables
if (updatedTableName_.getSpecialType() == ExtendedQualName::TRIGTEMP_TABLE &&
getOperatorType() == REL_LEAF_INSERT) {
normWARef.addVEGInOuterRegion(((BaseColumn *)oldPtr)->getValueId(),eqVid);
}
// no trigger temp tables
else {
normWARef.addVEG(((BaseColumn *)oldPtr)->getValueId(),eqVid);
}
}
}
else {
CMPASSERT(oldPtr->getOperatorType() == ITM_BASECOLUMN);
}
}
// ---------------------------------------------------------------------
// Prime the Group Attributes for the GenericUpdate.
// ---------------------------------------------------------------------
primeGroupAttributes();
} // GenericUpdate::transformNode()
// -----------------------------------------------------------------------
// GenericUpdate::rewriteNode()
// -----------------------------------------------------------------------
void GenericUpdate::rewriteNode(NormWA & normWARef)
{
// QSTUFF
const ValueIdList &allCols = getTableDesc()->getColumnList();
ItemExpr *newPtr = NULL;
// ---------------------------------------------------------------------
// walk through all the columns of the table, normalizing them
// and adding the result into the ColumnVEGList of the table descriptor
// ---------------------------------------------------------------------
CollIndex j = 0;
for (j = 0; j < allCols.entries(); j++)
{
// ---------------------------------------------------------------------
// Create a VEG with all equivalent index columns
// ---------------------------------------------------------------------
newPtr = allCols[j].getItemExpr()->normalizeNode(normWARef);
getTableDesc()->addToColumnVEGList(newPtr->getValueId());
}
// -------------------------------------------------------------------------
// Normalize the indexes.
// -------------------------------------------------------------------------
for (j = 0;
j < (Int32)getTableDesc()->getIndexes().entries();
j++)
{
IndexDesc *idesc = getTableDesc()->getIndexes()[j];
ValueIdList indexOrder(idesc->getOrderOfKeyValues());
// ---------------------------------------------------------------------
// Normalize the asc/desc order of the index.
// ---------------------------------------------------------------------
indexOrder.normalizeNode(normWARef);
idesc->setOrderOfKeyValues(indexOrder);
// ---------------------------------------------------------------------
// Normalize the partitioning keys in the partitioning function.
// ---------------------------------------------------------------------
if (idesc->isPartitioned())
idesc->getPartitioningFunction()->normalizePartitioningKeys(normWARef);
}
// we need to normalize the potential outputs here to avoid problems
// during code generation
potentialOutputs_.normalizeNode(normWARef);
// QSTUFF
precondition_.normalizeNode(normWARef);
// these are no longer used in the following phases,
// so remove them instead of rewriting them
exprsInDerivedClasses_.clear();
// ---------------------------------------------------------------------
// Rewrite the expressions in the selection predicates and
// in the Group Attributes.
// ---------------------------------------------------------------------
RelExpr::rewriteNode(normWARef);
// ---------------------------------------------------------------------
// Rewrite values in the newrec expressions.
// ---------------------------------------------------------------------
if (newRecExpr().normalizeNode(normWARef))
{
}
// QSTUFF
if (newRecBeforeExpr().normalizeNode(normWARef))
{
}
// QSTUFF
if (executorPred().normalizeNode(normWARef))
{
}
if (isMerge())
{
if (mergeInsertRecExpr().normalizeNode(normWARef))
{
}
if (mergeUpdatePred().normalizeNode(normWARef))
{
}
}
// ---------------------------------------------------------------------
// Rewrite expressions in the order by list, if this is an insert.
// ---------------------------------------------------------------------
if (getOperatorType() == REL_UNARY_INSERT)
{
Insert * ins
= (Insert *)(this->castToRelExpr());
if (ins->reqdOrder().normalizeNode(normWARef))
{
}
}
/*
// QSTUFF
// this has been moved up before rewriting the index item expressions
// ---------------------------------------------------------------------
// walk through all the columns of the table, normalizing them
// and adding the result into the ColumnVEGList of the table descriptor
// ---------------------------------------------------------------------
const ValueIdList &allCols = getTableDesc()->getColumnList();
ItemExpr *newPtr = NULL;
for (CollIndex i = 0; i < allCols.entries(); i++)
{
// ---------------------------------------------------------------------
// Create a VEG with all equivalent index columns
// ---------------------------------------------------------------------
newPtr = allCols[i].getItemExpr()->normalizeNode(normWARef);
getTableDesc()->addToColumnVEGList(newPtr->getValueId());
}
// QSTUFF
*/
// ---------------------------------------------------------------------
// Rewrite values in the key expressions.
// ---------------------------------------------------------------------
// For key expressions, only normalize the right-hand side of the '='.
// The left side should have been a different valueId from the one below.
ValueIdList keyList = beginKeyPred();
if (keyList.entries() > 0)
{
for (CollIndex i = 0; i < keyList.entries(); i++)
{
ItemExpr * eqPtr = ((keyList[i]).getValueDesc())->getItemExpr();
ItemExpr * right_side = (*eqPtr)[1]->normalizeNode(normWARef);
eqPtr->child(1) = right_side;
}
}
// ---------------------------------------------------------------------
// Rewrite the ValueIdMap between the select and the update part so
// it has VEGReferences in it (note that we avoided VEGies that span
// both the select and the update part; this is (probably?) one
// reason why we only normalized one half of the key preds above).
// ---------------------------------------------------------------------
updateToSelectMap_.normalizeNode(normWARef);
// ---------------------------------------------------------------------
// Rewrite values in the check constraint expressions.
// Indicate that we are processing a complex scalar expression to
// suppress the performance of transitive closure.
// ---------------------------------------------------------------------
normWARef.setComplexScalarExprFlag();
normWARef.setInConstraintsFlag();
if (checkConstraints().normalizeNode(normWARef))
{
}
normWARef.restoreComplexScalarExprFlag();
normWARef.restoreInConstraintsFlag();
// ---------------------------------------------------------------------
// Rewrite the expressions in the TriggerBindInfo object which is part
// of the inlining info.
// ---------------------------------------------------------------------
if (getInliningInfo().getTriggerBindInfo())
{
getInliningInfo().getTriggerBindInfo()->normalizeMembers(normWARef);
}
} // GenericUpdate::rewriteNode()
// -----------------------------------------------------------------------
// GenericUpdate::recomputeOuterReferences()
// -----------------------------------------------------------------------
void GenericUpdate::recomputeOuterReferences()
{
// Should replace with appropriate virtual methods
// Solution 10-040114-2405 start
// Our transformation for input rowsets always involves an unpack and a
// flow operator. Hence we shouldn't be accessing any input rowset
// directly. Remove its reference from the required inputs.
ValueIdSet outerRefs = getGroupAttr()->getCharacteristicInputs();
ValueId vid;
ValueIdSet inRowsets;
ItemExpr *ie ;
for (vid = outerRefs.init(); outerRefs.next(vid); outerRefs.advance(vid))
{
ie = vid.getItemExpr();
if (ie->getOperatorType() != ITM_CONSTANT) {
if ((vid.getType().getTypeQualifier() == NA_ROWSET_TYPE) ||
(( ie->getOperatorType() == ITM_DYN_PARAM) &&
(((DynamicParam *) ie)->getRowsetSize() != 0)))
inRowsets +=vid;
}
}
// Remove input rowset references
outerRefs -=inRowsets;
// Solution 10-040114-2405 end
if ((getOperatorType() != REL_UNARY_INSERT) && (getOperatorType() != REL_LEAF_INSERT) &&
(getOperatorType() != REL_UNARY_DELETE) && (getOperatorType() != REL_LEAF_DELETE) &&
(getOperatorType() != REL_UNARY_UPDATE) && (getOperatorType() != REL_LEAF_UPDATE)) {
getGroupAttr()->setCharacteristicInputs(outerRefs);
return;
}
// ---------------------------------------------------------------------
// Delete all those input values that are no longer referenced on
// this operator because the predicates that reference them have
// been pulled up.
// ---------------------------------------------------------------------
ValueIdSet allMyExpr(newRecExpr());
allMyExpr += newRecBeforeExpr();
allMyExpr += executorPred();
allMyExpr += usedColumns();
allMyExpr += getSelectionPred();
allMyExpr += exprsInDerivedClasses_;
ValueIdSet beginKeyPredSet(beginKeyPred());
allMyExpr += beginKeyPredSet;
if (isMerge())
{
allMyExpr += mergeInsertRecExpr();
allMyExpr += mergeUpdatePred();
}
allMyExpr.weedOutUnreferenced(outerRefs);
// Add references needed by children, if any
Int32 arity = getArity();
for (Int32 i = 0; i < arity; i++)
{
outerRefs += child(i).getPtr()->getGroupAttr()->getCharacteristicInputs();
}
getGroupAttr()->setCharacteristicInputs(outerRefs);
} // GenericUpdate::recomputeOuterReferences
// -----------------------------------------------------------------------
// GenericUpdate::normalizeNode
// -----------------------------------------------------------------------
RelExpr * GenericUpdate::normalizeNode(NormWA & normWARef)
{
if (nodeIsNormalized())
return this;
if (isMerge())
normWARef.incrementMergeUpdDelCount();
// Call the super class to do the normalization work.
RelExpr *normalizedThis = RelExpr::normalizeNode(normWARef);
if ((getOperator().match(REL_ANY_GEN_UPDATE) || // general update cases
getOperator().match(REL_UNARY_INSERT) // update of a key column
)
&&
(getInliningInfo().hasTriggers() || // driving trigger temp table insert
getInliningInfo().isMVLoggingInlined() // driving MV IUD log insert
)
)
{
Lng32 actualMessageSize = getGroupAttr()->getCharacteristicOutputs().getRowLength();
// 2 headers: one for record header and the other for the message header
Lng32 maxMessageSize = (ActiveSchemaDB()->getDefaults().getAsULong(LOCAL_MESSAGE_BUFFER_SIZE) * 1024) -
(2*(ActiveSchemaDB()->getDefaults().getAsULong(DP2_MESSAGE_HEADER_SIZE_BYTES)));
// check row size against max executor message buffer size
if (actualMessageSize >= maxMessageSize)
{
Lng32 tableRecordLength = getTableDesc()->getNATable()->getRecordLength();
NAString tableName = getTableDesc()->getNATable()->getTableName().getQualifiedNameAsAnsiString();
*CmpCommon::diags() << DgSqlCode(-12070)
<< DgString0(tableName)
<< DgInt0(tableRecordLength)
<< DgInt1((Lng32)maxMessageSize/2);
return this;
}
}
/// YYY
if (getOperator().match(REL_ANY_UNARY_GEN_UPDATE))
{
Scan * scan = getLeftmostScanNode();
if (scan && scan->requiresHalloweenForUpdateUsingIndexScan())
setAvoidHalloween(TRUE);
}
if (producedMergeIUDIndicator_ != NULL_VALUE_ID)
{
ValueId dummy;
if (NOT getGroupAttr()->getCharacteristicOutputs().referencesTheGivenValue(
producedMergeIUDIndicator_,
dummy))
// nobody asked for the merge IUD indicator, therefore remove
// it (e.g. a simple table without index maintenance)
producedMergeIUDIndicator_ = NULL_VALUE_ID;
}
return normalizedThis;
}
// -----------------------------------------------------------------------
// Insert::normalizeNode()
// The purpose of this method is to eliminate the Tuple node of an
// INSERT-VALUES statement. After normalization is done, the Tuple node
// becomes redundant, since all the information is inside the Insert node
// anyway.
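// Illustrative example (a sketch, not from the original source): for
//   INSERT INTO t VALUES (1, 'x');
// the bound tree is roughly Insert(Tuple(1,'x')); after normalization the
// Tuple child is dropped and the node becomes a LeafInsert, which the
// optimizer later turns into a DP2Insert.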
// -----------------------------------------------------------------------
RelExpr * Insert::normalizeNode(NormWA & normWARef)
{
if (nodeIsNormalized())
return this;
// Call the super class to do the normalization work.
RelExpr *normalizedThis = GenericUpdate::normalizeNode(normWARef);
// If this already is a LeafInsert node - the work is done.
if (normalizedThis->getOperatorType() == REL_LEAF_INSERT)
return normalizedThis;
// If the child is not a Tuple node - nothing to do here.
CMPASSERT(normalizedThis->getArity() > 0);
if (normalizedThis->child(0)->getOperatorType() != REL_TUPLE)
return normalizedThis;
if (normalizedThis->child(0)->getSelectionPred().isEmpty())
{
// Now get rid of the Tuple node and start a new (although short-lived)
// life as a LeafInsert node. The optimizer will next transform it
// to a DP2Insert node.
normalizedThis->child(0) = (RelExpr *)NULL;
normalizedThis->setOperatorType(REL_LEAF_INSERT);
}
// else this is the case of an insert node to an ON STATEMENT MV
// an insert to a statement MV is inlined with an update to an
// ON STATEMENT MV source table
return normalizedThis;
}
// ***********************************************************************
// $$$$ RelRoot
// member functions for class RelRoot
// ***********************************************************************
// -----------------------------------------------------------------------
// ***NOTE*** These methods must be called AFTER the transformation phase
// or they will not return the correct answer.
//
// A sql statement cursor is updatable if all of the following are true:
// -- it is a SELECT statement
// -- there is only one underlying table, and no subquery references that tbl
// -- there are no aggregates present
// -- neither GROUP BY, DISTINCT, nor ORDER BY is specified
// -- all view columns must be column references
// -- no column reference can occur more than once
// -- The underlying table is not a materialized view
//
// A view is updatable similarly, except that
// -- ORDER BY *is* allowed (if it's allowed in a view at all)
//
// See Ansi 6.3 and 7.9 SR 12, and references to "read-only table".
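// Illustrative examples (not from the original source), assuming t is a plain
// base table:
//   updatable:      SELECT a, b FROM t WHERE a > 10 FOR UPDATE OF b;
//   not updatable:  SELECT a, COUNT(*) FROM t GROUP BY a;   -- aggregate + GROUP BY
//   not updatable:  SELECT a, a FROM t;                     -- column referenced twice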
// -----------------------------------------------------------------------
NABoolean RelRoot::isUpdatableBasic(NABoolean isView,
NABoolean &isInsertable) const
{
CMPASSERT(nodeIsBound() && nodeIsTransformed());
// ## Must ensure this still works when we have updatable Stored Procedures
// QSTUFF
Scan *scan;
GenericUpdate *gu = 0;
// QSTUFF
// QSTUFF
if (child(0)->getGroupAttr()->isEmbeddedUpdateOrDelete() &&
child(0)->getGroupAttr()->isGenericUpdateRoot()){
gu = (GenericUpdate *) child(0)->castToRelExpr();
if (gu->getOperator().match(REL_ANY_UNARY_GEN_UPDATE))
scan = (Scan *)(child(0)->castToRelExpr())->getLeftmostScanNode();
else
return FALSE;
}
else
// QSTUFF
{
// if child is a FirstN node, skip it.
if ((child(0)->castToRelExpr()->getOperatorType() == REL_FIRST_N) &&
(child(0)->child(0)))
scan = (Scan *)child(0)->child(0)->castToRelExpr();
else
scan = (Scan *)child(0)->castToRelExpr();
}
if (scan->getOperatorType() != REL_SCAN)
return FALSE;
if (scan->accessOptions().accessType() == BROWSE_) // "read-only table"
return FALSE;
TransMode::IsolationLevel il;
if ((NOT isView) ||
(CmpCommon::getDefault(ISOLATION_LEVEL_FOR_UPDATES) == DF_NONE))
ActiveSchemaDB()->getDefaults().getIsolationLevel
(il);
else
ActiveSchemaDB()->getDefaults().getIsolationLevel
(il,
CmpCommon::getDefault(ISOLATION_LEVEL_FOR_UPDATES));
if (scan->accessOptions().accessType() == ACCESS_TYPE_NOT_SPECIFIED_ &&
il == TransMode::READ_UNCOMMITTED_)
return FALSE;
NATable *naTable = ActiveSchemaDB()->getNATableDB()->get(&
scan->getTableDesc()->getNATable()->getExtendedQualName());
CMPASSERT(naTable);
if (naTable->getReferenceCount() > 1)
// QSTUFF
if (getGroupAttr()->isEmbeddedUpdateOrDelete()){
if (naTable->getReferenceCount() > 2)
return FALSE;
}
else
// QSTUFF
// A subquery references the scan tbl
return FALSE;
if (naTable->isAnMV())
return FALSE; // A materialized view is not updatable. -- MV
if (naTable->isPartitionNameSpecified())
return FALSE; // If the PARTITION clause is specified in the view's query text
// then the view is not updatable.
// Check option can only check predicates in the
// where clause; the partition clause is like an extra predicate, in that it
// restricts the statement's action to a single partition. But this extra predicate
// cannot be enforced by our current check option mechanism.
// Similarly, if the PARTITION clause is specified
// in the query specification of a cursor declaration, then the cursor is not updatable.
ValueIdSet selectCols;
if (isView)
{
for (CollIndex i = 0; i < compExpr().entries(); i++)
{
ValueId idcol = compExpr()[i];
const NAColumn *nacol = idcol.getNAColumn(TRUE/*okIfNotColumn*/);
if (!nacol) // not a column reference
return FALSE;
// QSTUFF
// in case of an embedded update within a view there may be an old and
// a new column pointing to the same base table. We have to detect that
// and prevent those views from being updatable.
if (getGroupAttr()->isEmbeddedUpdateOrDelete())
{
CMPASSERT(gu);
for (CollIndex j = 0;
j < gu->getTableDesc()->getColumnList().entries();
j++)
{
if ( gu->getTableDesc()->
getColumnList()[j].getItemExpr()->getValueId() == idcol)
{
idcol = scan->getTableDesc()->
getColumnList()[j].getItemExpr()->getValueId();
}
}
}
// QSTUFF
if (selectCols.contains(idcol)) // colref appears multiple times
return FALSE; // (cf. errors 4017, 4022 in Binder)
selectCols += idcol;
// A system column is ok as long as user doesn't actually UPDATE or INSERT
// it (by definition, the system supplies a default when not explicitly
// named in INSERT)
// if (nacol->isSystemColumn() && // cf. error 4013 in Binder
// nacol->getDefaultValue() == NULL)
// isInsertable = FALSE;
}
}
// All columns not selected in the view must have a default value
// for the view to be "insertable" (Tandem notion, not Ansi;
// see SCMPBIDD for SQL/MP definition).
// We don't care what default info a system column has;
// by definition a system column is always filled in (defaulted).
// Cf. error 4024 in Binder.
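  // For illustration (hypothetical example): given
  //   CREATE TABLE t (a INT NOT NULL, b INT NOT NULL, c INT DEFAULT 0);
  //   CREATE VIEW v AS SELECT a FROM t;
  // the view v is not insertable here, because column b is neither selected
  // by the view nor has a default value (column c is fine, it has a default).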
if (isView)
{
const ValueIdList &allCols = scan->getTableDesc()->getColumnList();
for (CollIndex i = 0; i < allCols.entries(); i++)
{
const ValueId idcol = allCols[i];
const NAColumn *nacol = idcol.getNAColumn();
if (!selectCols.contains(idcol) &&
!nacol->getDefaultValue() &&
!nacol->isSystemColumn())
{
isInsertable = FALSE;
break;
}
} // for allCols
} // isView
return TRUE;
}
NABoolean RelRoot::isUpdatableCursor() // this is NOT const
{
NABoolean junk;
if (!isUpdatableBasic(FALSE, junk)) return FALSE;
// Ansi 13.1 SR 5a -- no updatability clause specified, but ORDER BY was.
if (!updatableSelect_)
if (reqdOrder().entries()) return FALSE; // ORDER BY col-list
// ##When INSENSITIVE and SCROLL are supported, this rule also applies
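  // For illustration (hypothetical example): a cursor declared as
  //   DECLARE c1 CURSOR FOR SELECT a, b FROM t ORDER BY a;
  // (no FOR UPDATE or FOR READ ONLY clause, but with ORDER BY) is treated
  // as non-updatable by the rule above.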
// The following mods to the updatable-column list are only done if
// we have to (for efficiency).
if (!updateCol().entries() || reqdOrder().entries()) {
// "FOR UPDATE;" w/o col-list --
// is equivalent to "FOR UPDATE OF all-cols", per Ansi 13.1 SR 5b + 13.
//
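    // For illustration (hypothetical example):
    //   DECLARE c1 CURSOR FOR SELECT a, b FROM t FOR UPDATE;
    // is handled here as if it had been written
    //   DECLARE c1 CURSOR FOR SELECT a, b FROM t FOR UPDATE OF a, b;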
ValueIdSet upd(updateCol());
if (!upd.entries()) {
const ColumnDescList &cols =
*getScanNode()->getRETDesc()->getColumnList();
for (CollIndex i = 0; i < cols.entries(); i++) {
const ValueId idcol = cols[i]->getValueId();
const NAColumn *nacol = idcol.getNAColumn(TRUE/*okIfNotColumn*/);
if (nacol)
upd += idcol;
}
}
// Genesis 10-990201-0094. Ansi 17.18 SR 5.
// Remove any ORDER BY cols from the FOR UPDATE OF cols,
// then we let cli/Statement handle it (error CLI_INVALID_UPDATE_COLUMN).
// ## We really should enhance StaticCompiler to catch these
// ## syntax errors (and also CLI_NON_UPDATABLE_SELECT_CURSOR)
// ## at compile-time not run-time.
// ## This would require intersecting an updateWhereCurrentOf's
// ## newRecExpr's target columns' NAColumns (or full-col-names)
// ## with its
// ## cursor's updateCols' NAColumns (or full-col-names),
// ## via some extra lookup in cursor PLTs in StmtDeclStatCurs process().
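    // For illustration (hypothetical example): for
    //   DECLARE c1 CURSOR FOR SELECT a, b FROM t ORDER BY a FOR UPDATE OF a, b;
    // column a is removed from the updatable-column list here, so that a later
    //   UPDATE t SET a = ... WHERE CURRENT OF c1;
    // fails at run time with CLI_INVALID_UPDATE_COLUMN.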
upd -= ValueIdSet(reqdOrder());
updateCol() = ValueIdList(upd); // this is NOT const
}
if (!updateCol().entries()) return FALSE;
return TRUE;
}
NABoolean RelRoot::isUpdatableView(NABoolean &isInsertable) const
{
isInsertable = TRUE;
if (!isUpdatableBasic(TRUE, isInsertable))
{
isInsertable = FALSE;
return FALSE;
}
return TRUE;
}
void RelRoot::transformNode(NormWA & normWARef,
ExprGroupId & locationOfPointerToMe)
{
CMPASSERT( this == locationOfPointerToMe );
if (nodeIsTransformed())
return;
markAsTransformed();
// QSTUFF
// in case of embedded updates or deletes we have to prevent outer
// predicates being pushed into subtrees being generated by a generic
// update. We achieve that by recording whether a node is at the root
// of a GenericUpdate subtree and preventing predicates being pushed
// beyond that node. By construction we know that those nodes are either
// anti semi-joins or unary updates.
// we prevent predicates from being pushed down by forcing them not to be
// covered by the coverTest method, which in turn causes them not to be
// pushed down by pushdownCoveredExpr.
// This works fine except for equality predicates (x.x = 10), which are usually
// veggyfied by the transformation pass. Since the compiler assumes that all those
// predicates have been pushed down to the leaves, it will just forget about
// them at code generation time. To prevent that we disable generation of
// veggies for constant equality terms and rely on pushdownCoveredExpr
// to do the right thing...which seems to work just fine.
if (getGroupAttr()->isEmbeddedUpdateOrDelete())
normWARef.setInEmbeddedUpdateOrDelete(TRUE);
// QSTUFF
// Embedded insert has the same equality predicate pushdown problems
// as embedded updates or deletes. Set the flag to prevent the pushdown.
if (getGroupAttr()->isEmbeddedInsert())
normWARef.setInEmbeddedInsert(TRUE);
// ---------------------------------------------------------------------
// Make a working copy of the NormWA for each (sub)query tree.
// ---------------------------------------------------------------------
NormWA newNormWA(normWARef);
// ---------------------------------------------------------------------
// Each Subquery represents its own region for the construction of
// VEGPredicates.
// ---------------------------------------------------------------------
if (isTrueRoot())
newNormWA.allocateAndSetVEGRegion(IMPORT_AND_EXPORT,this);
else
newNormWA.clearStateInformation(); // each subquery tree has its own state
  // A RelRoot's predicates in the selectPred() are to be evaluated
  // above the context of the RelRoot. Predicates from the child of
  // the RelRoot are to stay there.
  //
  // Predicates found at this stage in a RelRoot were pushed
  // down by the parent to be transformed here. The parent was either
  // a Rename node, or I'm the right child of a Semi or Outer Join.
  //
// ---------------------------------------------------------------------
// Make values available to child
// ---------------------------------------------------------------------
child(0)->getGroupAttr()->addCharacteristicInputs
(getGroupAttr()->getCharacteristicInputs());
// ---------------------------------------------------------------------
// Transform the child
// ---------------------------------------------------------------------
child(0)->transformNode(newNormWA, child(0));
if ((isTrueRoot()) &&
(child(0)) &&
((child(0)->getOperatorType() == REL_SORT_LOGICAL) ||
((child(0)->getOperatorType() == REL_FIRST_N) &&
((child(0)->child(0)) &&
(child(0)->child(0)->getOperatorType() == REL_SORT_LOGICAL)))))
{
SortLogical * sl = NULL;
if (child(0)->getOperatorType() == REL_SORT_LOGICAL)
sl = (SortLogical*)child(0)->castToRelExpr();
else
sl = (SortLogical*)child(0)->child(0)->castToRelExpr();
if (NOT hasOrderBy())
{
// move order by sort key from SortLogical child to me.
reqdOrder() = sl->getSortKey();
}
}
// ---------------------------------------------------------------------
// Transform the computable expressions associated with me.
// If a subquery appears in the compute list, then let the subquery
// transformation cause a semijoin to be performed between the
// child of the RelRoot and the subquery.
// ---------------------------------------------------------------------
newNormWA.setInSelectList() ;
if (compExpr().transformNode(newNormWA, child(0),
getGroupAttr()->getCharacteristicInputs()))
{
// -----------------------------------------------------------------
// Transform my new child.
// -----------------------------------------------------------------
child(0)->transformNode(newNormWA, child(0));
}
newNormWA.restoreInSelectList() ;
// ---------------------------------------------------------------------
// Definitely no subqueries in the host variables, dynamic parameters
// and constant values.
// ---------------------------------------------------------------------
if (inputVars().transformNode(newNormWA, child(0),
getGroupAttr()->getCharacteristicInputs()))
{
ABORT("Internal error in RelRoot::transformNode - subquery in inputVars");
}
// ---------------------------------------------------------------------
// Definitely no subqueries in the order by list, at least until SQL MCLLXIV!
// ---------------------------------------------------------------------
if (reqdOrder().transformNode(newNormWA, child(0),
getGroupAttr()->getCharacteristicInputs()))
{
ABORT("Internal error in RelRoot::transformNode - subquery in reqdOrder");
}
pullUpPreds();
// transform the selection predicates
transformSelectPred(newNormWA, locationOfPointerToMe);
// We are currently assuming that no subqueries have been introduced above me;
// any new subquery parent would just silently be ignored!
CMPASSERT( this == locationOfPointerToMe ); // Genesis 10-970828-6025
normWARef.setCorrelatedSubqCount(newNormWA.getCorrelatedSubqCount());
normWARef.setContainsSemiJoinsToBeTransformed
(newNormWA.containsSemiJoinsToBeTransformed());
if (isTrueRoot())
{
// -----------------------------------------------------------------
// Sometimes a Left Join can be transformed to an Inner Join if
// there are binary comparison predicates that can filter out
      // null augmented rows. In such a case, the VEGRegion created by
      // the Left Join needs to be merged into its parent VEGRegion.
// -----------------------------------------------------------------
normWARef.processVEGRegions();
// Restore the original VEGRegion.
newNormWA.restoreOriginalVEGRegion();
// if updatability of the cursor was not disabled explicitly
// by specifying a READ ONLY clause, then check to see if
// the cursor really is updatable. Retrieve child's pkeys,
// if the cursor is updatable.
if (updatableSelect() == TRUE
// QSTUFF
&&
// we allow simple views containing embedded deletes
// to be updated...but that does not translate into an
        // updatable cursor
! child(0)->getGroupAttr()->isGenericUpdateRoot()
// QSTUFF
)
{
if (isUpdatableCursor())
{
updatableSelect() = TRUE;
// add child's clustering key columns to pkeyList.
// Convert nodes are added to convert the key value to
// the actual key type at runtime. The key value id gets
// replaced by a veg ref, so it is important that we 'remember'
// what the correct key type is and then convert to that type.
// This list is used to generate expression to compute a row
// of primary key values that will be returned to CLI so it
// could be passed in to an UPDATE...WHERE CURRENT OF... query.
// if child is a FirstN node, skip it.
Scan * scan = NULL;
if ((child(0)->castToRelExpr()->getOperatorType() == REL_FIRST_N) &&
(child(0)->child(0)))
scan = (Scan *)child(0)->child(0)->castToRelExpr();
else
scan = (Scan *)child(0)->castToRelExpr();
const ValueIdList * keyList =
&(scan->getTableDesc()->getClusteringIndex()->getIndexKey());
CollIndex i = 0;
for (i = 0; i < keyList->entries(); i++)
{
ItemExpr * castNode =
new(newNormWA.wHeap()) Cast((*keyList)[i].getItemExpr(),
&((*keyList)[i].getType()));
castNode->synthTypeAndValueId();
pkeyList().insert(castNode->getValueId());
}
ValueIdList nonKeyColList;
scan->getTableDesc()->getClusteringIndex()->getNonKeyColumnList(nonKeyColList);
for (i = 0; i < nonKeyColList.entries(); i++)
{
ItemExpr * castNode =
new(newNormWA.wHeap()) Cast(nonKeyColList[i].getItemExpr(),
&(nonKeyColList[i].getType()));
castNode->synthTypeAndValueId();
pkeyList().insert(castNode->getValueId());
}
} // updatable cursor select
else // nonupdatable cursor
{
updatableSelect() = FALSE;
if (updateColTree_)
{
// cursor has FOR UPDATE OF clause that can't be honored.
*CmpCommon::diags() << DgSqlCode(-4118);
locationOfPointerToMe = (RelExpr*)NULL;
}
}
}
else
updatableSelect() = FALSE;
}
else
{
// -----------------------------------------------------------------
// Modify the Group Attributes of my child so that it receives all
// the input values that I receive.
// Assign my selection predicates to the child.
// -----------------------------------------------------------------
child(0)->getGroupAttr()->addCharacteristicInputs
(getGroupAttr()->getCharacteristicInputs());
child(0)->selectionPred() += getSelectionPred();
// -- Triggers
child(0)->getInliningInfo().merge(&getInliningInfo());
locationOfPointerToMe = child(0); // my parent now -> my child
child(0)->setFirstNRows(getFirstNRows());
deleteInstance(); // Goodbye!
} // eliminate intermediate RelRoots
} // RelRoot::transformNode()
// -----------------------------------------------------------------------
// RelRoot::pullUpPreds()
// -----------------------------------------------------------------------
void RelRoot::pullUpPreds()
{
// A RelRoot never pulls up predicates from its children.
child(0)->recomputeOuterReferences();
} // RelRoot::pullUpPreds()
// -----------------------------------------------------------------------
// RelRoot::recomputeOuterReferences()
// -----------------------------------------------------------------------
void RelRoot::recomputeOuterReferences()
{
// ---------------------------------------------------------------------
// Delete all those input values that are no longer referenced on
// this operator because the predicates that reference them have
// been pulled up.
// ---------------------------------------------------------------------
if (NOT getGroupAttr()->getCharacteristicInputs().isEmpty())
{
ValueIdSet leafValues, emptySet;
GroupAttributes emptyGA;
child(0)->getGroupAttr()->getCharacteristicInputs().
getLeafValuesForCoverTest(leafValues, emptyGA, emptySet);
CMPASSERT((getGroupAttr()->getCharacteristicInputs().contains
(child(0)->getGroupAttr()->getCharacteristicInputs())) ||
(getGroupAttr()->getCharacteristicInputs().contains (leafValues)));
ValueIdSet outerRefs = getGroupAttr()->getCharacteristicInputs();
// Remove from outerRefs those valueIds that are not needed
// by my selection predicate or by my computed expression list.
// Need to add the orderby list since it is not a subset of the
// computed expression list.
ValueIdSet allMyExpr(getSelectionPred());
allMyExpr.insertList(compExpr());
allMyExpr.insertList(reqdOrder());
allMyExpr.weedOutUnreferenced(outerRefs);
// Add to outerRefs those that my child need.
outerRefs += child(0).getPtr()->getGroupAttr()->getCharacteristicInputs();
// set my Character Inputs to this new minimal set.
getGroupAttr()->setCharacteristicInputs(outerRefs);
}
} // RelRoot::recomputeOuterReferences()
// -----------------------------------------------------------------------
// RelRoot::rewriteNode()
// -----------------------------------------------------------------------
void RelRoot::rewriteNode(NormWA & normWARef)
{
CMPASSERT(isTrueRoot());
// ---------------------------------------------------------------------
// Save the original external inputs. The original values have to be
// made available by someone and that someone is the top root.
// --------------------------------------------------------------------
ValueIdSet externalInputs(getGroupAttr()->getCharacteristicInputs());
// ---------------------------------------------------------------------
// Rewrite the value expressions using the VEG expressions that are
// created when the transitive closure of "=" predicates was computed.
// Transform a Left Join to an Inner Join, whenever possible.
// ---------------------------------------------------------------------
RelExpr::rewriteNode(normWARef);
// ---------------------------------------------------------------------
// Add the original external inputs to the characteristic inputs.
// --------------------------------------------------------------------
getGroupAttr()->addCharacteristicInputs(externalInputs);
// ---------------------------------------------------------------------
// Rewrite expressions in the computable expressions.
// ---------------------------------------------------------------------
if (compExpr().normalizeNode(normWARef))
{
}
// ---------------------------------------------------------------------
// Rewrite expressions in the sort key list.
// ---------------------------------------------------------------------
if (reqdOrder().normalizeNode(normWARef))
{
}
// ---------------------------------------------------------------------
// Rewrite expressions in the pkey list.
// ---------------------------------------------------------------------
if ((updatableSelect() == TRUE) &&
(pkeyList().normalizeNode(normWARef)))
{
}
} // RelRoot::rewriteNode()
// -----------------------------------------------------------------------
// RelRoot::normalizeNode()
// -----------------------------------------------------------------------
RelExpr * RelRoot::normalizeNode(NormWA & normWARef)
{
if (nodeIsNormalized())
return this;
markAsNormalized();
CMPASSERT(isTrueRoot());
// ---------------------------------------------------------------------
// Locate the VEGRegion for the root.
// ---------------------------------------------------------------------
normWARef.locateAndSetVEGRegion(this);
// ---------------------------------------------------------------------
// Rewrite value expressions in the query tree using the VEG notation.
// Convert Left Joins to Inner Joins, if possible.
// Note that this is an extra walk through the query tree and is
// hidden in between the tranformNode() and normalizeNode() phases.
// Its purpose is to perform a top-down, left-to-right tree walk in
// the transformed tree and initiate the rewrite on its way up.
// This will cause all of the values that are generated at the leaves
// to be normalized, i.e, rewritten in terms of the VEG notation,
// before expressions that reference them further up in the tree
// are normalized.
// ---------------------------------------------------------------------
rewriteNode(normWARef);
// ---------------------------------------------------------------------
// Check which expressions can be evaluated by my child.
// Modify the Group Attributes of those children who inherit some of
// these expressions.
// ---------------------------------------------------------------------
pushdownCoveredExpr(getGroupAttr()->getCharacteristicOutputs(),
getGroupAttr()->getCharacteristicInputs(),
selectionPred()
);
ValueIdList orderByList = reqdOrder();
ValueIdSet myCharInput = getGroupAttr()->getCharacteristicInputs();
// This was added to fix the problem exposed by the case 10-010321-1842
// Compiler failed to create a plan when query had sort order req. by
// column number which is expression containing dynamic parameter and
// covered by another column in RelRoot requiredOutput like
// SELECT a,a/(?p) FROM t ORDER BY 2; For this case we need to
// enforce that Sort operator can sort on this expression by keeping
// parameter ?p in RelRoot child's group requiredInput. Previously,
// expression got removed from this group requiredOutput, the only
// reference to ?p was removed, and as a result ?p was not kept in
// this group requiredInput.
// NOTE. This solution will force the Sort operator to be done
// directly below the Root node.
if (orderByList.entries() > 0)
{
ValueIdSet orderBySet(orderByList),
coveredOrderBySet,
inputsNeededForOrderBy,
coveredOrderBySubExpr,
uncoveredOrderByExpr;
GroupAttributes * childGAPtr = child(0).getPtr()->getGroupAttr();
childGAPtr->coverTest(orderBySet,
myCharInput,
coveredOrderBySet,
inputsNeededForOrderBy,
&coveredOrderBySubExpr);
childGAPtr->addCharacteristicInputs(inputsNeededForOrderBy);
}
// ---------------------------------------------------------------------
// Normalize the child.
// ---------------------------------------------------------------------
child(0) = child(0)->normalizeNode(normWARef);
// ---------------------------------------------------------------------
// Restore the region before returning
// ---------------------------------------------------------------------
normWARef.restoreOriginalVEGRegion();
fixEssentialCharacteristicOutputs();
if (NOT normWARef.getExtraHubVertex())
normWARef.setExtraHubVertex(this);
// ---------------------------------------------------------------------
// Synthesize logical properties
// ---------------------------------------------------------------------
synthLogProp(&normWARef);
normWARef.setMergeUpdDelCount(0);
  // check for any errors that occurred during normalization
if (CmpCommon::diags()->mainSQLCODE() < 0)
return NULL;
else
return this;
} // RelRoot::normalizeNode()
// -----------------------------------------------------------------------
// RelRoot::semanticQueryOptimizeNode()
// -----------------------------------------------------------------------
RelExpr * RelRoot::semanticQueryOptimizeNode(NormWA & normWARef)
{
if (nodeIsSemanticQueryOptimized())
return this;
markAsSemanticQueryOptimized() ;
  // semanticQueryOptimize (SQO) is undertaken only if
  // (a) there are subqueries that can be unnested OR
  // (b) semijoins that can be transformed to inner joins OR
  // (c) joins that can be eliminated OR
  // (d) joins that can be extra hub
if (normWARef.requiresSemanticQueryOptimization() )
{
// make a copy of the current query tree. If there is an exception
    // during the SQO phase we can proceed with the copied tree.
    // SQO can provide improved performance but is not needed for
// correctness.
RelExpr *copyTree = child(0)->
copyRelExprTree(CmpCommon::statementHeap());
Lng32 numSQOPasses = 0;
Lng32 multiPassJoinElimLimit =
ActiveSchemaDB()->getDefaults().getAsLong(MULTI_PASS_JOIN_ELIM_LIMIT);
try
{
while ((numSQOPasses == 0) ||
(((numSQOPasses < multiPassJoinElimLimit) ||
(multiPassJoinElimLimit < 0)) &&
(normWARef.containsJoinsToBeEliminated() ||
normWARef.checkForExtraHubTables())))
{
normWARef.locateAndSetVEGRegion(this);
normWARef.setCheckForExtraHubTables(FALSE);
// ---------------------------------------------------------------------
// Semantic Query Optimize the child.
// ---------------------------------------------------------------------
child(0) = child(0)->semanticQueryOptimizeNode(normWARef);
child(0) = inlineTempTablesForCSEs(normWARef);
normWARef.restoreOriginalVEGRegion();
normWARef.setExtraHubVertex(NULL);
normWARef.setContainsJoinsToBeEliminated(FALSE);
recursivePushDownCoveredExpr(&normWARef);
numSQOPasses++ ;
}
}
catch(AssertException & e)
{
// Undo any common expression changes done during Unnesting so that
// we can start over.
normWARef.getSqoWA()->undoChanges(normWARef);
*CmpCommon::diags() << DgSqlCode(2078)
<< DgString0(e.getCondition())
<< DgString1(e.getFileName())
<< DgInt0((Lng32)e.getLineNum());
child(0) = copyTree ;
if (normWARef.requiresRecursivePushdown())
{
recursivePushDownCoveredExpr(&normWARef,
FALSE // no need to do any synthLogProp
);
}
}
}
else if (normWARef.requiresRecursivePushdown())
{
recursivePushDownCoveredExpr(&normWARef,
FALSE // no need to do any synthLogProp
);
}
// for debugging
if (normWARef.getCommonSubExprRefCount() > 0 &&
CmpCommon::getDefault(CSE_PRINT_DEBUG_INFO) == DF_ON)
CommonSubExprRef::displayAll();
return this;
} // RelRoot::semanticQueryOptimizeNode()
RelExpr * RelRoot::inlineTempTablesForCSEs(NormWA & normWARef)
{
RelExpr *result = NULL;
const LIST(CSEInfo *) * cses = CmpCommon::statement()->getCSEInfoList();
if (cses && cses->entries() > 0)
{
// If this query tree has any common subexpressions that need
// to be materialized as temp tables, then insert these
// materialization steps (called CTi below) between the root
// and its child node, Q, like this:
//
// Root Root
// | |
// Q MapValueIds
// |
// BlockedUnion
// / \
// Union Q
// / \
// ... CTn
// /
// Union
// / \
// CT1 CT2
//
// The common subexpressions may depend on each other, so make
// sure to create them in the right order and to use blocked
// union instead of a regular union if there are such
// dependencies.
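      // For illustration (hypothetical example): with
      //   WITH cse1 AS (SELECT ...), cse2 AS (SELECT ... FROM cse1) SELECT ...
      // the temp-table insert for cse1 must complete before the insert for
      // cse2 starts, so the two inserts are combined with a blocked union;
      // independent CSEs can share an ordinary (parallel) union instead.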
NABitVector toDoVec; // still to be done
NABitVector readyVec; // ready, all predecessors are done
NABitVector doneVec; // already done
// first, figure out all the CSEs that we have to process
for (CollIndex i=0; i<cses->entries(); i++)
if (cses->at(i)->getInsertIntoTemp() != NULL)
toDoVec += i;
// Loop over the to-do list, finding new entries for which we
// already processed all of their predecessors. In this context,
// the children are the predecessors, since we have to build the
// graph bottom-up. In other words, find a topological reverse
// order of the lexical graph of the CSEs.
while (toDoVec.entries() > 0)
{
RelExpr *thisLevelOfInserts = NULL;
for (CollIndex c=0; toDoVec.nextUsed(c); c++)
{
CSEInfo *info = cses->at(c);
// predecessor (child) CSEs that have to be computed before we
// can attempt to compute this one
const LIST(CountedCSEInfo) &predecessors(info->getChildCSEs());
NABoolean isReady = TRUE;
for (CollIndex p=0; p<predecessors.entries(); p++)
{
Int32 cseId = predecessors[p].getInfo()->getCSEId();
CMPASSERT(cses->at(cseId)->getCSEId() == cseId);
if (!doneVec.contains(cseId) &&
cses->at(cseId)->getInsertIntoTemp() != NULL)
// a predecessor CSE for which we have to
// materialize a temp table has not yet
// been processed - can't do this one
isReady = FALSE;
}
if (isReady)
{
// no predecessors or all predecessors have been
// done
readyVec += c;
}
}
// At this point we will have one or more CSEs in readyVec.
// All of their predecessors (if any) have already been
// processed. Now make a Union backbone to process all the
// CSEs in readyVec in parallel.
// If we find nothing, we may have circular dependencies,
// and this is not allowed
// (recursive queries will have to be handled separately)
CMPASSERT(readyVec.entries() > 0);
for (CollIndex r=0; readyVec.nextUsed(r); r++)
{
CSEInfo *info = cses->at(r);
if (thisLevelOfInserts == NULL)
thisLevelOfInserts = info->getInsertIntoTemp();
else
{
thisLevelOfInserts = CommonSubExprRef::makeUnion(
thisLevelOfInserts,
info->getInsertIntoTemp(),
FALSE);
}
} // loop over ready list
if (result == NULL)
result = thisLevelOfInserts;
else
result = CommonSubExprRef::makeUnion(
result,
thisLevelOfInserts,
TRUE);
toDoVec -= readyVec;
doneVec += readyVec;
readyVec.clear();
} // while loop over to-do-list
} // CSEs exist for this statement
if (result)
{
const ValueIdSet &childOutputs(
child(0).getGroupAttr()->getCharacteristicOutputs());
ValueIdList outputValueList;
ValueIdList unionValueList;
// make a final blocked union between the inlined
// insert statements and the actual query
Union *topUnion = CommonSubExprRef::makeUnion(
result,
child(0),
TRUE);
// This top-level union has a right child that produces the
// desired outputs. The left child produces fake dummy ValueIds,
// it doesn't produce any rows. Since the root expects the right
// child's ValueIds, we put a MapValueIds on top that maps the
// values back to what they were in the right child.
for (ValueId o=childOutputs.init();
childOutputs.next(o);
childOutputs.advance(o))
{
ItemExpr *leftFake = new(CmpCommon::statementHeap())
NATypeToItem(o.getType().newCopy(CmpCommon::statementHeap()));
leftFake->synthTypeAndValueId();
ValueIdUnion *vidUnion = new(CmpCommon::statementHeap())
ValueIdUnion(leftFake->getValueId(),
o,
NULL_VALUE_ID,
topUnion->getUnionFlags());
vidUnion->synthTypeAndValueId();
topUnion->addValueIdUnion(vidUnion->getValueId(),
CmpCommon::statementHeap());
outputValueList.insert(o);
unionValueList.insert(vidUnion->getValueId());
topUnion->getGroupAttr()->addCharacteristicOutput(
vidUnion->getValueId());
}
result = new(CmpCommon::statementHeap())
MapValueIds(topUnion,
ValueIdMap(outputValueList, unionValueList),
CmpCommon::statementHeap());
result->setGroupAttr(new (CmpCommon::statementHeap()) GroupAttributes());
result->getGroupAttr()->addCharacteristicInputs(
topUnion->getGroupAttr()->getCharacteristicInputs());
result->getGroupAttr()->setCharacteristicOutputs(childOutputs);
result->synthLogProp(&normWARef);
}
else
// no change, return child pointer
result = child(0);
return result;
}
// -----------------------------------------------------------------------
// Filter::normalizeNode()
// -----------------------------------------------------------------------
RelExpr * Filter::normalizeNode(NormWA & normWARef)
{
if (nodeIsNormalized())
return this;
markAsNormalized();
ValueIdSet outerReferences, nonLocalPreds;
ValueIdSet predsToPushDown, valuesReqdByParent, availableInputs;
// differs from the base class implementation in that
// predicates with outer references are not pushed down to child but
// are retained in this Filter node.
availableInputs = getGroupAttr()->getCharacteristicInputs();
availableInputs.getOuterReferences(outerReferences);
availableInputs -= outerReferences ;
predsToPushDown = selectionPred() ;
if (selectionPred().getReferencedPredicates(outerReferences, nonLocalPreds))
{
predsToPushDown -= nonLocalPreds;
computeValuesReqdForPredicates(nonLocalPreds,
valuesReqdByParent) ;
}
pushdownCoveredExpr(getGroupAttr()->getCharacteristicOutputs(),
availableInputs,
predsToPushDown,
&valuesReqdByParent);
CMPASSERT( predsToPushDown.isEmpty() );
child(0) = child(0)->normalizeNode(normWARef);
fixEssentialCharacteristicOutputs();
return this;
} // Filter::normalizeNode()
// -----------------------------------------------------------------------
// SortLogical::normalizeNode()
// -----------------------------------------------------------------------
RelExpr * SortLogical::normalizeNode(NormWA & normWARef)
{
if (nodeIsNormalized())
return this;
RelExpr::normalizeNode(normWARef);
// eliminate me, I am no longer needed.
return child(0);
} // SortLogical::normalizeNode()
NABoolean RelExpr::hasFilterChild()
{
if (getArity() == 1 && child(0)->getOperatorType() == REL_FILTER)
return TRUE;
else if (getArity() == 1 && child(0)->getArity() == 1 &&
child(0)->child(0)->getOperatorType() == REL_FILTER)
return TRUE;
else
return FALSE;
}
// If subquery unnesting fails for some reason at a particular level
// then the Filter node at that level can be eliminated by pushing
// its selection predicate to its child. This is not strictly necessary
// as the optimizer has Rules to eliminate Filter nodes. But we do so
// since it helps with cardinality estimation after the SQO phase.
// The Filter node's selection predicates are only pushed down to its child,
// and not any further down the query tree.
void RelExpr::eliminateFilterChild()
{
if(child(0) &&
child(0)->getOperatorType() == REL_FILTER)
{
RelExpr* filterNode = child(0).getPtr() ;
filterNode->pushdownCoveredExpr(
filterNode->getGroupAttr()->getCharacteristicOutputs(),
filterNode->getGroupAttr()->getCharacteristicInputs(),
filterNode->selectionPred());
if (filterNode->selectionPred().isEmpty())
child(0) = filterNode->child(0) ;
else
{
// Pushdown failed to push the predicate for some reason.
// add it by hand and call pushdown again with an empty predicate
// to recompute the IO.
filterNode->child(0)->selectionPred() += filterNode->selectionPred();
filterNode->selectionPred().clear();
filterNode->pushdownCoveredExpr(
filterNode->getGroupAttr()->getCharacteristicOutputs(),
filterNode->getGroupAttr()->getCharacteristicInputs(),
filterNode->selectionPred());
child(0) = filterNode->child(0) ;
}
}
return ;
}
// called at the end of the SQO phase to guarantee that all outputs are
// minimal. Prior to this call the SQO phase can have outputs that
// are not minimal for these three reasons:
// (a) unnesting for a subquery failed and predicates from the Filter were
// pushed down only to its child
// (b) unnesting for a subquery failed because outputs from the left child
// tree could not be promoted sufficiently. getMoreOutputsIfPossible() can
// leave some nodes with more than the minimal set of outputs in this case.
// (c) The pullUpGroupBy transformation calls pushDownCoveredExpr only
// up to the children of the join being transformed and not all the way down.
void RelExpr::recursivePushDownCoveredExpr(NormWA * normWAPtr,
NABoolean doSynthLogProp)
{
Int32 arity = getArity();
// --------------------------------------------------------------------
// Check which expressions can be evaluated by my child.
// Modify the Group Attributes of those children who
// inherit some of these expressions.
// ---------------------------------------------------------------------
if (getOperator().match(REL_ANY_JOIN))
{
if ((NOT normWAPtr->getExtraHubVertex()) && !isExtraHub())
normWAPtr->setExtraHubVertex(this);
}
pushdownCoveredExpr(getGroupAttr()->getCharacteristicOutputs(),
getGroupAttr()->getCharacteristicInputs(),
selectionPred());
if (getOperator().match(REL_ANY_JOIN) && doSynthLogProp)
{
// Make sure equiJoinPredicates_ gets updated
      // in case pushdownCoveredExpr() changed any of the join's
// predicates.
synthLogProp();
}
// ---------------------------------------------------------------------
// pushDown expressions from children
// ---------------------------------------------------------------------
for (Int32 i = 0; i < arity; i++)
child(i)->recursivePushDownCoveredExpr(normWAPtr);
if (doSynthLogProp)
processCompRefOptConstraints(normWAPtr);
return;
}
// base class implementation does nothing
void RelExpr::processCompRefOptConstraints(NormWA * normWAPtr)
{
}
NABoolean RelExpr::prepareTreeForCSESharing(
const ValueIdSet &outputsToAdd,
const ValueIdSet &predicatesToRemove,
const ValueIdSet &newPredicatesToAdd,
const ValueIdSet &inputsToRemove,
ValueIdSet &valuesForVEGRewrite,
ValueIdSet &keyColumns,
CSEInfo *info)
{
NABoolean result = TRUE;
CollIndex nc = getArity();
ValueIdSet newLocalPredicates(newPredicatesToAdd);
ValueIdSet newVEGPreds;
newLocalPredicates.findAllOpType(ITM_VEG_PREDICATE, newVEGPreds);
// recursively call this for the children
for (CollIndex i=0; i<nc && result; i++)
{
ValueIdSet childPredsToRemove(predicatesToRemove);
ValueIdSet childPredsToAdd(newPredicatesToAdd);
ValueIdSet childAvailValues(outputsToAdd);
childAvailValues += child(i).getGroupAttr()->getCharacteristicOutputs();
childAvailValues += child(i).getGroupAttr()->getCharacteristicInputs();
childPredsToRemove.removeUnCoveredExprs(childAvailValues);
childPredsToAdd.removeUnCoveredExprs(childAvailValues);
result = child(i)->prepareTreeForCSESharing(
outputsToAdd,
childPredsToRemove,
childPredsToAdd,
inputsToRemove,
valuesForVEGRewrite,
keyColumns,
info);
// if the child already had or has added any of the requested
// outputs, then add them to our own char. outputs
ValueIdSet childAddedOutputs(
child(i).getGroupAttr()->getCharacteristicOutputs());
childAddedOutputs.intersectSet(outputsToAdd);
getGroupAttr()->addCharacteristicOutputs(childAddedOutputs);
// Todo: CSE: consider using recursivePushDownCoveredExpr
// instead of pushing these new predicates in this method
newVEGPreds.intersectSet(childPredsToAdd);
newLocalPredicates -= childPredsToAdd;
}
if (result)
{
// Remove the predicates from our selection predicates.
// Note that prepareMeForCSESharing() is supposed to remove
// these predicates from all other places in the node.
predicates_ -= predicatesToRemove;
// Todo: CSE: need to remove predicates that are "similar" to
// the ones requested, e.g. same columns and constants, but
      // an "=" operator with a different ValueId?
// add any predicates that aren't covered by one of the children
// and also add VEGPredicates that are covered by both of the
// children
newLocalPredicates += newVEGPreds;
predicates_ += newLocalPredicates;
// Remove the char. inputs the caller asked to remove.
// At this time we are not doing additional checks to
// ensure these inputs aren't referenced anymore in
// our node. We rely on the caller to ensure that
// these extra inputs are only needed by the predicates
// that we removed.
getGroupAttr()->removeCharacteristicInputs(inputsToRemove);
}
// Call a virtual method on this node to give it a chance to
// remove the predicates from any other places where they might be
// storing them, and to add any outputs it produces locally. Also
// give it a chance to say "no" to the whole idea of pulling out
// predicates and changing char. inputs and outputs (the default
// behavior).
if (result)
result = prepareMeForCSESharing(outputsToAdd,
predicatesToRemove,
newLocalPredicates,
inputsToRemove,
valuesForVEGRewrite,
keyColumns,
info);
return result;
}
// Note that the caller of this method is responsible for adding those
// new outputs to the group attributes that come from the children and
// for removing the requested inputs. The caller also removes
// "predicatesToRemove" from the selection predicates. This method
// only needs to do the following:
// - Add any new outputs to the char. outputs that are generated
// directly by this node (not by its children)
// - Add "newPredicatesToAdd" to any other places where predicates
//   are needed, remove them from the selection predicates if they
// should be stored elsewhere
// - Remove "predicatesToRemove" from this node
// (not from the children, that is done by the caller)
// - Make sure that "inputsToRemove" isn't referenced anywhere else
// in this node
NABoolean RelExpr::prepareMeForCSESharing(
const ValueIdSet &outputsToAdd,
const ValueIdSet &predicatesToRemove,
const ValueIdSet &newPredicatesToAdd,
const ValueIdSet &inputsToRemove,
ValueIdSet &valuesForVEGRewrite,
ValueIdSet &keyColumns,
CSEInfo *info)
{
// A class derived from RelExpr must explicitly define
// this method to support being part of a shared CSE
char buf[100];
snprintf(buf, sizeof(buf), "Operator %s not supported",
getText().data());
info->getConsumer(0)->emitCSEDiagnostics(buf);
return FALSE;
}
void Join::processCompRefOptConstraints(NormWA * normWAPtr)
{
if (CmpCommon::getDefault(ELIMINATE_REDUNDANT_JOINS) != DF_OFF)
{
GroupAttributes &myGA = *getGroupAttr();
GroupAttributes &leftGA = *child(0).getGroupAttr();
GroupAttributes &rightGA = *child(1).getGroupAttr();
const ValueIdSet &leftConstraints = leftGA.getConstraints();
const ValueIdSet &rightConstraints = rightGA.getConstraints();
if (normWAPtr && isInnerNonSemiJoin())
matchRIConstraint(leftGA,rightGA, normWAPtr) ;
    // Full Outer Join has a join pred that affects the rows that flow from the left
if (NOT isFullOuterJoin())
myGA.addSuitableCompRefOptConstraints(leftConstraints,
getSelectionPredicates(), this);
    // only non-semi inner joins rely solely on selection preds to control rows from
    // the right. Other joins use a join pred also.
if (isInnerNonSemiJoin())
myGA.addSuitableCompRefOptConstraints(rightConstraints,
getSelectionPredicates(), this);
}
}
void GroupByAgg::processCompRefOptConstraints(NormWA * normWAPtr)
{
if (CmpCommon::getDefault(ELIMINATE_REDUNDANT_JOINS) != DF_OFF)
{
getGroupAttr()->addSuitableCompRefOptConstraints
(child(0).getGroupAttr()->getConstraints(),getSelectionPredicates(), this);
}
}
void Filter::processCompRefOptConstraints(NormWA * normWAPtr)
{
if (CmpCommon::getDefault(ELIMINATE_REDUNDANT_JOINS) != DF_OFF)
{
GroupAttributes &myGA = *getGroupAttr();
myGA.addSuitableCompRefOptConstraints
(child(0).getGroupAttr()->getConstraints(),getSelectionPredicates(), this);
}
}
// ***********************************************************************
// $$$$ Rename
// member functions for class Rename,
// used by sub-classes: RenameTable and RenameReference
// ***********************************************************************
void Rename::transformNode(NormWA & normWARef,
ExprGroupId & locationOfPointerToMe)
{
CMPASSERT( this == locationOfPointerToMe );
// The rename table node has outlived its usefulness; remove from the tree.
locationOfPointerToMe = child(0);
// Move the predicates down to my child, OR my grandchild if child is a root.
// Move the characteristic inputs down to my child, AND my grandchild if
// child is a root node (the root must have at least as many inputs as any
// of its children -- see assertion in RelRoot::recomputeOuterReferences).
//
// This moving past the root to the grandchild seems like it should be
// unnecessary, that RelRoot::transformNode would do this for us anyway.
// The problem is if this RenameTable is in the topmost (outermost) scope
// and that scope has a predicate containing a subquery -- e.g.
// select * from (select a from t1) x where a>(select b from t2);
// -- without this "grandchild fix" the semijoin introduced by the subquery
// was being placed above the topmost root (!) and that entire subq pred
// was being lost. This was Genesis case 10-970828-6025.
RelExpr *descendant = child(0);
descendant->getGroupAttr()->addCharacteristicInputs // child
(getGroupAttr()->getCharacteristicInputs());
if (descendant->getOperatorType() == REL_ROOT)
descendant = descendant->child(0); // grandchild
descendant->selectionPred() += getSelectionPred(); // child or grandchild
descendant->getGroupAttr()->addCharacteristicInputs // child or grandchild
(getGroupAttr()->getCharacteristicInputs());
// transform my child
locationOfPointerToMe->transformNode(normWARef, locationOfPointerToMe);
// -- Triggers
locationOfPointerToMe->getInliningInfo().merge(&getInliningInfo());
// Verify that my child or whoever replaced it is now transformed
CMPASSERT( locationOfPointerToMe->nodeIsTransformed());
} // Rename::transformNode()
//////////////////////////////////////////////////////////////////////////////
// The purpose of this method is to fix the inputs of the tentative branch
// of the before triggers tree. After binding, the inputs for the temp insert
// side are the expressions that represent the NEW values. These expressions
// should not be inputs, but rather calculated in the temp insert node itself
// using as inputs just basic columns. This method calculates the correct
// inputs based on the inputs from above (inputs of the TSJ node), and the
// values generated below the tentativeGU node (the OLD values).
// When this is done, we call the transformNode() method of the superclass.
//////////////////////////////////////////////////////////////////////////////
void BeforeTrigger::transformNode(NormWA & normWARef,
ExprGroupId & locationOfPointerToMe)
{
// Call the inherited method to do the vanishing trick.
Rename::transformNode(normWARef, locationOfPointerToMe);
if (parentTSJ_ != NULL) // Is this the top most BeforeTrigger node?
{
// Find the interesting nodes we need.
RelExpr *tsjNode = parentTSJ_;
RelExpr *rootNode = tsjNode->child(1);
RelExpr *tempInsert = rootNode->child(0);
RelExpr *tupleNode = tempInsert->child(0);
// locationOfPointerToMe now points to the node below the tentative
    // node and before triggers. Its outputs are the values generated
// by the subtree below the original GU (including sub-queries).
const ValueIdSet& generatedValues =
locationOfPointerToMe->getGroupAttr()->getCharacteristicOutputs();
// The inputs of the TSJ node are the values needed from above:
// transition variables and the executeId value.
const ValueIdSet& externalInputs =
tsjNode->getGroupAttr()->getCharacteristicInputs();
// Together they are the set of basic input values the temp Insert
// node needs to evaluate the NEW expressions.
ValueIdSet minInputs;
minInputs.insert(generatedValues);
minInputs.insert(externalInputs);
// The root node has the max required inputs with all the expressions.
ValueIdSet maxInputs(rootNode->getGroupAttr()->getCharacteristicInputs());
// Leave only the inputs required to evaluate the expressions.
// problem is it also weeds out subqueries...
// maxInputs.weedOutUnreferenced(minInputs);
// Set the minimum inputs in all the nodes of the temp insert subtree.
rootNode ->getGroupAttr()->setCharacteristicInputs(minInputs);
tempInsert->getGroupAttr()->setCharacteristicInputs(minInputs);
tupleNode ->getGroupAttr()->setCharacteristicInputs(minInputs);
}
}
// ***********************************************************************
// $$$$ RelRoutine
// member functions for class RelRoutine
//
// The other intermediate classes derived from RelRoutine do not need
// their own recomputeOuterReferences() at this point.
// That would be classes like
// TableValuedFunction
// BuiltinTableValuedFunction
// IsolatedNonTableUDR
// for example
// ***********************************************************************
// -----------------------------------------------------------------------
// RelRoutine::transformNode()
// -----------------------------------------------------------------------
void RelRoutine::transformNode(NormWA &normWARef,
ExprGroupId & locationOfPointerToMe)
{
if (nodeIsTransformed())
return;
// ---------------------------------------------------------------------
// Transform the computable expressions associated with me.
// If a subquery appears in the compute list, then let the subquery
// transformation cause a join to be performed between the
  // node where we found the reference to the UDF on the left
// and the UDF on the right.
//
  // Note that procInputParamsVids and procAllParamsVids may now
  // be divergent since we don't transform the procAllParamsVids.
  // So we really should not use procAllParamsVids any more!
// ---------------------------------------------------------------------
if (getProcInputParamsVids().transformNode(normWARef, locationOfPointerToMe,
getGroupAttr()->getCharacteristicInputs()))
{
// -----------------------------------------------------------------
// Transform my new child.
// -----------------------------------------------------------------
locationOfPointerToMe->transformNode(normWARef, locationOfPointerToMe);
}
// Make sure all the normal stuff is taken care of.
  // We need to do this before we transform the inputs so that
  // we deal with the Tuple child that CallSP inserts below for subqueries
  // used as an input.
//
  // A call with a subquery as an input parameter gets transformed into something
// like this:
//
// At bind time the RelExpr tree for a Call with a subquery as an input
// looks like this:
//
// CallSP
// \
// Tuple(Subq)
//
// After transform it looks like this:
//
// CallSP
// \
// Join
// / \
// Tuple GrbyAgg
// \
// Scan
//
  // UDFs will not have the Tuple child; their subquery
  // and UDF inputs were transformed when we transformed the UDFunction
  // ItemExpr earlier.
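  // For illustration (hypothetical example): a statement such as
  //   CALL mysp((SELECT MAX(b) FROM t), ?);
  // binds with a Tuple child that computes the subquery value; after
  // transformation the subquery becomes the GroupByAgg/Scan subtree that is
  // joined to the Tuple, as shown above.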
// transform the selection predicates
transformSelectPred(normWARef, locationOfPointerToMe);
primeGroupAttributes();
markAsTransformed();
}
// -----------------------------------------------------------------------
// RelRoutine::recomputeOuterReferences()
// -----------------------------------------------------------------------
void RelRoutine::recomputeOuterReferences()
{
// ---------------------------------------------------------------------
// Delete all those input values that are not referenced
// by the input parameters.
// ---------------------------------------------------------------------
if (NOT getGroupAttr()->getCharacteristicInputs().isEmpty())
{
ValueIdSet outerRefs = getGroupAttr()->getCharacteristicInputs();
// Weed out those inputs not needed by my parameters or
// by my predicates
GroupAttributes emptyGA;
ValueIdSet leafExprSet, emptySet;
ValueIdSet exprSet(getProcInputParamsVids());
exprSet.getLeafValuesForCoverTest(leafExprSet, emptyGA, emptySet);
leafExprSet += getSelectionPred();
leafExprSet.weedOutUnreferenced(outerRefs);
getGroupAttr()->setCharacteristicInputs(outerRefs);
}
}
// -----------------------------------------------------------------------
// RelRoutine::rewriteNode()
// -----------------------------------------------------------------------
void RelRoutine::rewriteNode(NormWA &normWARef)
{
// ---------------------------------------------------------------------
// Make sure to rewrite all of our parameter inputs and predicates.
// ---------------------------------------------------------------------
selectionPred().normalizeNode(normWARef);
getProcInputParamsVids().normalizeNode(normWARef);
getProcOutputParamsVids().normalizeNode(normWARef);
getProcAllParamsVids().normalizeNode(normWARef);
// if a CallSP had a subquery or UDFs as an input parameter it gets attached
// as child(0) at bind time, so we need to rewrite it too. This
// child gets moved by the optimizer - UdrToTSJFlow rule.
// If IsolatedScalarUDFs, on the other hand, contains subqueries or UDFs in
// its input parameters, we transform those the normal way at transform
// time.
if (child(0) != NULL)
child(0)->rewriteNode(normWARef);
// ---------------------------------------------------------------------
// Rewrite my own Group Attributes
// ---------------------------------------------------------------------
getGroupAttr()->normalizeInputsAndOutputs(normWARef);
}
// ***********************************************************************
// $$$$ Tuple
// member functions for class Tuple
// ***********************************************************************
void Tuple::transformNode(NormWA & normWARef,
ExprGroupId &locationOfPointerToMe)
{
CMPASSERT( this == locationOfPointerToMe );
if (nodeIsTransformed())
return;
//markAsTransformed(); NO! We call RelExpr::transformNode() below!
ValueIdSet subqueryOrIsolatedUDFunctionPredicates;
// remove the subquery or Isolated UDFunction predicates from the
// tupleExpr() list
tupleExpr().removeSubqueryOrIsolatedUDFunctionPredicates(
subqueryOrIsolatedUDFunctionPredicates);
// -- Triggers
getGroupAttr()->setCharacteristicOutputs(tupleExpr());
// ---------------------------------------------------------------------
// Save the original inputs to use when the subquery predicates get
// transformed.
// ---------------------------------------------------------------------
ValueIdSet externalInputs = getGroupAttr()->getCharacteristicInputs();
// Let RelExpr:: do the work
RelExpr::transformNode(normWARef, locationOfPointerToMe);
// ---------------------------------------------------------------------
// Transform the subqueries or Isolated UDFunctions in the tupleExpr() list
// ---------------------------------------------------------------------
  // semiJoins that are added should be added directly below my
// original parent
if (subqueryOrIsolatedUDFunctionPredicates.transformNode(normWARef,
locationOfPointerToMe,
externalInputs))
{
locationOfPointerToMe->transformNode(normWARef,
locationOfPointerToMe);
// We are on our way back from a number of transformNode()s.
// Let's just make sure that the final usurper got transformed
CMPASSERT( locationOfPointerToMe->nodeIsTransformed());
}
} // Tuple::transformNode()
// -----------------------------------------------------------------------
// Tuple::recomputeOuterReferences()
// -----------------------------------------------------------------------
void Tuple::recomputeOuterReferences()
{
// ---------------------------------------------------------------------
// Delete all those input values that are no longer referenced on
// this operator because the predicates that reference them have
// been pulled up.
// ---------------------------------------------------------------------
ValueIdSet outerRefs = getGroupAttr()->getCharacteristicInputs();
ValueIdSet allMyExpr(getSelectionPred());
allMyExpr.insertList(tupleExpr());
allMyExpr.weedOutUnreferenced(outerRefs);
getGroupAttr()->setCharacteristicInputs(outerRefs);
} // Tuple::recomputeOuterReferences()
// -----------------------------------------------------------------------
// Tuple::rewriteNode()
// -----------------------------------------------------------------------
void Tuple::rewriteNode(NormWA & normWARef)
{
// ---------------------------------------------------------------------
// Rewrite the tuple expressions
// ---------------------------------------------------------------------
if (tupleExpr().normalizeNode(normWARef))
{
}
// ---------------------------------------------------------------------
// Rewrite the selection expressions
// ---------------------------------------------------------------------
if (selectionPred().normalizeNode(normWARef))
{
}
// ---------------------------------------------------------------------
// Rewrite my own Group Attributes
// ---------------------------------------------------------------------
getGroupAttr()->normalizeInputsAndOutputs(normWARef);
} // Tuple::rewriteNode()
// -----------------------------------------------------------------------
// Tuple::normalizeNode()
// -----------------------------------------------------------------------
RelExpr * Tuple::normalizeNode(NormWA & normWARef)
{
// -- Triggers
// If predicates should not be pushed down here, delete them.
if (rejectPredicates() && !selectionPred().isEmpty())
selectionPred().clear();
// Let RelExpr:: do the work
return RelExpr::normalizeNode(normWARef);
}
// ***********************************************************************
// member functions for class TupleList
// ***********************************************************************
void TupleList::transformNode(NormWA & normWARef,
ExprGroupId &locationOfPointerToMe)
{
Tuple::transformNode(normWARef, locationOfPointerToMe);
} // TupleList::transformNode()
// -----------------------------------------------------------------------
// TupleList::recomputeOuterReferences()
// -----------------------------------------------------------------------
void TupleList::recomputeOuterReferences()
{
ValueIdSet outerRefs = getGroupAttr()->getCharacteristicInputs();
ValueIdSet allMyExpr(getSelectionPred());
ValueIdSet refExpr, emptySet;
GroupAttributes emptyGA;
allMyExpr.insertList(tupleExpr());
tupleExprTree()->getLeafValuesForCoverTest(refExpr, emptyGA, emptySet);
allMyExpr += refExpr;
allMyExpr.weedOutUnreferenced(outerRefs);
getGroupAttr()->setCharacteristicInputs(outerRefs);
} // TupleList::recomputeOuterReferences()
// -----------------------------------------------------------------------
// TupleList::rewriteNode()
// -----------------------------------------------------------------------
void TupleList::rewriteNode(NormWA & normWARef)
{
Tuple::rewriteNode(normWARef);
} // TupleList::rewriteNode()
// ***********************************************************************
// Member functions for class Transpose
// ***********************************************************************
// Transpose::transformNode() -------------------------------------------
// Unconditional query transformations such as the transformation of
// a subquery to a semijoin are implemented by the virtual function
// transformNode(). The aim of such transformations is to bring the
// query tree to a canonical form. transformNode() also ensures
// that the "required" (or characteristic) input values are "minimal"
// and the "required" (or characteristic) outputs values are
// "maximal" for each operator.
//
// transformNode() is an overloaded name, which is used for a set
// of methods that implement the transformation phase of query
// normalization.
//
// We use the term query tree for a tree of relational operators,
// each of which can contain none or more scalar expression trees.
// The transformations performed by transformNode() brings scalar
// expressions into a canonical form. The effect of most such
// transformations is local to the scalar expression tree.
// However, the transformation of a subquery requires a semijoin
// to be performed between the relational operator that contains
// the subquery and the query tree for the subquery. The effect
// of such a subquery transformation is therefore visible not
// only in the scalar expression tree but also in the relational
// expression tree.
//
// Parameters:
//
// NormWA & normWARef
// IN : a pointer to the normalizer work area
//
// ExprGroupId & locationOfPointerToMe
// IN : a reference to the location that contains a pointer to
// the RelExpr that is currently being processed.
//
// This implementation is basically the same as RelExpr::transformNode,
// but here we need to transform each member of each ValueIdUnion of
// transUnionVals().
//
void Transpose::transformNode(NormWA &normWARef,
ExprGroupId &locationOfPointerToMe)
{
CMPASSERT( this == locationOfPointerToMe );
// If this node has already been transformed, we are done.
//
if (nodeIsTransformed())
return;
// Make sure that it is only transformed once.
//
markAsTransformed();
  // transformNode takes a bound tree and turns it into a transformed
// tree. For a RelExpr that means the following.
// + expressions are transformed. If the expressions contain
// subqueries then new RelExpr are created for them and
// they are usually added above (as an ancestor) of the node
// that contained them.
// + predicates are pulled up from the children and their
// required inputs are modified
  // + the required inputs of the node itself are changed
// from being a sufficient set to being a sufficient minimal
// set.
//
// Transform the child.
// Pull up their transformed predicates
// recompute their required inputs.
//
child(0)->transformNode(normWARef, child(0));
// The child has now been transformed.
// A new semiJoin may now be my direct descendant and my original
// child a descendant of it.
// In either case my child has now been transformed.
RelExpr *origChild = child(0); // My original child
// Transform each expression of each ValueIdUnion.
  // (Do not transform the ValueIdUnion itself, but each of its members)
// The keyCol ValueIdUnion does not need to be transformed,
// so the loop index could start at 1.
//
for(CollIndex v = 0; v < transUnionVectorSize(); v++) {
ValueIdList &valIdList = transUnionVector()[v];
for(CollIndex i = 0; i < valIdList.entries(); i++) {
ValueIdUnion *valIdu = ((ValueIdUnion *)valIdList[i].
getValueDesc()->getItemExpr());
CollIndex numEntries = valIdu->entries();
for(CollIndex j = 0; j < numEntries; j++) {
// original expression before transformation.
//
ItemExpr * iePtr = valIdu->getSource(j).getItemExpr();
// The transformed expression.
//
ExprValueId nePtr(iePtr);
// Transform the Item Expression.
iePtr->transformNode(normWARef,
nePtr,
child(0),
getGroupAttr()->getCharacteristicInputs());
// If the original expression was transformed, update the entry
// in the ValueIdUnion
//
if (nePtr != (const ItemExpr *)iePtr) {
valIdu->setSource(j, nePtr->getValueId());
}
}
}
}
if(origChild != child(0)) {
// The transpose expressions were on a subquery that had not been
// processed before. Normalize the new tree that has become
// our child.
//
child(0)->transformNode(normWARef, child(0));
}
// Pull up the predicates and recompute the required inputs
// of whoever my children are now.
//
pullUpPreds();
// transform the selection predicates
//
transformSelectPred(normWARef, locationOfPointerToMe);
} // Transpose::transformNode()
// Transpose::rewriteNode() ---------------------------------------------
// rewriteNode() is the virtual function that computes
// the transitive closure for "=" predicates and rewrites value
// expressions.
//
// Parameters:
//
// NormWA & normWARef
// IN : a pointer to the normalizer work area
//
// This implementation is basically the same as RelExpr::rewriteNode()
// but here we need to normalize each member of each ValueIdUnion of
// transUnionVals().
//
void Transpose::rewriteNode(NormWA & normWARef)
{
// Rewrite the expressions of the child node.
//
child(0)->rewriteNode(normWARef);
// normalize each member of each ValueIdUnion of transUnionVals().
// (We may be able to get away without normalizing the first (key values)
// ValueIdUnion. If so, the index could start at 1.)
//
for(CollIndex v = 0; v < transUnionVectorSize(); v++) {
ValueIdList &valIdList = transUnionVector()[v];
for(CollIndex i = 0; i < valIdList.entries(); i++) {
ValueIdUnion *valIdu = ((ValueIdUnion *)valIdList[i].
getValueDesc()->getItemExpr());
CollIndex numEntries = valIdu->entries();
// Normalize each expression. This may generate new
// ValueIds for the members of the ValueIdUnion.
//
for(CollIndex j = 0; j < numEntries; j++) {
valIdu->normalizeSpecificChild(normWARef, j);
}
}
}
// Rewrite the expressions in the selection predicates.
//
if (selectionPred().normalizeNode(normWARef))
{
}
// ++MV
if (getUniqueColumns().normalizeNode(normWARef))
{
}
// --MV
// Rewrite the expressions in the Group Attributes.
//
getGroupAttr()->normalizeInputsAndOutputs(normWARef);
} // Transpose::rewriteNode()
// Transpose::recomputeOuterReferences() --------------------------------
// This method is used by the normalizer for recomputing the
// outer references (external dataflow input values) that are
// still referenced by each operator in the subquery tree
// after the predicate pull up is complete.
//
// Side Effects: sets the characteristicInputs of the groupAttr.
//
void Transpose::recomputeOuterReferences()
{
// This is virtual method on RelExpr.
// When this is called it is assumed that the children have already
// been transformed.
// The required inputs of the child are therefore already minimal
// and sufficient.
// It is also assumed that the RelExpr itself has been bound.
// That implies that the group attributes have already been allocated
// and the required inputs are a sufficient (but not necessarily minimal)
// set of external values needed to evaluate all expressions in this subtree.
//
// Delete all those input values that are no longer referenced on
// this operator because the predicates that reference them have
// been pulled up.
//
ValueIdSet outerRefs = getGroupAttr()->getCharacteristicInputs();
// The set of valueIds needed by this node.
//
ValueIdSet allMyExpr(getSelectionPred());
// Add the valueIds of each member of each ValueIdUnion of transUnionVals().
//
for(CollIndex v = 0; v < transUnionVectorSize(); v++) {
ValueIdList &valIdList = transUnionVector()[v];
for(CollIndex i = 0; i < valIdList.entries(); i++) {
ValueIdUnion *valIdu = ((ValueIdUnion *)valIdList[i].
getValueDesc()->getItemExpr());
CollIndex numEntries = valIdu->entries();
for(CollIndex j = 0; j < numEntries; j++) {
// Add the valueIds of each member.
//
allMyExpr += valIdu->getSource(j);
}
}
}
// Remove from outerRefs those valueIds that are not needed
// by all my expressions
//
allMyExpr.weedOutUnreferenced(outerRefs);
// Add to outerRefs those that my children need.
//
outerRefs += child(0).getPtr()->getGroupAttr()->getCharacteristicInputs();
// set my Character Inputs to this new minimal set.
//
getGroupAttr()->setCharacteristicInputs(outerRefs);
} // Transpose::recomputeOuterReferences()
// ***********************************************************************
// Member functions for class Pack
// ***********************************************************************
// -----------------------------------------------------------------------
// Pack::pullUpPreds() is refined to disallow the pullup of predicates
// from the operator's child, which may be made up of non-packed columns.
// The Pack node packs all the columns it receives from its child, so
// predicates evaluated by the child could no longer be evaluated here
// on the packed columns.
// -----------------------------------------------------------------------
void Pack::pullUpPreds()
{
// ---------------------------------------------------------------------
// Simply don't pull up child's selection predicates. Still need to tell
// child to recompute its outer references due to the warning below.
// ---------------------------------------------------------------------
child(0)->recomputeOuterReferences();
// ---------------------------------------------------------------------
// WARNING: One rule that this procedure must follow is
// that recomputeOuterReferences() must be called on the children even
// if no predicates are pulled up from them. This is to correct
// the outer references that are added to a right child of a
// semi or outer join when processing subqueries in the ON clause.
// ---------------------------------------------------------------------
}
// -----------------------------------------------------------------------
// Pack::recomputeOuterReferences() adds the packing factor to be the
// additional outer references needed by the Pack node.
// -----------------------------------------------------------------------
void Pack::recomputeOuterReferences()
{
// Original set of outer references.
ValueIdSet outerRefs = getGroupAttr()->getCharacteristicInputs();
// The set of valueIds needed by the Pack operator.
ValueIdSet allMyExpr(getSelectionPred());
allMyExpr += packingFactor();
allMyExpr.insertList(packingExpr());
allMyExpr.insertList(requiredOrder());
// Remove from outerRefs those valueIds that are not needed by allMyExpr.
allMyExpr.weedOutUnreferenced(outerRefs);
// Add to outerRefs those that my children need.
outerRefs += child(0).getPtr()->getGroupAttr()->getCharacteristicInputs();
// Set my characteristic inputs to this new minimal set.
getGroupAttr()->setCharacteristicInputs(outerRefs);
}
// -----------------------------------------------------------------------
// Pack::transformNode() transforms the packing expression, which might
// have a subquery in it.
// -----------------------------------------------------------------------
void Pack::transformNode(NormWA & normWARef,
ExprGroupId & locationOfPointerToMe)
{
CMPASSERT(this == locationOfPointerToMe);
if(nodeIsTransformed()) return;
markAsTransformed();
// Make inputs available to child
child(0)->getGroupAttr()->addCharacteristicInputs
(getGroupAttr()->getCharacteristicInputs());
// ---------------------------------------------------------------------
// Transform the child
// ---------------------------------------------------------------------
child(0)->transformNode(normWARef,child(0));
if(requiredOrder().
transformNode(normWARef,
child(0),
getGroupAttr()->getCharacteristicInputs())) {
// The requiredOrder list apparently had some subqueries that had
// not been processed before (is this possible?). Normalize the
// new tree that has become our child.
//
child(0)->transformNode(normWARef, child(0));
}
// ---------------------------------------------------------------------
// Transform the computable expressions associated with me.
// If a subquery appears in the compute list, then let the subquery
// transformation cause a semijoin to be performed between Pack and its
// child.
// ---------------------------------------------------------------------
if(packingExpr_.transformNode(normWARef,
child(0),
getGroupAttr()->getCharacteristicInputs()))
{
// -------------------------------------------------------------------
// Transform my new child.
// -------------------------------------------------------------------
child(0)->transformNode(normWARef,child(0));
}
// Pull up the predicates and recompute the required inputs
// of whoever my children are now.
pullUpPreds();
// transform the selection predicates
transformSelectPred(normWARef,locationOfPointerToMe);
}
// -----------------------------------------------------------------------
// Pack::rewriteNode() needs to rewrite the packing expressions as well
// as the selPreds and the inputs/outputs.
// -----------------------------------------------------------------------
void Pack::rewriteNode(NormWA& normWA)
{
// First rewrite the child node.
child(0)->rewriteNode(normWA);
// Rewrite the Pack node's own expressions and its inputs/outputs.
packingFactor().normalizeNode(normWA);
packingExpr().normalizeNode(normWA);
selectionPred().normalizeNode(normWA);
requiredOrder().normalizeNode(normWA);
getGroupAttr()->normalizeInputsAndOutputs(normWA);
}
// ***********************************************************************
// $$$$ CommonSubExprRef
// member functions for class CommonSubExprRef
// ***********************************************************************
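// CommonSubExprRef::transformNode() --------------------------------------
// Transform the child tree of this common subexpression reference and
// record the lexical parent ref number for expanded references. Predicates
// are not pulled out of the child tree (see pullUpPreds() below).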
void CommonSubExprRef::transformNode(NormWA & normWARef,
ExprGroupId & locationOfPointerToMe)
{
CMPASSERT( locationOfPointerToMe.getPtr() == this );
if (nodeIsTransformed())
return;
markAsTransformed();
// set lexicalRefNumFromParent_ for expanded refs, now that
// we can be sure the lexical ref has been bound
if (isAnExpansionOf_)
lexicalRefNumFromParent_ = isAnExpansionOf_->lexicalRefNumFromParent_;
// Allocate a new VEG region for the child, to prevent VEGies that
// cross the potentially common part and the rest of the query tree.
//normWARef.allocateAndSetVEGRegion(EXPORT_ONLY, this);
child(0)->getGroupAttr()->addCharacteristicInputs(
getGroupAttr()->getCharacteristicInputs());
child(0)->transformNode(normWARef, child(0));
pullUpPreds();
transformSelectPred(normWARef, locationOfPointerToMe);
//normWARef.restoreOriginalVEGRegion();
}
void CommonSubExprRef::pullUpPreds()
{
// To preserve the commonality of common subexpressions, we
// don't allow predicates to be pulled out of them,
// so do nothing here, preventing predicate pull-up.
// alternatively, we could do the pull-up and record the
// pulled-up predicates here
// RelExpr::pullUpPreds();
// pulledPredicates_ += selectionPred();
}
void CommonSubExprRef::pushdownCoveredExpr(
const ValueIdSet & outputExpr,
const ValueIdSet & newExternalInputs,
ValueIdSet & predicatesOnParent,
const ValueIdSet * setOfValuesReqdByParent,
Lng32 childIndex)
{
// Remember the predicates we pushed down, since other consumers of
// this CSE may not have pushed the equivalent
// predicates. Therefore, if we want to materialize a common
// subexpressions, any predicates that were pushed down and are not
// common to all the consumers must be pulled back out before we can
// share a common query tree.
ValueIdSet predsPushedThisTime(predicatesOnParent);
if (pushedPredicates_.isEmpty())
// this is also the time to record the original set of inputs
// for this node, before predicate pushdown can alter the inputs
commonInputs_ = getGroupAttr()->getCharacteristicInputs();
RelExpr::pushdownCoveredExpr(outputExpr,
newExternalInputs,
predicatesOnParent,
setOfValuesReqdByParent,
childIndex);
predsPushedThisTime -= predicatesOnParent;
pushedPredicates_ += predsPushedThisTime;
}
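// CommonSubExprRef::rewriteNode() ----------------------------------------
// Rewrite the child tree, then normalize the list of columns produced by
// the common subexpression and the common inputs. Also remember the
// original (non-VEG) columns and count this CSE reference in the work area.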
void CommonSubExprRef::rewriteNode(NormWA & normWARef)
{
RelExpr::rewriteNode(normWARef);
nonVEGColumns_ = columnList_;
columnList_.normalizeNode(normWARef);
commonInputs_.normalizeNode(normWARef);
normWARef.incrementCommonSubExprRefCount();
}
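// CommonSubExprRef::semanticQueryOptimizeNode() --------------------------
// Decide what to do with this reference to a common subexpression:
// expand it in place, create the temp table and the insert into it
// (done by the analyzing consumer), or replace the reference with a
// scan on the shared temp table.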
RelExpr * CommonSubExprRef::semanticQueryOptimizeNode(NormWA & normWARef)
{
RelExpr *result = this;
NABoolean ok = TRUE;
CSEInfo *info = CmpCommon::statement()->getCSEInfo(internalName_);
// do the analysis top-down
analyzeAndPrepareForSharing(*info);
RelExpr::semanticQueryOptimizeNode(normWARef);
switch (info->getAnalysisOutcome(id_))
{
case CSEInfo::EXPAND:
// Not able to share the CSE, expand the CSE by eliminating
// this node and putting its child tree in its place. In this
// case, analyzeAndPrepareForSharing() left the tree unchanged.
result = child(0).getPtr();
break;
case CSEInfo::CREATE_TEMP:
determineTempTableType(*info);
if (createTempTable(*info))
{
RelExpr *ins = createInsertIntoTemp(*info, normWARef);
if (ins)
info->setInsertIntoTemp(ins);
else
result = NULL;
}
else
result = NULL;
if (!result)
break;
// fall through to the next case
case CSEInfo::TEMP:
// We are able to share this CSE between multiple consumers.
// Replace this node with a scan on the temp table that
// holds the CSE results.
result = createTempScan(*info, normWARef);
break;
case CSEInfo::ERROR:
// diags should be set
CMPASSERT(CmpCommon::diags()->mainSQLCODE() < 0);
break;
default:
CMPASSERT(0);
}
if (result == NULL)
emitCSEDiagnostics("Error in creating temp table or temp table insert",
TRUE);
return result;
}
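// CommonSubExprRef::prepareMeForCSESharing() -----------------------------
// Called while the child tree is being prepared for sharing. The actual
// adjustments are made by the caller; here we only verify that no
// selection predicates are left on this node.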
NABoolean CommonSubExprRef::prepareMeForCSESharing(
const ValueIdSet &outputsToAdd,
const ValueIdSet &predicatesToRemove,
const ValueIdSet &commonPredicatesToAdd,
const ValueIdSet &inputsToRemove,
ValueIdSet &valuesForVEGRewrite,
ValueIdSet &keyColumns,
CSEInfo *info)
{
// the caller of this method already took care of the adjustments to
// make, just make sure that all predicates could be pushed down to
// the child
if (!getSelectionPred().isEmpty())
{
// this should not happen
emitCSEDiagnostics("Unable to push common predicates into child tree");
return FALSE;
}
return TRUE;
}
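// CommonSubExprRef::analyzeAndPrepareForSharing() ------------------------
// Performed by the first (analyzing) consumer of a CSE. Negotiates a
// common set of columns and predicates among all consumers, decides
// heuristically whether materializing the CSE in a temp table pays off,
// and, if so, prepares a copy of the child tree for sharing.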
CSEInfo::CSEAnalysisOutcome CommonSubExprRef::analyzeAndPrepareForSharing(CSEInfo &info)
{
// do a few simple shortcuts first
// Make sure this consumer is in the main list of consumers. Note
// that the analysis is done top-down and that currently the only
// two places where we make copies of the tree are in
// RelRoot::semanticQueryOptimizeNode() and in this method. The copy
// made in the root is only used when we bypass SQO completely.
// Although we may sometimes look at unused copies during the CSE
// analysis phase, this guarantees (for now) that the analyzing
// consumer always is and stays in the list of consumers. If we ever
// make additional copies of the tree we may need to reconsider this
// logic.
if (info.getConsumer(id_) != this)
{
info.replaceConsumerWithAnAlternative(this);
DCMPASSERT(info.getConsumer(id_) == this);
}
// If another consumer has already done the analysis, return its result.
// Note: Right now, all the consumers do the same, in the future, we could
// expand some and share others.
if (info.getIdOfAnalyzingConsumer() >= 0)
return info.getAnalysisOutcome(id_);
// mark me as the analyzing consumer
info.setIdOfAnalyzingConsumer(id_);
if (CmpCommon::getDefault(CSE_USE_TEMP) == DF_OFF)
{
emitCSEDiagnostics("Forced with CQD CSE_USE_TEMP CQD 'off'");
info.setAnalysisOutcome(CSEInfo::EXPAND);
return CSEInfo::EXPAND;
}
CSEInfo::CSEAnalysisOutcome result = CSEInfo::UNKNOWN_ANALYSIS;
NABoolean canShare = TRUE;
NABitVector neededColumnsBitmap;
ValueIdList tempTableColumns;
const ValueIdSet &charOutputs(getGroupAttr()->getCharacteristicOutputs());
CollIndex numConsumers = info.getNumConsumers();
RelExpr *copyOfChildTree = NULL;
// A laundry list of changes to undo the effects of normalization,
// specifically of pushing predicates down and of minimizing the
// outputs. Also, a list of new common selection predicates to add.
ValueIdSet outputsToAdd;
ValueIdSet predicatesToRemove(pushedPredicates_);
ValueIdSet newPredicatesToAdd;
ValueIdSet commonPredicates(pushedPredicates_);
ValueIdSet inputsToRemove(child(0).getGroupAttr()->getCharacteristicInputs());
ValueIdSet *nonCommonPredicatesArray =
new(CmpCommon::statementHeap()) ValueIdSet[numConsumers];
ValueIdMap *myColsToConsumerMaps =
new(CmpCommon::statementHeap()) ValueIdMap[numConsumers];
ItemExpr *nonCommonPredicatesORed = NULL;
int numORedPreds = 0;
NABoolean singleLexicalRefWithTempedAncestors =
(info.getNumLexicalRefs() == 1);
Int32 numPreliminaryRefs = 0;
ValueIdSet childTreeKeyColumns;
// ------------------------------------------------------------------
// CSE Analysis phase
// ------------------------------------------------------------------
// loop over the consumers of the CSE to negotiate a common set
// of columns to retrieve and a common set of predicates that can
// remain pushed down
for (CollIndex c=0; c<numConsumers && canShare; c++)
{
CommonSubExprRef *consumer = info.getConsumer(c);
const ValueIdList &cCols(consumer->columnList_);
ValueIdSet availableValues(cCols);
ValueIdSet requiredValues(
consumer->getGroupAttr()->getCharacteristicOutputs());
const ValueIdSet &cPreds(consumer->pushedPredicates_);
ValueIdSet mappedPreds;
ValueId dummy;
CSEInfo *infoToCheck = &info;
CommonSubExprRef *childToCheck = consumer;
NABoolean ancestorIsTemped = FALSE;
// look for a chain of only lexical ancestors of which one is
// materialized in a temp table
while (!ancestorIsTemped &&
infoToCheck->getNumLexicalRefs() == 1 &&
childToCheck &&
childToCheck->parentRefId_ >= 0)
{
// look at the ancestor and what it is planning to do
infoToCheck = CmpCommon::statement()->getCSEInfoById(
childToCheck->parentCSEId_);
CMPASSERT(infoToCheck);
CommonSubExprRef *parent =
infoToCheck->getConsumer(childToCheck->parentRefId_);
CSEInfo::CSEAnalysisOutcome parentOutcome =
infoToCheck->getAnalysisOutcome(parent->getId());
if (parentOutcome == CSEInfo::CREATE_TEMP ||
parentOutcome == CSEInfo::TEMP)
ancestorIsTemped = TRUE;
childToCheck = parent;
}
if (!ancestorIsTemped)
singleLexicalRefWithTempedAncestors = FALSE;
requiredValues += cPreds;
availableValues +=
consumer->getGroupAttr()->getCharacteristicInputs();
// Do a sanity check whether we can produce the required
// values (outputs and predicates) from the available values
// (tables of the original subexpression, to be a temp table).
// If not, one reason could be that we copied an expression
// and now have different ValueIds. This could be improved.
if (requiredValues.removeUnCoveredExprs(availableValues))
{
emitCSEDiagnostics(
"Characteristic outputs not covered by common subexpression");
canShare = FALSE;
}
// Check the required values of this consumer and add all of
// them (by position number of the original list) to the bit
// vector of required columns. Note that we might be able to
// optimize this somewhat for expressions.
for (CollIndex i=0; i<cCols.entries(); i++)
if (requiredValues.referencesTheGivenValue(cCols[i],
dummy,
TRUE,
TRUE))
neededColumnsBitmap += i;
if (!cPreds.isEmpty())
if (consumer->id_ == id_)
{
// Assert for now that we are still seeing the same node,
// not a copy. If this fails, think about whether making
// a copy might cause issues here, e.g. because some of
// the information has diverged.
DCMPASSERT(consumer == this);
// consumer is the same as "this"
mappedPreds = cPreds;
}
else
{
// another consumer, likely to use different ValueIds
// a ValueIdMap that maps my columns (top) to those of the
// other consumer (bottom)
ValueIdSet vegRefsWithDifferingConsts;
ValueIdSet vegRefsWithDifferingInputs;
myColsToConsumerMaps[c] = ValueIdMap(columnList_, cCols);
// make sure we can also map VEGPreds for any VEGRefs in the map
myColsToConsumerMaps[c].augmentForVEG(
TRUE, // add VEGPreds for existing VEGRefs
FALSE, // no need to add more VEGRefs
TRUE, // only do this if constants match
// only do this if the VEGies refer to
// the same outputs
&(getGroupAttr()->getCharacteristicInputs()),
&(consumer->getGroupAttr()->getCharacteristicInputs()),
&vegRefsWithDifferingConsts,
&vegRefsWithDifferingInputs);
// for now, don't work on trees that have VEGies with differing
// constants or inputs
if (vegRefsWithDifferingConsts.entries() > 0)
{
info.addVEGRefsWithDifferingConstants(vegRefsWithDifferingConsts);
emitCSEDiagnostics(
"Encountered VEGs with different constants in different consumers");
canShare = FALSE;
}
if (vegRefsWithDifferingInputs.entries() > 0)
{
info.addVEGRefsWithDifferingInputs(vegRefsWithDifferingInputs);
emitCSEDiagnostics("Encountered VEGs with different characteristic inputs");
canShare = FALSE;
}
// Check the inputs, all of the consumers must have the same inputs
// (parameters). We could see differences if query caching decides
// to parameterize the copies of the CTEs differently.
if (consumer->commonInputs_ != commonInputs_)
{
emitCSEDiagnostics(
"Differing inputs in CTE references, try CQD QUERY_CACHE '0'");
canShare = FALSE;
}
// rewrite the predicates on the consumer in terms of my
// own ValueIds
myColsToConsumerMaps[c].rewriteValueIdSetUp(mappedPreds, cPreds);
commonPredicates.findCommonSubexpressions(mappedPreds, FALSE);
}
// Save the mapped preds for later.
// Note: These are not final yet, until we have found
// common predicates among all the consumers.
nonCommonPredicatesArray[c] = mappedPreds;
}
if (singleLexicalRefWithTempedAncestors)
{
// if all the parent refs are materialized and each one is a
// copy of a single lexical ref, then that means that we will
// evaluate this CSE only once, therefore no need to materialize
// it
emitCSEDiagnostics(
"expression is only evaluated once because parent is materialized");
canShare = FALSE;
}
// translate the bit vector of required columns into a set of values
// that are required (by other consumers) but are not produced by my
// child tree
makeValueIdListFromBitVector(tempTableColumns,
columnList_,
neededColumnsBitmap);
outputsToAdd.insertList(tempTableColumns);
info.setNeededColumns(neededColumnsBitmap);
predicatesToRemove -= commonPredicates;
info.setCommonPredicates(commonPredicates);
if (canShare && info.getNeededColumns().entries() == 0)
{
// Temp table has no columns, looks like all we care about is
// the number of rows returned. This is not yet supported. We
// could make a table with a dummy column.
emitCSEDiagnostics("Temp table with no columns is not yet supported");
canShare = FALSE;
}
// Make an ORed predicate of all those non-common predicates of the
// consumers, to be applied on the common subexpression when creating
// the temp table. Also determine non-common predicates to be applied
// when scanning the temp table.
for (CollIndex n=0; n<numConsumers && canShare; n++)
{
// Now that we have the definitive set of common predicates,
// we can get the "uncommon" predicates, i.e. those that
// have to be evaluated on the individual scans of the temp
// tables. What we can do, however, is to OR these "uncommon"
// predicates and apply that OR predicate when building the
// temp table.
// repeat step from above, but this time remove the common
// preds from the array of non-common ones
commonPredicates.findCommonSubexpressions(nonCommonPredicatesArray[n],
TRUE);
if (nonCommonPredicatesArray[n].entries() > 0)
{
if (numORedPreds == n)
{
// build the ORed predicate
ItemExpr *uncommonPreds =
nonCommonPredicatesArray[n].rebuildExprTree();
if (nonCommonPredicatesORed)
nonCommonPredicatesORed =
new(CmpCommon::statementHeap()) BiLogic(
ITM_OR,
nonCommonPredicatesORed,
uncommonPreds);
else
nonCommonPredicatesORed = uncommonPreds;
numORedPreds++;
}
// rewrite the non-common predicates in terms of the consumer
// (the ValueIdMap should in many cases already have the
// correct translation)
myColsToConsumerMaps[n].rewriteValueIdSetDown(
nonCommonPredicatesArray[n],
info.getConsumer(n)->nonSharedPredicates_);
}
}
// adding the ORed non-common predicates makes sense only if all
// consumers have some such predicate. If at least one consumer
// doesn't, that's equivalent to a TRUE predicate, and TRUE OR x is
// always TRUE.
if (numORedPreds == numConsumers)
{
nonCommonPredicatesORed->synthTypeAndValueId();
newPredicatesToAdd += nonCommonPredicatesORed->getValueId();
info.addCommonPredicates(newPredicatesToAdd);
}
// ------------------------------------------------------------------
// Preparation phase
// ------------------------------------------------------------------
if (canShare)
{
// make a copy of the child tree, so we can revert back to the
// original tree if things don't work out
copyOfChildTree = child(0)->copyRelExprTree(CmpCommon::statementHeap());
outputsToAdd -= child(0).getGroupAttr()->getCharacteristicOutputs();
inputsToRemove -= commonInputs_;
canShare = copyOfChildTree->prepareTreeForCSESharing(
outputsToAdd,
predicatesToRemove,
newPredicatesToAdd,
inputsToRemove,
nonVEGColumns_,
childTreeKeyColumns,
&info);
if (!canShare)
emitCSEDiagnostics("Failed to prepare child tree for materialization");
else if (!copyOfChildTree->getGroupAttr()->getCharacteristicOutputs().contains(
outputsToAdd))
{
// we failed to produce the requested additional outputs
emitCSEDiagnostics("Failed to produce all the required output columns");
canShare = FALSE;
}
else
{
// remember est. log. props of the child, those will be transplanted
// into the temp scan later
cseEstLogProps_ =
copyOfChildTree->getGroupAttr()->outputLogProp(
(*GLOBAL_EMPTY_INPUT_LOGPROP));
// Get a preliminary bearing on how many times we are going
// to evaluate this CSE if it isn't shared. Note that this
// looks at the parent CSE's analysis outcome, and not all
// of these parents may be analyzed yet, so this may be an
// overestimate.
numPreliminaryRefs = info.getTotalNumRefs();
for (CollIndex k=0; k<tempTableColumns.entries(); k++)
if (childTreeKeyColumns.contains(tempTableColumns[k]))
info.addCSEKeyColumn(k);
}
}
if (canShare &&
CmpCommon::getDefault(CSE_USE_TEMP) != DF_ON)
{
// When CSE_USE_TEMP is set to SYSTEM, make a heuristic decision:
// calculate some metrics for the temp table, based on row length,
// cardinality (or max. cardinality) and number of times it is used
Lng32 tempTableRowLength = tempTableColumns.getRowLength();
CostScalar cseTempTableSize = cseEstLogProps_->getResultCardinality() *
tempTableRowLength / numPreliminaryRefs;
CostScalar cseTempTableMaxSize = cseEstLogProps_->getMaxCardEst() *
tempTableRowLength / numPreliminaryRefs;
double maxTableSize =
ActiveSchemaDB()->getDefaults().getAsDouble(CSE_TEMP_TABLE_MAX_SIZE);
double maxTableSizeBasedOnMaxCard =
ActiveSchemaDB()->getDefaults().getAsDouble(CSE_TEMP_TABLE_MAX_MAX_SIZE);
// cumulative number of key columns referenced in consumers
Int32 totalKeyColPreds = 0;
// key cols that are referenced by a predicate in all consumers
ValueIdSet commonKeyCols(childTreeKeyColumns);
// check the total size of the temp table, divided by the number
// of times it is used
if (maxTableSize > 0 && cseTempTableSize > maxTableSize)
{
char buf[200];
snprintf(buf, sizeof(buf),
"Temp table size %e exceeds limit %e",
cseTempTableSize.getValue(),
maxTableSize);
emitCSEDiagnostics(buf);
canShare = FALSE;
}
else if (maxTableSizeBasedOnMaxCard > 0 &&
cseTempTableMaxSize > maxTableSizeBasedOnMaxCard)
{
char buf[200];
snprintf(buf, sizeof(buf),
"Temp table size %e (based on max card) exceeds limit %e",
cseTempTableMaxSize.getValue(),
maxTableSizeBasedOnMaxCard);
emitCSEDiagnostics(buf);
canShare = FALSE;
}
// determine which "key" columns are referenced by non-common
// predicates
for (CollIndex ncp=0; ncp<numConsumers; ncp++)
{
const ValueIdSet &nonCommonPreds(nonCommonPredicatesArray[ncp]);
ValueIdSet tempRefCols;
tempRefCols.accumulateReferencedValues(childTreeKeyColumns,
nonCommonPreds);
totalKeyColPreds += tempRefCols.entries();
nonCommonPreds.weedOutUnreferenced(commonKeyCols);
}
// decide against materialization if the average number of "key"
// columns referenced in each consumer is greater than
// CSE_PCT_KEY_COL_PRED_CONTROL percent
if (totalKeyColPreds >
(numConsumers * childTreeKeyColumns.entries() *
ActiveSchemaDB()->getDefaults().getAsDouble(CSE_PCT_KEY_COL_PRED_CONTROL) / 100.0))
{
char buf[200];
snprintf(buf, sizeof(buf),
"Number of potential key predicates in consumers (%d) exceeds limit %f",
totalKeyColPreds,
(numConsumers * childTreeKeyColumns.entries() *
ActiveSchemaDB()->getDefaults().getAsDouble(CSE_PCT_KEY_COL_PRED_CONTROL) / 100.0));
emitCSEDiagnostics(buf);
canShare = FALSE;
}
// decide against materialization if the number of key columns
// referenced by every consumer is > CSE_COMMON_KEY_PRED_CONTROL
if (commonKeyCols.entries() >
ActiveSchemaDB()->getDefaults().getAsLong(CSE_COMMON_KEY_PRED_CONTROL))
{
char buf[200];
snprintf(buf, sizeof(buf),
"All consumers have a predicate on %d common key columns, limit is %d",
commonKeyCols.entries(),
ActiveSchemaDB()->getDefaults().getAsLong(CSE_COMMON_KEY_PRED_CONTROL));
emitCSEDiagnostics(buf);
canShare = FALSE;
}
}
if (canShare)
{
result = CSEInfo::CREATE_TEMP;
child(0) = copyOfChildTree;
}
else if (result == CSEInfo::UNKNOWN_ANALYSIS)
result = CSEInfo::EXPAND;
info.setAnalysisOutcome(result);
return result;
}
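// CommonSubExprRef::determineTempTableType() -----------------------------
// Decide whether the shared result is materialized as a Hive table or as
// a volatile table, based on CQD CSE_HIVE_TEMP_TABLE.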
void CommonSubExprRef::determineTempTableType(CSEInfo &info)
{
NABoolean createHiveTable =
(CmpCommon::getDefault(CSE_HIVE_TEMP_TABLE) == DF_ON);
if (createHiveTable)
info.setTempTableType(CSEInfo::HIVE_TEMP_TABLE);
else
info.setTempTableType(CSEInfo::VOLATILE_TEMP_TABLE);
}
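// CommonSubExprRef::createTempTable() ------------------------------------
// Create the temp table that will hold the CSE result, in four steps:
// build a unique table name, generate the DDL, execute the DDL, and
// read back the NATable metadata. Errors here are recoverable; we only
// emit warnings and return FALSE, falling back to expanding the CSE.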
NABoolean CommonSubExprRef::createTempTable(CSEInfo &info)
{
int result = TRUE;
const int maxCSENameLen = 12;
NAString tempTableName(COM_CSE_TABLE_PREFIX);
NAString tempTableSchema;
NAString tempTableCatalog;
CSEInfo::CSETempTableType tempTableType = info.getTempTableType();
char buf[32];
NAString tempTableDDL;
ValueIdList cols;
NAString cseNamePrefix(internalName_.data(),
MINOF(internalName_.length(),16));
// Note: Errors at this stage of the process may be recoverable, so
// we emit only warning diagnostics and just return FALSE if the
// temp table cannot be created
// Step 1: Create temp table name
// ------------------------------
// we create a name of roughly this form:
//
//   <CSE table prefix>ppp..._iii..._Ssss_ccc
//
// where
// ppp... is a prefix of the CTE name or an internal name
// (just to make it easier to identify, not really needed,
// we only use letters, digits, underscores)
// iii... is the SQL session id
// (Hive tables only, to keep different sessions apart)
// sss is the statement number in this session
// ccc is the CSE number in this statement
// Overall name length is 256, and both HDFS directory and file name
// can be quite long, so don't allow long user names as well. Note
// that the user name is just here to improve readability by humans,
// it's not needed for uniqueness.
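// A hypothetical example of a resulting Hive temp table name, assuming
// a CTE named MYCTE, statement number 3 and CSE id 0:
//   <COM_CSE_TABLE_PREFIX>MYCTE_<session id>_S3_0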
if (cseNamePrefix.length() > maxCSENameLen)
cseNamePrefix.remove(maxCSENameLen);
cseNamePrefix.toUpper();
for (int p=0; p<cseNamePrefix.length(); p++)
{
char c = cseNamePrefix[p];
if (!(c >= '0' && c <= '9' ||
c >= 'A' && c <= 'Z' ||
c == '_'))
cseNamePrefix.replace(p,1,"_");
}
tempTableName += cseNamePrefix;
if (tempTableType == CSEInfo::HIVE_TEMP_TABLE)
{
tempTableName += "_";
tempTableName +=
CmpCommon::context()->sqlSession()->getSessionId();
}
snprintf(buf, sizeof(buf), "_S%u_%d",
CmpCommon::context()->getStatementNum(),
info.getCSEId());
tempTableName += buf;
if (tempTableType == CSEInfo::HIVE_TEMP_TABLE)
{
tempTableSchema = HIVE_SYSTEM_SCHEMA;
tempTableCatalog = HIVE_SYSTEM_CATALOG;
}
info.setTempTableName(QualifiedName(tempTableName,
tempTableSchema,
tempTableCatalog));
// Step 2: Create the DDL for the temp table
// -----------------------------------------
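// A hypothetical example of the generated DDL for a Hive temp table
// with two columns (the actual column types come from the CSE's columns):
//   CREATE TABLE <temp table name>(
//    C00000 int,
//    C00001 string)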
tempTableDDL += "CREATE ";
if (tempTableType == CSEInfo::VOLATILE_TEMP_TABLE)
tempTableDDL += "VOLATILE ";
tempTableDDL += "TABLE ";
if (tempTableType == CSEInfo::HIVE_TEMP_TABLE &&
tempTableSchema == HIVE_SYSTEM_SCHEMA ||
tempTableType == CSEInfo::VOLATILE_TEMP_TABLE)
{
// Hive table in default schema or volatile table,
// just use a one-part name
tempTableDDL += tempTableName;
}
else if (tempTableType == CSEInfo::HIVE_TEMP_TABLE)
{
// Hive table in a different schema, use a 2 part name
// (not yet supported)
tempTableDDL += tempTableSchema;
tempTableDDL += '.';
tempTableDDL += tempTableName;
}
else
{
// use a regular 3-part name
// (not yet supported)
tempTableDDL +=
info.getTempTableName().
getQualifiedNameAsAnsiString();
}
tempTableDDL += "(\n";
makeValueIdListFromBitVector(cols, columnList_, info.getNeededColumns());
for (CollIndex c=0; c<cols.entries(); c++)
{
char colName[10];
NAString colType;
snprintf(colName, sizeof(colName)," C%05d ", c);
tempTableDDL += colName;
if (tempTableType == CSEInfo::HIVE_TEMP_TABLE)
cols[c].getType().getMyTypeAsHiveText(&colType);
else
cols[c].getType().getMyTypeAsText(&colType);
if (colType == "unknown")
{
char buf[100];
colType = "";
cols[c].getType().getMyTypeAsText(&colType);
snprintf(buf, sizeof(buf),
"Unsupported data type for Hive temp table: %s",
colType.data());
emitCSEDiagnostics(buf, FALSE);
result = FALSE;
}
tempTableDDL += colType;
if (c+1 < cols.entries())
tempTableDDL += ",\n";
else
tempTableDDL += ")";
}
if (result)
info.setTempTableDDL(tempTableDDL);
// Step 3: Create the temp table
// -----------------------------
if (result)
if (tempTableType == CSEInfo::HIVE_TEMP_TABLE)
{
int m = CmpCommon::diags()->mark();
if (!CmpCommon::context()->execHiveSQL(tempTableDDL,
CmpCommon::diags()))
{
if (CmpCommon::statement()->recompiling() ||
CmpCommon::statement()->getNumOfCompilationRetries() > 0)
// ignore temp table creation errors if we are
// recompiling, the temp table may have been
// created in a previous compilation attempt
// (if not, we will run into other errors later)
CmpCommon::diags()->rewind(m);
else
{
result = FALSE;
// we will fall back to a previous tree and try to
// recover, make sure there are no errors from our
// failed attempt in the diags area
CmpCommon::diags()->negateAllErrors();
emitCSEDiagnostics(
"Error in creating Hive temp table");
}
}
}
else
{
// Todo: CSE: create volatile table
emitCSEDiagnostics("Volatile temp tables not yet supported");
result = FALSE;
}
// Step 4: Get the NATable for the temp table
// ------------------------------------------
if (result)
{
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context());
CorrName cn(info.getTempTableName());
NATable *tempNATable =
ActiveSchemaDB()->getNATableDB()->get(cn,
&bindWA,
NULL);
if (!tempNATable)
emitCSEDiagnostics("Unable to read metadata for temporary table");
else
info.setTempNATable(tempNATable);
}
return result;
}
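// CommonSubExprRef::createInsertIntoTemp() -------------------------------
// Build the tree that populates the temp table with the result of the
// common subexpression. For Hive temp tables this is a blocked union of
// a truncate and a FastExtract of the CSE's child tree.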
RelExpr * CommonSubExprRef::createInsertIntoTemp(CSEInfo &info, NormWA & normWARef)
{
RelExpr *result = NULL;
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context());
CorrName cn(info.getTempTableName());
if (!info.getTempNATable())
// an earlier failure
return NULL;
TableDesc *tableDesc =
bindWA.createTableDesc(info.getTempNATable(),
cn,
FALSE);
ValueIdList srcValueList;
if (info.getTempTableType() == CSEInfo::HIVE_TEMP_TABLE)
{
// Create this tree:
//
//             BlockedUnion
//             /          \
//       Truncate     FastExtract temp
//         temp             |
//                         cse
//
// In this tree "cse" is the child of this node and "temp" is
// the name of the Hive table. The tree is equivalent to what
// would be generated by an SQL statement
// "insert overwite table <temp> <cse>".
result = FastExtract::makeFastExtractTree(
tableDesc,
child(0).getPtr(),
TRUE, // overwrite the table
FALSE, // called outside the binder
TRUE, // this is a table for a common subexpression
&bindWA);
CMPASSERT(result->getOperatorType() == REL_UNION &&
result->child(1)->getOperatorType() == REL_FAST_EXTRACT);
RelExpr *fe = result->child(1);
makeValueIdListFromBitVector(srcValueList, columnList_, info.getNeededColumns());
CMPASSERT(fe->getOperatorType() == REL_FAST_EXTRACT);
static_cast<FastExtract *>(fe)->setSelectList(srcValueList);
fe->setGroupAttr(new (CmpCommon::statementHeap()) GroupAttributes());
fe->getGroupAttr()->addCharacteristicInputs(
fe->child(0).getGroupAttr()->getCharacteristicInputs());
result->child(0)->setGroupAttr(
new (CmpCommon::statementHeap()) GroupAttributes());
result->setGroupAttr(new (CmpCommon::statementHeap()) GroupAttributes());
result->getGroupAttr()->addCharacteristicInputs(
fe->getGroupAttr()->getCharacteristicInputs());
}
else
{
emitCSEDiagnostics(
"Unsupported temp table type in createInsertIntoTemp()",
TRUE);
}
info.setInsertIntoTemp(result);
return result;
}
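// CommonSubExprRef::createTempScan() -------------------------------------
// Replace this CSE reference with a scan on the temp table. The new scan
// is bound and normalized here, and a MapValueIds node maps the temp
// table's ValueIds back to the ValueIds produced by this node.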
RelExpr * CommonSubExprRef::createTempScan(CSEInfo &info, NormWA & normWARef)
{
// check for earlier errors
if (!info.getInsertIntoTemp())
return NULL;
MapValueIds *result = NULL;
BindWA bindWA(ActiveSchemaDB(), CmpCommon::context());
CorrName cn(info.getTempTableName(),
CmpCommon::statementHeap(),
internalName_);
TableDesc *tableDesc =
bindWA.createTableDesc(info.getTempNATable(),
cn,
FALSE,
getHint());
Scan *scan =
new(CmpCommon::statementHeap()) Scan(cn, tableDesc);
// Run the new scan through bind and normalization phases, like the
// rest of the nodes have
ExprGroupId x(scan);
scan->bindSelf(&bindWA);
normWARef.allocateAndSetVEGRegion(IMPORT_ONLY, scan);
scan->transformNode(normWARef, x);
CMPASSERT(x.getPtr() == scan);
scan->rewriteNode(normWARef);
scan->normalizeNode(normWARef);
scan->synthLogProp(&normWARef);
normWARef.restoreOriginalVEGRegion();
scan->setCommonSubExpr(this);
// At this point we have a scan node on the temp table, with a new
// TableDesc that has new ValueIds. Make a map from the new ids to
// my own.
ValueIdList myOutputs;
ValueIdList tempTableOutputList;
ValueIdList tempTableVEGOutputList;
ValueIdSet tempTableOutputs;
ValueIdSet tempTablePreds;
makeValueIdListFromBitVector(myOutputs, columnList_, info.getNeededColumns());
tableDesc->getUserColumnList(tempTableOutputList);
tableDesc->getEquivVEGCols(tempTableOutputList, tempTableVEGOutputList);
CMPASSERT(myOutputs.entries() == tempTableVEGOutputList.entries());
ValueIdMap outToTempMap(myOutputs, tempTableVEGOutputList);
result = new(CmpCommon::statementHeap()) MapValueIds(scan,
outToTempMap,
CmpCommon::statementHeap());
result->setCSERef(this);
result->addValuesForVEGRewrite(nonVEGColumns_);
outToTempMap.rewriteValueIdSetDown(getGroupAttr()->getCharacteristicOutputs(),
tempTableOutputs);
// Todo: CSE: the rewrite below doesn't work with VEGPreds, and the
// augment method also isn't sufficient
outToTempMap.rewriteValueIdSetDown(nonSharedPredicates_, tempTablePreds);
scan->getGroupAttr()->setCharacteristicInputs(
getGroupAttr()->getCharacteristicInputs());
scan->getGroupAttr()->setCharacteristicOutputs(tempTableOutputs);
scan->setSelectionPredicates(tempTablePreds);
result->setGroupAttr(getGroupAttr());
return result;
}
void CommonSubExprRef::emitCSEDiagnostics(const char *message, NABoolean forceError)
{
// Normally this does nothing.
// With CQD CSE_DEBUG_WARNINGS ON, it emits diagnostics about the reason(s) why
// we don't share some common subexpressions.
// With forceError set to TRUE, it generates an internal error that causes the
// query to fail. This should be avoided as much as possible, since expanding
// the CSEs should have given us a successful plan.
if (CmpCommon::getDefault(CSE_DEBUG_WARNINGS) == DF_ON || forceError)
{
*CmpCommon::diags() << DgSqlCode(5001)
<< DgString0(internalName_.data())
<< DgString1(message);
if (forceError)
// throw an exception that forces the normalizer to skip the
// SQO phase and to revert to the original tree
AssertException(message, __FILE__, __LINE__).throwException();
}
}
// -----------------------------------------------------------------------
// IsolatedNonTableUDR::transformNode()
// -----------------------------------------------------------------------
void IsolatedNonTableUDR::transformNode(NormWA & normWARef,
ExprGroupId & locationOfPointerToMe)
{
CMPASSERT( this == locationOfPointerToMe );
if (nodeIsTransformed())
return;
// If we are a CallSP, the binder put the subquery or UDF in a Tuple
// node as child(0). Need to transform the child before we do the rest
// of the Node to allow the Tuple::transformNode() to remove the ValueId
// of the Subquery or UDF from its tupleExpr. Otherwise we end up with
// an illegal transformation.
//
// This would not be needed if CallSP worked like the other nodes.
// Consider fixing this so the binder doesn't create the tuple, and instead
// let the normal transformation we do here do its magic.
//
// The other thing that is different for CallSP is that if it has a
// subquery or UDF in its inputs, it is not a leafNode until after
// we do the final transformation in TransRule.
//
// There we transform something like this:
//
//     CallSP                     Join
//       |                       /    \
//      Join         ===>      Join   CallSp
//     /    \                  /   \
//  Values   T1            Values   T1
//
if (child(0) != NULL)
{
child(0)->transformNode (normWARef, child(0));
// The RelRoutine:: transformNode () will transform the new child.
}
// Let the RelRoutine::transformNode() do the work.
RelRoutine::transformNode (normWARef, locationOfPointerToMe);
// The neededValueIds is left over from the old CallSp class hierarchy.
// It is believed that the inputParamsVids() should suffice.
// Will optimize this later.
getNeededValueIds() = getProcInputParamsVids();
// ---------------------------------------------------------------------
// Prime the Group Attributes
// ---------------------------------------------------------------------
primeGroupAttributes();
markAsTransformed();
} // IsolatedNonTableUDR::transformNode()
// -----------------------------------------------------------------------
// IsolatedNonTableUDR::rewriteNode()
// -----------------------------------------------------------------------
void IsolatedNonTableUDR::rewriteNode(NormWA &normWARef)
{
// ---------------------------------------------------------------------
// Make sure to rewrite all of our parameter inputs and predicates.
// ---------------------------------------------------------------------
getNeededValueIds().normalizeNode(normWARef);
RelRoutine::rewriteNode(normWARef);
}
//**********************************
// Constructor for class CqsWA
//***********************************
CqsWA::CqsWA():
tableCANodeList_(new (CmpCommon::statementHeap())
TableCANodeIdPairLookupList(
CmpCommon::statementHeap())
),
cqsCANodeIdMap_(new (CmpCommon::statementHeap())
CQSRelExprCANodeIdMap(30,
CmpCommon::statementHeap())
),
reArrangementSuccessful_(FALSE),
numberOfScanNodesinNQT_(0),
numberOfScanNodesinCQS_(0)
{}
//************************************************************************
// This method collects CANodeIds from each scan node of the Normalized
// tree
//************************************************************************
void CqsWA::gatherCANodeIDTableNamepairsForNormalizedTree( RelExpr *nqtExpr)
{
// leaf
if (nqtExpr->getArity() == 0)
{
if (nqtExpr->getOperatorType() == REL_SCAN)
{
Scan *scan = (Scan *) nqtExpr;
TableCANodeIdPair *tableIdPair = new (CmpCommon::statementHeap())
TableCANodeIdPair();
tableIdPair->Id_ = scan->getGroupAttr()->getGroupAnalysis()->
getNodeAnalysis()->getId();
tableIdPair->tabId_ = scan->getTableDesc();
getTableCANodeList()->insert(tableIdPair);
}
}
else
{
Int32 i =0;
for (; i<nqtExpr->getArity();i++)
{
gatherCANodeIDTableNamepairsForNormalizedTree(nqtExpr->child(i));
}
}
} // gatherCANodeIDTableNamepairsForNormalizedTree()
//**************************************************************************
// This method delegates responsibility of gathering CANodeIdSets (TableSets)
// for the CQS tree to class CQSRelExprNodeMap
//***************************************************************************
void CqsWA::gatherNodeIdSetsForCQSTree(RelExpr *cqsExpr)
{
CANodeIdSet set_= getcqsCANodeIdMap()->gatherNodeIdSetsForCQSTree(cqsExpr, this);
}
//*************************************************************************
// This method collects CANodeId values for all tables in the CQS tree.
//*************************************************************************
CANodeIdSet CQSRelExprCANodeIdMap::gatherNodeIdSetsForCQSTree(RelExpr* cqsExpr,
CqsWA *cwa)
{
Int32 arity = cqsExpr->getArity();
if ((arity == 0) && (cqsExpr->getOperatorType() == REL_FORCE_ANY_SCAN))
{
CQSRelExprCANodeIdPair * relExprNodeId = new (CmpCommon::statementHeap())
CQSRelExprCANodeIdPair();
cwa->incrementNumberOfScanNodesinCQS();
ScanForceWildCard *forcedScan = (ScanForceWildCard *) cqsExpr;
CANodeId Id_ = relExprNodeId->populateReturnCANodeId(forcedScan, cwa);
insertThisElement(forcedScan, relExprNodeId);
CANodeIdSet caNodeset(Id_);
return caNodeset;
}
else if (arity > 0)
{
CQSRelExprCANodeIdPair * relExprNodeId =
new (CmpCommon::statementHeap()) CQSRelExprCANodeIdPair();
if (cwa->isIndexJoin(cqsExpr))
{
relExprNodeId->leftChildSet_ = CANodeIdSet();
relExprNodeId->rightChildSet_=gatherNodeIdSetsForCQSTree(
cqsExpr->child(1), cwa);
}
else
{
relExprNodeId->leftChildSet_=gatherNodeIdSetsForCQSTree(
cqsExpr->child(0), cwa);
if (arity == 1)
relExprNodeId->rightChildSet_=CANodeIdSet();
else
// arity is 2
relExprNodeId->rightChildSet_=gatherNodeIdSetsForCQSTree(
cqsExpr->child(1), cwa);
}
relExprNodeId->forcedNode_ = cqsExpr;
insertThisElement(cqsExpr,relExprNodeId);
return relExprNodeId->leftChildSet_+
relExprNodeId->rightChildSet_;
}
else
{
// leaves other than scan such as Tuple...
// how do we treat derived tables? (values(1) as t(a))
return CANodeIdSet();
}
} // gatherNodeIdSetsForCQSTree()
//************************************************************************
// Given a Table Name or Index Name, this finds the corresponding CANodeId
// For MP tables: the table name needs to be like \node.$vol.subvol.tablename
// Otherwise, we assume that it is an MX table....
//************************************************************************
CANodeId CqsWA::findCANodeId(const NAString &tableName)
{
TableCANodeIdPairLookupList *tcpairList = getTableCANodeList();
TableCANodeIdPair *tcpair;
// if tableName is of form cat.sch.t ok
// otherwise get it to that form by appending current catalog and
// schema name as required.
// how about MP tables???????????? TBD.....
// for MX tables only or ANSI notation...
NAString tableNameAppend(CmpCommon::statementHeap());
for (CollIndex i=0; i < tcpairList->entries(); i++)
{
tcpair = tcpairList->at(i);
if (tcpair->tabId_->getCorrNameObj().getCorrNameAsString() != "")
{
// if correlation name is set, do not append the default catalog
// and schema name
tableNameAppend = tableName;
}
else if (isMPTable(tableName))
{
tableNameAppend = tableName;
}
else
{
tableNameAppend = makeItThreePartAnsiString(tableName);
}
if ((tcpair->tabId_->getCorrNameObj().getQualifiedNameAsString()
== tableNameAppend) ||
(tcpair->tabId_->getCorrNameObj().getCorrNameAsString()
== tableNameAppend))
{
if (tcpair->visited_)
AssertException("", __FILE__, __LINE__).throwException();
tcpair->visited_ = TRUE;
return tcpair->Id_;
}
else
{
// check indexes
IndexDesc *indexDesc;
const LIST(IndexDesc *) &indexList = tcpair->tabId_->getIndexes();
for (CollIndex j=0; j < indexList.entries(); j++)
{
indexDesc = indexList.at(j);
if (tableNameAppend == indexDesc->getNAFileSet()->getExtFileSetName())
{
if (tcpair->visited_)
AssertException("", __FILE__, __LINE__).throwException();
tcpair->visited_ = TRUE;
return tcpair->Id_;
}
}
}
} // for
// if you are here, invoke error handling
AssertException("", __FILE__, __LINE__).throwException();
return CANodeId(); // keep VisualC++ happy
} // CqsWA::findCANodeId()
//*************************************************************
// This method collects CANodeId values for each table
// It then traverses the CQS tree and collects Tablesets for each
// node. Both left and right child table sets are kept at each node
// Tablesets are sets of CANodeId values...
//**************************************************************
void CqsWA::initialize(RelExpr *nqtExpr, RelExpr *cqsExpr)
{
gatherCANodeIDTableNamepairsForNormalizedTree(nqtExpr);
numberOfScanNodesinNQT_ = getTableCANodeList()->entries();
gatherNodeIdSetsForCQSTree(cqsExpr);
if (numberOfScanNodesinNQT_ > numberOfScanNodesinCQS_)
{
AssertException("", __FILE__, __LINE__).throwException();
}
}
//*****************************************************************
// For a given scan node, this collects the CANodeId
//*****************************************************************
CANodeId CQSRelExprCANodeIdPair::populateReturnCANodeId(RelExpr *scan,
CqsWA *cwa)
{
ScanForceWildCard *forcedScan = (ScanForceWildCard *) scan;
NAString tableName(forcedScan->getExposedName(),
CmpCommon::statementHeap());
NAString indexName( forcedScan->getIndexName(),CmpCommon::statementHeap());
CANodeId Id_;
if (tableName != "")
{
Id_ = cwa->findCANodeId(tableName);
}
else if (indexName != "")
{
Id_ = cwa->findCANodeId(indexName);
}
//else
// error & give-up
forcedNode_ = forcedScan;
leftChildSet_ = CANodeIdSet();
rightChildSet_ = CANodeIdSet();
return Id_;
} // CQSRelExprCANodeIdPair::populateReturnCANodeId()
//*********************************************************************
// Constructor for the map: maps CQS Relational expression pointer with
// CANodeId Sets of left subtree and right subtree
//*********************************************************************
CQSRelExprCANodeIdMap::CQSRelExprCANodeIdMap(ULng32 init_size,
CollHeap *outHeap):
HASHDICTIONARY(ULng32, CQSRelExprCANodeIdPair)
( &(CQSRelExprCANodeIdMap::HashFn),
init_size,
TRUE, // uniqueness
outHeap)
{}
//*******************************************************************
// A hash function required by Hashdictionary
//*******************************************************************
ULng32 CQSRelExprCANodeIdMap::HashFn(const ULng32 &key)
{
return key;
}
//**************************************************************
// Given the RelExpr pointer, this method gives the table subsets
//
//**************************************************************
CQSRelExprCANodeIdPair * CQSRelExprCANodeIdMap::get(RelExpr *key )
{
ULng32 *myKey_ = (ULng32 *)new (CmpCommon::statementHeap()) Long;
*(Long *)myKey_ = (const Long) (key);
CQSRelExprCANodeIdPair *result =
HASHDICTIONARY(ULng32, CQSRelExprCANodeIdPair)::getFirstValue(myKey_);
return result;
}
//****************************************************************
//
//****************************************************************
void CQSRelExprCANodeIdMap::insertThisElement(RelExpr * expr,
CQSRelExprCANodeIdPair *cqsNodeId)
{
ULng32 * myKey_ = (ULng32 *)new (CmpCommon::statementHeap())
Long;
*(Long *)myKey_ = (Long) (expr);
insert(myKey_, cqsNodeId);
}
//*********************************************************************
// pointer "this" is group by CQS expression. If the corresponding normalized
// relExpr is a group by, we process that expression; it could be a JBBC or
// not. If not we ignore it. This is because a group by may correspond to
// several groupby expressions at the end of optimization
//*********************************************************************
RelExpr *GroupByAgg::generateMatchingExpr(CANodeIdSet &lChildSet,
CANodeIdSet &rChildSet,
RelExpr *relExpr)
{
if (relExpr->getOperator().match(REL_ANY_GROUP))
{
return CURRSTMT_CQSWA->checkAndProcessGroupByJBBC(relExpr,
lChildSet,
rChildSet,
this);
}
else
{
return RelExpr::generateMatchingExpr(lChildSet, rChildSet, relExpr);
}
} // GroupByAgg::generateMatchingExpr()
//************************************************************
// The default Implementation simply calls the routine on the
// child expression
//************************************************************
RelExpr *RelExpr::generateMatchingExpr(CANodeIdSet &lChildSet,
CANodeIdSet &rChildSet,
RelExpr *relExpr)
{
CANodeIdSet leftTableSet, rightTableSet;
// throw an exception if the arity is not one
if (getArity() != 1)
AssertException("", __FILE__, __LINE__).throwException();
RelExpr *wcChild = child(0);
CURRSTMT_CQSWA->getTableSets(wcChild, leftTableSet, rightTableSet);
return (child(0)->generateMatchingExpr(leftTableSet,
rightTableSet,
relExpr));
} // RelExpr::generateMatchingExpr()
//*****************************************************************
// This recursive procedure traverses Join back bone of CQS tree and
// generates the logical relational expression using the normalized
// expression tree, relExpr.
//
//*****************************************************************
RelExpr *JoinForceWildCard::generateMatchingExpr(CANodeIdSet &lChildSet,
CANodeIdSet &rChildSet,
RelExpr *relExpr)
{
Join *j = NULL;
// check if the argument relExpr is a join; give an error if not ?
if ( relExpr->getOperator().match(REL_ANY_JOIN) ||
relExpr->getOperatorType() == REL_MULTI_JOIN)
{
j = (Join *)relExpr->generateLogicalExpr(lChildSet, rChildSet);
}
else
{
// index join?
if (
(relExpr->getOperator().match(REL_SCAN)) &&
(lChildSet.isEmpty() && rChildSet.entries() == 1)
)
return relExpr;
if (relExpr->getOperator().match(REL_ANY_GROUP))
{
return CURRSTMT_CQSWA->checkAndProcessGroupByJBBC(relExpr,
lChildSet,
rChildSet,
this );
}
// throw an exception, otherwise
AssertException("", __FILE__, __LINE__).throwException();
}
if (j != NULL)
{
RelExpr *lChild = j->child(0);
RelExpr *rChild = j->child(1);
RelExpr *jwc = this;
CANodeIdSet leftTableSet, rightTableSet;
RelExpr *wcLeftChild = jwc->child(0);
RelExpr *wcRightChild = jwc->child(1);
CURRSTMT_CQSWA->getTableSets(wcLeftChild, leftTableSet, rightTableSet);
j->child(0) = wcLeftChild->generateMatchingExpr(leftTableSet,
rightTableSet,
lChild);
CURRSTMT_CQSWA->getTableSets(wcRightChild, leftTableSet, rightTableSet);
j->child(1) = wcRightChild->generateMatchingExpr(leftTableSet,
rightTableSet,
rChild
);
j->pushdownCoveredExpr
(j->getGroupAttr()->getCharacteristicOutputs(),
j->getGroupAttr()->getCharacteristicInputs(),
j->selectionPred());
return j;
}
return NULL;
} //JoinForceWildCard::generateMatchingExpr()
//*******************************************************************
// check if a join's child is a GB and if it is a JBBC handle it
// appropriately
//*******************************************************************
RelExpr * CqsWA::checkAndProcessGroupByJBBC( RelExpr *relExpr,
CANodeIdSet &lChildSet,
CANodeIdSet &rChildSet,
RelExpr *cqsExpr)
{
if (!relExpr->getOperator().match(REL_ANY_GROUP))
{
return relExpr;
}
// check if this GroupBy is a JBBC...
GBAnalysis *pGBAnalysis = NULL;
NodeAnalysis *nodeAnalysis = relExpr->getGroupAttr()->getGroupAnalysis()->
getNodeAnalysis();
GroupByAgg * gb = (GroupByAgg *) relExpr;
pGBAnalysis = gb->getGBAnalysis();
if (pGBAnalysis)
{
CANodeId id = nodeAnalysis->getId();
// you may not need this check
if (! QueryAnalysis::Instance()->getJBBCs().containsThisId(id))
AssertException("", __FILE__, __LINE__).throwException();
// get the child of GroupBy and re-arrange the join tree from
// TableSets....
RelExpr *childExpr = relExpr->child(0);
RelExpr *wcChild;
if (cqsExpr->getOperator().match(REL_ANY_GROUP))
{
wcChild = cqsExpr->child(0);
getTableSets(wcChild,lChildSet, rChildSet);
}
else
wcChild = cqsExpr;
relExpr->child(0) = wcChild->generateMatchingExpr
(lChildSet,rChildSet,childExpr);
relExpr->primeGroupAttributes();
relExpr->pushdownCoveredExpr
(relExpr->getGroupAttr()->getCharacteristicOutputs(),
relExpr->getGroupAttr()->getCharacteristicInputs(),
relExpr->selectionPred());
relExpr->synthLogProp();
return relExpr;
}
else
{
// not a JBBC
return relExpr;
// handle the case right child of a join GB<-Scan ?
}
// error
return NULL; // keep VisualC++ happy
}// CqsWA::checkAndProcessGroupByJBBC()
//*************************************************************************
// Given an expression from CQS tree as input, this method returns TableSets
// of its two children
//**************************************************************************
void CqsWA::getTableSets(RelExpr * cqsExpr,
CANodeIdSet & leftSet,
CANodeIdSet &rightSet)
{
CQSRelExprCANodeIdPair *NodeIdSets = getcqsCANodeIdMap()->get(cqsExpr);
leftSet = NodeIdSets->leftChildSet_;
rightSet = NodeIdSets->rightChildSet_;
} // CqsWA::getTableSets()
//************************************************************************
// We essentially ignore the exchange wild card: simply pass control to
// its child
//************************************************************************
RelExpr *ExchangeForceWildCard::generateMatchingExpr(CANodeIdSet &lChildSet,
CANodeIdSet &rChildSet,
RelExpr *relExpr)
{
// check the rChildSet is empty
if (! rChildSet.isEmpty())
AssertException("", __FILE__, __LINE__).throwException();
CANodeIdSet leftTableSet, rightTableSet;
RelExpr *wcChild = child(0);
CURRSTMT_CQSWA->getTableSets(wcChild, leftTableSet, rightTableSet);
return child(0)->generateMatchingExpr(leftTableSet,
rightTableSet,
relExpr);
} // ExchangeForceWildCard::generateMatchingExpr()
//***********************************************************************
// return the relExpr as is; we only do error checking here
//***********************************************************************
RelExpr *ScanForceWildCard::generateMatchingExpr(CANodeIdSet &lChildSet,
CANodeIdSet &rChildSet,
RelExpr *relExpr)
{
// check lChildSet and rChildSet are empty
// check relExpr is a Scan
if ( relExpr->getOperator().match(REL_SCAN) &&
lChildSet.isEmpty() &&
rChildSet.isEmpty()
)
return relExpr;
else
{
AssertException("", __FILE__, __LINE__).throwException();
return NULL; // keep VisualC++ happy
}
} // ScanForceWildCard::generateMatchingExpr()
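//***********************************************************************
// The default implementation cannot split an expression along the
// requested child table sets; throw an exception to abort CQS forcing.
//***********************************************************************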
RelExpr * RelExpr::generateLogicalExpr(CANodeIdSet &lChildSet,
CANodeIdSet &rChildSet)
{
AssertException("", __FILE__, __LINE__).throwException();
return NULL; // keep VisualC++ happy
}
//**************************************************************
// Split the join backbone along the requested child backbones
// returns a join node, if such a split is possible
// throws an exception otherwise.
//**************************************************************
RelExpr * MultiJoin::generateLogicalExpr (CANodeIdSet &lChildSet,
CANodeIdSet &rChildSet)
{
Join *j = splitByTables(lChildSet, rChildSet);
// if the split is not possible, throw an exception
if (j == NULL)
AssertException("", __FILE__, __LINE__).throwException();
j->child(0)->synthLogProp();
j->child(1)->synthLogProp();
j->synthLogProp();
return j;
}
RelExpr * GroupByAgg::generateLogicalExpr(CANodeIdSet &lChildSet,
CANodeIdSet &rChildSet)
{
AssertException("", __FILE__, __LINE__).throwException();
return NULL; // keep VisualC++ happy
}
NABoolean RelRoot::forceCQS(RelExpr *cqsExpr)
{
RelExpr *nqtExpr = this;
if (CmpCommon::getDefault(FORCE_BUSHY_CQS) != DF_ON)
return FALSE;
// make a copy of nqtExpr. In case we encounter exceptions and are unable
// to proceed, we give back the saved expression for further processing.
// Take care of transitively called CmpAsserts.
RelExpr *rootExpr = nqtExpr;
RelExpr *nqtCopyExpr = nqtExpr->child(0)->
copyRelExprTree(CmpCommon::statementHeap());
try
{
// do not bother with this if this query is simple: single table query etc.
// no updates, compound statements etc, describe, union
// if CQS relexpr contains CutOp do not continue
if (CqsWA::shouldContinue(nqtExpr, cqsExpr))
{
RelExpr *parentExpr = nqtExpr;
nqtExpr = nqtExpr->child(0);
RelExpr *topJoin= nqtExpr;
while (! topJoin->getOperator().match(REL_ANY_JOIN) &&
! (topJoin->getOperatorType() == REL_MULTI_JOIN))
{
if (topJoin->getOperator().match(REL_ANY_LEAF_OP))
{
AssertException("", __FILE__, __LINE__).throwException();
}
// we look for the top most join
parentExpr = topJoin;
topJoin = topJoin->child(0);
if (topJoin == NULL)
{
AssertException("", __FILE__, __LINE__).throwException();
}
} // while no join is found
CURRENTSTMT->initCqsWA();
CURRSTMT_CQSWA->initialize(nqtExpr, cqsExpr);
CANodeIdSet leftTableSet, rightTableSet;
CURRSTMT_CQSWA->getTableSets(cqsExpr,leftTableSet,rightTableSet);
RelExpr *childExpr = cqsExpr->generateMatchingExpr(leftTableSet,
rightTableSet,
topJoin);
parentExpr->child(0) = childExpr;
ValueIdList orderByList = reqdOrder();
ValueIdSet valuesNeeded = parentExpr->getGroupAttr()->
getCharacteristicOutputs();
// now add orderByList, if any, to expected outputs of Root's child
// this is needed so that child can synthesize and keep the sortkey.
// see the related code in RelRoot::normalizeNode() and
// PhysicalProperty::enforceCoverageByGroupAttributes(). The latter
// resets sortKey if it is not part of child's output. This needs to be
// investigated at a later time.
valuesNeeded.insertList(orderByList);
parentExpr->pushdownCoveredExpr
(valuesNeeded,
parentExpr->getGroupAttr()->getCharacteristicInputs(),
parentExpr->selectionPred());
parentExpr->synthLogProp();
CURRSTMT_CQSWA->reArrangementSuccessful_ = TRUE;
return TRUE;
} // shouldContinue?
return FALSE;
}
catch(...)
{
// decide on what message to give...
rootExpr->child(0)=nqtCopyExpr;
// reset anything else?
CURRENTSTMT->clearCqsWA();
}
return FALSE;
}
NABoolean CqsWA::shouldContinue(RelExpr *nqtExpr, RelExpr *cqsExpr)
{
// check if the Normalized expression contains any unsupported operators.
if (CqsWA::containsNotSupportedOperator(nqtExpr))
return FALSE;
// check if the CQS expression contains any cuts..
if (CqsWA::containsCutOp(cqsExpr))
return FALSE;
return TRUE;
} // CqsWA::shouldContinue()
NABoolean CqsWA::containsNotSupportedOperator(RelExpr *nqtExpr)
{
if (nqtExpr->getOperatorType() == REL_COMPOUND_STMT ||
nqtExpr->getOperator().match(REL_ANY_GEN_UPDATE) ||
nqtExpr->getOperatorType() == REL_UNION ||
nqtExpr->getOperatorType() == REL_DESCRIBE ||
nqtExpr->getOperatorType() == REL_TUPLE_LIST ||
nqtExpr->getOperatorType() == REL_TUPLE ||
nqtExpr->getOperatorType() == REL_DDL)
return TRUE;
for (Int32 i=0; i < nqtExpr->getArity(); i++)
{
if (containsNotSupportedOperator(nqtExpr->child(i)))
return TRUE;
}
return FALSE;
} //CqsWA::containsNotSupportedOperator()
NABoolean CqsWA::containsCutOp(RelExpr *cqsExpr)
{
if (cqsExpr->isCutOp())
return TRUE;
for (Int32 i = 0; i < cqsExpr->getArity(); i++)
{
if (containsCutOp(cqsExpr->child(i)))
return TRUE;
}
return FALSE;
} // CqsWA::containsCutOp()
//***************************************************************
// if tablename is not of form cat.sch.t, make it so
//
//***************************************************************
NAString CqsWA::makeItThreePartAnsiString(const NAString & tableName)
{
NAString tableNameAppend(CmpCommon::statementHeap());
size_t catlen, schlen;
catlen = tableName.first('.');
schlen = tableName.last('.');
SchemaName s = CmpCommon::context()->schemaDB_->getDefaultSchema();
size_t len = tableName.length();
if ((catlen > len) && (schlen > len ))
{
// append current catalog and schema names...
tableNameAppend += s.getCatalogName();
tableNameAppend += '.';
tableNameAppend += s.getSchemaName();
tableNameAppend += '.';
tableNameAppend += tableName;
}
else if ((catlen > len) && (schlen < len ))
{
// append catalog name
tableNameAppend += s.getCatalogName();
tableNameAppend += '.';
tableNameAppend += tableName;
}
else
{
tableNameAppend = tableName;
}
return tableNameAppend;
} // CqsWA::makeItThreePartAnsiString()
//***********************************************************
// check if the CQS Relational expression is an index join
//***********************************************************
NABoolean CqsWA::isIndexJoin(RelExpr *cqsExpr)
{
if (cqsExpr->getArity() == 1) return FALSE;
if (cqsExpr->getOperator().match(REL_FORCE_JOIN) ||
cqsExpr->getOperator().match(REL_FORCE_NESTED_JOIN) ||
cqsExpr->getOperator().match(REL_FORCE_HASH_JOIN) ||
cqsExpr->getOperator().match(REL_FORCE_MERGE_JOIN)
)
{
JoinForceWildCard *jwc= (JoinForceWildCard *)cqsExpr;
if (jwc->getPlan() == JoinForceWildCard::FORCED_INDEXJOIN)
return TRUE;
return FALSE;
}
else
return FALSE;
} // CqsWA::isIndexJoin()
//***************************************************************
// is tableName an MP table: does it look like $vol.subvol.tname?
//***************************************************************
NABoolean CqsWA::isMPTable(const NAString &tableName)
{
size_t volumeLength, schemaLength, nameLength;
nameLength = tableName.length();
volumeLength = tableName.first('$');
if (volumeLength < nameLength)
{
schemaLength=tableName.last('.');
if ( schemaLength < nameLength &&
volumeLength < schemaLength)
{
return TRUE;
}
else
{
AssertException("", __FILE__, __LINE__).throwException();
return FALSE; // keep VisualC++ happy
}
}
else
{
return FALSE;
}
} // CqsWA::isMPTable()
| 1 | 18,942 | There may be a few isolated cases where the FirstN node is added during preCodeGen. Please see GenPreCode.cpp RelRoot::preCodeGen(). The example given there about Order by where sort is added in optimizer, or a FirstN where the N value is to be specified with a param seem to be cases where we would add the FirstN later. Will current change cause such views to marked as updateable? This is minor and could be resolved later, if any change is necessary. | apache-trafodion | cpp |
@@ -213,4 +213,14 @@ public class GoSurfaceNamer extends SurfaceNamer {
public String getCreateStubFunctionName(Interface service) {
return getGrpcClientTypeName(service).replace(".", ".New");
}
+
+ @Override
+ public String getStaticLangStreamingReturnTypeName(Method method, MethodConfig methodConfig) {
+ // Unsafe string manipulation: The name looks like "LibraryService_StreamShelvesClient",
+    // neither camel nor underscore.
+ return String.format(
+ "%s_%sClient",
+ converter.getTypeName(method.getParent()).getNickname(),
+ method.getSimpleName());
+ }
} | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer.go;
import com.google.api.codegen.CollectionConfig;
import com.google.api.codegen.MethodConfig;
import com.google.api.codegen.transformer.FeatureConfig;
import com.google.api.codegen.transformer.ModelTypeFormatterImpl;
import com.google.api.codegen.transformer.ModelTypeTable;
import com.google.api.codegen.transformer.SurfaceNamer;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.go.GoNameFormatter;
import com.google.api.codegen.util.go.GoTypeTable;
import com.google.api.tools.framework.aspects.documentation.model.DocumentationUtil;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.ProtoElement;
import com.google.api.tools.framework.model.TypeRef;
import com.google.common.annotations.VisibleForTesting;
import io.grpc.Status;
import java.util.List;
public class GoSurfaceNamer extends SurfaceNamer {
private final GoModelTypeNameConverter converter;
private final String packagePath;
public GoSurfaceNamer(String packagePath) {
this(new GoModelTypeNameConverter(), packagePath);
}
private GoSurfaceNamer(GoModelTypeNameConverter converter, String packagePath) {
super(new GoNameFormatter(), new ModelTypeFormatterImpl(converter), new GoTypeTable());
this.converter = converter;
this.packagePath = packagePath;
}
@Override
public String getPathTemplateName(Interface service, CollectionConfig collectionConfig) {
return inittedConstantName(
getReducedServiceName(service)
.join(collectionConfig.getEntityName())
.join("path")
.join("template"));
}
@Override
public String getPathTemplateNameGetter(Interface service, CollectionConfig collectionConfig) {
return publicMethodName(
getReducedServiceName(service).join(collectionConfig.getEntityName()).join("path"));
}
@Override
public String getStaticLangReturnTypeName(Method method, MethodConfig methodConfig) {
return converter.getTypeName(method.getOutputType()).getFullName();
}
@Override
public List<String> getDocLines(ProtoElement element) {
if (!(element instanceof Method)) {
return super.getDocLines(element);
}
Method method = (Method) element;
String text = DocumentationUtil.getDescription(method);
text = lowerFirstLetter(text);
return super.getDocLines(getApiMethodName(method) + " " + text);
}
@Override
public String getAndSavePagedResponseTypeName(
FeatureConfig featureConfig,
ModelTypeTable typeTable,
TypeRef inputTypeName,
TypeRef outputTypeName,
Field resourcesField) {
String typeName = converter.getTypeNameForElementType(resourcesField.getType()).getNickname();
int dotIndex = typeName.indexOf('.');
if (dotIndex >= 0) {
typeName = typeName.substring(dotIndex + 1);
}
return className(Name.anyCamel(typeName).join("iterator"));
}
private static String lowerFirstLetter(String s) {
if (s.length() > 0) {
s = Character.toLowerCase(s.charAt(0)) + s.substring(1);
}
return s;
}
@Override
public String getGrpcClientTypeName(Interface service) {
return converter.getTypeName(service).getNickname() + "Client";
}
@Override
public String getCallSettingsTypeName(Interface service) {
return className(clientNamePrefix(service).join("call").join("options"));
}
@Override
public String getDefaultApiSettingsFunctionName(Interface service) {
return privateMethodName(
Name.from("default").join(clientNamePrefix(service)).join("client").join("options"));
}
@Override
public String getDefaultCallSettingsFunctionName(Interface service) {
return privateMethodName(
Name.from("default").join(clientNamePrefix(service)).join("call").join("options"));
}
@Override
public String getApiWrapperClassName(Interface service) {
return className(clientNamePrefix(service).join("client"));
}
@Override
public String getApiWrapperClassConstructorName(Interface service) {
return publicMethodName(Name.from("new").join(clientNamePrefix(service)).join("client"));
}
@Override
public String getApiWrapperClassConstructorExampleName(Interface service) {
return publicMethodName(
Name.from("example").join("new").join(clientNamePrefix(service)).join("client"));
}
@Override
public String getApiMethodExampleName(Interface service, Method method) {
// We use "unsafe" string concatenation here.
// Godoc expects the name to be in format "ExampleMyType_MyMethod";
// it is the only place we have mixed camel and underscore names.
return publicMethodName(Name.from("example").join(clientNamePrefix(service)).join("client"))
+ "_"
+ getApiMethodName(method);
}
@Override
public String getLocalPackageName() {
// packagePath is in form "cloud.google.com/go/library/apiv1";
// we want "library".
String[] parts = packagePath.split("/");
return parts[parts.length - 2];
}
@Override
public String getExamplePackageName() {
return getLocalPackageName() + "_test";
}
@VisibleForTesting
Name clientNamePrefix(Interface service) {
Name name = getReducedServiceName(service);
// If the service name matches the package name, don't include the service name in the prefix.
// Eg, instead of "library.NewLibraryClient", we want "library.NewClient".
if (Name.from(getLocalPackageName()).equals(name)) {
return Name.from();
}
return name;
}
@Override
public String getStatusCodeName(Status.Code code) {
return publicFieldName(Name.upperUnderscore(code.toString()));
}
@Override
public String getTypeConstructor(String typeNickname) {
if (!typeNickname.startsWith("*")) {
return typeNickname;
}
return "&" + typeNickname.substring(1);
}
@Override
public String getGrpcContainerTypeName(Interface service) {
return "";
}
@Override
public String getServiceFileName(Interface service, String packageName) {
return classFileNameBase(getReducedServiceName(service).join("client"));
}
@Override
public String getExampleFileName(Interface service, String packageName) {
return classFileNameBase(
getReducedServiceName(service).join("client").join("example").join("test"));
}
@Override
public String getStubName(Interface service) {
return privateFieldName(clientNamePrefix(service).join("client"));
}
@Override
public String getCreateStubFunctionName(Interface service) {
return getGrpcClientTypeName(service).replace(".", ".New");
}
}
| 1 | 18,249 | I think you should compose the %sClient part using the Name class, and then do the remaining composition with plain concatenation. Side question: why does the return type name look like it is a client type name? | googleapis-gapic-generator | java |
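For the GoSurfaceNamer diff above, one way to follow the review suggestion of composing the "%sClient" part via the Name class is to build the "<Method>Client" suffix with Name and keep only the underscore as plain concatenation. A hedged sketch, not the actual patch; it assumes className and Name.anyCamel behave the same way they do in getAndSavePagedResponseTypeName earlier in this file:

  @Override
  public String getStaticLangStreamingReturnTypeName(Method method, MethodConfig methodConfig) {
    // Compose the suffix via Name instead of String.format,
    // e.g. "StreamShelves" -> "StreamShelvesClient".
    String clientSuffix = className(Name.anyCamel(method.getSimpleName()).join("client"));
    // Godoc-style mixed name, e.g. "LibraryService" + "_" + "StreamShelvesClient".
    return converter.getTypeName(method.getParent()).getNickname() + "_" + clientSuffix;
  }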
@@ -63,6 +63,8 @@ storiesOf( 'PageSpeed Insights Module/Components', module )
<DashboardPageSpeedWidget { ...widgetComponentProps } />
</WithTestRegistry>
);
+ }, {
+ padding: 0,
} )
.add( 'Dashboard widget (loading)', () => {
freezeFetch( /^\/google-site-kit\/v1\/modules\/pagespeed-insights\/data\/pagespeed/ ); | 1 | /**
* PageSpeed Insights Module Component Stories.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import { storiesOf } from '@storybook/react';
/**
* Internal dependencies
*/
import DashboardPageSpeedWidget from '../assets/js/modules/pagespeed-insights/components/dashboard/DashboardPageSpeedWidget';
import { CORE_SITE } from '../assets/js/googlesitekit/datastore/site/constants';
import { CORE_USER, PERMISSION_MANAGE_OPTIONS } from '../assets/js/googlesitekit/datastore/user/constants';
import { CORE_MODULES } from '../assets/js/googlesitekit/modules/datastore/constants';
import * as fixtures from '../assets/js/modules/pagespeed-insights/datastore/__fixtures__';
import { STORE_NAME, STRATEGY_MOBILE, STRATEGY_DESKTOP } from '../assets/js/modules/pagespeed-insights/datastore/constants';
import { WithTestRegistry, freezeFetch } from '../tests/js/utils';
import { getWidgetComponentProps } from '../assets/js/googlesitekit/widgets/util';
const widgetComponentProps = getWidgetComponentProps( 'dashboardPageSpeed' );
storiesOf( 'PageSpeed Insights Module/Components', module )
.add( 'Dashboard widget', () => {
const url = fixtures.pagespeedMobile.loadingExperience.id;
const setupRegistry = ( { dispatch } ) => {
dispatch( STORE_NAME ).receiveGetReport( fixtures.pagespeedMobile, { url, strategy: STRATEGY_MOBILE } );
dispatch( STORE_NAME ).finishResolution( 'getReport', [ url, STRATEGY_MOBILE ] );
dispatch( STORE_NAME ).receiveGetReport( fixtures.pagespeedDesktop, { url, strategy: STRATEGY_DESKTOP } );
dispatch( STORE_NAME ).finishResolution( 'getReport', [ url, STRATEGY_DESKTOP ] );
dispatch( CORE_SITE ).receiveSiteInfo( {
referenceSiteURL: url,
currentEntityURL: null,
} );
dispatch( CORE_MODULES ).receiveGetModules( [
{
slug: 'pagespeed-insights',
active: true,
connected: true,
},
] );
};
return (
<WithTestRegistry callback={ setupRegistry }>
<DashboardPageSpeedWidget { ...widgetComponentProps } />
</WithTestRegistry>
);
} )
.add( 'Dashboard widget (loading)', () => {
freezeFetch( /^\/google-site-kit\/v1\/modules\/pagespeed-insights\/data\/pagespeed/ );
const url = fixtures.pagespeedMobile.loadingExperience.id;
const setupRegistry = ( { dispatch } ) => {
// Component will be loading as long as both reports are not present.
// Omit receiving mobile here to trigger the request only once.
dispatch( STORE_NAME ).receiveGetReport( fixtures.pagespeedDesktop, { url, strategy: STRATEGY_DESKTOP } );
dispatch( CORE_SITE ).receiveSiteInfo( {
referenceSiteURL: url,
currentEntityURL: null,
} );
dispatch( CORE_MODULES ).receiveGetModules( [
{
slug: 'pagespeed-insights',
active: true,
connected: true,
},
] );
};
return (
<WithTestRegistry callback={ setupRegistry }>
<DashboardPageSpeedWidget { ...widgetComponentProps } />
</WithTestRegistry>
);
} )
.add( 'Dashboard widget (Field Data Unavailable)', () => {
const url = fixtures.pagespeedMobile.loadingExperience.id;
const setupRegistry = ( { dispatch } ) => {
dispatch( STORE_NAME ).receiveGetReport( fixtures.pagespeedMobileNoFieldData, { url, strategy: STRATEGY_MOBILE } );
dispatch( STORE_NAME ).finishResolution( 'getReport', [ url, STRATEGY_MOBILE ] );
dispatch( STORE_NAME ).receiveGetReport( fixtures.pagespeedDesktopNoFieldData, { url, strategy: STRATEGY_DESKTOP } );
dispatch( STORE_NAME ).finishResolution( 'getReport', [ url, STRATEGY_DESKTOP ] );
dispatch( CORE_SITE ).receiveSiteInfo( {
referenceSiteURL: url,
currentEntityURL: null,
} );
dispatch( CORE_MODULES ).receiveGetModules( [
{
slug: 'pagespeed-insights',
active: true,
connected: true,
},
] );
};
return (
<WithTestRegistry callback={ setupRegistry }>
<DashboardPageSpeedWidget { ...widgetComponentProps } />
</WithTestRegistry>
);
} )
.add( 'Dashboard widget (Errors for Mobile and Desktop)', () => {
const url = fixtures.pagespeedMobile.loadingExperience.id;
const setupRegistry = ( { dispatch } ) => {
const mobileError = {
code: 'fetching_mobile_data_failed',
message: 'Fetching PageSpeed Insights report with strategy mobile failed.',
};
const desktopError = {
code: 'fetching_desktop_data_failed',
message: 'Fetching PageSpeed Insights report with strategy desktop failed.',
};
dispatch( STORE_NAME ).receiveError( mobileError, 'getReport', [ url, STRATEGY_MOBILE ] );
dispatch( STORE_NAME ).finishResolution( 'getReport', [ url, STRATEGY_MOBILE ] );
dispatch( STORE_NAME ).receiveError( desktopError, 'getReport', [ url, STRATEGY_DESKTOP ] );
dispatch( STORE_NAME ).finishResolution( 'getReport', [ url, STRATEGY_DESKTOP ] );
dispatch( CORE_SITE ).receiveSiteInfo( {
referenceSiteURL: url,
currentEntityURL: null,
} );
dispatch( CORE_MODULES ).receiveGetModules( [
{
slug: 'pagespeed-insights',
active: true,
connected: true,
},
] );
};
return (
<WithTestRegistry callback={ setupRegistry }>
<DashboardPageSpeedWidget { ...widgetComponentProps } />
</WithTestRegistry>
);
} )
.add( 'Dashboard widget (CTA)', () => {
const url = fixtures.pagespeedMobile.loadingExperience.id;
const setupRegistry = ( { dispatch } ) => {
dispatch( CORE_SITE ).receiveSiteInfo( {
referenceSiteURL: url,
currentEntityURL: null,
} );
dispatch( CORE_USER ).receiveCapabilities( {
[ PERMISSION_MANAGE_OPTIONS ]: true,
} );
dispatch( CORE_MODULES ).receiveGetModules( [
{
slug: 'pagespeed-insights',
active: false,
connected: false,
},
] );
};
return (
<WithTestRegistry callback={ setupRegistry }>
<DashboardPageSpeedWidget { ...widgetComponentProps } />
</WithTestRegistry>
);
} )
;
| 1 | 38,278 | All stories in this file also need the default padding. | google-site-kit-wp | js |
@@ -26,13 +26,14 @@ using Microsoft.AspNetCore.TestHost;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using OpenTelemetry.Metrics;
+using OpenTelemetry.Tests;
using Xunit;
namespace OpenTelemetry.Exporter.Prometheus.Tests
{
public sealed class PrometheusExporterMiddlewareTests
{
- private const string MeterName = "PrometheusExporterMiddlewareTests.Meter";
+ private static readonly string MeterName = Utils.GetCurrentMethodName();
[Fact]
public async Task PrometheusExporterMiddlewareIntegration() | 1 | // <copyright file="PrometheusExporterMiddlewareTests.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
#if !NET461
using System;
using System.Collections.Generic;
using System.Diagnostics.Metrics;
using System.Net;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.TestHost;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using OpenTelemetry.Metrics;
using Xunit;
namespace OpenTelemetry.Exporter.Prometheus.Tests
{
public sealed class PrometheusExporterMiddlewareTests
{
private const string MeterName = "PrometheusExporterMiddlewareTests.Meter";
[Fact]
public async Task PrometheusExporterMiddlewareIntegration()
{
var host = await new HostBuilder()
.ConfigureWebHost(webBuilder => webBuilder
.UseTestServer()
.UseStartup<Startup>())
.StartAsync();
var tags = new KeyValuePair<string, object>[]
{
new KeyValuePair<string, object>("key1", "value1"),
new KeyValuePair<string, object>("key2", "value2"),
};
using var meter = new Meter(MeterName, "0.0.1");
var counter = meter.CreateCounter<double>("counter_double");
counter.Add(100.18D, tags);
counter.Add(0.99D, tags);
using var response = await host.GetTestClient().GetAsync("/metrics").ConfigureAwait(false);
Assert.Equal(HttpStatusCode.OK, response.StatusCode);
string content = await response.Content.ReadAsStringAsync().ConfigureAwait(false);
Assert.Equal(
$"# TYPE counter_double counter\ncounter_double{{key1=\"value1\",key2=\"value2\"}} 101.17 1633041000000\n",
content);
await host.StopAsync().ConfigureAwait(false);
}
public class Startup
{
public void ConfigureServices(IServiceCollection services)
{
services.AddOpenTelemetryMetrics(builder => builder
.AddMeter(MeterName)
.AddPrometheusExporter(o =>
{
o.GetUtcNowDateTimeOffset = () => new DateTimeOffset(2021, 9, 30, 22, 30, 0, TimeSpan.Zero);
if (o.StartHttpListener)
{
throw new InvalidOperationException("StartHttpListener should be false on .NET Core 3.1+.");
}
}));
}
public void Configure(IApplicationBuilder app)
{
app.UseOpenTelemetryPrometheusScrapingEndpoint();
}
}
}
}
#endif
| 1 | 22,295 | Curious - what would be the actual value? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -168,7 +168,8 @@ def main():
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
- CLASSES=datasets[0].CLASSES)
+ CLASSES=datasets[0].CLASSES,
+ PALETTE=datasets[0].PALETTE)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector( | 1 | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecated), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
seed = init_random_seed(args.seed)
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
meta['exp_name'] = osp.basename(args.config)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
| 1 | 26,763 | Is it necessary to record the palette in the checkpoint? Any reason? | open-mmlab-mmdetection | py |
@@ -3,9 +3,10 @@ import listenbrainz.webserver
import pika
import time
import threading
+import socketio
from flask import current_app
-
+from listenbrainz.webserver.views.api_tools import LISTEN_TYPE_PLAYING_NOW, LISTEN_TYPE_IMPORT
class FollowDispatcher(threading.Thread):
| 1 | import json
import listenbrainz.webserver
import pika
import time
import threading
from flask import current_app
class FollowDispatcher(threading.Thread):
def __init__(self, app):
threading.Thread.__init__(self)
self.app = app
def callback_listen(self, channel, method, properties, body):
x = json.loads(body)
current_app.logger.error("from callback listen: %s", json.dumps(x, indent=4))
channel.basic_ack(delivery_tag=method.delivery_tag)
def callback_playing_now(self, channel, method, properties, body):
x = json.loads(body)
current_app.logger.error("From callback playing now: %s", json.dumps(x, indent=4))
channel.basic_ack(delivery_tag=method.delivery_tag)
def create_and_bind_exchange_and_queue(self, channel, exchange, queue):
channel.exchange_declare(exchange=exchange, exchange_type='fanout')
channel.queue_declare(callback=lambda x: None, queue=queue, durable=True)
channel.queue_bind(callback=lambda x: None, exchange=exchange, queue=queue)
def on_open_callback(self, channel):
self.create_and_bind_exchange_and_queue(channel, current_app.config['UNIQUE_EXCHANGE'], current_app.config['FOLLOW_LIST_QUEUE'])
channel.basic_consume(self.callback_listen, queue=current_app.config['FOLLOW_LIST_QUEUE'])
self.create_and_bind_exchange_and_queue(channel, current_app.config['PLAYING_NOW_EXCHANGE'], current_app.config['PLAYING_NOW_QUEUE'])
channel.basic_consume(self.callback_playing_now, queue=current_app.config['PLAYING_NOW_QUEUE'])
def on_open(self, connection):
connection.channel(self.on_open_callback)
def init_rabbitmq_connection(self):
while True:
try:
credentials = pika.PlainCredentials(current_app.config['RABBITMQ_USERNAME'], current_app.config['RABBITMQ_PASSWORD'])
connection_parameters = pika.ConnectionParameters(
host=current_app.config['RABBITMQ_HOST'],
port=current_app.config['RABBITMQ_PORT'],
virtual_host=current_app.config['RABBITMQ_VHOST'],
credentials=credentials,
)
self.connection = pika.SelectConnection(parameters=connection_parameters, on_open_callback=self.on_open)
break
except Exception as e:
current_app.logger.error("Error while connecting to RabbitMQ: %s", str(e), exc_info=True)
time.sleep(3)
def run(self):
with self.app.app_context():
while True:
                current_app.logger.info("Starting follow dispatcher...")
current_app.logger.info("Connecting to RabbitMQ...")
self.init_rabbitmq_connection()
current_app.logger.info("Done!")
current_app.logger.info("Started!")
try:
self.connection.ioloop.start()
except KeyboardInterrupt:
current_app.logger.error("Keyboard interrupt!")
break
except Exception as e:
                    current_app.logger.error("Error in FollowDispatcher: %s", str(e), exc_info=True)
time.sleep(3)
| 1 | 15,162 | Not sure how to best do this without creating a new connection to the server everytime. Do this in another thread? | metabrainz-listenbrainz-server | py |
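One way to address the connection-reuse question for the FollowDispatcher above is to keep a single socket.io client around instead of connecting per message. A rough, hypothetical sketch; the WEBSOCKETS_SERVER_URL config key and the event names are assumptions, not part of the original code:

import json
import socketio
from flask import current_app


class SocketIOEmitter:
    """Holds one socket.io client and reconnects lazily when needed."""

    def __init__(self):
        self.sio = socketio.Client()

    def _ensure_connected(self):
        # Reconnect only if the previous connection dropped.
        if not self.sio.connected:
            self.sio.connect(current_app.config['WEBSOCKETS_SERVER_URL'])

    def emit_listen(self, body):
        self._ensure_connected()
        self.sio.emit('listen', json.loads(body))

    def emit_playing_now(self, body):
        self._ensure_connected()
        self.sio.emit('playing_now', json.loads(body))

The dispatcher callbacks could then call emit_listen/emit_playing_now instead of opening a new connection each time.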
@@ -167,6 +167,13 @@ describe Ncr::WorkOrder do
user = create(:user, client_slug: 'ncr')
expect(wo.slug_matches?(user)).to eq(true)
end
+
+ it "identifies eligible observers" do
+ wo = create(:ba80_ncr_work_order)
+ user = create(:user, client_slug: 'ncr')
+ expect(wo.proposal.eligible_observers.to_a).to include(user)
+ expect(wo.proposal.eligible_observers.to_a).to_not include(wo.observers)
+ end
end
describe '#organization' do | 1 | describe Ncr::WorkOrder do
include ProposalSpecHelper
describe '#relevant_fields' do
it "shows BA61 fields" do
wo = Ncr::WorkOrder.new
expect(wo.relevant_fields.sort).to eq([
:amount,
:building_number,
:cl_number,
# No :code
:description,
:direct_pay,
:expense_type,
:function_code,
:not_to_exceed,
:org_code,
# No :rwa_number
:soc_code,
:vendor
])
end
it "shows BA80 fields" do
wo = Ncr::WorkOrder.new(expense_type: 'BA80')
expect(wo.relevant_fields.sort).to eq([
:amount,
:building_number,
:cl_number,
:code,
:description,
:direct_pay,
# No Emergency
:expense_type,
:function_code,
:not_to_exceed,
:org_code,
:rwa_number,
:soc_code,
:vendor
])
end
end
describe '#setup_approvals_and_observers' do
let (:ba61_tier_one_email) { Ncr::WorkOrder.ba61_tier1_budget_mailbox }
let (:ba61_tier_two_email) { Ncr::WorkOrder.ba61_tier2_budget_mailbox }
it "creates approvers when not an emergency" do
form = create(:ncr_work_order, expense_type: 'BA61')
form.setup_approvals_and_observers
expect(form.observations.length).to eq(0)
expect(form.approvers.map(&:email_address)).to eq([
form.approving_official_email,
ba61_tier_one_email,
ba61_tier_two_email
])
form.reload
expect(form.approved?).to eq(false)
end
it "reuses existing approvals" do
form = create(:ncr_work_order, expense_type: 'BA61')
form.setup_approvals_and_observers
first_approval = form.individual_approvals.first
form.reload.setup_approvals_and_observers
expect(form.individual_approvals.first).to eq(first_approval)
end
it "creates observers when in an emergency" do
form = create(:ncr_work_order, expense_type: 'BA61',
emergency: true)
form.setup_approvals_and_observers
expect(form.observers.map(&:email_address)).to match_array([
form.approving_official_email,
ba61_tier_one_email,
ba61_tier_two_email
].uniq)
expect(form.approvals.length).to eq(0)
form.clear_association_cache
expect(form.approved?).to eq(true)
end
it "accounts for approver transitions when nothing's approved" do
ba80_budget_email = Ncr::WorkOrder.ba80_budget_mailbox
wo = create(:ncr_work_order, approving_official_email: '[email protected]', expense_type: 'BA61')
wo.setup_approvals_and_observers
expect(wo.approvers.map(&:email_address)).to eq [
'[email protected]',
ba61_tier_one_email,
ba61_tier_two_email
]
wo.update(org_code: 'P1122021 (192X,192M) WHITE HOUSE DISTRICT')
wo.setup_approvals_and_observers
expect(wo.reload.approvers.map(&:email_address)).to eq [
'[email protected]',
ba61_tier_two_email
]
wo.approving_official_email = '[email protected]'
wo.setup_approvals_and_observers
expect(wo.reload.approvers.map(&:email_address)).to eq [
'[email protected]',
ba61_tier_two_email
]
wo.approving_official_email = '[email protected]'
wo.update(expense_type: 'BA80')
wo.setup_approvals_and_observers
expect(wo.reload.approvers.map(&:email_address)).to eq [
'[email protected]',
ba80_budget_email
]
end
it "unsets the approval status" do
ba80_budget_email = Ncr::WorkOrder.ba80_budget_mailbox
wo = create(:ba80_ncr_work_order)
wo.setup_approvals_and_observers
expect(wo.approvers.map(&:email_address)).to eq [
wo.approving_official_email,
ba80_budget_email
]
wo.individual_approvals.first.approve!
wo.individual_approvals.second.approve!
expect(wo.reload.approved?).to be true
wo.update(expense_type: 'BA61')
wo.setup_approvals_and_observers
expect(wo.reload.pending?).to be true
end
it "does not re-add observers on emergencies" do
wo = create(:ncr_work_order, expense_type: 'BA61', emergency: true)
wo.setup_approvals_and_observers
expect(wo.approvals).to be_empty
expect(wo.observers.count).to be 3
wo.setup_approvals_and_observers
wo.reload
expect(wo.approvals).to be_empty
expect(wo.observers.count).to be 3
end
it "handles the delegate then update scenario" do
wo = create(:ba80_ncr_work_order)
wo.setup_approvals_and_observers
delegate = create(:user)
wo.approvers.second.add_delegate(delegate)
wo.individual_approvals.second.update(user: delegate)
wo.individual_approvals.first.approve!
wo.individual_approvals.second.approve!
wo.setup_approvals_and_observers
wo.reload
expect(wo.approved?).to be true
expect(wo.approvers.second).to eq delegate
end
it "respects user with same client_slug" do
wo = create(:ba80_ncr_work_order)
user = create(:user, client_slug: 'ncr')
expect(wo.slug_matches?(user)).to eq(true)
end
end
describe '#organization' do
it "returns the corresponding Organization instance" do
org = Ncr::Organization.all.last
work_order = Ncr::WorkOrder.new(org_code: org.code)
expect(work_order.organization).to eq(org)
end
it "returns nil for no #org_code" do
work_order = Ncr::WorkOrder.new
expect(work_order.organization).to eq(nil)
end
end
describe '#system_approver_emails' do
let (:ba61_tier_one_email) { Ncr::WorkOrder.ba61_tier1_budget_mailbox }
let (:ba61_tier_two_email) { Ncr::WorkOrder.ba61_tier2_budget_mailbox }
context "for a BA61 request" do
it "skips the Tier 1 budget approver for WHSC" do
work_order = create(:ncr_work_order, expense_type: 'BA61', org_code: Ncr::Organization::WHSC_CODE)
expect(work_order.system_approver_emails).to eq([
ba61_tier_two_email
])
end
it "includes the Tier 1 budget approver for an unknown organization" do
work_order = create(:ncr_work_order, expense_type: 'BA61', org_code: nil)
expect(work_order.system_approver_emails).to eq([
ba61_tier_one_email,
ba61_tier_two_email
])
end
end
context "for a BA80 request" do
it "uses the general budget email" do
ba80_budget_email = Ncr::WorkOrder.ba80_budget_mailbox
work_order = create(:ba80_ncr_work_order)
expect(work_order.system_approver_emails).to eq([ba80_budget_email])
end
it "uses the OOL budget email for their org code" do
budget_email = Ncr::WorkOrder.ool_ba80_budget_mailbox
org_code = Ncr::Organization::OOL_CODES.first
work_order = create(:ba80_ncr_work_order, org_code: org_code)
expect(work_order.system_approver_emails).to eq([budget_email])
end
end
end
describe '#total_price' do
let (:work_order) { create(:ncr_work_order, amount: 45.36)}
it 'gets price from amount field' do
expect(work_order.total_price).to eq(45.36)
end
end
  describe "#public_identifier" do
it "prepends proposal ID with 'FY' and fiscal year" do
work_order = build(:ncr_work_order)
proposal = work_order.proposal
fiscal_year = work_order.fiscal_year.to_s.rjust(2, "0")
expect(work_order.public_identifier).to eq "FY#{fiscal_year}-#{proposal.id}"
end
end
describe '#fiscal_year' do
it 'ends the fiscal year on September 30th' do
work_order = create(:ncr_work_order, created_at: Date.new(2014, 9, 30))
expect(work_order.fiscal_year).to eq 14
end
it 'starts a new fiscal year on October first' do
work_order = create(:ncr_work_order, created_at: Date.new(2014, 10, 1))
expect(work_order.fiscal_year).to eq 15
end
end
describe 'validations' do
describe 'cl_number' do
let (:work_order) { build(:ncr_work_order) }
it "works with a 'CL' prefix" do
work_order.cl_number = 'CL1234567'
expect(work_order).to be_valid
end
it "requires seven numbers" do
work_order.cl_number = '123'
expect(work_order).to_not be_valid
expect(work_order.errors.keys).to eq([:cl_number])
end
end
describe 'function_code' do
let (:work_order) { build(:ncr_work_order) }
it "works with 'PG' followed by three characters" do
work_order.function_code = 'PG123'
expect(work_order).to be_valid
end
it "must have five characters" do
work_order.function_code = 'PG12'
expect(work_order).to_not be_valid
expect(work_order.errors.keys).to eq([:function_code])
end
end
describe 'RWA' do
let (:work_order) { build(:ncr_work_order, expense_type: 'BA80') }
it 'works with one letter followed by 7 numbers' do
work_order.rwa_number = 'A1234567'
expect(work_order).to be_valid
end
it 'must be 8 chars' do
work_order.rwa_number = 'A123456'
expect(work_order).not_to be_valid
end
it 'must have a letter at the beginning' do
work_order.rwa_number = '12345678'
expect(work_order).not_to be_valid
end
it "is required for BA80" do
work_order.rwa_number = nil
expect(work_order).to_not be_valid
expect(work_order.errors.keys).to eq([:rwa_number])
end
it "is not required for BA61" do
work_order.expense_type = 'BA61'
work_order.rwa_number = nil
expect(work_order).to be_valid
work_order.rwa_number = ''
expect(work_order).to be_valid
end
end
describe "soc_code" do
let (:work_order) { build(:ncr_work_order) }
it "works with three characters" do
work_order.soc_code = "123"
expect(work_order).to be_valid
end
it "must be three characters" do
work_order.soc_code = "12"
expect(work_order).to_not be_valid
expect(work_order.errors.keys).to eq([:soc_code])
end
end
end
describe "#org_id" do
it "pulls out the organization id when present" do
wo = create(:ncr_work_order, org_code: 'P0000000 (192X,192M) PRIOR YEAR ACTIVITIES')
expect(wo.org_id).to eq("P0000000")
end
it "returns nil when no organization is present" do
wo = create(:ncr_work_order, org_code: nil)
expect(wo.org_id).to be_nil
end
end
describe "#building_id" do
it "pulls out the building id when an identifier is present" do
wo = build(:ncr_work_order, building_number: "AB1234CD then some more")
expect(wo.building_id).to eq("AB1234CD")
end
it "defaults to the whole building number" do
wo = build(:ncr_work_order, building_number: "Another String")
expect(wo.building_id).to eq("Another String")
end
it "allows nil" do
wo = build(:ncr_work_order, building_number: nil)
expect(wo.building_id).to be_nil
end
end
describe "#current_approver" do
it "returns the first pending approver" do
wo = create(:ncr_work_order, :with_approvers)
expect(wo.current_approver).to eq(wo.approvers.first)
wo.individual_approvals.first.approve!
expect(wo.current_approver).to eq(wo.approvers.second)
end
it "returns the first approver when fully approved" do
wo = create(:ncr_work_order, :with_approvers)
fully_approve(wo.proposal)
expect(wo.reload.current_approver).to eq(wo.approvers.first)
end
end
describe "#final_approver" do
it "returns the final approver" do
wo = create(:ncr_work_order, :with_approvers)
expect(wo.final_approver).to eq(wo.approvers.last)
wo.individual_approvals.first.approve!
expect(wo.final_approver).to eq(wo.approvers.last)
end
it "returns the last approver when fully approved" do
wo = create(:ncr_work_order, :with_approvers)
fully_approve(wo.proposal)
expect(wo.final_approver).to eq(wo.approvers.last)
end
end
end
| 1 | 15,332 | should we have a similar test for gsa18f procurements? | 18F-C2 | rb |
@@ -113,9 +113,9 @@ func TestProtocol_HandleTransfer(t *testing.T) {
GasLimit: testutil.TestGasLimit,
})
- sender, err := accountutil.AccountState(sm, v.caller.String())
+ sender, err := accountutil.AccountState(sm, v.caller)
require.NoError(err)
- recipient, err := accountutil.AccountState(sm, v.recipient)
+ recipient, err := accountutil.AccountStateByHash160(sm, v.recipient)
require.NoError(err)
gasFee := new(big.Int).Mul(v.gasPrice, new(big.Int).SetUint64(gas))
| 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package account
import (
"context"
"math/big"
"testing"
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/testutil"
"github.com/iotexproject/iotex-core/testutil/testdb"
)
func TestProtocol_ValidateTransfer(t *testing.T) {
require := require.New(t)
p := NewProtocol(rewarding.DepositGas)
t.Run("Oversized data", func(t *testing.T) {
tmpPayload := [32769]byte{}
payload := tmpPayload[:]
tsf, err := action.NewTransfer(uint64(1), big.NewInt(1), "2", payload, uint64(0), big.NewInt(0))
require.NoError(err)
require.Equal(action.ErrActPool, errors.Cause(p.Validate(context.Background(), tsf, nil)))
})
}
func TestProtocol_HandleTransfer(t *testing.T) {
require := require.New(t)
ctrl := gomock.NewController(t)
sm := testdb.NewMockStateManager(ctrl)
// set-up protocol and genesis states
p := NewProtocol(rewarding.DepositGas)
reward := rewarding.NewProtocol(0, 0)
registry := protocol.NewRegistry()
require.NoError(reward.Register(registry))
chainCtx := genesis.WithGenesisContext(
protocol.WithRegistry(context.Background(), registry),
config.Default.Genesis,
)
ctx := protocol.WithBlockCtx(chainCtx, protocol.BlockCtx{})
require.NoError(reward.CreateGenesisStates(ctx, sm))
// initial deposit to alfa and charlie (as a contract)
alfa := identityset.Address(28)
bravo := identityset.Address(29)
charlie := identityset.Address(30)
require.NoError(accountutil.StoreAccount(sm, alfa, &state.Account{
Balance: big.NewInt(50005),
}))
require.NoError(accountutil.StoreAccount(sm, charlie, &state.Account{
CodeHash: []byte("codeHash"),
}))
tests := []struct {
caller address.Address
nonce uint64
amount *big.Int
recipient string
gasLimit uint64
gasPrice *big.Int
isContract bool
err error
status uint64
contractLog uint64
}{
{
alfa, 1, big.NewInt(2), bravo.String(), 10000, big.NewInt(1), false, nil, uint64(iotextypes.ReceiptStatus_Success), 2,
},
// transfer to contract address only charges gas fee
{
alfa, 2, big.NewInt(20), charlie.String(), 10000, big.NewInt(1), true, nil, uint64(iotextypes.ReceiptStatus_Failure), 1,
},
// not enough balance
{
alfa, 3, big.NewInt(30000), bravo.String(), 10000, big.NewInt(1), false, state.ErrNotEnoughBalance, uint64(iotextypes.ReceiptStatus_Failure), 1,
},
}
for _, v := range tests {
tsf, err := action.NewTransfer(v.nonce, v.amount, v.recipient, []byte{}, v.gasLimit, v.gasPrice)
require.NoError(err)
gas, err := tsf.IntrinsicGas()
require.NoError(err)
ctx = protocol.WithActionCtx(chainCtx, protocol.ActionCtx{
Caller: v.caller,
IntrinsicGas: gas,
})
ctx = protocol.WithBlockCtx(ctx, protocol.BlockCtx{
BlockHeight: 1,
Producer: identityset.Address(27),
GasLimit: testutil.TestGasLimit,
})
sender, err := accountutil.AccountState(sm, v.caller.String())
require.NoError(err)
recipient, err := accountutil.AccountState(sm, v.recipient)
require.NoError(err)
gasFee := new(big.Int).Mul(v.gasPrice, new(big.Int).SetUint64(gas))
receipt, err := p.Handle(ctx, tsf, sm)
require.Equal(v.err, errors.Cause(err))
if err != nil {
require.Nil(receipt)
// sender balance/nonce remains the same in case of error
newSender, err := accountutil.AccountState(sm, v.caller.String())
require.NoError(err)
require.Equal(sender.Balance, newSender.Balance)
require.Equal(sender.Nonce, newSender.Nonce)
continue
}
require.Equal(v.status, receipt.Status)
// amount is transferred only upon success and for non-contract recipient
if receipt.Status == uint64(iotextypes.ReceiptStatus_Success) && !v.isContract {
gasFee.Add(gasFee, v.amount)
// verify recipient
newRecipient, err := accountutil.AccountState(sm, v.recipient)
require.NoError(err)
recipient.AddBalance(v.amount)
require.Equal(recipient.Balance, newRecipient.Balance)
}
// verify sender balance/nonce
newSender, err := accountutil.AccountState(sm, v.caller.String())
require.NoError(err)
sender.SubBalance(gasFee)
require.Equal(sender.Balance, newSender.Balance)
require.Equal(v.nonce, newSender.Nonce)
// verify transaction log
tLog := block.ReceiptTransactionLog(receipt)
if tLog != nil {
require.NotNil(tLog)
pbLog := tLog.Proto()
require.EqualValues(v.contractLog, pbLog.NumTransactions)
// TODO: verify gas transaction log
if len(pbLog.Transactions) > 1 {
rec := pbLog.Transactions[0]
require.Equal(v.amount.String(), rec.Amount)
require.Equal(v.caller.String(), rec.Sender)
require.Equal(v.recipient, rec.Recipient)
require.Equal(iotextypes.TransactionLogType_NATIVE_TRANSFER, rec.Type)
}
}
}
}
| 1 | 23,689 | change `v.recipient` to address.Address, so can use `AccountState(v,recipient)` | iotexproject-iotex-core | go |
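A minimal sketch of the adjustment suggested for the transfer test above, with the recipient declared as an address.Address so the plain AccountState helper covers both parties. Illustrative fragment only; other test fields are elided, and call sites that still need a string would use v.recipient.String():

	// Hypothetical fragment, not the actual patch.
	tests := []struct {
		caller    address.Address
		recipient address.Address
		// ...remaining fields as in the existing test table...
	}{
		{caller: identityset.Address(28), recipient: identityset.Address(29)},
	}
	for _, v := range tests {
		sender, err := accountutil.AccountState(sm, v.caller)
		require.NoError(err)
		recipient, err := accountutil.AccountState(sm, v.recipient)
		require.NoError(err)
		_ = sender
		_ = recipient
	}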
@@ -0,0 +1,6 @@
+class NcrDispatcher < LinearDispatcher
+
+ def requires_approval_notice? approval
+ approval.cart_approvals.approvable.order('position ASC').last == approval
+ end
+end | 1 | 1 | 12,371 | Open to doing away with this altogether and maybe injecting this logic somehow into `requires_approval_notice?` in LinearDispatcher. | 18F-C2 | rb |
|
@@ -26,6 +26,12 @@ namespace Nethermind.JsonRpc
         [ConfigItem(Description = "Host for JSON RPC calls. Ensure the firewall is configured when enabling JSON RPC. If it does not work with 127.0.0.1 try something like 10.0.0.4 or 192.168.0.1", DefaultValue = "\"127.0.0.1\"")]
string Host { get; set; }
+ [ConfigItem(Description = "Defines whether the JSON RPC tracers will have timeout enabled. Default timeout is 5 seconds per tracer usage.", DefaultValue = "false")]
+ bool TracerTimeoutEnabled { get; set; }
+
+ [ConfigItem(Description = "JSON RPC tracers' timeout value given in seconds.", DefaultValue = "5")]
+ int TracerTimeout { get; set; }
+
[ConfigItem(Description = "Base file path for diagnostic JSON RPC recorder.", DefaultValue = "\"logs/rpc.log_1.txt\"")]
string RpcRecorderBaseFilePath { get; set; }
| 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using Nethermind.Config;
namespace Nethermind.JsonRpc
{
public interface IJsonRpcConfig : IConfig
{
        [ConfigItem(Description = "Defines whether the JSON RPC service is enabled on node startup. Configure host and port if default values do not work for you.", DefaultValue = "false")]
bool Enabled { get; set; }
[ConfigItem(Description = "Host for JSON RPC calls. Ensure the firewall is configured when enabling JSON RPC. If it does not work with 117.0.0.1 try something like 10.0.0.4 or 192.168.0.1", DefaultValue = "\"127.0.0.1\"")]
string Host { get; set; }
[ConfigItem(Description = "Base file path for diagnostic JSON RPC recorder.", DefaultValue = "\"logs/rpc.log_1.txt\"")]
string RpcRecorderBaseFilePath { get; set; }
[ConfigItem(Description = "Defines whether the JSON RPC diagnostic recording is enabled on node startup. Do not enable unless you are a DEV diagnosing issues with JSON RPC.", DefaultValue = "false")]
bool RpcRecorderEnabled { get; set; }
[ConfigItem(Description = "Port number for JSON RPC calls. Ensure the firewall is configured when enabling JSON RPC.", DefaultValue = "8545")]
int Port { get; set; }
[ConfigItem(Description = "Port number for JSON RPC web sockets calls. By default same port is used as regular JSON RPC. Ensure the firewall is configured when enabling JSON RPC.", DefaultValue = "8545")]
int WebSocketsPort { get; set; }
[ConfigItem(Description = "Defines which RPC modules should be enabled.", DefaultValue = "all")]
string[] EnabledModules { get; set; }
[ConfigItem(Description = "Defines block depth when finding logs.", DefaultValue = "1000")]
int FindLogBlockDepthLimit { get; set; }
[ConfigItem(Description = "Gas limit for eth_call and eth_estimateGas", DefaultValue = "100000000")]
long? GasCap { get; set; }
[ConfigItem(Description = "Interval between the JSON RPC stats report log", DefaultValue = "300")]
public int ReportIntervalSeconds { get; set; }
[ConfigItem(Description = "Buffer responses before sending them to client. This allows to set Content-Length in response instead of using Transfer-Encoding: chunked. This may degrade performance on big responses.", DefaultValue = "false")]
public bool BufferResponses { get; set; }
}
}
| 1 | 24,195 | remove this and always enable tracer, just set the default timeout to something higher (20 seconds) | NethermindEth-nethermind | .cs |
@@ -39,6 +39,7 @@ public class ScriptDTO {
private final Boolean free;
private final Boolean requiresPatch;
private final String script;
+ private final URI icon;
private ScriptDTO(Builder builder) {
this.scriptName = builder.scriptName; | 1 | /*
* Copyright (C) 2015-2017 PÂRIS Quentin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.phoenicis.repository.dto;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.phoenicis.entities.OperatingSystem;
import java.net.URI;
import java.util.Comparator;
import java.util.List;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonDeserialize(builder = ScriptDTO.Builder.class)
public class ScriptDTO {
private final String scriptName;
private final URI scriptSource;
private final List<OperatingSystem> compatibleOperatingSystems;
private final List<OperatingSystem> testingOperatingSystems;
private final Boolean free;
private final Boolean requiresPatch;
private final String script;
private ScriptDTO(Builder builder) {
this.scriptName = builder.scriptName;
this.scriptSource = builder.scriptSource;
this.compatibleOperatingSystems = builder.compatibleOperatingSystems;
this.testingOperatingSystems = builder.testingOperatingSystems;
this.free = builder.free;
this.requiresPatch = builder.requiresPatch;
this.script = builder.script;
}
public String getScriptName() {
return scriptName;
}
public URI getScriptSource() {
return scriptSource;
}
public List<OperatingSystem> getCompatibleOperatingSystems() {
return compatibleOperatingSystems;
}
public Boolean isFree() {
return free;
}
public Boolean isRequiresPatch() {
return requiresPatch;
}
public List<OperatingSystem> getTestingOperatingSystems() {
return testingOperatingSystems;
}
public String getScript() {
return script;
}
public static Comparator<ScriptDTO> nameComparator() {
return (o1, o2) -> o1.getScriptName().compareToIgnoreCase(o2.getScriptName());
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ScriptDTO scriptDTO = (ScriptDTO) o;
return new EqualsBuilder()
.append(scriptName, scriptDTO.scriptName)
.append(scriptSource, scriptDTO.scriptSource)
.append(compatibleOperatingSystems, scriptDTO.compatibleOperatingSystems)
.append(testingOperatingSystems, scriptDTO.testingOperatingSystems)
.append(free, scriptDTO.free)
.append(requiresPatch, scriptDTO.requiresPatch)
.append(script, scriptDTO.script)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37)
.append(scriptName)
.append(scriptSource)
.append(compatibleOperatingSystems)
.append(testingOperatingSystems)
.append(free).append(requiresPatch)
.append(script)
.toHashCode();
}
@JsonPOJOBuilder(buildMethodName = "build", withPrefix = "with")
public static class Builder {
private String scriptName;
private URI scriptSource;
private List<OperatingSystem> compatibleOperatingSystems;
private List<OperatingSystem> testingOperatingSystems;
private Boolean free;
private Boolean requiresPatch;
private String script;
public Builder() {
// Default constructor
}
public Builder(ScriptDTO scriptDTO) {
this.withScriptName(scriptDTO.getScriptName()).withScript(scriptDTO.getScript())
.withCompatibleOperatingSystems(scriptDTO.getCompatibleOperatingSystems())
.withTestingOperatingSystems(scriptDTO.getTestingOperatingSystems()).withFree(scriptDTO.isFree())
.withRequiresPatch(scriptDTO.requiresPatch);
}
public Builder withScriptName(String name) {
this.scriptName = name;
return this;
}
public Builder withScript(String script) {
this.script = script;
return this;
}
public Builder withScriptSource(URI scriptSource) {
this.scriptSource = scriptSource;
return this;
}
public Builder withCompatibleOperatingSystems(List<OperatingSystem> compatibleOperatingSystems) {
this.compatibleOperatingSystems = compatibleOperatingSystems;
return this;
}
public Builder withTestingOperatingSystems(List<OperatingSystem> testingOperatingSystems) {
this.testingOperatingSystems = testingOperatingSystems;
return this;
}
public Builder withFree(Boolean free) {
this.free = free;
return this;
}
public Builder withRequiresPatch(Boolean requiresPatch) {
this.requiresPatch = requiresPatch;
return this;
}
public ScriptDTO build() {
return new ScriptDTO(this);
}
}
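// Illustrative use of the builder (a sketch, not part of the original file; the
// literal values below are assumptions for demonstration only):
//
//     ScriptDTO script = new ScriptDTO.Builder()
//             .withScriptName("Example installer")
//             .withScriptSource(URI.create("https://example.org/script.js"))
//             .withFree(true)
//             .withRequiresPatch(false)
//             .withScript("/* installer body */")
//             .build();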
}
| 1 | 11,455 | Can we give this field a more descriptive name? When I use a variable named `icon` I normally expect an `Image` object. | PhoenicisOrg-phoenicis | java |
@@ -333,6 +333,13 @@ var opts struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query" required:"true"`
} `positional-args:"true"`
} `command:"roots" description:"Show build labels with no dependents in the given list, from the list."`
+ Filter struct {
+ IncludeLabels []string `long:"in" description:"Include any targets with matching labels"`
+ ExcludeLabels []string `long:"ex" description:"Exclude any targets with matching labels"`
+ Args struct {
+ Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to filter"`
+ } `positional-args:"true"`
+ } `command:"filter" description:"Filter the given set of targets according to some rules"`
} `command:"query" description:"Queries information about the build graph"`
}
| 1 | package main
import (
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"path"
"runtime"
"runtime/pprof"
"strings"
"syscall"
"time"
"github.com/jessevdk/go-flags"
"gopkg.in/op/go-logging.v1"
"build"
"cache"
"clean"
"cli"
"core"
"export"
"follow"
"fs"
"gc"
"hashes"
"help"
"metrics"
"output"
"parse"
"query"
"run"
"sync"
"test"
"tool"
"update"
"utils"
"watch"
)
var log = logging.MustGetLogger("plz")
var config *core.Configuration
var opts struct {
Usage string `usage:"Please is a high-performance multi-language build system.\n\nIt uses BUILD files to describe what to build and how to build it.\nSee https://please.build for more information about how it works and what Please can do for you."`
BuildFlags struct {
Config string `short:"c" long:"config" description:"Build config to use. Defaults to opt."`
Arch cli.Arch `short:"a" long:"arch" description:"Architecture to compile for."`
RepoRoot cli.Filepath `short:"r" long:"repo_root" description:"Root of repository to build."`
KeepGoing bool `short:"k" long:"keep_going" description:"Don't stop on first failed target."`
NumThreads int `short:"n" long:"num_threads" description:"Number of concurrent build operations. Default is number of CPUs + 2."`
Include []string `short:"i" long:"include" description:"Label of targets to include in automatic detection."`
Exclude []string `short:"e" long:"exclude" description:"Label of targets to exclude from automatic detection."`
Option ConfigOverrides `short:"o" long:"override" env:"PLZ_OVERRIDES" env-delim:";" description:"Options to override from .plzconfig (e.g. -o please.selfupdate:false)"`
Profile string `long:"profile" env:"PLZ_CONFIG_PROFILE" description:"Configuration profile to load; e.g. --profile=dev will load .plzconfig.dev if it exists."`
} `group:"Options controlling what to build & how to build it"`
OutputFlags struct {
Verbosity cli.Verbosity `short:"v" long:"verbosity" description:"Verbosity of output (error, warning, notice, info, debug)" default:"warning"`
LogFile cli.Filepath `long:"log_file" description:"File to echo full logging output to" default:"plz-out/log/build.log"`
LogFileLevel cli.Verbosity `long:"log_file_level" description:"Log level for file output" default:"debug"`
InteractiveOutput bool `long:"interactive_output" description:"Show interactive output in a terminal"`
PlainOutput bool `short:"p" long:"plain_output" description:"Don't show interactive output."`
Colour bool `long:"colour" description:"Forces coloured output from logging & other shell output."`
NoColour bool `long:"nocolour" description:"Forces colourless output from logging & other shell output."`
TraceFile cli.Filepath `long:"trace_file" description:"File to write Chrome tracing output into"`
ShowAllOutput bool `long:"show_all_output" description:"Show all output live from all commands. Implies --plain_output."`
CompletionScript bool `long:"completion_script" description:"Prints the bash / zsh completion script to stdout"`
} `group:"Options controlling output & logging"`
FeatureFlags struct {
NoUpdate bool `long:"noupdate" description:"Disable Please attempting to auto-update itself."`
NoCache bool `long:"nocache" description:"Disable caches (NB. not incrementality)"`
NoHashVerification bool `long:"nohash_verification" description:"Hash verification errors are nonfatal."`
NoLock bool `long:"nolock" description:"Don't attempt to lock the repo exclusively. Use with care."`
KeepWorkdirs bool `long:"keep_workdirs" description:"Don't clean directories in plz-out/tmp after successfully building targets."`
} `group:"Options that enable / disable certain features"`
HelpFlags struct {
Help bool `short:"h" long:"help" description:"Show this help message"`
Version bool `long:"version" description:"Print the version of Please"`
} `group:"Help Options"`
Profile string `long:"profile_file" hidden:"true" description:"Write profiling output to this file"`
MemProfile string `long:"mem_profile_file" hidden:"true" description:"Write a memory profile to this file"`
ProfilePort int `long:"profile_port" hidden:"true" description:"Serve profiling info on this port."`
ParsePackageOnly bool `description:"Parses a single package only. All that's necessary for some commands." no-flag:"true"`
Complete string `long:"complete" hidden:"true" env:"PLZ_COMPLETE" description:"Provide completion options for this build target."`
VisibilityParse bool `description:"Parse all targets that the original targets are visible to. Used for some query steps." no-flag:"true"`
Build struct {
Prepare bool `long:"prepare" description:"Prepare build directory for these targets but don't build them."`
Shell bool `long:"shell" description:"Like --prepare, but opens a shell in the build directory with the appropriate environment variables."`
ShowStatus bool `long:"show_status" hidden:"true" description:"Show status of each target in output after build"`
Args struct { // Inner nesting is necessary to make positional-args work :(
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to build"`
} `positional-args:"true" required:"true"`
} `command:"build" description:"Builds one or more targets"`
Rebuild struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" required:"true" description:"Targets to rebuild"`
} `positional-args:"true" required:"true"`
} `command:"rebuild" description:"Forces a rebuild of one or more targets"`
Hash struct {
Detailed bool `long:"detailed" description:"Produces a detailed breakdown of the hash"`
Update bool `short:"u" long:"update" description:"Rewrites the hashes in the BUILD file to the new values"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to build"`
} `positional-args:"true" required:"true"`
} `command:"hash" description:"Calculates hash for one or more targets"`
Test struct {
FailingTestsOk bool `long:"failing_tests_ok" hidden:"true" description:"Exit with status 0 even if tests fail (nonzero only if catastrophe happens)"`
NumRuns int `long:"num_runs" short:"n" default:"1" description:"Number of times to run each test target."`
TestResultsFile cli.Filepath `long:"test_results_file" default:"plz-out/log/test_results.xml" description:"File to write combined test results to."`
SurefireDir cli.Filepath `long:"surefire_dir" default:"plz-out/surefire-reports" description:"Directory to copy XML test results to."`
ShowOutput bool `short:"s" long:"show_output" description:"Always show output of tests, even on success."`
Debug bool `short:"d" long:"debug" description:"Allows starting an interactive debugger on test failure. Does not work with all test types (currently only python/pytest, C and C++). Implies -c dbg unless otherwise set."`
Failed bool `short:"f" long:"failed" description:"Runs just the test cases that failed from the immediately previous run."`
Detailed bool `long:"detailed" description:"Prints more detailed output after tests."`
Shell bool `long:"shell" description:"Opens a shell in the test directory with the appropriate environment variables."`
// Slightly awkward since we can specify a single test with arguments or multiple test targets.
Args struct {
Target core.BuildLabel `positional-arg-name:"target" description:"Target to test"`
Args []string `positional-arg-name:"arguments" description:"Arguments or test selectors"`
} `positional-args:"true"`
} `command:"test" description:"Builds and tests one or more targets"`
Cover struct {
FailingTestsOk bool `long:"failing_tests_ok" hidden:"true" description:"Exit with status 0 even if tests fail (nonzero only if catastrophe happens)"`
NoCoverageReport bool `long:"nocoverage_report" description:"Suppress the per-file coverage report displayed in the shell"`
LineCoverageReport bool `short:"l" long:"line_coverage_report" description:"Show a line-by-line coverage report for all affected files."`
NumRuns int `short:"n" long:"num_runs" default:"1" description:"Number of times to run each test target."`
IncludeAllFiles bool `short:"a" long:"include_all_files" description:"Include all dependent files in coverage (default is just those from relevant packages)"`
IncludeFile []string `long:"include_file" description:"Filenames to filter coverage display to"`
TestResultsFile cli.Filepath `long:"test_results_file" default:"plz-out/log/test_results.xml" description:"File to write combined test results to."`
SurefireDir cli.Filepath `long:"surefire_dir" default:"plz-out/surefire-reports" description:"Directory to copy XML test results to."`
CoverageResultsFile cli.Filepath `long:"coverage_results_file" default:"plz-out/log/coverage.json" description:"File to write combined coverage results to."`
CoverageXMLReport cli.Filepath `long:"coverage_xml_report" default:"plz-out/log/coverage.xml" description:"XML File to write combined coverage results to."`
ShowOutput bool `short:"s" long:"show_output" description:"Always show output of tests, even on success."`
Debug bool `short:"d" long:"debug" description:"Allows starting an interactive debugger on test failure. Does not work with all test types (currently only python/pytest, C and C++). Implies -c dbg unless otherwise set."`
Failed bool `short:"f" long:"failed" description:"Runs just the test cases that failed from the immediately previous run."`
Detailed bool `long:"detailed" description:"Prints more detailed output after tests."`
Shell bool `long:"shell" description:"Opens a shell in the test directory with the appropriate environment variables."`
Args struct {
Target core.BuildLabel `positional-arg-name:"target" description:"Target to test" group:"one test"`
Args []string `positional-arg-name:"arguments" description:"Arguments or test selectors" group:"one test"`
} `positional-args:"true"`
} `command:"cover" description:"Builds and tests one or more targets, and calculates coverage."`
Run struct {
Env bool `long:"env" description:"Overrides environment variables (e.g. PATH) in the new process."`
Parallel struct {
NumTasks int `short:"n" long:"num_tasks" default:"10" description:"Maximum number of subtasks to run in parallel"`
Quiet bool `short:"q" long:"quiet" description:"Suppress output from successful subprocesses."`
PositionalArgs struct {
Targets []core.BuildLabel `positional-arg-name:"target" description:"Targets to run"`
} `positional-args:"true" required:"true"`
Args []string `short:"a" long:"arg" description:"Arguments to pass to the called processes."`
} `command:"parallel" description:"Runs a sequence of targets in parallel"`
Sequential struct {
Quiet bool `short:"q" long:"quiet" description:"Suppress output from successful subprocesses."`
PositionalArgs struct {
Targets []core.BuildLabel `positional-arg-name:"target" description:"Targets to run"`
} `positional-args:"true" required:"true"`
Args []string `short:"a" long:"arg" description:"Arguments to pass to the called processes."`
} `command:"sequential" description:"Runs a sequence of targets sequentially."`
Args struct {
Target core.BuildLabel `positional-arg-name:"target" required:"true" description:"Target to run"`
Args []string `positional-arg-name:"arguments" description:"Arguments to pass to target when running (to pass flags to the target, put -- before them)"`
} `positional-args:"true"`
} `command:"run" subcommands-optional:"true" description:"Builds and runs a single target"`
Clean struct {
NoBackground bool `long:"nobackground" short:"f" description:"Don't fork & detach until clean is finished."`
Remote bool `long:"remote" description:"Clean entire remote cache when no targets are given (default is local only)"`
Args struct { // Inner nesting is necessary to make positional-args work :(
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to clean (default is to clean everything)"`
} `positional-args:"true"`
} `command:"clean" description:"Cleans build artifacts" subcommands-optional:"true"`
Watch struct {
Run bool `short:"r" long:"run" description:"Runs the specified targets when they change (default is to build or test as appropriate)."`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" required:"true" description:"Targets to watch the sources of for changes"`
} `positional-args:"true" required:"true"`
} `command:"watch" description:"Watches sources of targets for changes and rebuilds them"`
Update struct {
Force bool `long:"force" description:"Forces a re-download of the new version."`
NoVerify bool `long:"noverify" description:"Skips signature verification of downloaded version"`
Latest bool `long:"latest" description:"Update to latest available version (overrides config)."`
Version cli.Version `long:"version" description:"Updates to a particular version (overrides config)."`
} `command:"update" description:"Checks for an update and updates if needed."`
Op struct {
} `command:"op" description:"Re-runs previous command."`
Init struct {
Dir cli.Filepath `long:"dir" description:"Directory to create config in" default:"."`
BazelCompatibility bool `long:"bazel_compat" description:"Initialises config for Bazel compatibility mode."`
} `command:"init" description:"Initialises a .plzconfig file in the current directory"`
Gc struct {
Conservative bool `short:"c" long:"conservative" description:"Runs a more conservative / safer GC."`
TargetsOnly bool `short:"t" long:"targets_only" description:"Only print the targets to delete"`
SrcsOnly bool `short:"s" long:"srcs_only" description:"Only print the source files to delete"`
NoPrompt bool `short:"y" long:"no_prompt" description:"Remove targets without prompting"`
DryRun bool `short:"n" long:"dry_run" description:"Don't remove any targets or files, just print what would be done"`
Git bool `short:"g" long:"git" description:"Use 'git rm' to remove unused files instead of just 'rm'."`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to limit gc to."`
} `positional-args:"true"`
} `command:"gc" description:"Analyzes the repo to determine unneeded targets."`
Export struct {
Output string `short:"o" long:"output" required:"true" description:"Directory to export into"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to export."`
} `positional-args:"true"`
Outputs struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to export."`
} `positional-args:"true"`
} `command:"outputs" description:"Exports outputs of a set of targets"`
} `command:"export" subcommands-optional:"true" description:"Exports a set of targets and files from the repo."`
Follow struct {
Retries int `long:"retries" description:"Number of times to retry the connection"`
Delay cli.Duration `long:"delay" default:"1s" description:"Delay between timeouts"`
Args struct {
URL cli.URL `positional-arg-name:"URL" required:"yes" description:"URL of remote server to connect to, e.g. 10.23.0.5:7777"`
} `positional-args:"true" required:"yes"`
} `command:"follow" description:"Connects to a remote Please instance to stream build events from."`
Help struct {
Args struct {
Topic help.Topic `positional-arg-name:"topic" description:"Topic to display help on"`
} `positional-args:"true"`
} `command:"help" alias:"halp" description:"Displays help about various parts of plz or its build rules"`
Tool struct {
Args struct {
Tool tool.Tool `positional-arg-name:"tool" description:"Tool to invoke (jarcat, lint, etc)"`
Args []string `positional-arg-name:"arguments" description:"Arguments to pass to the tool"`
} `positional-args:"true"`
} `command:"tool" hidden:"true" description:"Invoke one of Please's sub-tools"`
Query struct {
Deps struct {
Unique bool `long:"unique" short:"u" description:"Only output each dependency once"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"deps" description:"Queries the dependencies of a target."`
ReverseDeps struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"reverseDeps" alias:"revdeps" description:"Queries all the reverse dependencies of a target."`
SomePath struct {
Args struct {
Target1 core.BuildLabel `positional-arg-name:"target1" description:"First build target" required:"true"`
Target2 core.BuildLabel `positional-arg-name:"target2" description:"Second build target" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"somepath" description:"Queries for a path between two targets"`
AllTargets struct {
Hidden bool `long:"hidden" description:"Show hidden targets as well"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query"`
} `positional-args:"true"`
} `command:"alltargets" description:"Lists all targets in the graph"`
Print struct {
Fields []string `short:"f" long:"field" description:"Individual fields to print of the target"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to print" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"print" description:"Prints a representation of a single target"`
Completions struct {
Cmd string `long:"cmd" description:"Command to complete for" default:"build"`
Args struct {
Fragments cli.StdinStrings `positional-arg-name:"fragment" description:"Initial fragment to attempt to complete"`
} `positional-args:"true"`
} `command:"completions" subcommands-optional:"true" description:"Prints possible completions for a string."`
AffectedTargets struct {
Tests bool `long:"tests" description:"Shows only affected tests, no other targets."`
Intransitive bool `long:"intransitive" description:"Shows only immediately affected targets, not transitive dependencies."`
Args struct {
Files cli.StdinStrings `positional-arg-name:"files" required:"true" description:"Files to query affected tests for"`
} `positional-args:"true"`
} `command:"affectedtargets" description:"Prints any targets affected by a set of files."`
Input struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to display inputs for" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"input" alias:"inputs" description:"Prints all transitive inputs of a target."`
Output struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to display outputs for" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"output" alias:"outputs" description:"Prints all outputs of a target."`
Graph struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to render graph for"`
} `positional-args:"true"`
} `command:"graph" description:"Prints a JSON representation of the build graph."`
WhatOutputs struct {
EchoFiles bool `long:"echo_files" description:"Echo the file for which the printed output is responsible."`
Args struct {
Files cli.StdinStrings `positional-arg-name:"files" required:"true" description:"Files to query targets responsible for"`
} `positional-args:"true"`
} `command:"whatoutputs" description:"Prints out target(s) responsible for outputting provided file(s)"`
Rules struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Additional targets to load rules from"`
} `positional-args:"true"`
} `command:"rules" description:"Prints built-in rules to stdout as JSON"`
Changes struct {
Since string `short:"s" long:"since" default:"origin/master" description:"Revision to compare against"`
CheckoutCommand string `long:"checkout_command" default:"git checkout %s" description:"Command to run to check out the before/after revisions."`
CurrentCommand string `long:"current_revision_command" default:"git rev-parse --abbrev-ref HEAD" description:"Command to run to get the current revision (which will be checked out again at the end)"`
Args struct {
Files cli.StdinStrings `positional-arg-name:"files" description:"Files to consider changed"`
} `positional-args:"true"`
} `command:"changes" description:"Calculates the difference between two different states of the build graph"`
Roots struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query" required:"true"`
} `positional-args:"true"`
} `command:"roots" description:"Show build labels with no dependents in the given list, from the list."`
} `command:"query" description:"Queries information about the build graph"`
}
// Definitions of what we do for each command.
// Functions are called after args are parsed and return true for success.
var buildFunctions = map[string]func() bool{
"build": func() bool {
success, _ := runBuild(opts.Build.Args.Targets, true, false)
return success
},
"rebuild": func() bool {
// It would be more pure to require --nocache for this, but in basically any context that
// you use 'plz rebuild', you don't want the cache coming in and mucking things up.
// 'plz clean' followed by 'plz build' would still work in those cases, anyway.
opts.FeatureFlags.NoCache = true
success, _ := runBuild(opts.Rebuild.Args.Targets, true, false)
return success
},
"hash": func() bool {
success, state := runBuild(opts.Hash.Args.Targets, true, false)
if opts.Hash.Detailed {
for _, target := range state.ExpandOriginalTargets() {
build.PrintHashes(state, state.Graph.TargetOrDie(target))
}
}
if opts.Hash.Update {
hashes.RewriteHashes(state, state.ExpandOriginalTargets())
}
return success
},
"test": func() bool {
targets := testTargets(opts.Test.Args.Target, opts.Test.Args.Args, opts.Test.Failed, opts.Test.TestResultsFile)
success, _ := doTest(targets, opts.Test.SurefireDir, opts.Test.TestResultsFile)
return success || opts.Test.FailingTestsOk
},
"cover": func() bool {
if opts.BuildFlags.Config != "" {
log.Warning("Build config overridden; coverage may not be available for some languages")
} else {
opts.BuildFlags.Config = "cover"
}
targets := testTargets(opts.Cover.Args.Target, opts.Cover.Args.Args, opts.Cover.Failed, opts.Cover.TestResultsFile)
os.RemoveAll(string(opts.Cover.CoverageResultsFile))
success, state := doTest(targets, opts.Cover.SurefireDir, opts.Cover.TestResultsFile)
test.AddOriginalTargetsToCoverage(state, opts.Cover.IncludeAllFiles)
test.RemoveFilesFromCoverage(state.Coverage, state.Config.Cover.ExcludeExtension)
test.WriteCoverageToFileOrDie(state.Coverage, string(opts.Cover.CoverageResultsFile))
test.WriteXMLCoverageToFileOrDie(targets, state.Coverage, string(opts.Cover.CoverageXMLReport))
if opts.Cover.LineCoverageReport {
output.PrintLineCoverageReport(state, opts.Cover.IncludeFile)
} else if !opts.Cover.NoCoverageReport {
output.PrintCoverage(state, opts.Cover.IncludeFile)
}
return success || opts.Cover.FailingTestsOk
},
"run": func() bool {
if success, state := runBuild([]core.BuildLabel{opts.Run.Args.Target}, true, false); success {
run.Run(state, opts.Run.Args.Target, opts.Run.Args.Args, opts.Run.Env)
}
return false // We should never return from run.Run so if we make it here something's wrong.
},
"parallel": func() bool {
if success, state := runBuild(opts.Run.Parallel.PositionalArgs.Targets, true, false); success {
os.Exit(run.Parallel(state, state.ExpandOriginalTargets(), opts.Run.Parallel.Args, opts.Run.Parallel.NumTasks, opts.Run.Parallel.Quiet, opts.Run.Env))
}
return false
},
"sequential": func() bool {
if success, state := runBuild(opts.Run.Sequential.PositionalArgs.Targets, true, false); success {
os.Exit(run.Sequential(state, state.ExpandOriginalTargets(), opts.Run.Sequential.Args, opts.Run.Sequential.Quiet, opts.Run.Env))
}
return false
},
"clean": func() bool {
config.Cache.DirClean = false
if len(opts.Clean.Args.Targets) == 0 {
if len(opts.BuildFlags.Include) == 0 && len(opts.BuildFlags.Exclude) == 0 {
// Clean everything, doesn't require parsing at all.
if !opts.Clean.Remote {
// Don't construct the remote caches if they didn't pass --remote.
config.Cache.RPCURL = ""
config.Cache.HTTPURL = ""
}
clean.Clean(config, newCache(config), !opts.Clean.NoBackground)
return true
}
opts.Clean.Args.Targets = core.WholeGraph
}
if success, state := runBuild(opts.Clean.Args.Targets, false, false); success {
clean.Targets(state, state.ExpandOriginalTargets(), !opts.FeatureFlags.NoCache)
return true
}
return false
},
"watch": func() bool {
success, state := runBuild(opts.Watch.Args.Targets, false, false)
if success {
watch.Watch(state, state.ExpandOriginalTargets(), opts.Watch.Run)
}
return success
},
"update": func() bool {
fmt.Printf("Up to date (version %s).\n", core.PleaseVersion)
return true // We'd have died already if something was wrong.
},
"op": func() bool {
cmd := core.ReadLastOperationOrDie()
log.Notice("OP PLZ: %s", strings.Join(cmd, " "))
// Annoyingly we don't seem to have any access to execvp() which would be rather useful here...
executable, err := os.Executable()
if err == nil {
err = syscall.Exec(executable, append([]string{executable}, cmd...), os.Environ())
}
log.Fatalf("SORRY OP: %s", err) // On success Exec never returns.
return false
},
"gc": func() bool {
success, state := runBuild(core.WholeGraph, false, false)
if success {
state.OriginalTargets = state.Config.Gc.Keep
gc.GarbageCollect(state, opts.Gc.Args.Targets, state.ExpandOriginalTargets(), state.Config.Gc.Keep, state.Config.Gc.KeepLabel,
opts.Gc.Conservative, opts.Gc.TargetsOnly, opts.Gc.SrcsOnly, opts.Gc.NoPrompt, opts.Gc.DryRun, opts.Gc.Git)
}
return success
},
"init": func() bool {
utils.InitConfig(string(opts.Init.Dir), opts.Init.BazelCompatibility)
return true
},
"export": func() bool {
success, state := runBuild(opts.Export.Args.Targets, false, false)
if success {
export.ToDir(state, opts.Export.Output, state.ExpandOriginalTargets())
}
return success
},
"follow": func() bool {
// This is only temporary, ConnectClient will alter it to match the server.
state := core.NewBuildState(1, nil, int(opts.OutputFlags.Verbosity), config)
return follow.ConnectClient(state, opts.Follow.Args.URL.String(), opts.Follow.Retries, time.Duration(opts.Follow.Delay))
},
"outputs": func() bool {
success, state := runBuild(opts.Export.Outputs.Args.Targets, true, false)
if success {
export.Outputs(state, opts.Export.Output, state.ExpandOriginalTargets())
}
return success
},
"help": func() bool {
return help.Help(string(opts.Help.Args.Topic))
},
"tool": func() bool {
tool.Run(config, opts.Tool.Args.Tool, opts.Tool.Args.Args)
return false // If the function returns (which it shouldn't), something went wrong.
},
"deps": func() bool {
return runQuery(true, opts.Query.Deps.Args.Targets, func(state *core.BuildState) {
query.Deps(state, state.ExpandOriginalTargets(), opts.Query.Deps.Unique)
})
},
"reverseDeps": func() bool {
opts.VisibilityParse = true
return runQuery(false, opts.Query.ReverseDeps.Args.Targets, func(state *core.BuildState) {
query.ReverseDeps(state, state.ExpandOriginalTargets())
})
},
"somepath": func() bool {
return runQuery(true,
[]core.BuildLabel{opts.Query.SomePath.Args.Target1, opts.Query.SomePath.Args.Target2},
func(state *core.BuildState) {
query.SomePath(state.Graph, opts.Query.SomePath.Args.Target1, opts.Query.SomePath.Args.Target2)
},
)
},
"alltargets": func() bool {
return runQuery(true, opts.Query.AllTargets.Args.Targets, func(state *core.BuildState) {
query.AllTargets(state.Graph, state.ExpandOriginalTargets(), opts.Query.AllTargets.Hidden)
})
},
"print": func() bool {
return runQuery(false, opts.Query.Print.Args.Targets, func(state *core.BuildState) {
query.Print(state.Graph, state.ExpandOriginalTargets(), opts.Query.Print.Fields)
})
},
"affectedtargets": func() bool {
files := opts.Query.AffectedTargets.Args.Files
targets := core.WholeGraph
if opts.Query.AffectedTargets.Intransitive {
state := core.NewBuildState(1, nil, 1, config)
targets = core.FindOwningPackages(state, files)
}
return runQuery(true, targets, func(state *core.BuildState) {
query.AffectedTargets(state, files.Get(), opts.BuildFlags.Include, opts.BuildFlags.Exclude, opts.Query.AffectedTargets.Tests, !opts.Query.AffectedTargets.Intransitive)
})
},
"input": func() bool {
return runQuery(true, opts.Query.Input.Args.Targets, func(state *core.BuildState) {
query.TargetInputs(state.Graph, state.ExpandOriginalTargets())
})
},
"output": func() bool {
return runQuery(true, opts.Query.Output.Args.Targets, func(state *core.BuildState) {
query.TargetOutputs(state.Graph, state.ExpandOriginalTargets())
})
},
"completions": func() bool {
// Somewhat fiddly because the inputs are not necessarily well-formed at this point.
opts.ParsePackageOnly = true
fragments := opts.Query.Completions.Args.Fragments.Get()
if opts.Query.Completions.Cmd == "help" {
// Special-case completing help topics rather than build targets.
if len(fragments) == 0 {
help.Topics("")
} else {
help.Topics(fragments[0])
}
return true
}
if len(fragments) == 0 || len(fragments) == 1 && strings.Trim(fragments[0], "/ ") == "" {
os.Exit(0) // Don't do anything for empty completion, it's normally too slow.
}
labels, parseLabels, hidden := query.CompletionLabels(config, fragments, core.RepoRoot)
if success, state := Please(parseLabels, config, false, false, false); success {
binary := opts.Query.Completions.Cmd == "run"
test := opts.Query.Completions.Cmd == "test" || opts.Query.Completions.Cmd == "cover"
query.Completions(state.Graph, labels, binary, test, hidden)
return true
}
return false
},
"graph": func() bool {
return runQuery(true, opts.Query.Graph.Args.Targets, func(state *core.BuildState) {
if len(opts.Query.Graph.Args.Targets) == 0 {
state.OriginalTargets = opts.Query.Graph.Args.Targets // It special-cases doing the full graph.
}
query.Graph(state, state.ExpandOriginalTargets())
})
},
"whatoutputs": func() bool {
return runQuery(true, core.WholeGraph, func(state *core.BuildState) {
query.WhatOutputs(state.Graph, opts.Query.WhatOutputs.Args.Files.Get(), opts.Query.WhatOutputs.EchoFiles)
})
},
"rules": func() bool {
targets := opts.Query.Rules.Args.Targets
success, state := Please(opts.Query.Rules.Args.Targets, config, true, true, false)
if !success {
return false
}
targets = state.ExpandOriginalTargets()
parse.PrintRuleArgs(state, targets)
return true
},
"changes": func() bool {
// Temporarily set this flag on to avoid fatal errors from the first parse.
keepGoing := opts.BuildFlags.KeepGoing
opts.BuildFlags.KeepGoing = true
original := query.MustGetRevision(opts.Query.Changes.CurrentCommand)
files := opts.Query.Changes.Args.Files.Get()
query.MustCheckout(opts.Query.Changes.Since, opts.Query.Changes.CheckoutCommand)
_, before := runBuild(core.WholeGraph, false, false)
opts.BuildFlags.KeepGoing = keepGoing
// N.B. Ignore failure here; if we can't parse the graph before then it will suffice to
// assume that anything we don't know about has changed.
query.MustCheckout(original, opts.Query.Changes.CheckoutCommand)
success, after := runBuild(core.WholeGraph, false, false)
if !success {
return false
}
for _, target := range query.DiffGraphs(before, after, files) {
fmt.Printf("%s\n", target)
}
return true
},
"roots": func() bool {
return runQuery(true, opts.Query.Roots.Args.Targets, func(state *core.BuildState) {
query.Roots(state.Graph, opts.Query.Roots.Args.Targets)
})
},
}
// ConfigOverrides are used to implement completion on the -o flag.
type ConfigOverrides map[string]string
// Complete implements the flags.Completer interface.
func (overrides ConfigOverrides) Complete(match string) []flags.Completion {
return core.DefaultConfiguration().Completions(match)
}
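// Illustrative invocations of the -o/--override flag handled above (a sketch; the
// example option comes from the flag's own description and is not exhaustive):
//
//     plz build -o please.selfupdate:false //src/...
//     PLZ_OVERRIDES="please.selfupdate:false" plz build //src/...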
// Used above as a convenience wrapper for query functions.
func runQuery(needFullParse bool, labels []core.BuildLabel, onSuccess func(state *core.BuildState)) bool {
if !needFullParse {
opts.ParsePackageOnly = true
}
if len(labels) == 0 {
labels = core.WholeGraph
}
if success, state := runBuild(labels, false, false); success {
onSuccess(state)
return true
}
return false
}
func please(tid int, state *core.BuildState, parsePackageOnly bool, include, exclude []string) {
for {
label, dependor, t := state.NextTask()
switch t {
case core.Stop, core.Kill:
return
case core.Parse, core.SubincludeParse:
t := t
label := label
dependor := dependor
state.ParsePool <- func() {
parse.Parse(tid, state, label, dependor, parsePackageOnly, include, exclude, t == core.SubincludeParse)
if opts.VisibilityParse && state.IsOriginalTarget(label) {
parseForVisibleTargets(state, label)
}
state.TaskDone(false)
}
case core.Build, core.SubincludeBuild:
build.Build(tid, state, label)
state.TaskDone(true)
case core.Test:
test.Test(tid, state, label)
state.TaskDone(true)
}
}
}
func doTest(targets []core.BuildLabel, surefireDir cli.Filepath, resultsFile cli.Filepath) (bool, *core.BuildState) {
os.RemoveAll(string(surefireDir))
os.RemoveAll(string(resultsFile))
os.MkdirAll(string(surefireDir), core.DirPermissions)
success, state := runBuild(targets, true, true)
test.CopySurefireXmlFilesToDir(state.Graph, string(surefireDir))
test.WriteResultsToFileOrDie(state.Graph, string(resultsFile))
return success, state
}
// parseForVisibleTargets adds parse tasks for any targets that the given label is visible to.
func parseForVisibleTargets(state *core.BuildState, label core.BuildLabel) {
if target := state.Graph.Target(label); target != nil {
for _, vis := range target.Visibility {
findOriginalTask(state, vis, false)
}
}
}
// prettyOutput determines from input flags whether we should show 'pretty' output (i.e. interactive).
func prettyOutput(interactiveOutput bool, plainOutput bool, verbosity cli.Verbosity) bool {
if interactiveOutput && plainOutput {
log.Fatal("Can't pass both --interactive_output and --plain_output")
}
return interactiveOutput || (!plainOutput && cli.StdErrIsATerminal && verbosity < 4)
}
// newCache constructs a new cache based on the current config / flags.
func newCache(config *core.Configuration) core.Cache {
if opts.FeatureFlags.NoCache {
return nil
}
return cache.NewCache(config)
}
// Please starts & runs the main build process through to its completion.
func Please(targets []core.BuildLabel, config *core.Configuration, prettyOutput, shouldBuild, shouldTest bool) (bool, *core.BuildState) {
if opts.BuildFlags.NumThreads > 0 {
config.Please.NumThreads = opts.BuildFlags.NumThreads
} else if config.Please.NumThreads <= 0 {
config.Please.NumThreads = runtime.NumCPU() + 2
}
debugTests := opts.Test.Debug || opts.Cover.Debug
if opts.BuildFlags.Config != "" {
config.Build.Config = opts.BuildFlags.Config
} else if debugTests {
config.Build.Config = "dbg"
}
c := newCache(config)
state := core.NewBuildState(config.Please.NumThreads, c, int(opts.OutputFlags.Verbosity), config)
state.VerifyHashes = !opts.FeatureFlags.NoHashVerification
state.NumTestRuns = utils.Max(opts.Test.NumRuns, opts.Cover.NumRuns) // Only one of these can be passed
state.TestArgs = append(opts.Test.Args.Args, opts.Cover.Args.Args...) // Similarly here.
state.NeedCoverage = !opts.Cover.Args.Target.IsEmpty()
state.NeedBuild = shouldBuild
state.NeedTests = shouldTest
state.NeedHashesOnly = len(opts.Hash.Args.Targets) > 0
state.PrepareOnly = opts.Build.Prepare || opts.Build.Shell
state.PrepareShell = opts.Build.Shell || opts.Test.Shell || opts.Cover.Shell
state.CleanWorkdirs = !opts.FeatureFlags.KeepWorkdirs
state.ForceRebuild = len(opts.Rebuild.Args.Targets) > 0
state.ShowTestOutput = opts.Test.ShowOutput || opts.Cover.ShowOutput
state.DebugTests = debugTests
state.ShowAllOutput = opts.OutputFlags.ShowAllOutput
state.SetIncludeAndExclude(opts.BuildFlags.Include, opts.BuildFlags.Exclude)
parse.InitParser(state)
build.Init(state)
if config.Events.Port != 0 && shouldBuild {
shutdown := follow.InitialiseServer(state, config.Events.Port)
defer shutdown()
}
if config.Events.Port != 0 || config.Display.SystemStats {
go follow.UpdateResources(state)
}
metrics.InitFromConfig(config)
// Acquire the lock before we start building
if (shouldBuild || shouldTest) && !opts.FeatureFlags.NoLock {
core.AcquireRepoLock()
defer core.ReleaseRepoLock()
}
if state.DebugTests && len(targets) != 1 {
log.Fatalf("-d/--debug flag can only be used with a single test target")
}
detailedTests := shouldTest && (opts.Test.Detailed || opts.Cover.Detailed || (len(targets) == 1 && !targets[0].IsAllTargets() && !targets[0].IsAllSubpackages() && targets[0] != core.BuildLabelStdin))
// Start looking for the initial targets to kick the build off
go findOriginalTasks(state, targets)
// Start up all the build workers
var wg sync.WaitGroup
wg.Add(config.Please.NumThreads)
for i := 0; i < config.Please.NumThreads; i++ {
go func(tid int) {
please(tid, state, opts.ParsePackageOnly, opts.BuildFlags.Include, opts.BuildFlags.Exclude)
wg.Done()
}(i)
}
// Wait until they've all exited, which they'll do once they have no tasks left.
go func() {
wg.Wait()
close(state.Results) // This will signal MonitorState (below) to stop.
}()
// Draw stuff to the screen while there are still results coming through.
shouldRun := !opts.Run.Args.Target.IsEmpty()
success := output.MonitorState(state, config.Please.NumThreads, !prettyOutput, opts.BuildFlags.KeepGoing, shouldBuild, shouldTest, shouldRun, opts.Build.ShowStatus, detailedTests, string(opts.OutputFlags.TraceFile))
metrics.Stop()
build.StopWorkers()
if c != nil {
c.Shutdown()
}
return success, state
}
// findOriginalTasks finds the original parse tasks for the original set of targets.
func findOriginalTasks(state *core.BuildState, targets []core.BuildLabel) {
if state.Config.Bazel.Compatibility && fs.FileExists("WORKSPACE") {
// We have to parse the WORKSPACE file before anything else to understand subrepos.
// This is a bit crap really since it inhibits parallelism for the first step.
parse.Parse(0, state, core.NewBuildLabel("workspace", "all"), core.OriginalTarget, false, state.Include, state.Exclude, false)
}
if opts.BuildFlags.Arch.Arch != "" {
// Set up a new subrepo for this architecture.
state.Graph.AddSubrepo(core.SubrepoForArch(state, opts.BuildFlags.Arch))
}
for _, target := range targets {
if target == core.BuildLabelStdin {
for label := range cli.ReadStdin() {
findOriginalTask(state, core.ParseBuildLabels([]string{label})[0], true)
}
} else {
findOriginalTask(state, target, true)
}
}
state.TaskDone(true) // initial target adding counts as one.
}
func findOriginalTask(state *core.BuildState, target core.BuildLabel, addToList bool) {
if opts.BuildFlags.Arch.Arch != "" {
target.Subrepo = opts.BuildFlags.Arch.String()
}
if target.IsAllSubpackages() {
for pkg := range utils.FindAllSubpackages(state.Config, target.PackageName, "") {
state.AddOriginalTarget(core.NewBuildLabel(pkg, "all"), addToList)
}
} else {
state.AddOriginalTarget(target, addToList)
}
}
// testTargets handles test targets which can be given in two formats; a list of targets or a single
// target with a list of trailing arguments.
// Alternatively they can be completely omitted in which case we test everything under the working dir.
// One can also pass a 'failed' flag which runs the failed tests from last time.
func testTargets(target core.BuildLabel, args []string, failed bool, resultsFile cli.Filepath) []core.BuildLabel {
if failed {
targets, args := test.LoadPreviousFailures(string(resultsFile))
// Have to reset these - it doesn't matter which gets which.
opts.Test.Args.Args = args
opts.Cover.Args.Args = nil
return targets
} else if target.Name == "" {
return core.InitialPackage()
} else if len(args) > 0 && core.LooksLikeABuildLabel(args[0]) {
opts.Cover.Args.Args = []string{}
opts.Test.Args.Args = []string{}
return append(core.ParseBuildLabels(args), target)
}
return []core.BuildLabel{target}
}
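// Illustrative command lines for the formats testTargets handles (a sketch; the
// target names are assumptions, the flags come from the Test options above):
//
//     plz test //pkg:a_test //pkg:b_test        // a list of targets
//     plz test //pkg:a_test TestSomething       // one target plus test selectors
//     plz test -f                               // re-run previously failed tests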
// readConfig sets various things up and reads the initial configuration.
func readConfig(forceUpdate bool) *core.Configuration {
if opts.FeatureFlags.NoHashVerification {
log.Warning("You've disabled hash verification; this is intended to help temporarily while modifying build targets. You shouldn't use this regularly.")
}
config, err := core.ReadDefaultConfigFiles(opts.BuildFlags.Profile)
if err != nil {
log.Fatalf("Error reading config file: %s", err)
} else if err := config.ApplyOverrides(opts.BuildFlags.Option); err != nil {
log.Fatalf("Can't override requested config setting: %s", err)
}
// Now apply any flags that override this
if opts.Update.Latest {
config.Please.Version.Unset()
} else if opts.Update.Version.IsSet {
config.Please.Version = opts.Update.Version
}
update.CheckAndUpdate(config, !opts.FeatureFlags.NoUpdate, forceUpdate, opts.Update.Force, !opts.Update.NoVerify)
return config
}
// Runs the actual build
// Which phases get run are controlled by shouldBuild and shouldTest.
func runBuild(targets []core.BuildLabel, shouldBuild, shouldTest bool) (bool, *core.BuildState) {
if len(targets) == 0 {
targets = core.InitialPackage()
}
pretty := prettyOutput(opts.OutputFlags.InteractiveOutput, opts.OutputFlags.PlainOutput, opts.OutputFlags.Verbosity)
return Please(targets, config, pretty, shouldBuild, shouldTest)
}
// readConfigAndSetRoot reads the .plzconfig files and moves to the repo root.
func readConfigAndSetRoot(forceUpdate bool) *core.Configuration {
if opts.BuildFlags.RepoRoot == "" {
log.Debug("Found repo root at %s", core.MustFindRepoRoot())
} else {
core.RepoRoot = string(opts.BuildFlags.RepoRoot)
}
// Please always runs from the repo root, so move there now.
if err := os.Chdir(core.RepoRoot); err != nil {
log.Fatalf("%s", err)
}
// Reset this now we're at the repo root.
if opts.OutputFlags.LogFile != "" {
if !path.IsAbs(string(opts.OutputFlags.LogFile)) {
opts.OutputFlags.LogFile = cli.Filepath(path.Join(core.RepoRoot, string(opts.OutputFlags.LogFile)))
}
cli.InitFileLogging(string(opts.OutputFlags.LogFile), opts.OutputFlags.LogFileLevel)
}
return readConfig(forceUpdate)
}
// handleCompletions handles shell completion. Typically it just prints to stdout but
// may do a little more if we think we need to handle aliases.
func handleCompletions(parser *flags.Parser, items []flags.Completion) {
cli.InitLogging(cli.MinVerbosity) // Ensure this is quiet
opts.FeatureFlags.NoUpdate = true // Ensure we don't try to update
if len(items) > 0 && strings.HasPrefix(items[0].Item, "//") {
// Don't muck around with the config if we're predicting build labels.
cli.PrintCompletions(items)
} else if config := readConfigAndSetRoot(false); config.AttachAliasFlags(parser) {
// Run again without this registered as a completion handler
parser.CompletionHandler = nil
parser.ParseArgs(os.Args[1:])
} else {
cli.PrintCompletions(items)
}
// Regardless of what happened, always exit with 0 at this point.
os.Exit(0)
}
func main() {
parser, extraArgs, flagsErr := cli.ParseFlags("Please", &opts, os.Args, flags.PassDoubleDash, handleCompletions)
// Note that we must leave flagsErr for later, because it may be affected by aliases.
if opts.HelpFlags.Version {
fmt.Printf("Please version %s\n", core.PleaseVersion)
os.Exit(0) // Ignore other flags if --version was passed.
} else if opts.HelpFlags.Help {
// Attempt to read config files to produce help for aliases.
cli.InitLogging(cli.MinVerbosity)
parser.WriteHelp(os.Stderr)
if core.FindRepoRoot() {
if config, err := core.ReadDefaultConfigFiles(""); err == nil {
config.PrintAliases(os.Stderr)
}
}
os.Exit(0)
}
if opts.OutputFlags.Colour {
output.SetColouredOutput(true)
} else if opts.OutputFlags.NoColour {
output.SetColouredOutput(false)
}
if opts.OutputFlags.ShowAllOutput {
opts.OutputFlags.PlainOutput = true
}
// Init logging, but don't do file output until we've chdir'd.
cli.InitLogging(opts.OutputFlags.Verbosity)
command := cli.ActiveCommand(parser.Command)
if opts.Complete != "" {
// Completion via PLZ_COMPLETE env var sidesteps other commands
opts.Query.Completions.Cmd = command
opts.Query.Completions.Args.Fragments = []string{opts.Complete}
command = "completions"
} else if command == "help" || command == "follow" || command == "init" {
// These commands don't use a config file, allowing them to be run outside a repo.
if flagsErr != nil { // This error otherwise doesn't get checked until later.
cli.ParseFlagsFromArgsOrDie("Please", core.PleaseVersion.String(), &opts, os.Args)
}
config = core.DefaultConfiguration()
if !buildFunctions[command]() {
os.Exit(1)
}
os.Exit(0)
} else if opts.OutputFlags.CompletionScript {
utils.PrintCompletionScript()
os.Exit(0)
}
// Read the config now
config = readConfigAndSetRoot(command == "update")
if parser.Command.Active != nil && parser.Command.Active.Name == "query" {
// Query commands don't need either of these set.
opts.OutputFlags.PlainOutput = true
config.Cache.DirClean = false
}
// Now we've read the config file, we may need to re-run the parser; the aliases in the config
// can affect how we parse otherwise illegal flag combinations.
if flagsErr != nil || len(extraArgs) > 0 {
args := config.UpdateArgsWithAliases(os.Args)
command = cli.ParseFlagsFromArgsOrDie("Please", core.PleaseVersion.String(), &opts, args)
}
if opts.ProfilePort != 0 {
go func() {
log.Warning("%s", http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", opts.ProfilePort), nil))
}()
}
if opts.Profile != "" {
f, err := os.Create(opts.Profile)
if err != nil {
log.Fatalf("Failed to open profile file: %s", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
log.Fatalf("could not start profiler: %s", err)
}
defer pprof.StopCPUProfile()
}
if opts.MemProfile != "" {
f, err := os.Create(opts.MemProfile)
if err != nil {
log.Fatalf("Failed to open memory profile file: %s", err)
}
defer f.Close()
defer pprof.WriteHeapProfile(f)
}
if !buildFunctions[command]() {
os.Exit(7) // Something distinctive, is sometimes useful to identify this externally.
}
}
| 1 | 8,307 | can't these just use the global `include` and `exclude` flags? | thought-machine-please | go |
@@ -28,7 +28,7 @@ def bbox2delta(proposals, gt, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]):
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
- return deltas
+ return deltas.cuda()
def delta2bbox(rois, | 1 | import mmcv
import numpy as np
import torch
def bbox2delta(proposals, gt, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]):
assert proposals.size() == gt.size()
proposals = proposals.float()
gt = gt.float()
px = (proposals[..., 0] + proposals[..., 2]) * 0.5
py = (proposals[..., 1] + proposals[..., 3]) * 0.5
pw = proposals[..., 2] - proposals[..., 0] + 1.0
ph = proposals[..., 3] - proposals[..., 1] + 1.0
gx = (gt[..., 0] + gt[..., 2]) * 0.5
gy = (gt[..., 1] + gt[..., 3]) * 0.5
gw = gt[..., 2] - gt[..., 0] + 1.0
gh = gt[..., 3] - gt[..., 1] + 1.0
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw / pw)
dh = torch.log(gh / ph)
deltas = torch.stack([dx, dy, dw, dh], dim=-1)
means = deltas.new_tensor(means).unsqueeze(0)
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
return deltas
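# Illustrative example (not part of the original module; the tensors are assumptions):
# a 10x10 proposal at the origin against a ground truth shifted by 5px yields deltas of
# half a box width/height and no size change.
#
#   proposals = torch.tensor([[0., 0., 9., 9.]])
#   gt = torch.tensor([[5., 5., 14., 14.]])
#   bbox2delta(proposals, gt)  # -> tensor([[0.5000, 0.5000, 0.0000, 0.0000]])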
def delta2bbox(rois,
deltas,
means=[0, 0, 0, 0],
stds=[1, 1, 1, 1],
max_shape=None,
wh_ratio_clip=16 / 1000):
means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)
stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)
denorm_deltas = deltas * stds + means
dx = denorm_deltas[:, 0::4]
dy = denorm_deltas[:, 1::4]
dw = denorm_deltas[:, 2::4]
dh = denorm_deltas[:, 3::4]
max_ratio = np.abs(np.log(wh_ratio_clip))
dw = dw.clamp(min=-max_ratio, max=max_ratio)
dh = dh.clamp(min=-max_ratio, max=max_ratio)
px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)
py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)
ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)
gw = pw * dw.exp()
gh = ph * dh.exp()
gx = torch.addcmul(px, 1, pw, dx) # gx = px + pw * dx
gy = torch.addcmul(py, 1, ph, dy) # gy = py + ph * dy
x1 = gx - gw * 0.5 + 0.5
y1 = gy - gh * 0.5 + 0.5
x2 = gx + gw * 0.5 - 0.5
y2 = gy + gh * 0.5 - 0.5
if max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1] - 1)
y1 = y1.clamp(min=0, max=max_shape[0] - 1)
x2 = x2.clamp(min=0, max=max_shape[1] - 1)
y2 = y2.clamp(min=0, max=max_shape[0] - 1)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)
return bboxes
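# delta2bbox inverts bbox2delta (up to clipping by max_shape): feeding the deltas from
# the example above back through it recovers the ground-truth box. Illustrative only:
#
#   rois = torch.tensor([[0., 0., 9., 9.]])
#   delta2bbox(rois, torch.tensor([[0.5, 0.5, 0., 0.]]))  # -> tensor([[ 5.,  5., 14., 14.]])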
def bbox_flip(bboxes, img_shape):
"""Flip bboxes horizontally.
Args:
bboxes(Tensor or ndarray): Shape (..., 4*k)
img_shape(tuple): Image shape.
Returns:
Same type as `bboxes`: Flipped bboxes.
"""
if isinstance(bboxes, torch.Tensor):
assert bboxes.shape[-1] % 4 == 0
flipped = bboxes.clone()
flipped[:, 0::4] = img_shape[1] - bboxes[:, 2::4] - 1
flipped[:, 2::4] = img_shape[1] - bboxes[:, 0::4] - 1
return flipped
elif isinstance(bboxes, np.ndarray):
return mmcv.bbox_flip(bboxes, img_shape)
def bbox_mapping(bboxes, img_shape, scale_factor, flip):
"""Map bboxes from the original image scale to testing scale"""
new_bboxes = bboxes * scale_factor
if flip:
new_bboxes = bbox_flip(new_bboxes, img_shape)
return new_bboxes
def bbox_mapping_back(bboxes, img_shape, scale_factor, flip):
"""Map bboxes from testing scale to original image scale"""
new_bboxes = bbox_flip(bboxes, img_shape) if flip else bboxes
new_bboxes = new_bboxes / scale_factor
return new_bboxes
def bbox2roi(bbox_list):
"""Convert a list of bboxes to roi format.
Args:
bbox_list (list[Tensor]): a list of bboxes corresponding to a batch
of images.
Returns:
Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]
"""
rois_list = []
for img_id, bboxes in enumerate(bbox_list):
if bboxes.size(0) > 0:
img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)
else:
rois = bboxes.new_zeros((0, 5))
rois_list.append(rois)
rois = torch.cat(rois_list, 0)
return rois
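# Illustrative shape check (assumed inputs): two images with 2 and 1 boxes produce a
# (3, 5) roi tensor whose first column holds the image index.
#
#   bbox2roi([torch.zeros(2, 4), torch.zeros(1, 4)]).shape  # -> torch.Size([3, 5])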
def roi2bbox(rois):
bbox_list = []
img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
for img_id in img_ids:
inds = (rois[:, 0] == img_id.item())
bbox = rois[inds, 1:]
bbox_list.append(bbox)
return bbox_list
def bbox2result(bboxes, labels, num_classes):
"""Convert detection results to a list of numpy arrays.
Args:
bboxes (Tensor): shape (n, 5)
labels (Tensor): shape (n, )
num_classes (int): number of classes, including the background class
Returns:
list(ndarray): bbox results of each class
"""
if bboxes.shape[0] == 0:
return [
np.zeros((0, 5), dtype=np.float32) for i in range(num_classes - 1)
]
else:
bboxes = bboxes.cpu().numpy()
labels = labels.cpu().numpy()
return [bboxes[labels == i, :] for i in range(num_classes - 1)]
| 1 | 17,268 | Is this still necessary? | open-mmlab-mmdetection | py |
@@ -52,7 +52,7 @@ namespace Examples.Console
.AddPrometheusExporter(opt =>
{
opt.StartHttpListener = true;
- opt.HttpListenerPrefixes = new string[] { $"http://*:{port}/" };
+ opt.HttpListenerPrefixes = new string[] { $"http://localhost:{port}/" };
})
.Build();
| 1 | // <copyright file="TestPrometheusExporter.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics.Metrics;
using System.Threading;
using System.Threading.Tasks;
using OpenTelemetry;
using OpenTelemetry.Metrics;
using OpenTelemetry.Trace;
namespace Examples.Console
{
internal class TestPrometheusExporter
{
private static readonly Meter MyMeter = new Meter("TestMeter", "0.0.1");
private static readonly Counter<long> Counter = MyMeter.CreateCounter<long>("myCounter");
private static readonly Histogram<long> MyHistogram = MyMeter.CreateHistogram<long>("myHistogram");
private static readonly Random RandomGenerator = new Random();
internal static object Run(int port, int totalDurationInMins)
{
/*
Following is a sample prometheus.yml config. Adjust port and interval as needed.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: 'OpenTelemetryTest'
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ['localhost:9184']
*/
using var meterProvider = Sdk.CreateMeterProviderBuilder()
.AddSource("TestMeter")
.AddPrometheusExporter(opt =>
{
opt.StartHttpListener = true;
opt.HttpListenerPrefixes = new string[] { $"http://*:{port}/" };
})
.Build();
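// Once the HttpListener is running, the exporter serves Prometheus text-format metrics
// at http://localhost:{port}/metrics/ (see the WriteLine further below). An illustrative
// check, not part of the original sample:
//
//     curl http://localhost:9184/metrics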
ObservableGauge<long> gauge = MyMeter.CreateObservableGauge<long>(
"Gauge",
() =>
{
var tag1 = new KeyValuePair<string, object>("tag1", "value1");
var tag2 = new KeyValuePair<string, object>("tag2", "value2");
return new List<Measurement<long>>()
{
new Measurement<long>(RandomGenerator.Next(1, 1000), tag1, tag2),
};
});
using var token = new CancellationTokenSource();
Task writeMetricTask = new Task(() =>
{
while (!token.IsCancellationRequested)
{
Counter.Add(
10,
new KeyValuePair<string, object>("tag1", "value1"),
new KeyValuePair<string, object>("tag2", "value2"));
Counter.Add(
100,
new KeyValuePair<string, object>("tag1", "anothervalue"),
new KeyValuePair<string, object>("tag2", "somethingelse"));
MyHistogram.Record(
RandomGenerator.Next(1, 1500),
new KeyValuePair<string, object>("tag1", "value1"),
new KeyValuePair<string, object>("tag2", "value2"));
Task.Delay(10).Wait();
}
});
writeMetricTask.Start();
token.CancelAfter(totalDurationInMins * 60 * 1000);
System.Console.WriteLine($"OpenTelemetry Prometheus Exporter is making metrics available at http://localhost:{port}/metrics/");
System.Console.WriteLine($"Press Enter key to exit now or will exit automatically after {totalDurationInMins} minutes.");
System.Console.ReadLine();
token.Cancel();
System.Console.WriteLine("Exiting...");
return null;
}
}
}
| 1 | 21,736 | Interesting! Happen to catch an exception message or anything I can look into? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -103,15 +103,10 @@ Promise.prototype.error = function (err) {
* @api public
*/
-Promise.prototype.resolve = function (err, val) {
- if (err) return this.error(err);
- return this.fulfill(val);
-}
-
/**
* Adds a single function as a listener to both err and complete.
*
- * It will be executed with traditional node.js argument position when the promise is resolved.
+ * It will be executed with traditional node.js argument position when the promise is resolved.
*
* promise.addBack(function (err, args...) {
* if (err) return handleError(err); | 1 |
/*!
* Module dependencies
*/
var MPromise = require('mpromise');
/**
* Promise constructor.
*
* Promises are returned from executed queries. Example:
*
* var query = Candy.find({ bar: true });
* var promise = query.exec();
*
* @param {Function} fn a function which will be called when the promise is resolved that accepts `fn(err, ...){}` as signature
* @inherits mpromise https://github.com/aheckmann/mpromise
* @inherits NodeJS EventEmitter http://nodejs.org/api/events.html#events_class_events_eventemitter
* @event `err`: Emits when the promise is rejected
* @event `complete`: Emits when the promise is fulfilled
* @api public
*/
function Promise (fn) {
MPromise.call(this, fn);
}
/*!
* Inherit from mpromise
*/
Promise.prototype = Object.create(MPromise.prototype, {
constructor: {
value: Promise
, enumerable: false
, writable: true
, configurable: true
}
});
/*!
* Override event names for backward compatibility.
*/
Promise.SUCCESS = 'complete';
Promise.FAILURE = 'err';
/**
* Adds `listener` to the `event`.
*
* If `event` is either the success or failure event and the event has already been emitted, the `listener` is called immediately and passed the results of the original emitted event.
*
* @see mpromise#on https://github.com/aheckmann/mpromise#on
* @method on
* @memberOf Promise
* @param {String} event
* @param {Function} listener
* @return {Promise} this
* @api public
*/
/**
* Rejects this promise with `reason`.
*
* If the promise has already been fulfilled or rejected, no action is taken.
*
* @see mpromise#reject https://github.com/aheckmann/mpromise#reject
* @method reject
* @memberOf Promise
* @param {Object|String|Error} reason
* @return {Promise} this
* @api public
*/
/**
* Rejects this promise with `err`.
*
* If the promise has already been fulfilled or rejected, no action is taken.
*
* Differs from [#reject](#promise_Promise-reject) by first casting `err` to an `Error` if it is not `instanceof Error`.
*
* @api public
* @param {Error|String} err
* @return {Promise} this
*/
Promise.prototype.error = function (err) {
if (!(err instanceof Error)) err = new Error(err);
return this.reject(err);
}
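// Editor's note (not part of the original promise.js): a small, hypothetical
// sketch of the casting behaviour documented above. Non-Error values passed to
// error() are wrapped before the promise is rejected:
//
//   var p = new Promise;
//   p.onReject(function (err) {
//     console.log(err instanceof Error); // true
//     console.log(err.message);          // 'boom'
//   });
//   p.error('boom'); // cast to new Error('boom'), then rejected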
/**
* Resolves this promise to a rejected state if `err` is passed or a fulfilled state if no `err` is passed.
*
* If the promise has already been fulfilled or rejected, no action is taken.
*
* `err` will be cast to an Error if not already instanceof Error.
*
* _NOTE: overrides [mpromise#resolve](https://github.com/aheckmann/mpromise#resolve) to provide error casting._
*
* @param {Error} [err] error or null
* @param {Object} [val] value to fulfill the promise with
* @api public
*/
Promise.prototype.resolve = function (err, val) {
if (err) return this.error(err);
return this.fulfill(val);
}
/**
* Adds a single function as a listener to both err and complete.
*
* It will be executed with traditional node.js argument position when the promise is resolved.
*
* promise.addBack(function (err, args...) {
* if (err) return handleError(err);
* console.log('success');
* })
*
* Alias of [mpromise#onResolve](https://github.com/aheckmann/mpromise#onresolve).
*
* _Deprecated. Use `onResolve` instead._
*
* @method addBack
* @param {Function} listener
* @return {Promise} this
* @deprecated
*/
Promise.prototype.addBack = Promise.prototype.onResolve;
/**
* Fulfills this promise with passed arguments.
*
* @method fulfill
* @see https://github.com/aheckmann/mpromise#fulfill
* @param {any} args
* @api public
*/
/**
* Fulfills this promise with passed arguments.
*
* @method fulfill
* @see https://github.com/aheckmann/mpromise#fulfill
* @param {any} args
* @api public
*/
/**
* Fulfills this promise with passed arguments.
*
* Alias of [mpromise#fulfill](https://github.com/aheckmann/mpromise#fulfill).
*
* _Deprecated. Use `fulfill` instead._
*
* @method complete
* @param {any} args
* @api public
* @deprecated
*/
Promise.prototype.complete = MPromise.prototype.fulfill;
/**
* Adds a listener to the `complete` (success) event.
*
* Alias of [mpromise#onFulfill](https://github.com/aheckmann/mpromise#onfulfill).
*
* _Deprecated. Use `onFulfill` instead._
*
* @method addCallback
* @param {Function} listener
* @return {Promise} this
* @api public
* @deprecated
*/
Promise.prototype.addCallback = Promise.prototype.onFulfill;
/**
* Adds a listener to the `err` (rejected) event.
*
* Alias of [mpromise#onReject](https://github.com/aheckmann/mpromise#onreject).
*
* _Deprecated. Use `onReject` instead._
*
* @method addErrback
* @param {Function} listener
* @return {Promise} this
* @api public
* @deprecated
*/
Promise.prototype.addErrback = Promise.prototype.onReject;
/**
* Creates a new promise and returns it. If `onFulfill` or `onReject` are passed, they are added as SUCCESS/ERROR callbacks to this promise after the nextTick.
*
* Conforms to [promises/A+](https://github.com/promises-aplus/promises-spec) specification.
*
* ####Example:
*
* var promise = Meetups.find({ tags: 'javascript' }).select('_id').exec();
* promise.then(function (meetups) {
* var ids = meetups.map(function (m) {
* return m._id;
* });
* return People.find({ meetups: { $in: ids }).exec();
* }).then(function (people) {
* if (people.length < 10000) {
* throw new Error('Too few people!!!');
* } else {
* throw new Error('Still need more people!!!');
* }
* }).then(null, function (err) {
* assert.ok(err instanceof Error);
* });
*
* @see promises-A+ https://github.com/promises-aplus/promises-spec
* @see mpromise#then https://github.com/aheckmann/mpromise#then
* @method then
* @memberOf Promise
* @param {Function} onFulFill
* @param {Function} onReject
* @return {Promise} newPromise
*/
/**
* Signifies that this promise was the last in a chain of `then()s`: if a handler passed to the call to `then` which produced this promise throws, the exception will go uncaught.
*
* ####Example:
*
* var p = new Promise;
* p.then(function(){ throw new Error('shucks') });
* setTimeout(function () {
* p.fulfill();
* // error was caught and swallowed by the promise returned from
* // p.then(). we either have to always register handlers on
* // the returned promises or we can do the following...
* }, 10);
*
* // this time we use .end() which prevents catching thrown errors
* var p = new Promise;
* var p2 = p.then(function(){ throw new Error('shucks') }).end(); // <--
* setTimeout(function () {
* p.fulfill(); // throws "shucks"
* }, 10);
*
* @api public
* @see mpromise#end https://github.com/aheckmann/mpromise#end
* @method end
* @memberOf Promise
*/
/*!
* expose
*/
module.exports = Promise;
| 1 | 12,181 | did this get moved to mpromise? I don't recall if it's in that lib or not. | Automattic-mongoose | js |
@@ -81,14 +81,16 @@ type ProviderModeConfig struct {
type ConsumerConfig struct {
PublicKey string `json:"PublicKey"`
// IP is needed when provider is behind NAT. In such case provider parses this IP and tries to ping consumer.
- IP string `json:"IP,omitempty"`
+ IP string `json:"IP,omitempty"`
+ Ports []int `json:"Ports"`
}
// ServiceConfig represent a Wireguard service provider configuration that will be passed to the consumer for establishing a connection.
type ServiceConfig struct {
// LocalPort and RemotePort are needed for NAT hole punching only.
- LocalPort int `json:"-"`
- RemotePort int `json:"-"`
+ LocalPort int `json:"-"`
+ RemotePort int `json:"-"`
+ Ports []int `json:"ports"`
Provider struct {
PublicKey string | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package wireguard
import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"net"
"strings"
"time"
"github.com/mysteriumnetwork/node/market"
)
// ServiceType indicates "wireguard" service type
const ServiceType = "wireguard"
// ServiceDefinition structure represents "wireguard" service parameters
type ServiceDefinition struct {
// Approximate information on location where the service is provided from
Location market.Location `json:"location"`
// Approximate information on location where the actual tunnelled traffic will originate from.
// This is used by providers having their own means of setting tunnels to other remote exit points.
LocationOriginate market.Location `json:"location_originate"`
}
// GetLocation returns geographic location of service definition provider
func (service ServiceDefinition) GetLocation() market.Location {
return service.Location
}
// EndpointFactory creates new connection endpoint.
type EndpointFactory func() (ConnectionEndpoint, error)
// ConnectionEndpoint represents a Wireguard network instance; it provides the information
// required for establishing a connection between the service provider and the consumer.
type ConnectionEndpoint interface {
StartConsumerMode(config ConsumerModeConfig) error
StartProviderMode(config ProviderModeConfig) error
AddPeer(iface string, peer Peer) error
PeerStats() (*Stats, error)
ConfigureRoutes(ip net.IP) error
Config() (ServiceConfig, error)
InterfaceName() string
Stop() error
}
// ConsumerModeConfig is consumer endpoint startup configuration.
type ConsumerModeConfig struct {
PrivateKey string
IPAddress net.IPNet
ListenPort int
}
// ProviderModeConfig is provider endpoint startup configuration.
type ProviderModeConfig struct {
Network net.IPNet
ListenPort int
PublicIP string
}
// ConsumerConfig is used for sending the public key and IP from consumer to provider
type ConsumerConfig struct {
PublicKey string `json:"PublicKey"`
// IP is needed when provider is behind NAT. In such case provider parses this IP and tries to ping consumer.
IP string `json:"IP,omitempty"`
}
// ServiceConfig represent a Wireguard service provider configuration that will be passed to the consumer for establishing a connection.
type ServiceConfig struct {
// LocalPort and RemotePort are needed for NAT hole punching only.
LocalPort int `json:"-"`
RemotePort int `json:"-"`
Provider struct {
PublicKey string
Endpoint net.UDPAddr
}
Consumer struct {
IPAddress net.IPNet
DNSIPs string
ConnectDelay int
}
}
// MarshalJSON implements json.Marshaler interface to provide human readable configuration.
func (s ServiceConfig) MarshalJSON() ([]byte, error) {
type provider struct {
PublicKey string `json:"public_key"`
Endpoint string `json:"endpoint"`
}
type consumer struct {
IPAddress string `json:"ip_address"`
DNSIPs string `json:"dns_ips"`
ConnectDelay int `json:"connect_delay"`
}
return json.Marshal(&struct {
LocalPort int `json:"local_port"`
RemotePort int `json:"remote_port"`
Provider provider `json:"provider"`
Consumer consumer `json:"consumer"`
}{
LocalPort: s.LocalPort,
RemotePort: s.RemotePort,
Provider: provider{
PublicKey: s.Provider.PublicKey,
Endpoint: s.Provider.Endpoint.String(),
},
Consumer: consumer{
IPAddress: s.Consumer.IPAddress.String(),
ConnectDelay: s.Consumer.ConnectDelay,
DNSIPs: s.Consumer.DNSIPs,
},
})
}
// UnmarshalJSON implements json.Unmarshaler interface to receive human readable configuration.
func (s *ServiceConfig) UnmarshalJSON(data []byte) error {
type provider struct {
PublicKey string `json:"public_key"`
Endpoint string `json:"endpoint"`
}
type consumer struct {
IPAddress string `json:"ip_address"`
DNSIPs string `json:"dns_ips"`
ConnectDelay int `json:"connect_delay"`
}
var config struct {
LocalPort int `json:"local_port"`
RemotePort int `json:"remote_port"`
Provider provider `json:"provider"`
Consumer consumer `json:"consumer"`
}
if err := json.Unmarshal(data, &config); err != nil {
return err
}
endpoint, err := net.ResolveUDPAddr("udp", config.Provider.Endpoint)
if err != nil {
return err
}
ip, ipnet, err := net.ParseCIDR(config.Consumer.IPAddress)
if err != nil {
return err
}
s.LocalPort = config.LocalPort
s.RemotePort = config.RemotePort
s.Provider.Endpoint = *endpoint
s.Provider.PublicKey = config.Provider.PublicKey
s.Consumer.DNSIPs = config.Consumer.DNSIPs
s.Consumer.IPAddress = *ipnet
s.Consumer.IPAddress.IP = ip
s.Consumer.ConnectDelay = config.Consumer.ConnectDelay
return nil
}
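// Editor's note (not part of the original file): given the struct tags used in
// MarshalJSON/UnmarshalJSON above, a ServiceConfig round-trips through JSON of
// roughly this snake_case shape (values are hypothetical):
//
//	{
//	  "local_port": 52100,
//	  "remote_port": 52101,
//	  "provider": {"public_key": "base64key=", "endpoint": "1.2.3.4:51820"},
//	  "consumer": {"ip_address": "10.182.0.2/24", "dns_ips": "1.1.1.1", "connect_delay": 2}
//	}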
// DeviceConfig describes wireguard device configuration.
type DeviceConfig struct {
IfaceName string
Subnet net.IPNet
PrivateKey string
ListenPort int
}
// Encode encodes device config into string representation which is used for
// userspace and kernel space wireguard configuration.
func (dc *DeviceConfig) Encode() string {
var res strings.Builder
keyBytes, err := base64.StdEncoding.DecodeString(dc.PrivateKey)
if err != nil {
return ""
}
hexKey := hex.EncodeToString(keyBytes)
res.WriteString(fmt.Sprintf("private_key=%s\n", hexKey))
res.WriteString(fmt.Sprintf("listen_port=%d\n", dc.ListenPort))
return res.String()
}
// Peer represents wireguard peer.
type Peer struct {
PublicKey string
Endpoint *net.UDPAddr
AllowedIPs []string
KeepAlivePeriodSeconds int
}
// Encode encodes device peer config into string representation which is used for
// userspace and kernel space wireguard configuration.
func (p *Peer) Encode() string {
var res strings.Builder
keyBytes, err := base64.StdEncoding.DecodeString(p.PublicKey)
if err != nil {
return ""
}
hexKey := hex.EncodeToString(keyBytes)
res.WriteString(fmt.Sprintf("public_key=%s\n", hexKey))
res.WriteString(fmt.Sprintf("persistent_keepalive_interval=%d\n", p.KeepAlivePeriodSeconds))
if p.Endpoint != nil {
res.WriteString(fmt.Sprintf("endpoint=%s\n", p.Endpoint.String()))
}
for _, ip := range p.AllowedIPs {
res.WriteString(fmt.Sprintf("allowed_ip=%s\n", ip))
}
return res.String()
}
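// Editor's note (not part of the original file): for illustration only, a peer
// with a base64 public key, endpoint 1.2.3.4:51820, allowed IP 0.0.0.0/0 and a
// 25-second keepalive is turned by Encode() into UAPI-style lines such as:
//
//	public_key=<hex of the base64-decoded key>
//	persistent_keepalive_interval=25
//	endpoint=1.2.3.4:51820
//	allowed_ip=0.0.0.0/0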
// Stats represents wireguard peer statistics information.
type Stats struct {
BytesSent uint64
BytesReceived uint64
LastHandshake time.Time
}
// ParseDevicePeerStats parses current active consumer stats.
func ParseDevicePeerStats(d *UserspaceDevice) (*Stats, error) {
if len(d.Peers) != 1 {
return nil, fmt.Errorf("exactly 1 peer expected, got %d", len(d.Peers))
}
p := d.Peers[0]
return &Stats{
BytesSent: uint64(p.TransmitBytes),
BytesReceived: uint64(p.ReceiveBytes),
LastHandshake: p.LastHandshakeTime,
}, nil
}
| 1 | 15,782 | json objects should be `camelCase` | mysteriumnetwork-node | go |
@@ -0,0 +1,11 @@
+package types
+
+// SectorSize is the amount of bytes in a sector. This amount will be slightly
+// greater than the number of user bytes which can be written to a sector due to
+// bit-padding.
+type SectorSize uint64
+
+const (
+ OneKiBSectorSize = SectorSize(iota)
+ TwoHundredFiftySixMiBSectorSize
+) | 1 | 1 | 18,530 | QuarterGiBSectorSize? SectorSize265MiB? Spelling out 256 seems overly verbose. | filecoin-project-venus | go |
|
@@ -23,10 +23,11 @@ using System.Linq;
using Microsoft.Extensions.Logging;
using OpenTelemetry.Exporter;
using OpenTelemetry.Logs;
+using OpenTelemetry.Tests;
using OpenTelemetry.Trace;
using Xunit;
-namespace OpenTelemetry.Tests.Logs
+namespace OpenTelemetry.Logs.Tests
{
public sealed class LogRecordTest : IDisposable
{ | 1 | // <copyright file="LogRecordTest.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
#if !NET461
using System;
using System.Collections;
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using Microsoft.Extensions.Logging;
using OpenTelemetry.Exporter;
using OpenTelemetry.Logs;
using OpenTelemetry.Trace;
using Xunit;
namespace OpenTelemetry.Tests.Logs
{
public sealed class LogRecordTest : IDisposable
{
private readonly ILogger logger;
private readonly List<LogRecord> exportedItems = new List<LogRecord>();
private readonly ILoggerFactory loggerFactory;
private readonly BaseExportProcessor<LogRecord> processor;
private readonly BaseExporter<LogRecord> exporter;
private OpenTelemetryLoggerOptions options;
public LogRecordTest()
{
this.exporter = new InMemoryExporter<LogRecord>(this.exportedItems);
this.processor = new TestLogRecordProcessor(this.exporter);
this.loggerFactory = LoggerFactory.Create(builder =>
{
builder.AddOpenTelemetry(options =>
{
this.options = options;
options
.AddProcessor(this.processor);
});
builder.AddFilter(typeof(LogRecordTest).FullName, LogLevel.Trace);
});
this.logger = this.loggerFactory.CreateLogger<LogRecordTest>();
}
[Fact]
public void CheckCateogryNameForLog()
{
this.logger.LogInformation("Log");
var categoryName = this.exportedItems[0].CategoryName;
Assert.Equal(typeof(LogRecordTest).FullName, categoryName);
}
[Theory]
[InlineData(LogLevel.Trace)]
[InlineData(LogLevel.Debug)]
[InlineData(LogLevel.Information)]
[InlineData(LogLevel.Warning)]
[InlineData(LogLevel.Error)]
[InlineData(LogLevel.Critical)]
public void CheckLogLevel(LogLevel logLevel)
{
var message = $"Log {logLevel}";
this.logger.Log(logLevel, message);
var logLevelRecorded = this.exportedItems[0].LogLevel;
Assert.Equal(logLevel, logLevelRecorded);
}
[Fact]
public void CheckStateForUnstructuredLog()
{
var message = "Hello, World!";
this.logger.LogInformation(message);
var state = this.exportedItems[0].State as IReadOnlyList<KeyValuePair<string, object>>;
// state only has {OriginalFormat}
Assert.Equal(1, state.Count);
Assert.Equal(message.ToString(), state.ToString());
}
[Fact]
public void CheckStateForUnstructuredLogWithStringInterpolation()
{
var message = $"Hello from potato {0.99}.";
this.logger.LogInformation(message);
var state = this.exportedItems[0].State as IReadOnlyList<KeyValuePair<string, object>>;
// state only has {OriginalFormat}
Assert.Equal(1, state.Count);
Assert.Equal(message.ToString(), state.ToString());
}
[Fact]
public void CheckStateForStructuredLogWithTemplate()
{
var message = "Hello from {name} {price}.";
this.logger.LogInformation(message, "tomato", 2.99);
var state = this.exportedItems[0].State as IReadOnlyList<KeyValuePair<string, object>>;
// state has name, price and {OriginalFormat}
Assert.Equal(3, state.Count);
// Check if state has name
Assert.Contains(state, item => item.Key == "name");
Assert.Equal("tomato", state.First(item => item.Key == "name").Value);
// Check if state has price
Assert.Contains(state, item => item.Key == "price");
Assert.Equal(2.99, state.First(item => item.Key == "price").Value);
// Check if state has OriginalFormat
Assert.Contains(state, item => item.Key == "{OriginalFormat}");
Assert.Equal(message, state.First(item => item.Key == "{OriginalFormat}").Value);
Assert.Equal($"Hello from tomato 2.99.", state.ToString());
}
[Fact]
public void CheckStateForStructuredLogWithStrongType()
{
var food = new Food { Name = "artichoke", Price = 3.99 };
this.logger.LogInformation("{food}", food);
var state = this.exportedItems[0].State as IReadOnlyList<KeyValuePair<string, object>>;
// state has food and {OriginalFormat}
Assert.Equal(2, state.Count);
// Check if state has food
Assert.Contains(state, item => item.Key == "food");
var foodParameter = (Food)state.First(item => item.Key == "food").Value;
Assert.Equal(food.Name, foodParameter.Name);
Assert.Equal(food.Price, foodParameter.Price);
// Check if state has OriginalFormat
Assert.Contains(state, item => item.Key == "{OriginalFormat}");
Assert.Equal("{food}", state.First(item => item.Key == "{OriginalFormat}").Value);
Assert.Equal(food.ToString(), state.ToString());
}
[Fact]
public void CheckStateForStructuredLogWithAnonymousType()
{
var anonymousType = new { Name = "pumpkin", Price = 5.99 };
this.logger.LogInformation("{food}", anonymousType);
var state = this.exportedItems[0].State as IReadOnlyList<KeyValuePair<string, object>>;
// state has food and {OriginalFormat}
Assert.Equal(2, state.Count);
// Check if state has food
Assert.Contains(state, item => item.Key == "food");
var foodParameter = state.First(item => item.Key == "food").Value as dynamic;
Assert.Equal(anonymousType.Name, foodParameter.Name);
Assert.Equal(anonymousType.Price, foodParameter.Price);
// Check if state has OriginalFormat
Assert.Contains(state, item => item.Key == "{OriginalFormat}");
Assert.Equal("{food}", state.First(item => item.Key == "{OriginalFormat}").Value);
Assert.Equal(anonymousType.ToString(), state.ToString());
}
[Fact]
public void CheckStateForStrucutredLogWithGeneralType()
{
var food = new Dictionary<string, object>
{
["Name"] = "truffle",
["Price"] = 299.99,
};
this.logger.LogInformation("{food}", food);
var state = this.exportedItems[0].State as IReadOnlyList<KeyValuePair<string, object>>;
// state only has food and {OriginalFormat}
Assert.Equal(2, state.Count);
// Check if state has food
Assert.Contains(state, item => item.Key == "food");
var foodParameter = state.First(item => item.Key == "food").Value as Dictionary<string, object>;
Assert.True(food.Count == foodParameter.Count && !food.Except(foodParameter).Any());
// Check if state has OriginalFormat
Assert.Contains(state, item => item.Key == "{OriginalFormat}");
Assert.Equal("{food}", state.First(item => item.Key == "{OriginalFormat}").Value);
var prevCulture = CultureInfo.CurrentCulture;
CultureInfo.CurrentCulture = CultureInfo.InvariantCulture;
try
{
Assert.Equal("[Name, truffle], [Price, 299.99]", state.ToString());
}
finally
{
CultureInfo.CurrentCulture = prevCulture;
}
}
[Fact]
public void CheckStateForExceptionLogged()
{
var exceptionMessage = "Exception Message";
var exception = new Exception(exceptionMessage);
var message = "Exception Occurred";
this.logger.LogInformation(exception, message);
var state = this.exportedItems[0].State;
var itemCount = state.GetType().GetProperty("Count").GetValue(state);
// state only has {OriginalFormat}
Assert.Equal(1, itemCount);
var loggedException = this.exportedItems[0].Exception;
Assert.NotNull(loggedException);
Assert.Equal(exceptionMessage, loggedException.Message);
Assert.Equal(message.ToString(), state.ToString());
}
[Fact]
public void CheckTraceIdForLogWithinDroppedActivity()
{
this.logger.LogInformation("Log within a dropped activity");
var logRecord = this.exportedItems[0];
Assert.Null(Activity.Current);
Assert.Equal(default, logRecord.TraceId);
Assert.Equal(default, logRecord.SpanId);
Assert.Equal(default, logRecord.TraceFlags);
}
[Fact]
public void CheckTraceIdForLogWithinActivityMarkedAsRecordOnly()
{
var sampler = new RecordOnlySampler();
var exportedActivityList = new List<Activity>();
var activitySourceName = "LogRecordTest";
var activitySource = new ActivitySource(activitySourceName);
using var tracerProvider = Sdk.CreateTracerProviderBuilder()
.AddSource(activitySourceName)
.SetSampler(sampler)
.AddInMemoryExporter(exportedActivityList)
.Build();
using var activity = activitySource.StartActivity("Activity");
this.logger.LogInformation("Log within activity marked as RecordOnly");
var logRecord = this.exportedItems[0];
var currentActivity = Activity.Current;
Assert.NotNull(Activity.Current);
Assert.Equal(currentActivity.TraceId, logRecord.TraceId);
Assert.Equal(currentActivity.SpanId, logRecord.SpanId);
Assert.Equal(currentActivity.ActivityTraceFlags, logRecord.TraceFlags);
}
[Fact]
public void CheckTraceIdForLogWithinActivityMarkedAsRecordAndSample()
{
var sampler = new AlwaysOnSampler();
var exportedActivityList = new List<Activity>();
var activitySourceName = "LogRecordTest";
var activitySource = new ActivitySource(activitySourceName);
using var tracerProvider = Sdk.CreateTracerProviderBuilder()
.AddSource(activitySourceName)
.SetSampler(sampler)
.AddInMemoryExporter(exportedActivityList)
.Build();
using var activity = activitySource.StartActivity("Activity");
this.logger.LogInformation("Log within activity marked as RecordAndSample");
var logRecord = this.exportedItems[0];
var currentActivity = Activity.Current;
Assert.NotNull(Activity.Current);
Assert.Equal(currentActivity.TraceId, logRecord.TraceId);
Assert.Equal(currentActivity.SpanId, logRecord.SpanId);
Assert.Equal(currentActivity.ActivityTraceFlags, logRecord.TraceFlags);
}
[Fact]
public void IncludeFormattedMessageTest()
{
this.logger.LogInformation("OpenTelemetry!");
var logRecord = this.exportedItems[0];
Assert.Null(logRecord.FormattedMessage);
this.options.IncludeFormattedMessage = true;
try
{
this.logger.LogInformation("OpenTelemetry!");
logRecord = this.exportedItems[1];
Assert.Equal("OpenTelemetry!", logRecord.FormattedMessage);
this.logger.LogInformation("OpenTelemetry {Greeting} {Subject}!", "Hello", "World");
logRecord = this.exportedItems[2];
Assert.Equal("OpenTelemetry Hello World!", logRecord.FormattedMessage);
}
finally
{
this.options.IncludeFormattedMessage = false;
}
}
[Fact]
public void IncludeFormattedMessageTestWhenFormatterNull()
{
this.logger.Log(LogLevel.Information, default, "Hello World!", null, null);
var logRecord = this.exportedItems[0];
Assert.Null(logRecord.FormattedMessage);
this.options.IncludeFormattedMessage = true;
try
{
// Pass null as formatter function
this.logger.Log(LogLevel.Information, default, "Hello World!", null, null);
logRecord = this.exportedItems[1];
Assert.Null(logRecord.FormattedMessage);
var expectedFormattedMessage = "formatted message";
this.logger.Log(LogLevel.Information, default, "Hello World!", null, (state, ex) => expectedFormattedMessage);
logRecord = this.exportedItems[2];
Assert.Equal(expectedFormattedMessage, logRecord.FormattedMessage);
}
finally
{
this.options.IncludeFormattedMessage = false;
}
}
[Fact]
public void IncludeScopesTest()
{
using var scope = this.logger.BeginScope("string_scope");
this.logger.LogInformation("OpenTelemetry!");
var logRecord = this.exportedItems[0];
List<object> scopes = new List<object>();
logRecord.ForEachScope<object>((scope, state) => scopes.Add(scope.Scope), null);
Assert.Empty(scopes);
this.options.IncludeScopes = true;
try
{
this.logger.LogInformation("OpenTelemetry!");
logRecord = this.exportedItems[1];
int reachedDepth = -1;
logRecord.ForEachScope<object>(
(scope, state) =>
{
reachedDepth++;
scopes.Add(scope.Scope);
foreach (KeyValuePair<string, object> item in scope)
{
Assert.Equal(string.Empty, item.Key);
Assert.Equal("string_scope", item.Value);
}
},
null);
Assert.Single(scopes);
Assert.Equal(0, reachedDepth);
Assert.Equal("string_scope", scopes[0]);
scopes.Clear();
List<KeyValuePair<string, object>> expectedScope2 = new List<KeyValuePair<string, object>>
{
new KeyValuePair<string, object>("item1", "value1"),
new KeyValuePair<string, object>("item2", "value2"),
};
using var scope2 = this.logger.BeginScope(expectedScope2);
this.logger.LogInformation("OpenTelemetry!");
logRecord = this.exportedItems[2];
reachedDepth = -1;
logRecord.ForEachScope<object>(
(scope, state) =>
{
scopes.Add(scope.Scope);
if (reachedDepth++ == 1)
{
foreach (KeyValuePair<string, object> item in scope)
{
Assert.Contains(item, expectedScope2);
}
}
},
null);
Assert.Equal(2, scopes.Count);
Assert.Equal(1, reachedDepth);
Assert.Equal("string_scope", scopes[0]);
Assert.Same(expectedScope2, scopes[1]);
scopes.Clear();
KeyValuePair<string, object>[] expectedScope3 = new KeyValuePair<string, object>[]
{
new KeyValuePair<string, object>("item3", "value3"),
new KeyValuePair<string, object>("item4", "value4"),
};
using var scope3 = this.logger.BeginScope(expectedScope3);
this.logger.LogInformation("OpenTelemetry!");
logRecord = this.exportedItems[3];
reachedDepth = -1;
logRecord.ForEachScope<object>(
(scope, state) =>
{
scopes.Add(scope.Scope);
if (reachedDepth++ == 2)
{
foreach (KeyValuePair<string, object> item in scope)
{
Assert.Contains(item, expectedScope3);
}
}
},
null);
Assert.Equal(3, scopes.Count);
Assert.Equal(2, reachedDepth);
Assert.Equal("string_scope", scopes[0]);
Assert.Same(expectedScope2, scopes[1]);
Assert.Same(expectedScope3, scopes[2]);
}
finally
{
this.options.IncludeScopes = false;
}
}
[Fact]
public void ParseStateValuesUsingStandardExtensionsTest()
{
// Tests state parsing with standard extensions.
this.logger.LogInformation("{Product} {Year}!", "OpenTelemetry", 2021);
var logRecord = this.exportedItems[0];
Assert.NotNull(logRecord.State);
Assert.Null(logRecord.StateValues);
this.options.ParseStateValues = true;
try
{
var complex = new { Property = "Value" };
this.logger.LogInformation("{Product} {Year} {Complex}!", "OpenTelemetry", 2021, complex);
logRecord = this.exportedItems[1];
Assert.Null(logRecord.State);
Assert.NotNull(logRecord.StateValues);
Assert.Equal(4, logRecord.StateValues.Count);
Assert.Equal(new KeyValuePair<string, object>("Product", "OpenTelemetry"), logRecord.StateValues[0]);
Assert.Equal(new KeyValuePair<string, object>("Year", 2021), logRecord.StateValues[1]);
Assert.Equal(new KeyValuePair<string, object>("{OriginalFormat}", "{Product} {Year} {Complex}!"), logRecord.StateValues[3]);
KeyValuePair<string, object> actualComplex = logRecord.StateValues[2];
Assert.Equal("Complex", actualComplex.Key);
Assert.Same(complex, actualComplex.Value);
}
finally
{
this.options.ParseStateValues = false;
}
}
[Fact]
public void ParseStateValuesUsingStructTest()
{
// Tests struct IReadOnlyList<KeyValuePair<string, object>> parse path.
this.options.ParseStateValues = true;
try
{
this.logger.Log(
LogLevel.Information,
0,
new StructState(new KeyValuePair<string, object>("Key1", "Value1")),
null,
(s, e) => "OpenTelemetry!");
var logRecord = this.exportedItems[0];
Assert.Null(logRecord.State);
Assert.NotNull(logRecord.StateValues);
Assert.Equal(1, logRecord.StateValues.Count);
Assert.Equal(new KeyValuePair<string, object>("Key1", "Value1"), logRecord.StateValues[0]);
}
finally
{
this.options.ParseStateValues = false;
}
}
[Fact]
public void ParseStateValuesUsingListTest()
{
// Tests ref IReadOnlyList<KeyValuePair<string, object>> parse path.
this.options.ParseStateValues = true;
try
{
this.logger.Log(
LogLevel.Information,
0,
new List<KeyValuePair<string, object>> { new KeyValuePair<string, object>("Key1", "Value1") },
null,
(s, e) => "OpenTelemetry!");
var logRecord = this.exportedItems[0];
Assert.Null(logRecord.State);
Assert.NotNull(logRecord.StateValues);
Assert.Equal(1, logRecord.StateValues.Count);
Assert.Equal(new KeyValuePair<string, object>("Key1", "Value1"), logRecord.StateValues[0]);
}
finally
{
this.options.ParseStateValues = false;
}
}
[Fact]
public void ParseStateValuesUsingIEnumerableTest()
{
// Tests IEnumerable<KeyValuePair<string, object>> parse path.
this.options.ParseStateValues = true;
try
{
this.logger.Log(
LogLevel.Information,
0,
new ListState(new KeyValuePair<string, object>("Key1", "Value1")),
null,
(s, e) => "OpenTelemetry!");
var logRecord = this.exportedItems[0];
Assert.Null(logRecord.State);
Assert.NotNull(logRecord.StateValues);
Assert.Equal(1, logRecord.StateValues.Count);
Assert.Equal(new KeyValuePair<string, object>("Key1", "Value1"), logRecord.StateValues[0]);
}
finally
{
this.options.ParseStateValues = false;
}
}
[Fact]
public void ParseStateValuesUsingCustomTest()
{
// Tests unknown state parse path.
this.options.ParseStateValues = true;
try
{
CustomState state = new CustomState
{
Property = "Value",
};
this.logger.Log(
LogLevel.Information,
0,
state,
null,
(s, e) => "OpenTelemetry!");
var logRecord = this.exportedItems[0];
Assert.Null(logRecord.State);
Assert.NotNull(logRecord.StateValues);
Assert.Equal(1, logRecord.StateValues.Count);
KeyValuePair<string, object> actualState = logRecord.StateValues[0];
Assert.Equal(string.Empty, actualState.Key);
Assert.Same(state, actualState.Value);
}
finally
{
this.options.ParseStateValues = false;
}
}
public void Dispose()
{
this.loggerFactory?.Dispose();
}
internal struct Food
{
public string Name { get; set; }
public double Price { get; set; }
}
private struct StructState : IReadOnlyList<KeyValuePair<string, object>>
{
private readonly List<KeyValuePair<string, object>> list;
public StructState(params KeyValuePair<string, object>[] items)
{
this.list = new List<KeyValuePair<string, object>>(items);
}
public int Count => this.list.Count;
public KeyValuePair<string, object> this[int index] => this.list[index];
public IEnumerator<KeyValuePair<string, object>> GetEnumerator()
{
return this.list.GetEnumerator();
}
IEnumerator IEnumerable.GetEnumerator()
{
return this.list.GetEnumerator();
}
}
private class ListState : IEnumerable<KeyValuePair<string, object>>
{
private readonly List<KeyValuePair<string, object>> list;
public ListState(params KeyValuePair<string, object>[] items)
{
this.list = new List<KeyValuePair<string, object>>(items);
}
public IEnumerator<KeyValuePair<string, object>> GetEnumerator()
{
return this.list.GetEnumerator();
}
IEnumerator IEnumerable.GetEnumerator()
{
return this.list.GetEnumerator();
}
}
private class CustomState
{
public string Property { get; set; }
}
private class TestLogRecordProcessor : SimpleExportProcessor<LogRecord>
{
public TestLogRecordProcessor(BaseExporter<LogRecord> exporter)
: base(exporter)
{
}
public override void OnEnd(LogRecord data)
{
data.BufferLogScopes();
base.OnEnd(data);
}
}
}
}
#endif
| 1 | 20,832 | Maybe with this change we can remove `using OpenTelemetry.Logs`. | open-telemetry-opentelemetry-dotnet | .cs |
@@ -164,9 +164,10 @@ func (h *Handler) handleAddProject(w http.ResponseWriter, r *http.Request) {
}
var (
- id = r.FormValue("ID")
- description = r.FormValue("Description")
- sharedSSOName = r.FormValue("SharedSSO")
+ id = r.FormValue("ID")
+ description = r.FormValue("Description")
+ sharedSSOName = r.FormValue("SharedSSO")
+ viewerRoleAsDefault = r.FormValue("ViewerRoleAsDefault") == "on"
)
if id == "" {
http.Error(w, "invalid id", http.StatusBadRequest) | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package handler
import (
"context"
"fmt"
"html/template"
"net/http"
"strconv"
"time"
"go.uber.org/zap"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/datastore"
"github.com/pipe-cd/pipe/pkg/insight/insightstore"
"github.com/pipe-cd/pipe/pkg/model"
)
var (
topPageTmpl = template.Must(template.New("Top").Parse(Templates["Top"]))
listProjectsTmpl = template.Must(template.New("ListProjects").Parse(Templates["ListProjects"]))
applicationCountsTmpl = template.Must(template.New("ApplicationCounts").Parse(Templates["ApplicationCounts"]))
addProjectTmpl = template.Must(template.New("AddProject").Parse(Templates["AddProject"]))
addedProjectTmpl = template.Must(template.New("AddedProject").Parse(Templates["AddedProject"]))
)
type projectStore interface {
AddProject(ctx context.Context, proj *model.Project) error
ListProjects(ctx context.Context, opts datastore.ListOptions) ([]model.Project, error)
}
type Handler struct {
port int
projectStore projectStore
insightStore insightstore.Store
sharedSSOConfigs []config.SharedSSOConfig
server *http.Server
gracePeriod time.Duration
logger *zap.Logger
}
func NewHandler(port int, ps projectStore, is insightstore.Store, sharedSSOConfigs []config.SharedSSOConfig, gracePeriod time.Duration, logger *zap.Logger) *Handler {
mux := http.NewServeMux()
h := &Handler{
projectStore: ps,
insightStore: is,
sharedSSOConfigs: sharedSSOConfigs,
server: &http.Server{
Addr: fmt.Sprintf(":%d", port),
Handler: mux,
},
gracePeriod: gracePeriod,
logger: logger.Named("handler"),
}
mux.HandleFunc("/", h.handleTop)
mux.HandleFunc("/projects", h.handleListProjects)
mux.HandleFunc("/projects/add", h.handleAddProject)
mux.HandleFunc("/applicationcounts", h.handleApplicationCounts)
return h
}
func (h *Handler) Run(ctx context.Context) error {
doneCh := make(chan error, 1)
ctx, cancel := context.WithCancel(ctx)
go func() {
defer cancel()
doneCh <- h.run()
}()
<-ctx.Done()
h.stop()
return <-doneCh
}
func (h *Handler) run() error {
h.logger.Info(fmt.Sprintf("handler server is running on %d", h.port))
if err := h.server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
h.logger.Error("failed to listen and serve handler server", zap.Error(err))
return err
}
return nil
}
func (h *Handler) stop() error {
ctx, cancel := context.WithTimeout(context.Background(), h.gracePeriod)
defer cancel()
h.logger.Info("stopping handler server")
if err := h.server.Shutdown(ctx); err != nil {
h.logger.Error("failed to shutdown handler server", zap.Error(err))
return err
}
return nil
}
func (h *Handler) handleTop(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "not found", http.StatusNotFound)
return
}
if err := topPageTmpl.Execute(w, nil); err != nil {
h.logger.Error("failed to render Top page template", zap.Error(err))
}
}
func (h *Handler) handleListProjects(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "not found", http.StatusNotFound)
return
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
projects, err := h.projectStore.ListProjects(ctx, datastore.ListOptions{})
if err != nil {
h.logger.Error("failed to retrieve the list of projects", zap.Error(err))
http.Error(w, "Unable to retrieve projects", http.StatusInternalServerError)
return
}
data := make([]map[string]string, 0, len(projects))
for i := range projects {
data = append(data, map[string]string{
"ID": projects[i].Id,
"Description": projects[i].Desc,
"StaticAdminDisabled": strconv.FormatBool(projects[i].StaticAdminDisabled),
"SharedSSOName": projects[i].SharedSsoName,
"CreatedAt": time.Unix(projects[i].CreatedAt, 0).String(),
})
}
if err := listProjectsTmpl.Execute(w, data); err != nil {
h.logger.Error("failed to render ListProjects page template", zap.Error(err))
}
}
func (h *Handler) handleAddProject(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet && r.Method != http.MethodPost {
http.Error(w, "not found", http.StatusNotFound)
return
}
if r.Method == http.MethodGet {
if err := addProjectTmpl.Execute(w, nil); err != nil {
h.logger.Error("failed to render AddProject page template", zap.Error(err))
}
return
}
var (
id = r.FormValue("ID")
description = r.FormValue("Description")
sharedSSOName = r.FormValue("SharedSSO")
)
if id == "" {
http.Error(w, "invalid id", http.StatusBadRequest)
return
}
if sharedSSOName != "" {
found := false
for i := range h.sharedSSOConfigs {
if h.sharedSSOConfigs[i].Name == sharedSSOName {
found = true
break
}
}
if !found {
http.Error(w, fmt.Sprintf("SharedSSOConfig %q was not found in Control Plane configuration", sharedSSOName), http.StatusBadRequest)
return
}
}
var (
project = &model.Project{
Id: id,
Desc: description,
SharedSsoName: sharedSSOName,
}
username = model.GenerateRandomString(10)
password = model.GenerateRandomString(30)
)
if err := project.SetStaticAdmin(username, password); err != nil {
h.logger.Error("failed to set static admin",
zap.String("id", id),
zap.Error(err),
)
http.Error(w, fmt.Sprintf("Unable to add the project (%v)", err), http.StatusInternalServerError)
return
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
if err := h.projectStore.AddProject(ctx, project); err != nil {
h.logger.Error("failed to add a new project",
zap.String("id", id),
zap.Error(err),
)
http.Error(w, fmt.Sprintf("Unable to add the project (%v)", err), http.StatusInternalServerError)
return
}
h.logger.Info("successfully added a new project", zap.String("id", id))
data := map[string]string{
"ID": id,
"Description": description,
"SharedSSOName": sharedSSOName,
"StaticAdminUsername": username,
"StaticAdminPassword": password,
}
if err := addedProjectTmpl.Execute(w, data); err != nil {
h.logger.Error("failed to render AddedProject page template", zap.Error(err))
}
}
func (h *Handler) handleApplicationCounts(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "not found", http.StatusNotFound)
return
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
projects, err := h.projectStore.ListProjects(ctx, datastore.ListOptions{})
if err != nil {
h.logger.Error("failed to retrieve the list of projects", zap.Error(err))
http.Error(w, "Unable to retrieve projects", http.StatusInternalServerError)
return
}
data := make([]map[string]interface{}, 0, len(projects))
for i := range projects {
counts, err := h.insightStore.LoadApplicationCounts(ctx, projects[i].Id)
if err != nil {
data = append(data, map[string]interface{}{
"Project": projects[i].Id,
"Error": err.Error(),
})
continue
}
total, groups := groupApplicationCounts(counts.Counts)
data = append(data, map[string]interface{}{
"Project": projects[i].Id,
"Total": total,
"Counts": groups,
})
}
if err := applicationCountsTmpl.Execute(w, data); err != nil {
h.logger.Error("failed to render ApplicationCounts page template", zap.Error(err))
}
}
func groupApplicationCounts(counts []model.InsightApplicationCount) (total int, groups map[string]int) {
groups = make(map[string]int)
for _, c := range counts {
total += int(c.Count)
kind := c.Labels[model.InsightApplicationCountLabelKey_KIND.String()]
groups[kind] = groups[kind] + int(c.Count)
}
return
}
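// Editor's note (not part of the original handler.go): a worked example of the
// grouping above, with hypothetical counts. Given
//	[{KIND: "KUBERNETES", Count: 3}, {KIND: "TERRAFORM", Count: 1}, {KIND: "KUBERNETES", Count: 2}]
// groupApplicationCounts returns total = 6 and groups = map[KUBERNETES:5 TERRAFORM:1].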
| 1 | 19,103 | Fix this name too. | pipe-cd-pipe | go |
@@ -2018,7 +2018,7 @@ class BibFormatObject(object):
# If record is given as parameter
self.xml_record = xml_record
self.record = create_record(xml_record)[0]
- recID = record_get_field_value(self.record, "001") or None
+ recID = int(record_get_field_value(self.record, "001")) or None
try:
assert isinstance(recID, (int, long, type(None))), 'Argument of wrong type!' | 1 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2013, 2014, 2015 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Format a single record using specified format.
You can have a look at the various escaping modes available in
X{BibFormatObject} in function L{escape_field}
Still, it is sometimes useful for debugging purposes to use the
L{BibFormatObject} class directly. For example:
>>> from invenio.modules.formatter.engine import BibFormatObject
>>> bfo = BibFormatObject(102)
>>> bfo.field('245__a')
The order Rodentia in South America
>>> from invenio.modules.formatter.format_elements import bfe_title
>>> bfe_title.format_element(bfo)
The order Rodentia in South America
@see: bibformat.py, bibformat_utils.py
"""
import re
import sys
import os
import inspect
import traceback
import cgi
import types
from flask import has_app_context, current_app
from operator import itemgetter
from six import iteritems
from werkzeug.utils import cached_property
from invenio.base.globals import cfg
from invenio.config import \
CFG_SITE_LANG, \
CFG_BIBFORMAT_CACHED_FORMATS, \
CFG_BIBFORMAT_DISABLE_I18N_FOR_CACHED_FORMATS, \
CFG_BIBFORMAT_HIDDEN_TAGS
from invenio.ext.logging import \
register_exception
from invenio.legacy.bibrecord import \
create_record, \
record_get_field_instances, \
record_get_field_value, \
record_get_field_values, \
record_xml_output, \
record_empty
from . import registry
from .engines import xslt
from .models import Format
from invenio.base.i18n import \
language_list_long, \
wash_language, \
gettext_set_language
import invenio.legacy.bibformat.dblayer as bibformat_dblayer
from .config import \
CFG_BIBFORMAT_TEMPLATES_DIR, \
CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION, \
CFG_BIBFORMAT_FORMAT_JINJA_TEMPLATE_EXTENSION, \
CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION, \
CFG_BIBFORMAT_OUTPUTS_PATH, \
InvenioBibFormatError
from invenio.modules.formatter.utils import \
record_get_xml, \
parse_tag
from invenio.utils.html import \
HTMLWasher, \
CFG_HTML_BUFFER_ALLOWED_TAG_WHITELIST, \
CFG_HTML_BUFFER_ALLOWED_ATTRIBUTE_WHITELIST
from invenio.modules.knowledge.api import get_kbr_values
from invenio.ext.template import render_template_to_string
from HTMLParser import HTMLParseError
from invenio.modules.access.engine import acc_authorize_action
from invenio.modules.formatter.registry import template_context_functions
# Cache for data we have already read and parsed
format_templates_cache = {}
format_elements_cache = {}
format_outputs_cache = {}
html_field = '<!--HTML-->' # String indicating that field should be
# treated as HTML (and therefore no escaping of
# HTML tags should occur.
# Appears in some field values.
washer = HTMLWasher() # Used to remove dangerous tags from HTML
# sources
# Regular expression for finding <lang>...</lang> tag in format templates
pattern_lang = re.compile(r'''
<lang #<lang tag (no matter case)
\s* #any number of white spaces
> #closing <lang> start tag
(?P<langs>.*?) #anything but the next group (greedy)
(</lang\s*>) #end tag
''', re.IGNORECASE | re.DOTALL | re.VERBOSE)
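# Editor's note (not part of the original engine.py): a small, hypothetical
# example of what pattern_lang captures. For a template snippet such as
#     '<lang><en>Title</en><fr>Titre</fr></lang>'
# pattern_lang.search(...).group('langs') yields '<en>Title</en><fr>Titre</fr>',
# which ln_pattern below then splits into per-language chunks.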
# Builds regular expression for finding each known language in <lang> tags
ln_pattern_text = r"<("
for lang in language_list_long(enabled_langs_only=False):
ln_pattern_text += lang[0] +r"|"
ln_pattern_text = ln_pattern_text.rstrip(r"|")
ln_pattern_text += r")>(.*?)</\1>"
ln_pattern = re.compile(ln_pattern_text, re.IGNORECASE | re.DOTALL)
# Regular expression for finding text to be translated
TRANSLATION_PATTERN = re.compile(r'_\((?P<word>.*?)\)_',
re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding <name> tag in format templates
pattern_format_template_name = re.compile(r'''
<name #<name tag (no matter case)
\s* #any number of white spaces
> #closing <name> start tag
(?P<name>.*?) #name value. any char that is not end tag
(</name\s*>)(\n)? #end tag
''', re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding <description> tag in format templates
pattern_format_template_desc = re.compile(r'''
<description #<description tag (no matter case)
\s* #any number of white spaces
> #closing <description> start tag
(?P<desc>.*?) #description value. any char that is not end tag
</description\s*>(\n)? #end tag
''', re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding <BFE_ > tags in format templates
pattern_tag = re.compile(r'''
<BFE_ #every special tag starts with <BFE_ (no matter case)
(?P<function_name>[^/\s]+) #any char but a space or slash
\s* #any number of spaces
(?P<params>(\s* #params here
(?P<param>([^=\s])*)\s* #param name: any chars that is not a white space or equality. Followed by space(s)
=\s* #equality: = followed by any number of spaces
(?P<sep>[\'"]) #one of the separators
(?P<value>.*?) #param value: any chars that is not a separator like previous one
(?P=sep) #same separator as starting one
)*) #many params
\s* #any number of spaces
(/)?> #end of the tag
''', re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding params inside <BFE_ > tags in format templates
pattern_function_params = re.compile(r'''
(?P<param>([^=\s])*)\s* # Param name: any chars that is not a white space or equality. Followed by space(s)
=\s* # Equality: = followed by any number of spaces
(?P<sep>[\'"]) # One of the separators
(?P<value>.*?) # Param value: any chars that is not a separator like previous one
(?P=sep) # Same separator as starting one
''', re.VERBOSE | re.DOTALL)
# Regular expression for finding format elements "params" attributes
# (defined by @param)
pattern_format_element_params = re.compile(r'''
@param\s* # Begins with AT param keyword followed by space(s)
(?P<name>[^\s=]*):\s* # A single keyword and comma, then space(s)
#(=\s*(?P<sep>[\'"]) # Equality, space(s) and then one of the separators
#(?P<default>.*?) # Default value: any chars that is not a separator like previous one
#(?P=sep) # Same separator as starting one
#)?\s* # Default value for param is optional. Followed by space(s)
(?P<desc>.*) # Any text that is not end of line (thanks to MULTILINE parameter)
''', re.VERBOSE | re.MULTILINE)
# Regular expression for finding format elements "see also" attribute
# (defined by @see)
pattern_format_element_seealso = re.compile(r'''@see:\s*(?P<see>.*)''',
re.VERBOSE | re.MULTILINE)
#Regular expression for finding 2 expressions in quotes, separated by
#comma (as in template("1st","2nd") )
#Used when parsing output formats
## pattern_parse_tuple_in_quotes = re.compile('''
## (?P<sep1>[\'"])
## (?P<val1>.*)
## (?P=sep1)
## \s*,\s*
## (?P<sep2>[\'"])
## (?P<val2>.*)
## (?P=sep2)
## ''', re.VERBOSE | re.MULTILINE)
sub_non_alnum = re.compile('[^0-9a-zA-Z]+')
fix_tag_name = lambda s: sub_non_alnum.sub('_', s.lower())
from invenio.utils.memoise import memoize
class LazyTemplateContextFunctionsCache(object):
"""Loads bibformat elements using plugin builder and caches results."""
@cached_property
def template_context_functions(self):
"""Returns template context functions"""
modules = template_context_functions
elem = {}
for m in modules:
register_func = getattr(m, 'template_context_function', None)
if register_func and isinstance(register_func, types.FunctionType):
elem[m.__name__.split('.')[-1]] = register_func
return elem
@memoize
def bibformat_elements(self, modules=None):
"""Returns bibformat elements."""
if modules is None:
modules = registry.format_elements
elem = {}
for m in modules:
if m is None:
continue
name = m.__name__.split('.')[-1]
if name in elem:
continue
filename = m.__file__[:-1] if m.__file__.endswith('.pyc') \
else m.__file__
register_func = getattr(m, 'format_element',
getattr(m, 'format', None))
escape_values = getattr(m, 'escape_values', None)
if register_func and isinstance(register_func, types.FunctionType):
register_func._escape_values = escape_values
register_func.__file__ = filename
elem[name] = register_func
return elem
@cached_property
def functions(self):
def insert(name):
def _bfe_element(bfo, **kwargs):
# convert to utf-8 for legacy app
kwargs = dict((k, v.encode('utf-8') if isinstance(v, unicode) else v)
for k, v in iteritems(kwargs))
format_element = get_format_element(name)
(out, dummy) = eval_format_element(format_element,
bfo,
kwargs)
# returns unicode for jinja2
return out.decode('utf-8')
return _bfe_element
# Old bibformat templates
tfn_from_files = dict((name.lower(), insert(name.lower()))
for name in self.bibformat_elements().keys())
# Update with new template context functions
tfn_from_files.update(self.template_context_functions)
bfe_from_tags = {}
if has_app_context():
from invenio.ext.sqlalchemy import db
from invenio.modules.search.models import Tag
# get functions from tag table
bfe_from_tags = dict(('bfe_'+fix_tag_name(name),
insert(fix_tag_name(name)))
for name in map(itemgetter(0),
db.session.query(Tag.name).all()))
# overwrite functions from tag table with functions from files
bfe_from_tags.update(tfn_from_files)
return bfe_from_tags
TEMPLATE_CONTEXT_FUNCTIONS_CACHE = LazyTemplateContextFunctionsCache()
def get_format_element_path(filename):
if filename.endswith('.py'):
filename = filename[:-3]
return TEMPLATE_CONTEXT_FUNCTIONS_CACHE.bibformat_elements()[filename].__file__
def format_record(recID, of, ln=CFG_SITE_LANG, verbose=0,
search_pattern=None, xml_record=None, user_info=None, qid="",
**kwargs):
"""
Formats a record given output format. Main entry function of
bibformat engine.
Returns a formatted version of the record in the specified
language, search pattern, and with the specified output format.
The function will define which format template must be applied.
You can either specify a record ID to format, or give its xml
representation. If 'xml_record' is not None, then use it instead
of recID.
'user_info' allows granting access to some functionalities on a
page depending on the user's privileges. 'user_info' is the same
object as the one returned by 'webuser.collect_user_info(req)'
:param recID: the ID of record to format
:param of: an output format code (or short identifier for the output format)
:param ln: the language to use to format the record
:param verbose: the level of verbosity from 0 to 9 (0: silent,
5: errors,
7: errors and warnings, stop if error in format elements
9: errors and warnings, stop if error (debug mode ))
:param search_pattern: list of strings representing the user request in web interface
:param xml_record: an xml string representing the record to format
:param user_info: the information of the user who will view the formatted page
@return: formatted record
"""
if search_pattern is None:
search_pattern = []
out = ""
ln = wash_language(ln)
_ = gettext_set_language(ln)
# Temporary workflow (during migration of formats):
# Call new BibFormat
# But if format not found for new BibFormat, then call old BibFormat
#Create a BibFormat Object to pass that contain record and context
bfo = BibFormatObject(recID, ln, search_pattern, xml_record, user_info, of)
if of.lower() != 'xm' and (not bfo.get_record()
or record_empty(bfo.get_record())):
# Record only has recid: do not format, except
# for xm format
return "", False
#Find out which format template to use based on record and output format.
template = decide_format_template(bfo, of)
if verbose == 9 and template is not None:
out += """\n<br/><span class="quicknote">
Using %s template for record %i.
</span>""" % (template, recID)
path = registry.format_templates_lookup.get(template)
if template is None or not (
template.endswith("." + CFG_BIBFORMAT_FORMAT_JINJA_TEMPLATE_EXTENSION)
or path is None or os.access(path, os.R_OK)
):
# template not found in new BibFormat. Call old one
if verbose == 9:
if template is None:
out += """\n<br/><span class="quicknote">
No template found for output format %s and record %i.
(Check invenio.err log file for more details)
</span>""" % (of, recID)
else:
out += """\n<br/><span class="quicknote">
Template %s could not be read.
</span>""" % (template)
try:
raise InvenioBibFormatError(_('No template could be found for output format %(code)s.', code=of))
except InvenioBibFormatError as exc:
register_exception(req=bfo.req)
if verbose > 5:
out += """\n<br/><span class="quicknote">
%s
</span>""" % str(exc)
return out, False
# Format with template
out_, needs_2nd_pass = format_with_format_template(
template, bfo, verbose=verbose, extra_context=kwargs)
out += out_
return out, needs_2nd_pass
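# --- Illustrative usage sketch (not part of the engine) ----------------------
# A minimal sketch of how format_record() is typically called, assuming a
# running Invenio application context and an existing record; the record ID
# 10 and the 'hb' (HTML brief) output format code are hypothetical examples.
def _example_format_record_usage():
    html, needs_2nd_pass = format_record(recID=10, of='hb', ln='en', verbose=0)
    # needs_2nd_pass is True when some element was flagged no_cache='1' and
    # was left as a <bfe_...> tag for format_record_2nd_pass() to resolve.
    return html, needs_2nd_pass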
def format_record_1st_pass(recID, of, ln=CFG_SITE_LANG, verbose=0,
search_pattern=None, xml_record=None,
user_info=None, on_the_fly=False,
save_missing=True, **kwargs):
"""
Format a record in given output format.
Return a formatted version of the record in the specified
language, search pattern, and with the specified output format.
The function will define which format template must be applied.
The record to be formatted can be specified with its ID (with the
'recID' parameter) or given as an XML representation (with the
'xml_record' parameter). If 'xml_record' is specified, 'recID' is
ignored (but should still be given for reference; a dummy recid 0
or -1 can be used).
'user_info' allows granting access to some functionalities on a
page depending on the user's privileges. The 'user_info' object
makes sense only in the case of on-the-fly formatting. 'user_info'
is the same object as the one returned by
'webuser.collect_user_info(req)'.
:param recID: the ID of record to format.
@type recID: int
:param of: an output format code (or short identifier for the output format)
@type of: string
:param ln: the language to use to format the record
@type ln: string
:param verbose: the level of verbosity from 0 to 9 (0: silent,
5: errors,
7: errors and warnings, stop if error in format elements
9: errors and warnings, stop if error (debug mode ))
@type verbose: int
:param search_pattern: list of strings representing the user request in web interface
@type search_pattern: list(string)
:param xml_record: an XML string representation of the record to format
@type xml_record: string or None
:param user_info: the information of the user who will view the formatted page (if applicable)
:param on_the_fly: if False, try to return an already preformatted version of the record in the database
@type on_the_fly: boolean
@return: formatted record
@rtype: string
"""
from invenio.legacy.search_engine import record_exists
if search_pattern is None:
search_pattern = []
out = ""
if verbose == 9:
out += """\n<span class="quicknote">
Formatting record %i with output format %s.
</span>""" % (recID, of)
if not on_the_fly and \
(ln == CFG_SITE_LANG or
of.lower() == 'xm' or
(of.lower() in CFG_BIBFORMAT_DISABLE_I18N_FOR_CACHED_FORMATS)) and \
record_exists(recID) != -1:
# Try to fetch preformatted record. Only possible for records
# formatted in CFG_SITE_LANG language (other are never
# stored), or of='xm' which does not depend on language.
# Exceptions are made for output formats defined in
# CFG_BIBFORMAT_DISABLE_I18N_FOR_CACHED_FORMATS, which are
# always served from the same cache for any language. Also,
# do not fetch from DB when record has been deleted: we want
# to return an "empty" record in that case
res, needs_2nd_pass = bibformat_dblayer.get_preformatted_record(recID, of)
if res is not None:
# record 'recID' is formatted in 'of', so return it
if verbose == 9:
last_updated = bibformat_dblayer.get_preformatted_record_date(recID, of)
out += """\n<br/><span class="quicknote">
Found preformatted output for record %i (cache updated on %s).
</span><br/>""" % (recID, last_updated)
if of.lower() == 'xm':
res = filter_hidden_fields(res, user_info)
# try to replace language links in pre-cached res, if applicable:
if ln != CFG_SITE_LANG and of.lower() in CFG_BIBFORMAT_DISABLE_I18N_FOR_CACHED_FORMATS:
# The following statements try to quickly replace any
# language arguments in URL links. Not an exact
# science, but should work most of the time for most
# of the formats, with not too many false positives.
# We don't have time to parse output much here.
res = res.replace('?ln=' + CFG_SITE_LANG, '?ln=' + ln)
res = res.replace('&ln=' + CFG_SITE_LANG, '&ln=' + ln)
res = res.replace('&amp;ln=' + CFG_SITE_LANG, '&amp;ln=' + ln)
out += res
return out, needs_2nd_pass
else:
if verbose == 9:
out += """\n<br/><span class="quicknote">
No preformatted output found for record %s.
</span>"""% recID
# Live formatting of records in all other cases
if verbose == 9:
out += """\n<br/><span class="quicknote">
Formatting record %i on-the-fly.
</span>""" % recID
try:
out_, needs_2nd_pass = format_record(recID=recID,
of=of,
ln=ln,
verbose=verbose,
search_pattern=search_pattern,
xml_record=xml_record,
user_info=user_info,
**kwargs)
out += out_
if of.lower() in ('xm', 'xoaimarc'):
out = filter_hidden_fields(out, user_info, force_filtering=of.lower()=='xoaimarc')
# We have spent time computing this format
# We want to save this effort if the format is cached
if save_missing and recID and ln == cfg['CFG_SITE_LANG'] \
and of.lower() in cfg['CFG_BIBFORMAT_CACHED_FORMATS'] \
and verbose == 0:
bibformat_dblayer.save_preformatted_record(recID,
of,
out,
needs_2nd_pass)
return out, needs_2nd_pass
except Exception as e:
register_exception(prefix="An error occurred while formatting record %s in %s" %
(recID, of),
alert_admin=True)
#Failsafe execution mode
import invenio.legacy.template
websearch_templates = invenio.legacy.template.load('websearch')
if verbose == 9:
out += """\n<br/><span class="quicknote">
An error occurred while formatting record %s. (%s)
</span>""" % (recID, str(e))
if of.lower() == 'hd':
if verbose == 9:
out += """\n<br/><span class="quicknote">
Formatting record %i with websearch_templates.tmpl_print_record_detailed.
</span><br/>""" % recID
return out + websearch_templates.tmpl_print_record_detailed(
ln=ln,
recID=recID,
)
if verbose == 9:
out += """\n<br/><span class="quicknote">
Formatting record %i with websearch_templates.tmpl_print_record_brief.
</span><br/>""" % recID
return out + websearch_templates.tmpl_print_record_brief(ln=ln,
recID=recID,
), False
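# --- Illustrative usage sketch (not part of the engine) ----------------------
# A minimal sketch, assuming an application context and DB access, of the
# cached first pass: a preformatted version is served from the database when
# available, otherwise the record is formatted on the fly and (optionally)
# saved back to the cache. The record ID 10 is a hypothetical example.
def _example_format_record_1st_pass_usage():
    out, needs_2nd_pass = format_record_1st_pass(recID=10, of='hd',
                                                 ln=CFG_SITE_LANG,
                                                 on_the_fly=False,
                                                 save_missing=True)
    if needs_2nd_pass:
        # Resolve the remaining <bfe_...> tags against a fresh BibFormatObject
        out = format_record_2nd_pass(recID=10, template=out,
                                     ln=CFG_SITE_LANG, of='hd')
    return out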
def format_record_2nd_pass(recID, template, ln=CFG_SITE_LANG,
search_pattern=None, xml_record=None,
user_info=None, of=None, verbose=0, **kwargs):
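"""
Run the second formatting pass on a partially formatted template.
Elements flagged with no_cache='1' during the first pass are left in the
template as <bfe_...> tags; this pass evaluates them against a light
BibFormatObject, applies translations, and returns the final output.
"""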
# Create light bfo object
bfo = BibFormatObject(recID, ln, search_pattern, xml_record, user_info, of)
# Translations
template = translate_template(template, ln)
# Format template
r, dummy = format_with_format_template(format_template_filename=None,
format_template_code=template,
bfo=bfo,
verbose=verbose,
extra_context=kwargs)
return r
def decide_format_template(bfo, of):
"""
Returns the format template name that should be used for formatting
given output format and L{BibFormatObject}.
Look at the output format rules, and take the first one that matches.
If no rule matches, return None.
Matching ignores letter case and leading/trailing spaces in both the
rule value and the record value.
:param bfo: a L{BibFormatObject}
:param of: the code of the output format to use
@return: name of a format template
"""
output_format = get_output_format(of)
for rule in output_format['rules']:
if rule['field'].startswith('00'):
# Rule uses controlfield
values = [bfo.control_field(rule['field']).strip()] #Remove spaces
else:
# Rule uses datafield
values = bfo.fields(rule['field'])
# loop over multiple occurrences, but take the first match
if len(values) > 0:
for value in values:
value = value.strip() #Remove spaces
pattern = rule['value'].strip() #Remove spaces
match_obj = re.match(pattern, value, re.IGNORECASE)
if match_obj is not None and \
match_obj.end() == len(value):
return rule['template']
template = output_format['default']
if template != '':
return template
else:
return None
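# --- Illustrative usage sketch (not part of the engine) ----------------------
# A minimal sketch of the rule matching done above, assuming a hypothetical
# record ID 10 and a hypothetical output format code 'hx': the first rule
# whose pattern fully matches the record value wins, otherwise the output
# format's default template (or None) is returned.
def _example_decide_format_template_usage():
    bfo = BibFormatObject(10, ln='en', output_format='hx')
    template = decide_format_template(bfo, 'hx')
    return template  # e.g. 'Default_HTML_brief.bft', or None if nothing matches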
def translate_template(template, ln=CFG_SITE_LANG):
_ = gettext_set_language(ln)
def translate(match):
"""
Translate matching values
"""
word = match.group("word")
translated_word = _(word)
return translated_word
filtered_template = filter_languages(template, ln)
evaluated_format = TRANSLATION_PATTERN.sub(translate, filtered_template)
return evaluated_format
def format_with_format_template(format_template_filename, bfo,
verbose=0, format_template_code=None, qid="",
extra_context=None):
""" Format a record given a
format template.
Returns a formatted version of the record represented by bfo,
in the language specified in bfo, and with the specified format template.
If format_template_code is provided, the template will not be loaded from
format_template_filename (but format_template_filename will still be used to
determine whether a bft or xsl transformation applies). This allows previewing
format code without having to save the file on disk.
:param format_template_filename: the filename of a format template
:param bfo: the object containing parameters for the current formatting
:param format_template_code: if not empty, use code as template instead of reading format_template_filename (used for previews)
:param verbose: the level of verbosity from 0 to 9 (0: silent,
5: errors,
7: errors and warnings,
9: errors and warnings, stop if error (debug mode ))
@return: formatted text
"""
if format_template_code is not None:
format_content = str(format_template_code)
elif not format_template_filename.endswith("." + CFG_BIBFORMAT_FORMAT_JINJA_TEMPLATE_EXTENSION):
format_content = get_format_template(format_template_filename)['code']
if format_template_filename is None or \
format_template_filename.endswith("." + CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION):
# .bft
evaluated_format, needs_2nd_pass = eval_format_template_elements(
format_content,
bfo,
verbose)
if not needs_2nd_pass:
evaluated_format = translate_template(evaluated_format, bfo.lang)
elif format_template_filename.endswith("." + CFG_BIBFORMAT_FORMAT_JINJA_TEMPLATE_EXTENSION):
evaluated_format = '<!-- empty -->'
#try:
from functools import wraps
from invenio.modules.records.api import \
create_record as new_create_record, \
get_record as new_get_record
from flask.ext.login import current_user
from invenio.base.helpers import unicodifier
def _format_record(recid, of='hb', user_info=current_user, *args, **kwargs):
from invenio.modules.formatter import format_record
return format_record(recid, of, user_info=user_info, *args, **kwargs)
# Fixes unicode problems in Jinja2 templates.
def encode_utf8(f):
@wraps(f)
def wrapper(*args, **kwds):
return unicodifier(f(*args, **kwds))
return wrapper
if bfo.xml_record is None:
record = new_get_record(bfo.recID)
else:
record = new_create_record(bfo.xml_record, master_format='marc')
bfo.recID = bfo.recID if bfo.recID else 0
record.__getitem__ = encode_utf8(record.__getitem__)
record.get = encode_utf8(record.get)
ctx = TEMPLATE_CONTEXT_FUNCTIONS_CACHE.functions
if extra_context is not None:
ctx.update(extra_context)
evaluated_format = render_template_to_string(
'format/record/'+format_template_filename,
recid=bfo.recID,
record=record,
format_record=_format_record,
qid=qid,
bfo=bfo, **ctx).encode('utf-8')
needs_2nd_pass = False
else:
#.xsl
if bfo.xml_record:
# bfo was initialized with a custom MARCXML
xml_record = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
record_xml_output(bfo.record)
else:
# Fetch MARCXML. On-the-fly xm if we are now formatting in xm
xml_record = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
record_get_xml(bfo.recID, 'xm', on_the_fly=False)
# Transform MARCXML using stylesheet
evaluated_format = xslt.format(xml_record, template_source=format_content).decode('utf-8')
needs_2nd_pass = False
return evaluated_format, needs_2nd_pass
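# --- Illustrative usage sketch (not part of the engine) ----------------------
# A minimal sketch of the "preview" mode described in the docstring above:
# when format_template_code is given, the code is used directly, so a
# template can be tried out without saving it to disk. The record ID and the
# template snippet are hypothetical.
def _example_preview_template_code():
    bfo = BibFormatObject(10, ln='en')
    code = 'Title: <BFE_TITLE default="(no title)" /><br/>'
    out, dummy_needs_2nd_pass = format_with_format_template(
        format_template_filename=None,
        bfo=bfo,
        format_template_code=code)
    return out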
def eval_format_template_elements(format_template, bfo, verbose=0):
"""
Evaluates the format elements of the given template and replaces each element with its value.
Prepares the format template content so that the MARC codes can be directly replaced by their values.
This implies:
1. Look for special tags
2. replace special tags by their evaluation
:param format_template: the format template code
:param bfo: the object containing parameters for the current formatting
:param verbose: the level of verbosity from 0 to 9 (0: silent,
5: errors, 7: errors and warnings,
9: errors and warnings, stop if error (debug mode ))
@return: tuple (result, errors)
"""
_ = gettext_set_language(bfo.lang)
status = {'no_cache': False}
# First define insert_element_code(match), used in re.sub() function
def insert_element_code(match):
"""
Analyses 'match', interprets the corresponding code, and returns the result of the evaluation.
Called by substitution in 'eval_format_template_elements(...)'
:param match: a match object corresponding to the special tag that must be interpreted
"""
function_name = match.group("function_name")
# Ignore <lang> tags; they are processed elsewhere
if function_name == 'lang':
return match.group(0)
try:
format_element = get_format_element(function_name, verbose)
except Exception as e:
register_exception(req=bfo.req)
format_element = None
if verbose >= 5:
return '<b><span style="color: rgb(255, 0, 0);">' + \
cgi.escape(str(e)).replace('\n', '<br/>') + \
'</span></b>'
if format_element is None:
try:
raise InvenioBibFormatError(
_('Could not find format element named %(function_name)s.',
function_name=function_name))
except InvenioBibFormatError as exc:
register_exception(req=bfo.req)
if verbose >= 5:
return '<b><span style="color: rgb(255, 0, 0);">' + \
str(exc.message)+'</span></b>'
else:
params = {}
# Look for function parameters given in format template code
all_params = match.group('params')
if all_params is not None:
function_params_iterator = pattern_function_params.finditer(all_params)
for param_match in function_params_iterator:
name = param_match.group('param')
value = param_match.group('value')
params[name] = value
if params.get('no_cache') == '1':
result = match.group("function_name")
del params['no_cache']
if params:
params_str = ' '.join('%s="%s"' % (k, v) for k, v in params.iteritems())
result = "<bfe_%s %s />" % (result, params_str)
else:
result = "<bfe_%s />" % result
status['no_cache'] = True
else:
# Evaluate element with params and return (Do not return errors)
result, dummy = eval_format_element(format_element,
bfo,
params,
verbose)
return result
# Substitute special tags in the format by our own text.
# Special tags have the form <BFE_format_element_name [param="value"]* />
fmt = pattern_tag.sub(insert_element_code, format_template)
return fmt, status['no_cache']
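# --- Illustrative usage sketch (not part of the engine) ----------------------
# A hedged sketch of what the substitution above does to a template snippet,
# with a hypothetical record: every <BFE_...> tag is replaced by the output
# of the corresponding element, except elements flagged no_cache="1", which
# are kept (as lowercase <bfe_... />) for the second formatting pass.
def _example_eval_format_template_elements():
    bfo = BibFormatObject(10, ln='en')
    snippet = '<strong><BFE_TITLE default="(untitled)" /></strong>'
    rendered, no_cache = eval_format_template_elements(snippet, bfo)
    return rendered, no_cache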
def eval_format_element(format_element, bfo, parameters=None, verbose=0):
"""
Returns the result of the evaluation of the given format element
name, with given L{BibFormatObject} and parameters. Also returns
the errors of the evaluation.
:param format_element: a format element structure as returned by get_format_element
:param bfo: a L{BibFormatObject} used for formatting
:param parameters: a dict of parameters to be used for formatting. Key is parameter and value is value of parameter
:param verbose: the level of verbosity from 0 to 9 (0: silent,
5: errors,
7: errors and warnings,
9: errors and warnings, stop if error (debug mode ))
@return: tuple (result, errors)
"""
if parameters is None:
parameters = {}
errors = []
#Load special values given as parameters
prefix = parameters.get('prefix', "")
suffix = parameters.get('suffix', "")
default_value = parameters.get('default', "")
escape = parameters.get('escape', "")
output_text = ''
_ = gettext_set_language(bfo.lang)
# 3 possible cases:
# a) format element file is found: we execute it
# b) format element file is not found, but exists in tag table (e.g. bfe_isbn)
# c) format element is totally unknown. Do nothing or report error
if format_element is not None and format_element['type'] == "python":
# a) We found an element with the tag name, of type "python"
# Prepare a dict 'params' to pass as parameter to 'format'
# function of element
params = {}
# Look for parameters defined in format element
# Fill them with specified default values and values
# given as parameters.
# Also remember if the element overrides the 'escape'
# parameter
format_element_overrides_escape = False
for param in format_element['attrs']['params']:
name = param['name']
default = param['default']
params[name] = parameters.get(name, default)
if name == 'escape':
format_element_overrides_escape = True
# Add BibFormatObject
params['bfo'] = bfo
# Execute function with given parameters and return result.
function = format_element['code']
_ = gettext_set_language(bfo.lang)
try:
output_text = function(**params)
except Exception as e:
register_exception(req=bfo.req)
name = format_element['attrs']['name']
try:
raise InvenioBibFormatError(
_('Error when evaluating format element %(x_name)s '
'with parameters %(x_params)s.',
x_name=name, x_params=str(params)))
except InvenioBibFormatError as exc:
errors.append(exc.message)
# Log exception
message = _(
"Error when evaluating format element %(format_element)s with"
" parameters %(parameters)s.",
format_element=name,
parameters=str(params)
)
current_app.logger.exception(
message
)
errors.append(message)
# In debug mode - include traceback in output
if current_app.debug:
tb = sys.exc_info()[2]
stack = traceback.format_exception(
Exception, e, tb, limit=None
)
output_text = '<span class="well"><pre style="color:red;">' \
'%s\n\n%s</pre></span>' % (message, "".join(stack))
# None can be returned when evaluating function
if output_text is None:
output_text = ""
else:
try:
output_text = str(output_text)
except:
output_text = output_text.encode('utf-8')
# Escaping:
# (1) By default, everything is escaped in mode 1
# (2) If evaluated element has 'escape_values()' function, use
# its returned value as escape mode, and override (1)
# (3) If template has a defined parameter 'escape' (in allowed
# values), use it, and override (1) and (2). If this
# 'escape' parameter is overridden by the format element
# (defined in the 'format' function of the element), leave
# the escaping job to this element
# (1)
escape_mode = 1
# (2)
escape_function = format_element['escape_function']
if escape_function is not None:
try:
escape_mode = escape_function(bfo=bfo)
except Exception as e:
try:
raise InvenioBibFormatError(_('Escape mode for format element %(x_name)s could not be retrieved. Using default mode instead.', x_name=name))
except InvenioBibFormatError as exc:
register_exception(req=bfo.req)
errors.append(exc.message)
if verbose >= 5:
tb = sys.exc_info()[2]
output_text += '<b><span style="color: rgb(255, 0, 0);">'+ \
str(exc.message) +'</span></b> '
# (3)
if escape in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
escape_mode = int(escape)
# If escape is equal to 1, then escape all
# HTML reserved chars.
if escape_mode > 0 and not format_element_overrides_escape:
output_text = escape_field(output_text, mode=escape_mode)
# Add prefix and suffix if they have been given as parameters and if
# the evaluation of element is not empty
if output_text.strip() != "":
output_text = prefix + output_text + suffix
# Add the default value if output_text is empty
if output_text == "":
output_text = default_value
return output_text, errors
elif format_element is not None and format_element['type'] == "field":
# b) We have not found an element in files that has the tag
# name. Then look for it in the table "tag"
#
# <BFE_LABEL_IN_TAG prefix = "" suffix = "" separator = ""
# nbMax="" escape="0"/>
#
# Load special values given as parameters
separator = parameters.get('separator ', "")
nbMax = parameters.get('nbMax', "")
escape = parameters.get('escape', "1") # By default, escape here
# Get the fields tags that have to be printed
tags = format_element['attrs']['tags']
output_text = []
# Get values corresponding to tags
for tag in tags:
p_tag = parse_tag(tag)
values = record_get_field_values(bfo.get_record(),
p_tag[0],
p_tag[1],
p_tag[2],
p_tag[3])
if len(values)>0 and isinstance(values[0], dict):
#flatten dict to its values only
values_list = [x.values() for x in values]
#output_text.extend(values)
for values in values_list:
output_text.extend(values)
else:
output_text.extend(values)
if nbMax != "":
try:
nbMax = int(nbMax)
except ValueError:
name = format_element['attrs']['name']
try:
raise InvenioBibFormatError(_('"nbMax" parameter for %(x_param)s must be an "int".', x_param=name))
except InvenioBibFormatError as exc:
register_exception(req=bfo.req)
errors.append(exc.message)
if verbose >= 5:
output_text.append(exc.message)
else:
output_text = output_text[:nbMax]
# Add prefix and suffix if they have been given as parameters and if
# the evaluation of element is not empty.
# If evaluation is empty string, return default value if it exists.
# Else return empty string
if ("".join(output_text)).strip() != "":
# If escape is equal to 1, then escape all
# HTML reserved chars.
if escape == '1':
output_text = cgi.escape(separator.join(output_text))
else:
output_text = separator.join(output_text)
output_text = prefix + output_text + suffix
else:
#Return default value
output_text = default_value
return (output_text, errors)
else:
# c) Element is unknown
try:
raise InvenioBibFormatError(_('Could not find format element named %(format_element)s.', format_element=format_element))
except InvenioBibFormatError as exc:
register_exception(req=bfo.req)
errors.append(exc.message)
if verbose < 5:
return ("", errors)
elif verbose >= 5:
if verbose >= 9:
sys.exit(exc.message)
return ('<b><span style="color: rgb(255, 0, 0);">' +
str(exc.message)+'</span></b>', errors)
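# --- Illustrative usage sketch (not part of the engine) ----------------------
# A minimal sketch of evaluating a single element with the built-in parameters
# handled above: 'prefix'/'suffix' are only added when the element produced
# output, 'default' is used when it produced nothing, and 'escape' overrides
# the element's own escaping mode. Record ID and element name are hypothetical.
def _example_eval_format_element_usage():
    bfo = BibFormatObject(10, ln='en')
    element = get_format_element('bfe_title')
    output, errors = eval_format_element(element, bfo,
                                         parameters={'prefix': 'Title: ',
                                                     'default': '(untitled)',
                                                     'escape': '1'})
    return output, errors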
def filter_languages(format_template, ln=CFG_SITE_LANG):
"""
Filters the language tags that do not correspond to the specified language.
:param format_template: the format template code
:param ln: the language that is NOT filtered out from the template
@return: the format template with unnecessary languages filtered out
"""
# First define search_lang_tag(match) and clean_language_tag(match), used
# in re.sub() function
def search_lang_tag(match):
"""
Searches for the <lang>...</lang> tag and removes inner localized tags
such as <en>, <fr>, that are not current_lang.
If current_lang cannot be found inside <lang> ... </lang>, try to use 'CFG_SITE_LANG'
:param match: a match object corresponding to the special tag that must be interpreted
"""
current_lang = ln
def clean_language_tag(match):
"""
Return tag text content if tag language of match is output language.
Called by substitution in 'filter_languages(...)'
:param match: a match object corresponding to the special tag that must be interpreted
"""
if match.group(1) == current_lang:
return match.group(2)
else:
return ""
# End of clean_language_tag
lang_tag_content = match.group("langs")
# Try to find tag with current lang. If it does not exist,
# then current_lang becomes CFG_SITE_LANG until the end of this
# replace
pattern_current_lang = re.compile(r"<(" + current_lang +
r")\s*>(.*)(</" + current_lang + r"\s*>)", re.IGNORECASE | re.DOTALL)
if re.search(pattern_current_lang, lang_tag_content) is None:
current_lang = CFG_SITE_LANG
cleaned_lang_tag = ln_pattern.sub(clean_language_tag, lang_tag_content)
return cleaned_lang_tag.strip()
# End of search_lang_tag
filtered_format_template = pattern_lang.sub(search_lang_tag, format_template)
return filtered_format_template
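# --- Illustrative usage sketch (not part of the engine) ----------------------
# A hedged sketch of the language filtering above: only the content of the
# tag matching the requested language (falling back to CFG_SITE_LANG when the
# requested language is absent) is kept; other localized alternatives are
# dropped.
def _example_filter_languages():
    snippet = '<lang><en>Title</en><fr>Titre</fr></lang>'
    return filter_languages(snippet, ln='fr')  # expected to keep 'Titre'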
def get_format_template(filename, with_attributes=False):
"""
Returns the structured content of the given format template.
if 'with_attributes' is true, returns the name and description. Else 'attrs' is not
returned as key in dictionary (it might, if it has already been loaded previously)::
{'code':"<b>Some template code</b>"
'attrs': {'name': "a name", 'description': "a description"}
}
:param filename: the filename of a format template
:param with_attributes: if True, fetch the attributes (name and description) of the format
@return: structured content of format template
"""
_ = gettext_set_language(CFG_SITE_LANG)
if not filename.endswith("."+CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION) and \
not filename.endswith(".xsl"):
return None
# Get from cache whenever possible
if filename in format_templates_cache:
# If we must return with attributes and the template exists in
# the cache with attributes, then return the cached version.
# Else reload with attributes
if with_attributes and \
'attrs' in format_templates_cache[filename]:
return format_templates_cache[filename]
format_template = {'code': ""}
try:
path = registry.format_templates_lookup[filename]
format_file = open(path)
format_content = format_file.read()
format_file.close()
# Load format template code
# Remove name and description
if filename.endswith("."+CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION):
code_and_description = pattern_format_template_name.sub("",
format_content, 1)
code = pattern_format_template_desc.sub("", code_and_description, 1)
else:
code = format_content
format_template['code'] = code
except:
register_exception()
# Save attributes if necessary
if with_attributes:
format_template['attrs'] = get_format_template_attrs(filename)
# Cache and return
format_templates_cache[filename] = format_template
return format_template
def get_format_templates(with_attributes=False):
"""
Returns the list of all format templates, as dictionary with filenames as keys
if 'with_attributes' is true, returns the name and description. Else 'attrs' is not
returned as key in each dictionary (it might, if it has already been loaded previously)::
[{'code':"<b>Some template code</b>"
'attrs': {'name': "a name", 'description': "a description"}
},
...
}
:param with_attributes: if True, fetch the attributes (names and description) for formats
@return: the list of format templates (with code and info)
"""
format_templates = {}
for filename in registry.format_templates:
if filename.endswith("."+CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION) or \
filename.endswith(".xsl"):
filename = os.path.basename(filename)
format_templates[filename] = get_format_template(filename,
with_attributes)
return format_templates
def get_format_template_attrs(filename):
"""
Returns the attributes of the format template with given filename
The attributes are {'name', 'description'}
Caution: the function does not check that the path exists or
that the format template is valid.
:param filename: the name of a format template
@return: a structure with detailed information about given format template
"""
_ = gettext_set_language(CFG_SITE_LANG)
attrs = {}
attrs['name'] = ""
attrs['description'] = ""
try:
template_file = open(registry.format_templates_lookup[filename])
code = template_file.read()
template_file.close()
match = None
if filename.endswith(".xsl"):
# .xsl
attrs['name'] = filename[:-4]
else:
# .bft
match = pattern_format_template_name.search(code)
if match is not None:
attrs['name'] = match.group('name')
else:
attrs['name'] = filename
match = pattern_format_template_desc.search(code)
if match is not None:
attrs['description'] = match.group('desc').rstrip('.')
except Exception as e:
try:
raise InvenioBibFormatError(_('Could not read format template named %(filename)s. %(error)s.', filename=filename, error=str(e)))
except InvenioBibFormatError:
register_exception()
attrs['name'] = filename
return attrs
def get_format_element(element_name, verbose=0, with_built_in_params=False,
soft_fail=False):
"""
Returns the format element structured content.
Return None if element cannot be loaded (file not found, not readable or
invalid)
The returned structure is::
{'attrs': {some attributes in dict. See get_format_element_attrs_from_*}
'code': the_function_code,
'type':"field" or "python" depending if element is defined in file or table,
'escape_function': the function to call to know if element output must be escaped}
:param element_name: the name of the format element to load
:param verbose: the level of verbosity from 0 to 9 (0: silent,
5: errors,
7: errors and warnings,
9: errors and warnings, stop if error (debug mode ))
:param with_built_in_params: if True, load the parameters built in all elements
@return: a dictionary with format element attributes
"""
_ = gettext_set_language(CFG_SITE_LANG)
# Resolve filename and prepare 'name' as key for the cache
filename = resolve_format_element_filename(element_name)
if filename is not None:
name = filename.upper()
else:
name = element_name.upper()
if name in format_elements_cache:
element = format_elements_cache[name]
if not with_built_in_params or \
(with_built_in_params and
'builtin_params' in element['attrs']):
return element
if filename is None:
# Element is maybe in tag table
if bibformat_dblayer.tag_exists_for_name(element_name):
format_element = {'attrs': get_format_element_attrs_from_table(
element_name,
with_built_in_params),
'code': None,
'escape_function': None,
'type': "field"}
# Cache and return
format_elements_cache[name] = format_element
return format_element
elif soft_fail:
register_exception()
return None
else:
raise InvenioBibFormatError(
_('Format element %(x_element_name)s could not be found.',
x_element_name=element_name))
else:
format_element = {}
module_name = filename
if module_name.endswith(".py"):
module_name = module_name[:-3]
# Load function 'format_element()' inside element
try:
function_format = TEMPLATE_CONTEXT_FUNCTIONS_CACHE.\
bibformat_elements()[module_name]
format_element['code'] = function_format
except:
if soft_fail:
register_exception()
return None
else:
raise
# Load function 'escape_values()' inside element
format_element['escape_function'] = function_format._escape_values
# Prepare, cache and return
format_element['attrs'] = get_format_element_attrs_from_function(
function_format,
element_name,
with_built_in_params)
format_element['type'] = "python"
format_elements_cache[name] = format_element
return format_element
def get_format_elements(with_built_in_params=False):
"""
Returns the list of format elements attributes as dictionary structure
Elements declared in files have priority over elements declared in the 'tag' table.
The returned object has this format::
{element_name1: {'attrs': {'description':..., 'seealso':...
'params':[{'name':..., 'default':..., 'description':...}, ...]
'builtin_params':[{'name':..., 'default':..., 'description':...}, ...]
},
'code': code_of_the_element
},
element_name2: {...},
...}
Returns only elements that could be loaded (no error in their code)
@return: a dict of format elements with name as key, and a dict as attributes
:param with_built_in_params: if True, load the parameters built in all elements
"""
format_elements = {}
mappings = bibformat_dblayer.get_all_name_tag_mappings()
for name in mappings:
format_elements[name.upper().replace(" ", "_").strip()] = get_format_element(name, with_built_in_params=with_built_in_params)
for module in registry.format_elements:
filename = os.path.basename(module.__file__)
filename_test = filename.upper().replace(" ", "_")
if filename_test.endswith(".PYC"):
filename_test = filename_test[:-1]
if filename_test.endswith(".PY") and not filename.upper().startswith("__INIT__.PY"):
if filename_test.startswith("BFE_"):
filename_test = filename_test[4:]
element_name = filename_test[:-3]
element = get_format_element(element_name,
with_built_in_params=with_built_in_params,
soft_fail=True)
if element is not None:
format_elements[element_name] = element
return format_elements
def get_format_element_attrs_from_function(function, element_name,
with_built_in_params=False):
"""
Returns the attributes of the function given as parameter.
It looks for standard parameters of the function, default
values and comments in the docstring.
The attributes are::
{'name' : "name of element" #basically the name of 'name' parameter
'description': "a string description of the element",
'seealso' : ["element_1.py", "element_2.py", ...] #a list of related elements
'params': [{'name':"param_name", #a list of parameters for this element (except 'bfo')
'default':"default value",
'description': "a description"}, ...],
'builtin_params': {name: {'name':"param_name",#the parameters builtin for all elem of this kind
'default':"default value",
'description': "a description"}, ...},
}
:param function: the formatting function of a format element
:param element_name: the name of the element
:param with_built_in_params: if True, load the parameters built in all elements
@return: a structure with detailed information of a function
"""
attrs = {}
attrs['description'] = ""
attrs['name'] = element_name.replace(" ", "_").upper()
attrs['seealso'] = []
docstring = function.__doc__
if isinstance(docstring, str):
# Look for function description in docstring
#match = pattern_format_element_desc.search(docstring)
description = docstring.split("@param")[0]
description = description.split("@see:")[0]
attrs['description'] = description.strip().rstrip('.')
# Look for @see: in docstring
match = pattern_format_element_seealso.search(docstring)
if match is not None:
elements = match.group('see').rstrip('.').split(",")
for element in elements:
attrs['seealso'].append(element.strip())
params = {}
# Look for parameters in function definition
args, dummy_varargs, dummy_varkw, defaults = inspect.getargspec(function)
# Prepare args and defaults_list such that we can have a mapping
# from args to defaults
args.reverse()
if defaults is not None:
defaults_list = list(defaults)
defaults_list.reverse()
else:
defaults_list = []
for arg, default in map(None, args, defaults_list):
if arg == "bfo":
#Don't keep this as parameter. It is hidden to users, and
#exists in all elements of this kind
continue
param = {}
param['name'] = arg
if default is None:
#In case no check is made inside element, we prefer to
#print "" (nothing) than None in output
param['default'] = ""
else:
param['default'] = default
param['description'] = "(no description provided)"
params[arg] = param
if isinstance(docstring, str):
# Look for AT param descriptions in docstring.
# Add description to existing parameters in params dict
params_iterator = pattern_format_element_params.finditer(docstring)
for match in params_iterator:
name = match.group('name')
if name in params:
params[name]['description'] = match.group('desc').rstrip('.')
attrs['params'] = params.values()
# Load built-in parameters if necessary
if with_built_in_params:
builtin_params = []
# Add 'prefix' parameter
param_prefix = {}
param_prefix['name'] = "prefix"
param_prefix['default'] = ""
param_prefix['description'] = """A prefix printed only if the
record has a value for this element"""
builtin_params.append(param_prefix)
# Add 'suffix' parameter
param_suffix = {}
param_suffix['name'] = "suffix"
param_suffix['default'] = ""
param_suffix['description'] = """A suffix printed only if the
record has a value for this element"""
builtin_params.append(param_suffix)
# Add 'default' parameter
param_default = {}
param_default['name'] = "default"
param_default['default'] = ""
param_default['description'] = """A default value printed if the
record has no value for this element"""
builtin_params.append(param_default)
# Add 'escape' parameter
param_escape = {}
param_escape['name'] = "escape"
param_escape['default'] = ""
param_escape['description'] = """0 keeps value as it is. Refer to main
documentation for escaping modes
1 to 7"""
builtin_params.append(param_escape)
attrs['builtin_params'] = builtin_params
return attrs
def get_format_element_attrs_from_table(element_name,
with_built_in_params=False):
"""
Returns the attributes of the format element with given name in 'tag' table.
Returns None if element_name does not exist in tag table.
The attributes are::
{'name' : "name of element" #basically the name of 'element_name' parameter
'description': "a string description of the element",
'seealso' : [] #a list of related elements. Always empty in this case
'params': [], #a list of parameters for this element. Always empty in this case
'builtin_params': [{'name':"param_name", #the parameters builtin for all elem of this kind
'default':"default value",
'description': "a description"}, ...],
'tags':["950.1", 203.a] #the list of tags printed by this element
}
:param element_name: an element name in database
:param element_name: the name of the element
:param with_built_in_params: if True, load the parameters built in all elements
@return: a structure with detailed information of an element found in DB
"""
attrs = {}
tags = bibformat_dblayer.get_tags_from_name(element_name)
field_label = "field"
if len(tags)>1:
field_label = "fields"
attrs['description'] = "Prints %s %s of the record" % (field_label,
", ".join(tags))
attrs['name'] = element_name.replace(" ", "_").upper()
attrs['seealso'] = []
attrs['params'] = []
attrs['tags'] = tags
# Load built-in parameters if necessary
if with_built_in_params:
builtin_params = []
# Add 'prefix' parameter
param_prefix = {}
param_prefix['name'] = "prefix"
param_prefix['default'] = ""
param_prefix['description'] = """A prefix printed only if the
record has a value for this element"""
builtin_params.append(param_prefix)
# Add 'suffix' parameter
param_suffix = {}
param_suffix['name'] = "suffix"
param_suffix['default'] = ""
param_suffix['description'] = """A suffix printed only if the
record has a value for this element"""
builtin_params.append(param_suffix)
# Add 'separator' parameter
param_separator = {}
param_separator['name'] = "separator"
param_separator['default'] = " "
param_separator['description'] = """A separator between elements of
the field"""
builtin_params.append(param_separator)
# Add 'nbMax' parameter
param_nbMax = {}
param_nbMax['name'] = "nbMax"
param_nbMax['default'] = ""
param_nbMax['description'] = """The maximum number of values to
print for this element. No limit if not
specified"""
builtin_params.append(param_nbMax)
# Add 'default' parameter
param_default = {}
param_default['name'] = "default"
param_default['default'] = ""
param_default['description'] = """A default value printed if the
record has no value for this element"""
builtin_params.append(param_default)
# Add 'escape' parameter
param_escape = {}
param_escape['name'] = "escape"
param_escape['default'] = ""
param_escape['description'] = """If set to 1, replaces special
characters '&', '<' and '>' of this
element by SGML entities"""
builtin_params.append(param_escape)
attrs['builtin_params'] = builtin_params
return attrs
def get_output_format(code, with_attributes=False, verbose=0):
"""
Returns the structured content of the given output format
If 'with_attributes' is true, also returns the names and description of the output formats,
else 'attrs' is not returned in dict (it might, if it has already been loaded previously).
If the output format corresponding to 'code' is not found, return an empty structure.
See get_output_format_attrs() to learn more about the attributes::
{'rules': [ {'field': "980__a",
'value': "PREPRINT",
'template': "filename_a.bft",
},
{...}
],
'attrs': {'names': {'generic':"a name", 'sn':{'en': "a name", 'fr':"un nom"}, 'ln':{'en':"a long name"}}
'description': "a description"
'code': "fnm1",
'content_type': "application/ms-excel",
'visibility': 1
}
'default':"filename_b.bft"
}
:param code: the code of an output_format
:param with_attributes: if True, fetch the attributes (names and description) for format
:param verbose: the level of verbosity from 0 to 9 (0: silent,
5: errors,
7: errors and warnings,
9: errors and warnings, stop if error (debug mode ))
@return: structured content of output format
"""
_ = gettext_set_language(CFG_SITE_LANG)
output_format = {'rules': [], 'default': ""}
filename = resolve_output_format_filename(code, verbose)
if filename is None:
try:
raise InvenioBibFormatError(_('Output format with code %(code)s could not be found.', code=code))
except InvenioBibFormatError:
register_exception()
if with_attributes: #Create empty attrs if asked for attributes
output_format['attrs'] = get_output_format_attrs(code, verbose)
return output_format
# Get from cache whenever possible
if filename in format_outputs_cache:
# If we must return with attributes but the cache has no
# attributes, then load the attributes
if with_attributes and 'attrs' not in format_outputs_cache[filename]:
format_outputs_cache[filename]['attrs'] = get_output_format_attrs(code, verbose)
return format_outputs_cache[filename]
try:
if with_attributes:
output_format['attrs'] = get_output_format_attrs(code, verbose)
path = registry.output_formats_lookup[filename]
format_file = open(path)
current_tag = ''
for line in format_file:
line = line.strip()
if line == "":
# Ignore blank lines
continue
if line.endswith(":"):
# Retrieve tag
# Remove : spaces and eol at the end of line
clean_line = line.rstrip(": \n\r")
# The tag starts at second position
current_tag = "".join(clean_line.split()[1:]).strip()
elif line.find('---') != -1:
words = line.split('---')
template = words[-1].strip()
condition = ''.join(words[:-1])
output_format['rules'].append({'field': current_tag,
'value': condition,
'template': template,
})
elif line.find(':') != -1:
# Default case
default = line.split(':')[1].strip()
output_format['default'] = default
except:
register_exception()
# Cache and return
format_outputs_cache[filename] = output_format
return output_format
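# --- Illustrative sketch (not part of the engine) ----------------------------
# A hedged sketch of the parsing above, for a hypothetical 'HX.bfo' file
# containing:
#
#     tag 980__a:
#     PREPRINT --- Default_HTML_brief.bft
#     default: Default_HTML_detailed.bft
#
# get_output_format('hx') would then return roughly:
#
#     {'rules': [{'field': '980__a',
#                 'value': 'PREPRINT',
#                 'template': 'Default_HTML_brief.bft'}],
#      'default': 'Default_HTML_detailed.bft'}
def _example_get_output_format_usage():
    fmt = get_output_format('hx', with_attributes=True)
    return fmt['rules'], fmt['default']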
def get_output_format_attrs(code, verbose=0):
"""Return the attributes of an output format.
The attributes contain 'code', which is the short identifier of the output
format (to be given as parameter in format_record function to specify the
output format), 'description', a description of the output format,
'visibility' the visibility of the format in the output format list on
public pages and 'names', the localized names of the output format. If
'content_type' is specified then the search_engine will send a file with
this content type and with result of formatting as content to the user.
The 'names' dict always contains 'generic', 'ln' (for long name) and 'sn'
(for short names) keys. 'generic' is the default name for output format.
'ln' and 'sn' contain long and short localized names of the output format.
Only the languages for which a localization exist are used::
{'names': {'generic':"a name", 'sn':{'en': "a name", 'fr':"un nom"},
'ln':{'en':"a long name"}}
'description': "a description"
'code': "fnm1",
'content_type': "application/ms-excel",
'visibility': 1
}
:param code: the short identifier of the format
:param verbose: the level of verbosity from 0 to 9 (0: silent,
5: errors, 7: errors and warnings,
9: errors and warnings, stop if error (debug mode ))
:return: structured content of output format attributes
"""
if code.endswith("."+CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION):
code = code[:-(len(CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION) + 1)]
attrs = {'names': {'generic': "",
'ln': {},
'sn': {}},
'description': '',
'code': code.upper(),
'content_type': "",
'visibility': 1}
filename = resolve_output_format_filename(code, verbose)
if filename is None:
return attrs
try:
format_ = Format.query.filter_by(code=code).one()
attrs['names']['generic'] = format_.name
for name in format_.names:
attrs['names'][name.type][name.ln] = name.value
attrs['description'] = format_.description
attrs['content_type'] = format_.content_type
attrs['visibility'] = format_.visibility
except Exception:
current_app.logger.exception("Unknown code %s", code)
return attrs
def get_output_formats(with_attributes=False):
"""
Return all output format as a dictionary with their filename as key.
If 'with_attributes' is true, also returns the names and description of the
output formats, else 'attrs' is not returned in dicts (it might, if it has
already been loaded previously).
See get_output_format_attrs() to learn more on the attributes::
{'filename_1.bfo': {'rules': [ {'field': "980__a",
'value': "PREPRINT",
'template': "filename_a.bft",
},
{...}
],
'attrs': {'names': {'generic':"a name", 'sn':{'en': "a name", 'fr':"un nom"}, 'ln':{'en':"a long name"}}
'description': "a description"
'code': "fnm1"
}
'default':"filename_b.bft"
},
'filename_2.bfo': {...},
...
}
:param with_attributes: if returned output formats contain detailed info, or not
@type with_attributes: boolean
@return: the list of output formats
"""
output_formats = {}
for filename in registry.output_formats_lookup.values():
filename = os.path.basename(filename)
if filename.endswith("."+CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION):
code = "".join(filename.split(".")[:-1])
if filename in output_formats:
continue
output_formats[filename] = get_output_format(code, with_attributes)
return output_formats
def resolve_format_element_filename(element_name):
"""
Returns the filename of the element corresponding to X{element_name}.
This is necessary since format template code calls
elements while ignoring case; e.g. <BFE_AUTHOR> is the
same as <BFE_author>.
It is also recommended that format elements filenames are
prefixed with bfe_ . We need to look for these too.
The name of the element has to start with "BFE_".
:param element_name: a name for a format element
@return: the corresponding filename, with right case
"""
if not element_name.endswith(".py"):
name = element_name.replace(" ", "_").upper() +".PY"
else:
name = element_name.replace(" ", "_").upper()
files = registry.format_elements
for element in files:
filename = element.__file__
if filename.endswith('.pyc'):
filename = filename[:-1]
basename = os.path.basename(filename)
test_filename = basename.replace(" ", "_").upper()
if test_filename == name or \
test_filename == "BFE_" + name or \
"BFE_" + test_filename == name:
return basename
# No element with that name found
# Do not log error, as it might be a normal execution case:
# element can be in database
return None
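# --- Illustrative sketch (not part of the engine) ----------------------------
# A hedged sketch of the case-insensitive resolution above: all of the
# following hypothetical spellings are expected to resolve to the same
# element file, e.g. 'bfe_title.py' (or None if the element only exists in
# the 'tag' table).
def _example_resolve_format_element_filename():
    return [resolve_format_element_filename(name)
            for name in ('bfe_title', 'BFE_TITLE', 'title', 'Title.py')]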
def resolve_output_format_filename(code, verbose=0):
"""
Returns the filename of output corresponding to code
This is necessary since output formats names are not case sensitive
but most file systems are.
:param code: the code for an output format
:param verbose: the level of verbosity from 0 to 9 (0: silent,
5: errors,
7: errors and warnings,
9: errors and warnings, stop if error (debug mode ))
@return: the corresponding filename, with right case, or None if not found
"""
_ = gettext_set_language(CFG_SITE_LANG)
#Remove non alphanumeric chars (except . and _)
code = re.sub(r"[^.0-9a-zA-Z_]", "", code)
if not code.endswith("."+CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION):
code = re.sub(r"\W", "", code)
code += "."+CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION
code = code.upper()
for filename in registry.output_formats_lookup.keys():
if filename.upper() == code:
return filename
# No output format with that name found
try:
raise InvenioBibFormatError(_('Could not find output format named %(x_code)s.', x_code=code))
except InvenioBibFormatError as exc:
register_exception()
if verbose >= 5:
sys.stderr.write(exc.message)
if verbose >= 9:
sys.exit(exc.message)
return None
def get_fresh_format_template_filename(name):
"""
Returns a new filename and name for template with given name.
Used when writing a new template to a file, so that the name
has no space and is unique in the template directory.
Returns (unique_filename, modified_name)
:param name: name for a format template
@return: the corresponding filename, and modified name if necessary
"""
#name = re.sub(r"\W", "", name) #Remove non alphanumeric chars
name = name.replace(" ", "_")
filename = name
# Remove non alphanumeric chars (except .)
filename = re.sub(r"[^.0-9a-zA-Z]", "", filename)
index = 1
def _get_fullname(filename):
return filename + '.' + CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION
while _get_fullname(filename) in registry.format_templates_lookup:
index += 1
filename = name + str(index)
if index > 1:
returned_name = (name + str(index)).replace("_", " ")
else:
returned_name = name.replace("_", " ")
return (_get_fullname(filename), returned_name)
def get_fresh_output_format_filename(code):
"""
Returns a new filename for output format with given code.
Used when writing a new output format to a file, so that the code
has no space and is unique in the output format directory. The filename
also needs to be at most 6 chars long, as the convention is that
filename == output format code (+ .extension).
We return an uppercase code.
Returns (unique_filename, modified_code)
:param code: the code of an output format
@return: the corresponding filename, and modified code if necessary
"""
_ = gettext_set_language(CFG_SITE_LANG)
#code = re.sub(r"\W", "", code) #Remove non alphanumeric chars
code = code.upper().replace(" ", "_")
# Remove non alphanumeric chars (except . and _)
code = re.sub(r"[^.0-9a-zA-Z_]", "", code)
if len(code) > 6:
code = code[:6]
def _get_fullname(filename):
return filename + '.' + CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION
filename = code
index = 2
while _get_fullname(filename) in registry.output_formats_lookup:
filename = code + str(index)
if len(filename) > 6:
filename = code[:-(len(str(index)))]+str(index)
index += 1
# We should not try more than 99999... Well I don't see how we
# could get there.. Sanity check.
if index >= 99999:
try:
raise InvenioBibFormatError(_('Could not find a fresh name for output format %(x_code)s.', x_code=code))
except InvenioBibFormatError:
register_exception()
sys.exit("Output format cannot be named as %s" % code)
return (filename + "." + CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION, filename)
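# --- Illustrative sketch (not part of the engine) ----------------------------
# A hedged sketch of the naming rules above, with a hypothetical code: the
# code is uppercased, stripped of spaces and special characters, cut to at
# most 6 characters, and suffixed with an index until the filename is unique
# in the output format directory.
def _example_get_fresh_output_format_filename():
    # e.g. 'my format!' -> ('MY_FOR.bfo', 'MY_FOR'), assuming no name clash
    return get_fresh_output_format_filename('my format!')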
def clear_caches():
"""
Clear the caches (Output Format, Format Templates and Format Elements).
@return: None
"""
global format_templates_cache, format_elements_cache, format_outputs_cache
format_templates_cache = {}
format_elements_cache = {}
format_outputs_cache = {}
class BibFormatObject(object):
"""
An object that encapsulates a record and associated methods, and that is given
as parameter to all format elements 'format' function.
The object is made specifically for a given formatting, i.e. it includes
for example the language for the formatting.
The object provides basic accessors to the record. For full access, one can get
the record with get_record() and then use BibRecord methods on the returned object.
"""
# The record
record = None
# The language in which the formatting has to be done
lang = CFG_SITE_LANG
# A list of string describing the context in which the record has
# to be formatted.
# It represents the words of the user request in web interface search
search_pattern = []
# The id of the record
recID = 0
# The information about the user, as returned by
# 'webuser.collect_user_info(req)'
user_info = None
# The format in which the record is being formatted
output_format = ''
req = None # DEPRECATED: use bfo.user_info instead. Used by WebJournal.
def __init__(self, recID, ln=CFG_SITE_LANG, search_pattern=None,
xml_record=None, user_info=None, output_format=''):
"""
Creates a new bibformat object, with given record.
You can either specify a record ID to format, or give its XML representation.
If 'xml_record' is not None, use 'xml_record' instead of recID for the record.
'user_info' allows granting access to some functionalities on
a page depending on the user's privileges. It is a dictionary
in the following form::
user_info = {
'remote_ip' : '',
'remote_host' : '',
'referer' : '',
'uri' : '',
'agent' : '',
'uid' : -1,
'nickname' : '',
'email' : '',
'group' : [],
'guest' : '1'
}
:param recID: the id of a record
:param ln: the language in which the record has to be formatted
:param search_pattern: list of strings representing the request used by the user in the web interface
:param xml_record: an XML string of the record to format
:param user_info: the information of the user who will view the formatted page
:param output_format: the output_format used for formatting this record
"""
self.xml_record = None # *Must* remain empty if recid is given
if xml_record is not None:
# If record is given as parameter
self.xml_record = xml_record
self.record = create_record(xml_record)[0]
recID = record_get_field_value(self.record, "001") or None
try:
assert isinstance(recID, (int, long, type(None))), 'Argument of wrong type!'
except AssertionError:
register_exception(prefix="recid needs to be an integer in BibFormatObject",
alert_admin=True)
recID = int(recID)
self.recID = recID
self.lang = wash_language(ln)
if search_pattern is None:
search_pattern = []
self.search_pattern = search_pattern
self.output_format = output_format
self.user_info = user_info
if self.user_info is None:
from invenio.ext.login.legacy_user import UserInfo
self.user_info = UserInfo(None)
def get_record(self):
"""
Returns the record structure of this L{BibFormatObject} instance
@return: the record structure as defined by BibRecord library
"""
from invenio.legacy.search_engine import get_record
# Create record if necessary
if self.record is None:
# on-the-fly creation if current output is xm
self.record = get_record(self.recID)
return self.record
def control_field(self, tag, escape=0):
"""
Returns the value of control field given by tag in record
:param tag: the marc code of a field
:param escape: 1 if returned value should be escaped. Else 0.
@return: value of field tag in record
"""
if self.get_record() is None:
#Case where BibRecord could not parse object
return ''
p_tag = parse_tag(tag)
field_value = record_get_field_value(self.get_record(),
p_tag[0],
p_tag[1],
p_tag[2],
p_tag[3])
if escape == 0:
return field_value
else:
return escape_field(field_value, escape)
def field(self, tag, escape=0):
"""
Returns the value of the field corresponding to tag in the
current record.
If the value does not exist, return empty string. Else
returns the same as bfo.fields(..)[0] (see docstring below).
The 'escape' parameter allows escaping special characters
of the field. The value of escape can be:
0. no escaping
1. escape all HTML characters
2. remove unsafe HTML tags (Eg. keep <br />)
3. Mix of mode 1 and 2. If value of field starts with
<!-- HTML -->, then use mode 2. Else use mode 1.
4. Remove all HTML tags
5. Same as 2, with more tags allowed (like <img>)
6. Same as 3, with more tags allowed (like <img>)
7. Mix of mode 0 and mode 1. If field_value
starts with <!--HTML-->, then use mode 0.
Else use mode 1.
8. Same as mode 1, but also escape double-quotes
9. Same as mode 4, but also escape double-quotes
:param tag: the marc code of a field
:param escape: 1 if returned value should be escaped. Else 0. (see above for other modes)
@return: value of field tag in record
"""
list_of_fields = self.fields(tag)
if len(list_of_fields) > 0:
# Escaping below
if escape == 0:
return list_of_fields[0]
else:
return escape_field(list_of_fields[0], escape)
else:
return ""
def fields(self, tag, escape=0, repeatable_subfields_p=False):
"""
Returns the list of values corresonding to "tag".
If tag has an undefined subcode (such as 999C5),
the function returns a list of dictionaries, whoose keys
are the subcodes and the values are the values of tag.subcode.
If the tag has a subcode, simply returns list of values
corresponding to tag.
Eg. for given MARC::
999C5 $a value_1a $b value_1b
999C5 $b value_2b
999C5 $b value_3b $b value_3b_bis
>>> bfo.fields('999C5b')
>>> ['value_1b', 'value_2b', 'value_3b', 'value_3b_bis']
>>> bfo.fields('999C5')
>>> [{'a':'value_1a', 'b':'value_1b'},
{'b':'value_2b'},
{'b':'value_3b'}]
By default the function returns only one value for each
subfield (that is it considers that repeatable subfields are
not allowed). This is why, in the above example, 'value_3b_bis' is
not shown for bfo.fields('999C5'). (Note that it is not
defined which of value_3b or value_3b_bis is returned). This
is to simplify the use of the function, as most of the time
subfields are not repeatable (in that way we get a string
instead of a list). You can allow repeatable subfields by
setting 'repeatable_subfields_p' parameter to True. In
this mode, the above example would return:
>>> bfo.fields('999C5b', repeatable_subfields_p=True)
>>> ['value_1b', 'value_2b', 'value_3b']
>>> bfo.fields('999C5', repeatable_subfields_p=True)
>>> [{'a':['value_1a'], 'b':['value_1b']},
{'b':['value_2b']},
{'b':['value_3b', 'value3b_bis']}]
NOTICE THAT THE RETURNED STRUCTURE IS DIFFERENT. Also note
that whatever the value of 'repeatable_subfields_p' is,
bfo.fields('999C5b') always show all fields, even repeatable
ones. This is because the parameter has no impact on the
returned structure (it is always a list).
The 'escape' parameter allows escaping special characters
of the fields. The value of escape can be:
0. No escaping
1. Escape all HTML characters
2. Remove unsafe HTML tags (Eg. keep <br />)
3. Mix of mode 1 and 2. If value of field starts with
<!-- HTML -->, then use mode 2. Else use mode 1.
4. Remove all HTML tags
5. Same as 2, with more tags allowed (like <img>)
6. Same as 3, with more tags allowed (like <img>)
7. Mix of mode 0 and mode 1. If field_value
starts with <!--HTML-->, then use mode 0.
Else use mode 1.
8. Same as mode 1, but also escape double-quotes
9. Same as mode 4, but also escape double-quotes
:param tag: the marc code of a field
:param escape: 1 if returned values should be escaped. Else 0.
:param repeatable_subfields_p: if True, returns the list of subfields in the dictionary
@return: values of field tag in record
"""
if self.get_record() is None:
# Case where BibRecord could not parse object
return []
p_tag = parse_tag(tag)
if p_tag[3] != "":
# Subcode has been defined. Simply returns list of values
values = record_get_field_values(self.get_record(),
p_tag[0],
p_tag[1],
p_tag[2],
p_tag[3])
if escape == 0:
return values
else:
return [escape_field(value, escape) for value in values]
else:
# Subcode is undefined. Returns list of dicts.
# However it might be the case of a control field.
instances = record_get_field_instances(self.get_record(),
p_tag[0],
p_tag[1],
p_tag[2])
if repeatable_subfields_p:
list_of_instances = []
for instance in instances:
instance_dict = {}
for subfield in instance[0]:
if subfield[0] not in instance_dict:
instance_dict[subfield[0]] = []
if escape == 0:
instance_dict[subfield[0]].append(subfield[1])
else:
instance_dict[subfield[0]].append(escape_field(subfield[1], escape))
list_of_instances.append(instance_dict)
return list_of_instances
else:
if escape == 0:
return [dict(instance[0]) for instance in instances]
else:
return [dict([(subfield[0], escape_field(subfield[1], escape))
for subfield in instance[0]])
for instance in instances]
def kb(self, kb, string, default=""):
"""
Returns the value of the "string" in the knowledge base "kb".
If kb does not exist or string does not exist in kb,
returns 'default' string or empty string if not specified.
:param kb: a knowledge base name
:param string: the string we want to translate
:param default: a default value returned if 'string' not found in 'kb'
        :return: a string value corresponding to translated input with given kb
"""
if not string:
return default
val = get_kbr_values(kb, searchkey=string, searchtype='e')
try:
return val[0][0]
except IndexError:
return default
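    # Illustrative usage sketch (the knowledge-base name and values below are
    # hypothetical, not taken from this file):
    #   bfo.kb('dbcollid2coll', 'ARTICLE', default='Unknown')
    #   returns the KB translation of 'ARTICLE', or 'Unknown' when no match is found.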
# Utility functions
##
def escape_field(value, mode=0):
"""
    Utility function used to escape the value of a field in the given mode.
- mode 0: no escaping
- mode 1: escaping all HTML/XML characters (escaped chars are shown as escaped)
- mode 2: escaping unsafe HTML tags to avoid XSS, but
              keep basic ones (such as <br />).
              Escaped tags are removed.
- mode 3: mix of mode 1 and mode 2. If field_value starts with <!--HTML-->,
then use mode 2. Else use mode 1.
- mode 4: escaping all HTML/XML tags (escaped tags are removed)
- mode 5: same as 2, but allows more tags, like <img>
- mode 6: same as 3, but allows more tags, like <img>
- mode 7: mix of mode 0 and mode 1. If field_value starts with <!--HTML-->,
then use mode 0. Else use mode 1.
- mode 8: same as mode 1, but also escape double-quotes
- mode 9: same as mode 4, but also escape double-quotes
:param value: value to escape
:param mode: escaping mode to use
    :return: an escaped version of 'value' according to the chosen 'mode'
"""
if mode == 1:
return cgi.escape(value)
elif mode == 8:
return cgi.escape(value, True)
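    # Modes 2 and 5: wash the HTML, keeping only whitelisted tags and attributes
    # (mode 5 extends the whitelist with <img>, <table> and related markup).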
elif mode in [2, 5]:
allowed_attribute_whitelist = CFG_HTML_BUFFER_ALLOWED_ATTRIBUTE_WHITELIST
allowed_tag_whitelist = CFG_HTML_BUFFER_ALLOWED_TAG_WHITELIST + \
('class',)
if mode == 5:
allowed_attribute_whitelist += ('src', 'alt',
'width', 'height',
'style', 'summary',
'border', 'cellspacing',
'cellpadding')
allowed_tag_whitelist += ('img', 'table', 'td',
'tr', 'th', 'span', 'caption')
try:
return washer.wash(value,
allowed_attribute_whitelist=
allowed_attribute_whitelist,
allowed_tag_whitelist=
allowed_tag_whitelist
)
except HTMLParseError:
# Parsing failed
return cgi.escape(value)
elif mode in [3, 6]:
if value.lstrip(' \n').startswith(html_field):
allowed_attribute_whitelist = CFG_HTML_BUFFER_ALLOWED_ATTRIBUTE_WHITELIST
allowed_tag_whitelist = CFG_HTML_BUFFER_ALLOWED_TAG_WHITELIST + \
('class',)
if mode == 6:
allowed_attribute_whitelist += ('src', 'alt',
'width', 'height',
'style', 'summary',
'border', 'cellspacing',
'cellpadding')
allowed_tag_whitelist += ('img', 'table', 'td',
'tr', 'th', 'span', 'caption')
try:
return washer.wash(value,
allowed_attribute_whitelist=
allowed_attribute_whitelist,
allowed_tag_whitelist=
allowed_tag_whitelist
)
except HTMLParseError:
# Parsing failed
return cgi.escape(value)
else:
return cgi.escape(value)
elif mode in [4, 9]:
try:
out = washer.wash(value,
allowed_attribute_whitelist=[],
allowed_tag_whitelist=[]
)
if mode == 9:
                out = out.replace('"', '&quot;')
return out
except HTMLParseError:
# Parsing failed
if mode == 4:
return cgi.escape(value)
else:
return cgi.escape(value, True)
elif mode == 7:
if value.lstrip(' \n').startswith(html_field):
return value
else:
return cgi.escape(value)
else:
return value
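# Illustrative behaviour (mode 1 follows directly from cgi.escape; the washed modes
# depend on the HTMLWasher configuration, so treat the mode 4 result as an assumption):
#   escape_field('<b>x</b>', mode=1)  ->  '&lt;b&gt;x&lt;/b&gt;'
#   escape_field('<b>x</b>', mode=4)  ->  'x'  (all tags stripped)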
def make_filter_line(hide_tag):
"""Generate a line used for filtering MARCXML."""
hide_tag = str(hide_tag)
tag = hide_tag[:3]
ind1 = hide_tag[3:4]
ind2 = hide_tag[4:5]
if ind1 == "_":
ind1 = " "
if ind2 == "_":
ind2 = " "
if not ind1 and not ind2:
return 'datafield tag="%s"' % tag
if not ind2 and ind1:
return 'datafield tag="%s" ind1="%s"' % (tag, ind1)
return 'datafield tag="%s" ind1="%s" ind2="%s"' % (tag, ind1, ind2)
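# Worked example (tag value is illustrative): hiding tag '595__' yields the match
# string used by filter_hidden_fields() below:
#   make_filter_line('595__')  ->  'datafield tag="595" ind1=" " ind2=" "'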
def filter_hidden_fields(recxml, user_info=None, filter_tags=None,
force_filtering=False):
"""
Filter out tags specified by filter_tags from MARCXML.
If the user is allowed to run bibedit, then filter nothing, unless
force_filtering is set to True.
:param recxml: marcxml presentation of the record
    :param user_info: user information; if None, then assume invoked via CLI
                      with all rights
    :param filter_tags: list of MARC tags to be filtered
    :param force_filtering: do we force filtering regardless of user rights?
:return: recxml without the hidden fields
"""
filter_tags = filter_tags or cfg['CFG_BIBFORMAT_HIDDEN_TAGS']
if force_filtering:
pass
else:
if user_info is None:
#by default
return recxml
else:
if (acc_authorize_action(user_info, 'runbibedit')[0] == 0):
#no need to filter
return recxml
#filter..
out = ""
omit = False
filter_lines = map(make_filter_line, filter_tags)
for line in recxml.splitlines(True):
#check if this block needs to be omitted
for htag in filter_lines:
if htag in line:
omit = True
if not omit:
out += line
if omit and ('</datafield>' in line or '</marc:datafield>' in line):
omit = False
return out
| 1 | 14,541 | Why is this an issue? | inveniosoftware-invenio | py |
@@ -110,9 +110,13 @@ abstract class Type
?string $namespace,
array $aliased_classes,
?string $this_class,
- bool $allow_self = false
+ bool $allow_self = false,
+ bool $was_static = false
) : string {
if ($allow_self && $value === $this_class) {
+ if ($was_static) {
+ return '$this';
+ }
return 'self';
}
| 1 | <?php
namespace Psalm;
use function array_merge;
use function array_pop;
use function array_shift;
use function array_values;
use function explode;
use function implode;
use function preg_quote;
use function preg_replace;
use Psalm\Internal\Type\Comparator\AtomicTypeComparator;
use Psalm\Internal\Type\TypeCombination;
use Psalm\Internal\Type\TypeParser;
use Psalm\Internal\Type\TypeTokenizer;
use Psalm\Type\Atomic\TArray;
use Psalm\Type\Atomic\TArrayKey;
use Psalm\Type\Atomic\TBool;
use Psalm\Type\Atomic\TClassString;
use Psalm\Type\Atomic\TEmpty;
use Psalm\Type\Atomic\TFalse;
use Psalm\Type\Atomic\TFloat;
use Psalm\Type\Atomic\TInt;
use Psalm\Type\Atomic\TIterable;
use Psalm\Type\Atomic\TList;
use Psalm\Type\Atomic\TLiteralClassString;
use Psalm\Type\Atomic\TLiteralFloat;
use Psalm\Type\Atomic\TLiteralInt;
use Psalm\Type\Atomic\TLiteralString;
use Psalm\Type\Atomic\TMixed;
use Psalm\Type\Atomic\TNamedObject;
use Psalm\Type\Atomic\TNull;
use Psalm\Type\Atomic\TNumeric;
use Psalm\Type\Atomic\TObject;
use Psalm\Type\Atomic\TObjectWithProperties;
use Psalm\Type\Atomic\TResource;
use Psalm\Type\Atomic\TScalar;
use Psalm\Type\Atomic\TSingleLetter;
use Psalm\Type\Atomic\TString;
use Psalm\Type\Atomic\TTemplateParam;
use Psalm\Type\Atomic\TTrue;
use Psalm\Type\Atomic\TVoid;
use Psalm\Type\Union;
use function get_class;
use function stripos;
use function strlen;
use function strpos;
use function strtolower;
use function substr;
abstract class Type
{
/**
* Parses a string type representation
*
* @param array{int,int}|null $php_version
* @param array<string, array<string, array{Type\Union}>> $template_type_map
*/
public static function parseString(
string $type_string,
?array $php_version = null,
array $template_type_map = []
): Union {
return TypeParser::parseTokens(
TypeTokenizer::tokenize(
$type_string
),
$php_version,
$template_type_map
);
}
public static function getFQCLNFromString(
string $class,
Aliases $aliases
) : string {
if ($class === '') {
throw new \InvalidArgumentException('$class cannot be empty');
}
if ($class[0] === '\\') {
return substr($class, 1);
}
$imported_namespaces = $aliases->uses;
if (strpos($class, '\\') !== false) {
$class_parts = explode('\\', $class);
$first_namespace = array_shift($class_parts);
if (isset($imported_namespaces[strtolower($first_namespace)])) {
return $imported_namespaces[strtolower($first_namespace)] . '\\' . implode('\\', $class_parts);
}
} elseif (isset($imported_namespaces[strtolower($class)])) {
return $imported_namespaces[strtolower($class)];
}
$namespace = $aliases->namespace;
return ($namespace ? $namespace . '\\' : '') . $class;
}
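    // Illustrative sketch (aliases are hypothetical): with `use Foo\Bar;` recorded as
    // $aliases->uses === ['bar' => 'Foo\Bar'], getFQCLNFromString('Bar\Baz', $aliases)
    // resolves to 'Foo\Bar\Baz'.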
/**
* @param array<string, string> $aliased_classes
*
* @psalm-pure
*/
public static function getStringFromFQCLN(
string $value,
?string $namespace,
array $aliased_classes,
?string $this_class,
bool $allow_self = false
) : string {
if ($allow_self && $value === $this_class) {
return 'self';
}
if (isset($aliased_classes[strtolower($value)])) {
return $aliased_classes[strtolower($value)];
}
if ($namespace && stripos($value, $namespace . '\\') === 0) {
$candidate = preg_replace(
'/^' . preg_quote($namespace . '\\') . '/i',
'',
$value
);
$candidate_parts = explode('\\', $candidate);
if (!isset($aliased_classes[strtolower($candidate_parts[0])])) {
return $candidate;
}
} elseif (!$namespace && strpos($value, '\\') === false) {
return $value;
}
if (strpos($value, '\\')) {
$parts = explode('\\', $value);
$suffix = array_pop($parts);
while ($parts) {
$left = implode('\\', $parts);
if (isset($aliased_classes[strtolower($left)])) {
return $aliased_classes[strtolower($left)] . '\\' . $suffix;
}
$suffix = array_pop($parts) . '\\' . $suffix;
}
}
return '\\' . $value;
}
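    // Illustrative sketch (inputs are hypothetical): with $aliased_classes === ['foo\bar' => 'Bar'],
    // getStringFromFQCLN('Foo\Bar\Baz', null, $aliased_classes, null) returns 'Bar\Baz',
    // i.e. the longest aliased prefix is replaced by its alias.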
public static function getInt(bool $from_calculation = false, ?int $value = null): Union
{
if ($value !== null) {
$union = new Union([new TLiteralInt($value)]);
} else {
$union = new Union([new TInt()]);
}
$union->from_calculation = $from_calculation;
return $union;
}
/**
     * @param bool $from_calculation
*/
public static function getPositiveInt(bool $from_calculation = false): Union
{
$union = new Union([new Type\Atomic\TPositiveInt()]);
$union->from_calculation = $from_calculation;
return $union;
}
public static function getNumeric(): Union
{
$type = new TNumeric;
return new Union([$type]);
}
public static function getString(?string $value = null): Union
{
$type = null;
if ($value !== null) {
$config = \Psalm\Config::getInstance();
if ($config->string_interpreters) {
foreach ($config->string_interpreters as $string_interpreter) {
if ($type = $string_interpreter::getTypeFromValue($value)) {
break;
}
}
}
if (!$type) {
if (strlen($value) < $config->max_string_length) {
$type = new TLiteralString($value);
} else {
$type = new Type\Atomic\TNonEmptyString();
}
}
}
if (!$type) {
$type = new TString();
}
return new Union([$type]);
}
public static function getSingleLetter(): Union
{
$type = new TSingleLetter;
return new Union([$type]);
}
public static function getClassString(string $extends = 'object'): Union
{
return new Union([
new TClassString(
$extends,
$extends === 'object'
? null
: new TNamedObject($extends)
),
]);
}
public static function getLiteralClassString(string $class_type): Union
{
$type = new TLiteralClassString($class_type);
return new Union([$type]);
}
public static function getNull(): Union
{
$type = new TNull;
return new Union([$type]);
}
public static function getMixed(bool $from_loop_isset = false): Union
{
$type = new TMixed($from_loop_isset);
return new Union([$type]);
}
public static function getScalar(): Union
{
$type = new TScalar();
return new Union([$type]);
}
public static function getEmpty(): Union
{
$type = new TEmpty();
return new Union([$type]);
}
public static function getBool(): Union
{
$type = new TBool;
return new Union([$type]);
}
public static function getFloat(?float $value = null): Union
{
if ($value !== null) {
$type = new TLiteralFloat($value);
} else {
$type = new TFloat();
}
return new Union([$type]);
}
public static function getObject(): Union
{
$type = new TObject;
return new Union([$type]);
}
public static function getClosure(): Union
{
$type = new Type\Atomic\TClosure('Closure');
return new Union([$type]);
}
public static function getArrayKey(): Union
{
$type = new TArrayKey();
return new Union([$type]);
}
public static function getArray(): Union
{
$type = new TArray(
[
new Type\Union([new TArrayKey]),
new Type\Union([new TMixed]),
]
);
return new Union([$type]);
}
public static function getEmptyArray(): Union
{
$array_type = new TArray(
[
new Type\Union([new TEmpty]),
new Type\Union([new TEmpty]),
]
);
return new Type\Union([
$array_type,
]);
}
public static function getList(): Union
{
$type = new TList(new Type\Union([new TMixed]));
return new Union([$type]);
}
public static function getNonEmptyList(): Union
{
$type = new Type\Atomic\TNonEmptyList(new Type\Union([new TMixed]));
return new Union([$type]);
}
public static function getVoid(): Union
{
$type = new TVoid;
return new Union([$type]);
}
public static function getFalse(): Union
{
$type = new TFalse;
return new Union([$type]);
}
public static function getTrue(): Union
{
$type = new TTrue;
return new Union([$type]);
}
public static function getResource(): Union
{
return new Union([new TResource]);
}
/**
* @param non-empty-list<Type\Union> $union_types
*/
public static function combineUnionTypeArray(array $union_types, ?Codebase $codebase) : Type\Union
{
$first_type = array_pop($union_types);
foreach ($union_types as $type) {
$first_type = self::combineUnionTypes($first_type, $type, $codebase);
}
return $first_type;
}
/**
* Combines two union types into one
*
     * @param int $literal_limit any number of literal types greater than this
     *                           will be merged into a scalar
*
*/
public static function combineUnionTypes(
Union $type_1,
Union $type_2,
?Codebase $codebase = null,
bool $overwrite_empty_array = false,
bool $allow_mixed_union = true,
int $literal_limit = 500
): Union {
if ($type_1 === $type_2) {
return $type_1;
}
if ($type_1->isVanillaMixed() && $type_2->isVanillaMixed()) {
$combined_type = Type::getMixed();
} else {
$both_failed_reconciliation = false;
if ($type_1->failed_reconciliation) {
if ($type_2->failed_reconciliation) {
$both_failed_reconciliation = true;
} else {
$type_2 = clone $type_2;
$type_2->parent_nodes += $type_1->parent_nodes;
return $type_2;
}
} elseif ($type_2->failed_reconciliation) {
$type_1 = clone $type_1;
$type_1->parent_nodes += $type_2->parent_nodes;
return $type_1;
}
$combined_type = TypeCombination::combineTypes(
array_merge(
array_values($type_1->getAtomicTypes()),
array_values($type_2->getAtomicTypes())
),
$codebase,
$overwrite_empty_array,
$allow_mixed_union,
$literal_limit
);
if (!$type_1->initialized || !$type_2->initialized) {
$combined_type->initialized = false;
}
if ($type_1->from_docblock || $type_2->from_docblock) {
$combined_type->from_docblock = true;
}
if ($type_1->from_calculation || $type_2->from_calculation) {
$combined_type->from_calculation = true;
}
if ($type_1->ignore_nullable_issues || $type_2->ignore_nullable_issues) {
$combined_type->ignore_nullable_issues = true;
}
if ($type_1->ignore_falsable_issues || $type_2->ignore_falsable_issues) {
$combined_type->ignore_falsable_issues = true;
}
if ($type_1->had_template && $type_2->had_template) {
$combined_type->had_template = true;
}
if ($type_1->reference_free && $type_2->reference_free) {
$combined_type->reference_free = true;
}
if ($both_failed_reconciliation) {
$combined_type->failed_reconciliation = true;
}
}
if ($type_1->possibly_undefined || $type_2->possibly_undefined) {
$combined_type->possibly_undefined = true;
}
if ($type_1->possibly_undefined_from_try || $type_2->possibly_undefined_from_try) {
$combined_type->possibly_undefined_from_try = true;
}
if ($type_1->parent_nodes || $type_2->parent_nodes) {
$combined_type->parent_nodes = $type_1->parent_nodes + $type_2->parent_nodes;
}
if ($type_1->by_ref || $type_2->by_ref) {
$combined_type->by_ref = true;
}
return $combined_type;
}
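    // Illustrative sketch (assumed behaviour): combining the unions `int|string` and `false`
    // yields a union containing int, string and false, with flags such as possibly_undefined
    // propagated as coded above.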
/**
* Combines two union types into one via an intersection
*
*
*/
public static function intersectUnionTypes(
Union $type_1,
Union $type_2,
Codebase $codebase
): ?Union {
$intersection_performed = false;
if ($type_1->isMixed() && $type_2->isMixed()) {
$combined_type = Type::getMixed();
} else {
$both_failed_reconciliation = false;
if ($type_1->failed_reconciliation) {
if ($type_2->failed_reconciliation) {
$both_failed_reconciliation = true;
} else {
return $type_2;
}
} elseif ($type_2->failed_reconciliation) {
return $type_1;
}
if ($type_1->isMixed() && !$type_2->isMixed()) {
$combined_type = clone $type_2;
$intersection_performed = true;
} elseif (!$type_1->isMixed() && $type_2->isMixed()) {
$combined_type = clone $type_1;
$intersection_performed = true;
} else {
$combined_type = clone $type_1;
foreach ($combined_type->getAtomicTypes() as $t1_key => $type_1_atomic) {
foreach ($type_2->getAtomicTypes() as $t2_key => $type_2_atomic) {
if ($type_1_atomic instanceof TNamedObject
&& $type_2_atomic instanceof TNamedObject
) {
if (($type_1_atomic->value === $type_2_atomic->value
&& get_class($type_1_atomic) === TNamedObject::class
&& get_class($type_2_atomic) !== TNamedObject::class)
) {
$combined_type->removeType($t1_key);
$combined_type->addType(clone $type_2_atomic);
$intersection_performed = true;
} elseif (($type_1_atomic->value === $type_2_atomic->value
&& get_class($type_2_atomic) === TNamedObject::class
&& get_class($type_1_atomic) !== TNamedObject::class)
) {
$combined_type->removeType($t2_key);
$combined_type->addType(clone $type_1_atomic);
$intersection_performed = true;
} elseif (AtomicTypeComparator::isContainedBy(
$codebase,
$type_2_atomic,
$type_1_atomic
)) {
$combined_type->removeType($t1_key);
$combined_type->addType(clone $type_2_atomic);
$intersection_performed = true;
} elseif (AtomicTypeComparator::isContainedBy(
$codebase,
$type_1_atomic,
$type_2_atomic
)) {
$combined_type->removeType($t2_key);
$combined_type->addType(clone $type_1_atomic);
$intersection_performed = true;
}
}
if (($type_1_atomic instanceof TIterable
|| $type_1_atomic instanceof TNamedObject
|| $type_1_atomic instanceof TTemplateParam
|| $type_1_atomic instanceof TObjectWithProperties)
&& ($type_2_atomic instanceof TIterable
|| $type_2_atomic instanceof TNamedObject
|| $type_2_atomic instanceof TTemplateParam
|| $type_2_atomic instanceof TObjectWithProperties)
) {
if (!$type_1_atomic->extra_types) {
$type_1_atomic->extra_types = [];
}
$intersection_performed = true;
$type_2_atomic_clone = clone $type_2_atomic;
$type_2_atomic_clone->extra_types = [];
$type_1_atomic->extra_types[$type_2_atomic_clone->getKey()] = $type_2_atomic_clone;
$type_2_atomic_intersection_types = $type_2_atomic->getIntersectionTypes();
if ($type_2_atomic_intersection_types) {
foreach ($type_2_atomic_intersection_types as $type_2_intersection_type) {
$type_1_atomic->extra_types[$type_2_intersection_type->getKey()]
= clone $type_2_intersection_type;
}
}
}
if ($type_1_atomic instanceof TObject && $type_2_atomic instanceof TNamedObject) {
$combined_type->removeType($t1_key);
$combined_type->addType(clone $type_2_atomic);
$intersection_performed = true;
} elseif ($type_2_atomic instanceof TObject && $type_1_atomic instanceof TNamedObject) {
$combined_type->removeType($t2_key);
$combined_type->addType(clone $type_1_atomic);
$intersection_performed = true;
}
}
}
}
if (!$type_1->initialized && !$type_2->initialized) {
$combined_type->initialized = false;
}
if ($type_1->possibly_undefined_from_try && $type_2->possibly_undefined_from_try) {
$combined_type->possibly_undefined_from_try = true;
}
if ($type_1->from_docblock && $type_2->from_docblock) {
$combined_type->from_docblock = true;
}
if ($type_1->from_calculation && $type_2->from_calculation) {
$combined_type->from_calculation = true;
}
if ($type_1->ignore_nullable_issues && $type_2->ignore_nullable_issues) {
$combined_type->ignore_nullable_issues = true;
}
if ($type_1->ignore_falsable_issues && $type_2->ignore_falsable_issues) {
$combined_type->ignore_falsable_issues = true;
}
if ($both_failed_reconciliation) {
$combined_type->failed_reconciliation = true;
}
}
if (!$intersection_performed && $type_1->getId() !== $type_2->getId()) {
return null;
}
if ($type_1->possibly_undefined && $type_2->possibly_undefined) {
$combined_type->possibly_undefined = true;
}
return $combined_type;
}
}
| 1 | 9,369 | Should the condition be inverted? | vimeo-psalm | php |
@@ -33,12 +33,16 @@ package azkaban;
public class Constants {
// Azkaban Flow Versions
- public static final String AZKABAN_FLOW_VERSION_2_0 = "2.0";
+ public static final String AZKABAN_FLOW_VERSION = "Azkaban-Flow-Version";
+ public static final Double VERSION_2_0 = 2.0;
// Flow 2.0 file suffix
public static final String PROJECT_FILE_SUFFIX = ".project";
public static final String FLOW_FILE_SUFFIX = ".flow";
+ // Flow 2.0 node type
+ public static final String NODE_TYPE_FLOW = "flow";
+
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties"; | 1 | /*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban;
/**
* Constants used in configuration files or shared among classes.
*
* <p>Conventions:
*
* <p>Internal constants to be put in the {@link Constants} class
*
* <p>Configuration keys to be put in the {@link ConfigurationKeys} class
*
* <p>Flow level properties keys to be put in the {@link FlowProperties} class
*
* <p>Job level Properties keys to be put in the {@link JobProperties} class
*/
public class Constants {
// Azkaban Flow Versions
public static final String AZKABAN_FLOW_VERSION_2_0 = "2.0";
// Flow 2.0 file suffix
public static final String PROJECT_FILE_SUFFIX = ".project";
public static final String FLOW_FILE_SUFFIX = ".flow";
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties";
public static final String DEFAULT_CONF_PATH = "conf";
public static final String AZKABAN_EXECUTOR_PORT_FILENAME = "executor.port";
public static final String AZKABAN_EXECUTOR_PORT_FILE = "executor.portfile";
public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";
// Internal username used to perform SLA action
public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla";
// Memory check retry interval when OOM in ms
public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1;
// Max number of memory check retry
public static final int MEMORY_CHECK_RETRY_LIMIT = 720;
public static final int DEFAULT_PORT_NUMBER = 8081;
public static final int DEFAULT_SSL_PORT_NUMBER = 8443;
public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20;
// One Schedule's default End Time: 01/01/2050, 00:00:00, UTC
public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L;
public static class ConfigurationKeys {
// These properties are configurable through azkaban.properties
public static final String AZKABAN_PID_FILENAME = "azkaban.pid.filename";
// Defines a list of external links, each referred to as a topic
public static final String AZKABAN_SERVER_EXTERNAL_TOPICS = "azkaban.server.external.topics";
// External URL template of a given topic, specified in the list defined above
public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url";
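    // Hypothetical azkaban.properties sketch (topic name and URL are illustrative only):
    //   azkaban.server.external.topics=jobsummary
    //   azkaban.server.external.jobsummary.url=http://example.com/view?execid=${execid}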
// Designates one of the external link topics to correspond to an execution analyzer
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label";
// Designates one of the external link topics to correspond to a job log viewer
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label";
// Configures the Kafka appender for logging user jobs, specified for the exec server
public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList";
public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic";
// Represent the class name of azkaban metrics reporter.
public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name";
// Represent the metrics server URL.
public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url";
public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled";
// User facing web server configurations used to construct the user facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users.
// enduser -> myazkabanhost:443 -> proxy -> localhost:8081
    // When these parameters are set, they are used to generate email links.
    // If they are not set, then jetty.hostname and jetty.port (or jetty.ssl.port if SSL is configured) are used.
public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname";
public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port";
public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port";
// Hostname for the host, if not specified, canonical hostname will be used
public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname";
// List of users we prevent azkaban from running flows as. (ie: root, azkaban)
public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users";
// Path name of execute-as-user executable
public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib";
// Name of *nix group associated with the process running Azkaban
public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name";
// Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs.
// The property is used for the web server to get the host name of the executor when running in SOLO mode.
public static final String EXECUTOR_HOST = "executor.host";
// Max flow running time in mins, server will kill flows running longer than this setting.
// if not set or <= 0, then there's no restriction on running time.
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes";
public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type";
public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir";
public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path";
public static final String AZKABAN_STORAGE_HDFS_ROOT_URI = "azkaban.storage.hdfs.root.uri";
public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal";
public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path";
public static final String PROJECT_TEMP_DIR = "project.temp.dir";
// Event reporting properties
public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM =
"azkaban.event.reporting.class";
public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS =
"azkaban.event.reporting.kafka.brokers";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC =
"azkaban.event.reporting.kafka.topic";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL =
"azkaban.event.reporting.kafka.schema.registry.url";
/*
* The max number of artifacts retained per project.
* Accepted Values:
* - 0 : Save all artifacts. No clean up is done on storage.
* - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage
*
* Note: Having an unacceptable value results in an exception and the service would REFUSE
* to start.
*
* Example:
* a) azkaban.storage.artifact.max.retention=all
* implies save all artifacts
* b) azkaban.storage.artifact.max.retention=3
* implies save latest 3 versions saved in storage.
**/
public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention";
// enable Quartz Scheduler if true.
    public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz";
}
public static class FlowProperties {
// Basic properties of flows as set by the executor server
public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname";
public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid";
public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser";
public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid";
public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion";
}
public static class JobProperties {
// Job property that enables/disables using Kafka logging of user job logs
public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable";
/*
* this parameter is used to replace EXTRA_HCAT_LOCATION that could fail when one of the uris is not available.
* EXTRA_HCAT_CLUSTERS has the following format:
* other_hcat_clusters = "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port"
* Each string in the parenthesis is regarded as a "cluster", and we will get a delegation token from each cluster.
* The uris(hcat servers) in a "cluster" ensures HA is provided.
**/
public static final String EXTRA_HCAT_CLUSTERS = "azkaban.job.hive.other_hcat_clusters";
/*
* the settings to be defined by user indicating if there are hcat locations other than the
* default one the system should pre-fetch hcat token from. Note: Multiple thrift uris are
* supported, use comma to separate the values, values are case insensitive.
**/
// Use EXTRA_HCAT_CLUSTERS instead
@Deprecated
public static final String EXTRA_HCAT_LOCATION = "other_hcat_location";
// Job properties that indicate maximum memory size
public static final String JOB_MAX_XMS = "job.max.Xms";
public static final String MAX_XMS_DEFAULT = "1G";
public static final String JOB_MAX_XMX = "job.max.Xmx";
public static final String MAX_XMX_DEFAULT = "2G";
}
public static class JobCallbackProperties {
public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout";
public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout";
public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout";
public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout";
public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size";
}
}
| 1 | 15,150 | This seems to be a property key name, so should it be inside ConfigurationKeys.java? | azkaban-azkaban | java |
@@ -105,7 +105,7 @@ func (v *VolumeDestroy) Execute() ([]byte, error) {
return nil, err
}
// execute command here
- return exec.Command(bin.ZFS, v.Command).CombinedOutput()
+ return exec.Command(bin.BASH, "-c", v.Command).CombinedOutput()
}
// Build returns the VolumeDestroy object generated by builder | 1 | /*
Copyright 2019 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vdestroy
import (
"fmt"
"os/exec"
"reflect"
"runtime"
"strings"
"github.com/openebs/maya/pkg/zfs/cmd/v1alpha1/bin"
"github.com/pkg/errors"
)
const (
// Operation defines type of zfs operation
Operation = "destroy"
)
//VolumeDestroy defines structure for volume 'Destroy' operation
type VolumeDestroy struct {
//Do a dry-run
DryRun bool
//recursively destroy all the clones, snapshots, and children
Recursive bool
//name of the dataset or snapshot
Name string
// command for this structure
Command string
// checks is list of predicate function used for validating object
checks []PredicateFunc
// error
err error
}
// NewVolumeDestroy returns new instance of object VolumeDestroy
func NewVolumeDestroy() *VolumeDestroy {
return &VolumeDestroy{}
}
// WithCheck adds the given checks to the checks list
func (v *VolumeDestroy) WithCheck(check ...PredicateFunc) *VolumeDestroy {
v.checks = append(v.checks, check...)
return v
}
// WithDryRun method fills the DryRun field of VolumeDestroy object.
func (v *VolumeDestroy) WithDryRun(DryRun bool) *VolumeDestroy {
v.DryRun = DryRun
return v
}
// WithRecursive method fills the Recursive field of VolumeDestroy object.
func (v *VolumeDestroy) WithRecursive(Recursive bool) *VolumeDestroy {
v.Recursive = Recursive
return v
}
// WithName method fills the Name field of VolumeDestroy object.
func (v *VolumeDestroy) WithName(Name string) *VolumeDestroy {
v.Name = Name
return v
}
// WithCommand method fills the Command field of VolumeDestroy object.
func (v *VolumeDestroy) WithCommand(Command string) *VolumeDestroy {
v.Command = Command
return v
}
// Validate validates the VolumeDestroy object generated by the builder
func (v *VolumeDestroy) Validate() *VolumeDestroy {
for _, check := range v.checks {
if !check(v) {
v.err = errors.Wrapf(v.err, "validation failed {%v}", runtime.FuncForPC(reflect.ValueOf(check).Pointer()).Name())
}
}
return v
}
// Execute executes the generated VolumeDestroy object
func (v *VolumeDestroy) Execute() ([]byte, error) {
v, err := v.Build()
if err != nil {
return nil, err
}
// execute command here
return exec.Command(bin.ZFS, v.Command).CombinedOutput()
}
// Build returns the VolumeDestroy object generated by builder
func (v *VolumeDestroy) Build() (*VolumeDestroy, error) {
var c strings.Builder
v = v.Validate()
v.appendCommand(c, fmt.Sprintf(" %s ", Operation))
if IsDryRunSet()(v) {
v.appendCommand(c, fmt.Sprintf(" -n"))
}
if IsRecursiveSet()(v) {
v.appendCommand(c, fmt.Sprintf(" -R "))
}
v.appendCommand(c, v.Name)
v.Command = c.String()
return v, v.err
}
// appendCommand appends the given string to the string builder
func (v *VolumeDestroy) appendCommand(c strings.Builder, cmd string) {
_, err := c.WriteString(cmd)
if err != nil {
v.err = errors.Wrapf(v.err, "Failed to append cmd{%s} : %s", cmd, err.Error())
}
}
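// Illustrative builder usage (not part of the original file; the dataset name below is hypothetical):
//
//   out, err := NewVolumeDestroy().
//       WithName("cstor-pool/pvc-1234@snap").
//       WithRecursive(true).
//       Execute()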
| 1 | 16,751 | G204: Subprocess launching should be audited (from `gosec`) | openebs-maya | go |
@@ -284,6 +284,10 @@ class Task(object):
# TODO(erikbern): we should think about a language-agnostic mechanism
return self.__class__.__module__
+ @property
+ def run_on_main_process(self):
+ return False
+
_visible_in_registry = True # TODO: Consider using in luigi.util as well
__not_user_specified = '__not_user_specified' | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The abstract :py:class:`Task` class.
It is a central concept of Luigi and represents the state of the workflow.
See :doc:`/tasks` for an overview.
"""
try:
from itertools import imap as map
except ImportError:
pass
from contextlib import contextmanager
import logging
import traceback
import warnings
import json
import hashlib
import re
import copy
import functools
from luigi import six
from luigi import parameter
from luigi.task_register import Register
Parameter = parameter.Parameter
logger = logging.getLogger('luigi-interface')
TASK_ID_INCLUDE_PARAMS = 3
TASK_ID_TRUNCATE_PARAMS = 16
TASK_ID_TRUNCATE_HASH = 10
TASK_ID_INVALID_CHAR_REGEX = re.compile(r'[^A-Za-z0-9_]')
_SAME_AS_PYTHON_MODULE = '_same_as_python_module'
def namespace(namespace=None, scope=''):
"""
Call to set namespace of tasks declared after the call.
It is often desired to call this function with the keyword argument
``scope=__name__``.
The ``scope`` keyword makes it so that this call is only effective for task
classes with a matching [*]_ ``__module__``. The default value for
``scope`` is the empty string, which means all classes. Multiple calls with
the same scope simply replace each other.
The namespace of a :py:class:`Task` can also be changed by specifying the property
``task_namespace``.
.. code-block:: python
class Task2(luigi.Task):
task_namespace = 'namespace2'
This explicit setting takes priority over whatever is set in the
``namespace()`` method, and it's also inherited through normal python
    inheritance.
There's no equivalent way to set the ``task_family``.
*New since Luigi 2.6.0:* ``scope`` keyword argument.
.. [*] When there are multiple levels of matching module scopes like
``a.b`` vs ``a.b.c``, the more specific one (``a.b.c``) wins.
.. seealso:: The new and better scaling :py:func:`auto_namespace`
"""
Register._default_namespace_dict[scope] = namespace or ''
def auto_namespace(scope=''):
"""
Same as :py:func:`namespace`, but instead of a constant namespace, it will
be set to the ``__module__`` of the task class. This is desirable for these
reasons:
* Two tasks with the same name will not have conflicting task families
* It's more pythonic, as modules are Python's recommended way to
do namespacing.
* It's traceable. When you see the full name of a task, you can immediately
identify where it is defined.
We recommend calling this function from your package's outermost
``__init__.py`` file. The file contents could look like this:
.. code-block:: python
import luigi
luigi.auto_namespace(scope=__name__)
To reset an ``auto_namespace()`` call, you can use
``namespace(scope='my_scope'``). But this will not be
needed (and is also discouraged) if you use the ``scope`` kwarg.
*New since Luigi 2.6.0.*
"""
namespace(namespace=_SAME_AS_PYTHON_MODULE, scope=scope)
def task_id_str(task_family, params):
"""
Returns a canonical string used to identify a particular task
:param task_family: The task family (class name) of the task
:param params: a dict mapping parameter names to their serialized values
:return: A unique, shortened identifier corresponding to the family and params
"""
# task_id is a concatenation of task family, the first values of the first 3 parameters
    # sorted by parameter name, and an md5 hash of the family/parameters as a canonicalised json.
param_str = json.dumps(params, separators=(',', ':'), sort_keys=True)
param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest()
param_summary = '_'.join(p[:TASK_ID_TRUNCATE_PARAMS]
for p in (params[p] for p in sorted(params)[:TASK_ID_INCLUDE_PARAMS]))
param_summary = TASK_ID_INVALID_CHAR_REGEX.sub('_', param_summary)
return '{}_{}_{}'.format(task_family, param_summary, param_hash[:TASK_ID_TRUNCATE_HASH])
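# Illustrative sketch (the hash digits are made up): task_id_str('MyTask', {'date': '2016-01-01'})
# produces something like 'MyTask_2016_01_01_1df2c4f57a': the family, a truncated/sanitised
# parameter summary, and the first 10 hex characters of the md5 of the params JSON.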
class BulkCompleteNotImplementedError(NotImplementedError):
"""This is here to trick pylint.
pylint thinks anything raising NotImplementedError needs to be implemented
in any subclass. bulk_complete isn't like that. This tricks pylint into
thinking that the default implementation is a valid implementation and not
an abstract method."""
pass
@six.add_metaclass(Register)
class Task(object):
"""
This is the base class of all Luigi Tasks, the base unit of work in Luigi.
A Luigi Task describes a unit or work.
The key methods of a Task, which must be implemented in a subclass are:
* :py:meth:`run` - the computation done by this task.
* :py:meth:`requires` - the list of Tasks that this Task depends on.
* :py:meth:`output` - the output :py:class:`Target` that this Task creates.
Each :py:class:`~luigi.Parameter` of the Task should be declared as members:
.. code:: python
class MyTask(luigi.Task):
count = luigi.IntParameter()
second_param = luigi.Parameter()
In addition to any declared properties and methods, there are a few
non-declared properties, which are created by the :py:class:`Register`
metaclass:
"""
_event_callbacks = {}
#: Priority of the task: the scheduler should favor available
#: tasks with higher priority values first.
#: See :ref:`Task.priority`
priority = 0
disabled = False
#: Resources used by the task. Should be formatted like {"scp": 1} to indicate that the
#: task requires 1 unit of the scp resource.
resources = {}
#: Number of seconds after which to time out the run function.
#: No timeout if set to 0.
#: Defaults to 0 or worker-timeout value in config file
#: Only works when using multiple workers.
worker_timeout = None
#: Maximum number of tasks to run together as a batch. Infinite by default
max_batch_size = float('inf')
@property
def batchable(self):
"""
True if this instance can be run as part of a batch. By default, True
if it has any batched parameters
"""
return bool(self.batch_param_names())
@property
def retry_count(self):
"""
Override this positive integer to have different ``retry_count`` at task level
Check :ref:`scheduler-config`
"""
return None
@property
def disable_hard_timeout(self):
"""
Override this positive integer to have different ``disable_hard_timeout`` at task level.
Check :ref:`scheduler-config`
"""
return None
@property
def disable_window_seconds(self):
"""
Override this positive integer to have different ``disable_window_seconds`` at task level.
Check :ref:`scheduler-config`
"""
return None
@property
def owner_email(self):
'''
Override this to send out additional error emails to task owner, in addition to the one
defined in the global configuration. This should return a string or a list of strings. e.g.
'[email protected]' or ['[email protected]', '[email protected]']
'''
return None
def _owner_list(self):
"""
Turns the owner_email property into a list. This should not be overridden.
"""
owner_email = self.owner_email
if owner_email is None:
return []
elif isinstance(owner_email, six.string_types):
return owner_email.split(',')
else:
return owner_email
@property
def use_cmdline_section(self):
''' Property used by core config such as `--workers` etc.
These will be exposed without the class as prefix.'''
return True
@classmethod
def event_handler(cls, event):
"""
Decorator for adding event handlers.
"""
def wrapped(callback):
cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback)
return callback
return wrapped
def trigger_event(self, event, *args, **kwargs):
"""
Trigger that calls all of the specified events associated with this class.
"""
for event_class, event_callbacks in six.iteritems(self._event_callbacks):
if not isinstance(self, event_class):
continue
for callback in event_callbacks.get(event, []):
try:
# callbacks are protected
callback(*args, **kwargs)
except KeyboardInterrupt:
return
except BaseException:
logger.exception("Error in event callback for %r", event)
@property
def task_module(self):
''' Returns what Python module to import to get access to this class. '''
# TODO(erikbern): we should think about a language-agnostic mechanism
return self.__class__.__module__
_visible_in_registry = True # TODO: Consider using in luigi.util as well
__not_user_specified = '__not_user_specified'
# This is here just to help pylint, the Register metaclass will always set
# this value anyway.
_namespace_at_class_time = None
task_namespace = __not_user_specified
"""
    This value can be overridden to set the namespace that will be used.
(See :ref:`Task.namespaces_famlies_and_ids`)
If it's not specified and you try to read this value anyway, it will return
garbage. Please use :py:meth:`get_task_namespace` to read the namespace.
Note that setting this value with ``@property`` will not work, because this
is a class level value.
"""
@classmethod
def get_task_namespace(cls):
"""
        The task namespace for the given class.
Note: You normally don't want to override this.
"""
if cls.task_namespace != cls.__not_user_specified:
return cls.task_namespace
elif cls._namespace_at_class_time == _SAME_AS_PYTHON_MODULE:
return cls.__module__
return cls._namespace_at_class_time
@property
def task_family(self):
"""
DEPRECATED since after 2.4.0. See :py:meth:`get_task_family` instead.
Hopefully there will be less meta magic in Luigi.
Convenience method since a property on the metaclass isn't directly
accessible through the class instances.
"""
return self.__class__.task_family
@classmethod
def get_task_family(cls):
"""
The task family for the given class.
If ``task_namespace`` is not set, then it's simply the name of the
class. Otherwise, ``<task_namespace>.`` is prefixed to the class name.
Note: You normally don't want to override this.
"""
if not cls.get_task_namespace():
return cls.__name__
else:
return "{}.{}".format(cls.get_task_namespace(), cls.__name__)
@classmethod
def get_params(cls):
"""
Returns all of the Parameters for this Task.
"""
# We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically
params = []
for param_name in dir(cls):
param_obj = getattr(cls, param_name)
if not isinstance(param_obj, Parameter):
continue
params.append((param_name, param_obj))
# The order the parameters are created matters. See Parameter class
params.sort(key=lambda t: t[1]._counter)
return params
@classmethod
def batch_param_names(cls):
return [name for name, p in cls.get_params() if p._is_batchable()]
@classmethod
def get_param_names(cls, include_significant=False):
return [name for name, p in cls.get_params() if include_significant or p.significant]
@classmethod
def get_param_values(cls, params, args, kwargs):
"""
Get the values of the parameters from the args and kwargs.
:param params: list of (param_name, Parameter).
:param args: positional arguments
:param kwargs: keyword arguments.
:returns: list of `(name, value)` tuples, one for each parameter.
"""
result = {}
params_dict = dict(params)
task_family = cls.get_task_family()
# In case any exceptions are thrown, create a helpful description of how the Task was invoked
# TODO: should we detect non-reprable arguments? These will lead to mysterious errors
exc_desc = '%s[args=%s, kwargs=%s]' % (task_family, args, kwargs)
# Fill in the positional arguments
positional_params = [(n, p) for n, p in params if p.positional]
for i, arg in enumerate(args):
if i >= len(positional_params):
raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args)))
param_name, param_obj = positional_params[i]
result[param_name] = param_obj.normalize(arg)
# Then the keyword arguments
for param_name, arg in six.iteritems(kwargs):
if param_name in result:
raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name))
if param_name not in params_dict:
raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name))
result[param_name] = params_dict[param_name].normalize(arg)
# Then use the defaults for anything not filled in
for param_name, param_obj in params:
if param_name not in result:
if not param_obj.has_task_value(task_family, param_name):
raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name))
result[param_name] = param_obj.task_value(task_family, param_name)
def list_to_tuple(x):
""" Make tuples out of lists and sets to allow hashing """
if isinstance(x, list) or isinstance(x, set):
return tuple(x)
else:
return x
# Sort it by the correct order and make a list
return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params]
def __init__(self, *args, **kwargs):
params = self.get_params()
param_values = self.get_param_values(params, args, kwargs)
# Set all values on class instance
for key, value in param_values:
setattr(self, key, value)
# Register kwargs as an attribute on the class. Might be useful
self.param_kwargs = dict(param_values)
self._warn_on_wrong_param_types()
self.task_id = task_id_str(self.get_task_family(), self.to_str_params(only_significant=True))
self.__hash = hash(self.task_id)
self.set_tracking_url = None
self.set_status_message = None
self.set_progress_percentage = None
@property
def param_args(self):
warnings.warn("Use of param_args has been deprecated.", DeprecationWarning)
return tuple(self.param_kwargs[k] for k, v in self.get_params())
def initialized(self):
"""
Returns ``True`` if the Task is initialized and ``False`` otherwise.
"""
return hasattr(self, 'task_id')
def _warn_on_wrong_param_types(self):
params = dict(self.get_params())
for param_name, param_value in six.iteritems(self.param_kwargs):
params[param_name]._warn_on_wrong_param_type(param_name, param_value)
@classmethod
def from_str_params(cls, params_str):
"""
Creates an instance from a str->str hash.
:param params_str: dict of param name -> value as string.
"""
kwargs = {}
for param_name, param in cls.get_params():
if param_name in params_str:
param_str = params_str[param_name]
if isinstance(param_str, list):
kwargs[param_name] = param._parse_list(param_str)
else:
kwargs[param_name] = param.parse(param_str)
return cls(**kwargs)
def to_str_params(self, only_significant=False):
"""
Convert all parameters to a str->str hash.
"""
params_str = {}
params = dict(self.get_params())
for param_name, param_value in six.iteritems(self.param_kwargs):
if (not only_significant) or params[param_name].significant:
params_str[param_name] = params[param_name].serialize(param_value)
return params_str
def clone(self, cls=None, **kwargs):
"""
Creates a new instance from an existing instance where some of the args have changed.
There's at least two scenarios where this is useful (see test/clone_test.py):
* remove a lot of boiler plate when you have recursive dependencies and lots of args
* there's task inheritance and some logic is on the base class
:param cls:
:param kwargs:
:return:
"""
if cls is None:
cls = self.__class__
new_k = {}
for param_name, param_class in cls.get_params():
if param_name in kwargs:
new_k[param_name] = kwargs[param_name]
elif hasattr(self, param_name):
new_k[param_name] = getattr(self, param_name)
return cls(**new_k)
def __hash__(self):
return self.__hash
def __repr__(self):
"""
Build a task representation like `MyTask(param1=1.5, param2='5')`
"""
params = self.get_params()
param_values = self.get_param_values(params, [], self.param_kwargs)
# Build up task id
repr_parts = []
param_objs = dict(params)
for param_name, param_value in param_values:
if param_objs[param_name].significant:
repr_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value)))
task_str = '{}({})'.format(self.get_task_family(), ', '.join(repr_parts))
return task_str
def __eq__(self, other):
return self.__class__ == other.__class__ and self.param_kwargs == other.param_kwargs
def complete(self):
"""
If the task has any outputs, return ``True`` if all outputs exist.
Otherwise, return ``False``.
However, you may freely override this method with custom logic.
"""
outputs = flatten(self.output())
if len(outputs) == 0:
warnings.warn(
"Task %r without outputs has no custom complete() method" % self,
stacklevel=2
)
return False
return all(map(lambda output: output.exists(), outputs))
@classmethod
def bulk_complete(cls, parameter_tuples):
"""
Returns those of parameter_tuples for which this Task is complete.
Override (with an efficient implementation) for efficient scheduling
with range tools. Keep the logic consistent with that of complete().
"""
raise BulkCompleteNotImplementedError()
def output(self):
"""
The output that this Task produces.
The output of the Task determines if the Task needs to be run--the task
is considered finished iff the outputs all exist. Subclasses should
override this method to return a single :py:class:`Target` or a list of
:py:class:`Target` instances.
Implementation note
If running multiple workers, the output must be a resource that is accessible
by all workers, such as a DFS or database. Otherwise, workers might compute
the same output since they don't see the work done by other workers.
See :ref:`Task.output`
"""
return [] # default impl
def requires(self):
"""
The Tasks that this Task depends on.
A Task will only run if all of the Tasks that it requires are completed.
If your Task does not require any other Tasks, then you don't need to
        override this method. Otherwise, subclasses can override this method
to return a single Task, a list of Task instances, or a dict whose
values are Task instances.
See :ref:`Task.requires`
"""
return [] # default impl
def _requires(self):
"""
Override in "template" tasks which themselves are supposed to be
subclassed and thus have their requires() overridden (name preserved to
provide consistent end-user experience), yet need to introduce
(non-input) dependencies.
Must return an iterable which among others contains the _requires() of
the superclass.
"""
return flatten(self.requires()) # base impl
def process_resources(self):
"""
Override in "template" tasks which provide common resource functionality
but allow subclasses to specify additional resources while preserving
the name for consistent end-user experience.
"""
return self.resources # default impl
def input(self):
"""
Returns the outputs of the Tasks returned by :py:meth:`requires`
See :ref:`Task.input`
:return: a list of :py:class:`Target` objects which are specified as
outputs of all required Tasks.
"""
return getpaths(self.requires())
def deps(self):
"""
Internal method used by the scheduler.
Returns the flattened list of requires.
"""
# used by scheduler
return flatten(self._requires())
def run(self):
"""
The task run method, to be overridden in a subclass.
See :ref:`Task.run`
"""
pass # default impl
def on_failure(self, exception):
"""
Override for custom error handling.
This method gets called if an exception is raised in :py:meth:`run`.
The returned value of this method is json encoded and sent to the scheduler
as the `expl` argument. Its string representation will be used as the
body of the error email sent out if any.
Default behavior is to return a string representation of the stack trace.
"""
traceback_string = traceback.format_exc()
return "Runtime error:\n%s" % traceback_string
def on_success(self):
"""
Override for doing custom completion handling for a larger class of tasks
This method gets called when :py:meth:`run` completes without raising any exceptions.
The returned value is json encoded and sent to the scheduler as the `expl` argument.
        Default behavior is to send a None value."""
pass
@contextmanager
def no_unpicklable_properties(self):
"""
Remove unpicklable properties before dump task and resume them after.
This method could be called in subtask's dump method, to ensure unpicklable
properties won't break dump.
This method is a context-manager which can be called as below:
        .. code-block:: python

            class DummyTask(luigi.Task):
def _dump(self):
with self.no_unpicklable_properties():
pickle.dumps(self)
"""
unpicklable_properties = ('set_tracking_url', 'set_status_message', 'set_progress_percentage')
reserved_properties = {}
for property_name in unpicklable_properties:
if hasattr(self, property_name):
reserved_properties[property_name] = getattr(self, property_name)
setattr(self, property_name, 'placeholder_during_pickling')
yield
for property_name, value in six.iteritems(reserved_properties):
setattr(self, property_name, value)
class MixinNaiveBulkComplete(object):
"""
Enables a Task to be efficiently scheduled with e.g. range tools, by providing a bulk_complete implementation which checks completeness in a loop.
Applicable to tasks whose completeness checking is cheap.
This doesn't exploit output location specific APIs for speed advantage, nevertheless removes redundant scheduler roundtrips.
"""
@classmethod
def bulk_complete(cls, parameter_tuples):
generated_tuples = []
for parameter_tuple in parameter_tuples:
if isinstance(parameter_tuple, (list, tuple)):
if cls(*parameter_tuple).complete():
generated_tuples.append(parameter_tuple)
elif isinstance(parameter_tuple, dict):
if cls(**parameter_tuple).complete():
generated_tuples.append(parameter_tuple)
else:
if cls(parameter_tuple).complete():
generated_tuples.append(parameter_tuple)
return generated_tuples
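# Illustrative usage sketch, not part of the original module: a task opts into this
# naive bulk_complete behaviour simply by listing the mixin first. The task name and
# parameter below are hypothetical.
#
#     class LogProcessor(MixinNaiveBulkComplete, Task):
#         date = luigi.DateParameter()
#
#     # range tooling can then ask in one call which parameter values are already done:
#     done = LogProcessor.bulk_complete(['2024-01-01', '2024-01-02'])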
class ExternalTask(Task):
"""
Subclass for references to external dependencies.
An ExternalTask does not have a `run` implementation, which signifies to
the framework that this Task's :py:meth:`output` is generated outside of
Luigi.
"""
run = None
def externalize(taskclass_or_taskobject):
"""
Returns an externalized version of a Task. You may both pass an
instantiated task object or a task class. Some examples:
.. code-block:: python
class RequiringTask(luigi.Task):
def requires(self):
task_object = self.clone(MyTask)
return externalize(task_object)
...
Here's mostly equivalent code, but ``externalize`` is applied to a task
class instead.
.. code-block:: python
@luigi.util.requires(externalize(MyTask))
class RequiringTask(luigi.Task):
pass
...
Of course, it may also be used directly on classes and objects (for example
for reexporting or other usage).
.. code-block:: python
MyTask = externalize(MyTask)
my_task_2 = externalize(MyTask2(param='foo'))
If you however want a task class to be external from the beginning, you're
better off inheriting :py:class:`ExternalTask` rather than :py:class:`Task`.
This function tries to be side-effect free by creating a copy of the class
or the object passed in and then modifying that copy. In particular, the
following code should have no effect on the original ``MyTask``.
.. code-block:: python
externalize(MyTask) # BAD: This does nothing (as after luigi 2.4.0)
"""
# Seems like with python < 3.3 copy.copy can't copy classes
# and objects with specified metaclass http://bugs.python.org/issue11480
compatible_copy = copy.copy if six.PY3 else copy.deepcopy
copied_value = compatible_copy(taskclass_or_taskobject)
if copied_value is taskclass_or_taskobject:
# Assume it's a class
clazz = taskclass_or_taskobject
@_task_wraps(clazz)
class _CopyOfClass(clazz):
# How to copy a class: http://stackoverflow.com/a/9541120/621449
_visible_in_registry = False
_CopyOfClass.run = None
return _CopyOfClass
else:
# We assume it's an object
copied_value.run = None
return copied_value
class WrapperTask(Task):
"""
Use for tasks that only wrap other tasks and that by definition are done if all their requirements exist.
"""
def complete(self):
return all(r.complete() for r in flatten(self.requires()))
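# Illustrative sketch with hypothetical task names, not part of the original module:
# a WrapperTask is considered done as soon as everything it requires is done, so it
# is a convenient way to group related tasks under a single name.
#
#     class AllReports(WrapperTask):
#         def requires(self):
#             return [DailyReport(), WeeklyReport()]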
class Config(Task):
"""
Class for configuration. See :ref:`ConfigClasses`.
"""
# TODO: let's refactor Task & Config so that it inherits from a common
# ParamContainer base class
pass
def getpaths(struct):
"""
Maps all Tasks in a structured data object to their .output().
"""
if isinstance(struct, Task):
return struct.output()
elif isinstance(struct, dict):
return struct.__class__((k, getpaths(v)) for k, v in six.iteritems(struct))
elif isinstance(struct, (list, tuple)):
return struct.__class__(getpaths(r) for r in struct)
else:
# Remaining case: assume struct is iterable...
try:
return [getpaths(r) for r in struct]
except TypeError:
raise Exception('Cannot map %s to Task/dict/list' % str(struct))
def flatten(struct):
"""
Creates a flat list of all items in structured output (dicts, lists, items):
.. code-block:: python
>>> sorted(flatten({'a': 'foo', 'b': 'bar'}))
['bar', 'foo']
>>> sorted(flatten(['foo', ['bar', 'troll']]))
['bar', 'foo', 'troll']
>>> flatten('foo')
['foo']
>>> flatten(42)
[42]
"""
if struct is None:
return []
flat = []
if isinstance(struct, dict):
for _, result in six.iteritems(struct):
flat += flatten(result)
return flat
if isinstance(struct, six.string_types):
return [struct]
try:
# if iterable
iterator = iter(struct)
except TypeError:
return [struct]
for result in iterator:
flat += flatten(result)
return flat
def flatten_output(task):
"""
Lists all output targets by recursively walking output-less (wrapper) tasks.
FIXME order consistently.
"""
r = flatten(task.output())
if not r:
for dep in flatten(task.requires()):
r += flatten_output(dep)
return r
def _task_wraps(task_class):
# In order to make the behavior of a wrapper class nicer, we set the name of the
# new class to the wrapped class, and copy over the docstring and module as well.
# This makes it possible to pickle the wrapped class etc.
# Btw, this is a slight abuse of functools.wraps. It's meant to be used only for
# functions, but it works for classes too, if you pass updated=[]
assigned = functools.WRAPPER_ASSIGNMENTS + ('_namespace_at_class_time',)
return functools.wraps(task_class, assigned=assigned, updated=[])
| 1 | 17,872 | Please add docs here. And also warn users that this mode should be avoided whenever possible, because any blocking IO will make the keep-alive-thread not run. | spotify-luigi | py |
@@ -57,3 +57,18 @@ export function isValidDimensions( dimensions ) {
return dimension.hasOwnProperty( 'name' ) && typeof dimension.name === 'string';
} );
}
+
+/**
+ * Verifies provided dimensionFilters to make sure they match allowed values found in dimensions.
+ *
+ * @since n.e.x.t
+ *
+ * @param {Object} dimensionFilters The dimension filters to check.
+ * @return {boolean} TRUE if dimension filters are valid, otherwise FALSE.
+ */
+export function isValidDimensionFilters( dimensionFilters ) {
+ // Ensure every dimensionFilter key corresponds to a valid dimension.
+ return Object.keys( dimensionFilters ).every(
+ ( dimension ) => typeof dimensionFilters[ dimension ] === 'string'
+ );
+} | 1 | /**
* Reporting API validation utilities.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal dependencies
*/
import { isValidStringsOrObjects } from '../../../util/report-validation';
/**
* Verifies that provided metrics match allowed values. Metrics can be a string,
* an array of strings, an array of objects, or a mix of strings and objects. Objects
* must have an "expression" property in order to be considered as valid, and they
* can optionally include an "alias" property.
*
* @since 1.13.0
*
* @param {string|string[]|Object|Object[]} metrics The metrics to check.
* @return {boolean} TRUE if metrics are valid, otherwise FALSE.
*/
export function isValidMetrics( metrics ) {
return isValidStringsOrObjects( metrics, ( metric ) => {
const validExpression = metric.hasOwnProperty( 'expression' ) && typeof metric.expression === 'string';
// 'alias' is optional; if provided, it must be a string.
const validAlias = ! metric.hasOwnProperty( 'alias' ) || typeof metric.alias === 'string';
return validExpression && validAlias;
} );
}
/**
* Verifies provided dimensions to make sure they match allowed values. The value can be a string,
* an array of strings, an object with a "name" field, an array of such objects, or a mixed array
* of strings and objects.
*
* @since 1.13.0
*
* @param {string|string[]|Object|Object[]} dimensions The dimensions to check.
* @return {boolean} TRUE if dimensions are valid, otherwise FALSE.
*/
export function isValidDimensions( dimensions ) {
return isValidStringsOrObjects( dimensions, ( dimension ) => {
return dimension.hasOwnProperty( 'name' ) && typeof dimension.name === 'string';
} );
}
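// Illustrative usage sketch, not part of the original file; the metric and dimension
// names below are hypothetical examples of shapes these validators accept:
//
//   isValidMetrics( [ 'ga:users', { expression: 'ga:sessions', alias: 'Sessions' } ] ); // true
//   isValidDimensions( [ 'ga:date', { name: 'ga:channelGrouping' } ] );                 // true
//   isValidMetrics( [ { alias: 'Missing expression' } ] );                              // false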
| 1 | 34,545 | We shouldn't require dimension values to be a string. They just need to be scalar values (probably we could check whether it's either a string or a number). Something more important to cover in the validation here though is to ensure that a map of `dimensionName => dimensionValue` is passed. The keys here actually need to be strings. For example someone shouldn't be able to pass an array of values. | google-site-kit-wp | js |
@@ -31,7 +31,8 @@ import (
)
var (
- _ yarpc.Router = (*MapRouter)(nil)
+ _ yarpc.Router = (*MapRouter)(nil)
+ _ yarpc.UnaryTransportHandler = &unaryTransportHandler{}
)
type serviceProcedure struct { | 1 | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package yarpcrouter
import (
"context"
"sort"
"strconv"
"strings"
yarpc "go.uber.org/yarpc/v2"
"go.uber.org/yarpc/v2/yarpcerror"
)
var (
_ yarpc.Router = (*MapRouter)(nil)
)
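// Note (illustrative, not part of the original file): the assertion above uses a typed
// nil pointer, (*MapRouter)(nil), so the compile-time interface check allocates nothing.
// Any further compile-time checks added to this var block could follow the same shape,
// e.g. for a hypothetical handler type:
//
//	var _ yarpc.UnaryTransportHandler = (*someHandler)(nil) // someHandler is hypothetical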
type serviceProcedure struct {
service string
procedure string
}
type serviceProcedureEncoding struct {
service string
procedure string
encoding yarpc.Encoding
}
// MapRouter is a Router that maintains a map of the registered
// procedures.
type MapRouter struct {
defaultService string
serviceProcedureEncodings map[serviceProcedureEncoding]yarpc.TransportProcedure
serviceNames map[string]struct{}
}
// NewMapRouter builds a new MapRouter that uses the given name as the
// default service name and registers the given procedures.
//
// If a provided procedure does not specify its service name, it will
// inherit the default service name. Multiple procedures with the
// same name and service name may exist if they handle different encodings.
// If a procedure does not specify an encoding, it can only support one handler.
// The router will select that handler regardless of the encoding.
func NewMapRouter(defaultService string, rs []yarpc.TransportProcedure) MapRouter {
router := MapRouter{
defaultService: defaultService,
serviceProcedureEncodings: make(map[serviceProcedureEncoding]yarpc.TransportProcedure),
serviceNames: map[string]struct{}{defaultService: {}},
}
router.register(rs)
return router
}
func (m MapRouter) register(rs []yarpc.TransportProcedure) {
for _, r := range rs {
if r.Service == "" {
r.Service = m.defaultService
}
if r.Name == "" {
panic("Expected procedure name not to be empty string in registration")
}
m.serviceNames[r.Service] = struct{}{}
spe := serviceProcedureEncoding{
service: r.Service,
procedure: r.Name,
encoding: r.Encoding,
}
// Route to individual handlers for unique combinations of service,
// procedure, and encoding. This shall henceforth be the
// recommended way for models to register procedures.
m.serviceProcedureEncodings[spe] = r
}
}
// Procedures returns a list of procedures that
// have been registered so far.
func (m MapRouter) Procedures() []yarpc.TransportProcedure {
procs := make([]yarpc.TransportProcedure, 0, len(m.serviceProcedureEncodings))
for _, v := range m.serviceProcedureEncodings {
procs = append(procs, v)
}
sort.Sort(sortableProcedures(procs))
return procs
}
type sortableProcedures []yarpc.TransportProcedure
func (ps sortableProcedures) Len() int {
return len(ps)
}
func (ps sortableProcedures) Less(i int, j int) bool {
return ps[i].Less(ps[j])
}
func (ps sortableProcedures) Swap(i int, j int) {
ps[i], ps[j] = ps[j], ps[i]
}
// Choose retrieves the TransportHandlerSpec for the service, procedure, and encoding
// noted on the transport request, or returns an unrecognized procedure error
// (testable with yarpc.IsUnrecognizedProcedureError(err)).
func (m MapRouter) Choose(ctx context.Context, req *yarpc.Request) (yarpc.TransportHandlerSpec, error) {
service, procedure, encoding := req.Service, req.Procedure, req.Encoding
if service == "" {
service = m.defaultService
}
if _, ok := m.serviceNames[service]; !ok {
return yarpc.TransportHandlerSpec{},
yarpcerror.Newf(yarpcerror.CodeUnimplemented, "unrecognized service name %q, "+
"available services: %s", req.Service, getAvailableServiceNames(m.serviceNames))
}
// Fully specified combinations of service, procedure, and encoding.
spe := serviceProcedureEncoding{
service: service,
procedure: procedure,
encoding: encoding,
}
if procedure, ok := m.serviceProcedureEncodings[spe]; ok {
return procedure.HandlerSpec, nil
}
return yarpc.TransportHandlerSpec{}, yarpcerror.Newf(yarpcerror.CodeUnimplemented, "unrecognized procedure %q for service %q", req.Procedure, req.Service)
}
// Extract keys from service names map and return a formatted string
func getAvailableServiceNames(svcMap map[string]struct{}) string {
var serviceNames []string
for key := range svcMap {
serviceNames = append(serviceNames, strconv.Quote(key))
}
// Sort the string array to generate consistent result
sort.Strings(serviceNames)
return strings.Join(serviceNames, ", ")
}
| 1 | 17,721 | let's do a pointer cast of nil, similar to the `MapRouter` above | yarpc-yarpc-go | go |
@@ -280,6 +280,8 @@ abstract class Abstract_Builder implements Builder {
'default' => '{ "mobile": "0", "tablet": "0", "desktop": "0" }',
]
);
+
+ do_action( 'neve_add_settings_to_hfg_rows', SettingsManager::get_instance(), $row_setting_id, $row_id );
}
SettingsManager::get_instance()->add( | 1 | <?php
/**
* Abstract Builder class for Header Footer Grid.
*
* Name: Header Footer Grid
* Author: Bogdan Preda <[email protected]>
*
* @version 1.0.0
* @package HFG
*/
namespace HFG\Core\Builder;
use HFG\Core\Components\Abstract_Component;
use HFG\Core\Interfaces\Builder;
use HFG\Core\Interfaces\Component;
use HFG\Core\Settings;
use HFG\Core\Settings\Manager as SettingsManager;
use HFG\Traits\Core;
use WP_Customize_Manager;
/**
* Class Abstract_Builder
*
* @package HFG\Core\Builder
*/
abstract class Abstract_Builder implements Builder {
use Core;
const LAYOUT_SETTING = 'layout';
const HEIGHT_SETTING = 'height';
const SKIN_SETTING = 'skin';
/**
* Internal pointer for current device id.
*
* @var null|string Device id.
*/
public static $current_device = null;
/**
* Internal pointer for current row id.
*
* @var null|string Row id.
*/
public static $current_row = null;
/**
* Internal pointer for current component id.
*
* @var null|string Component id.
*/
public static $current_component = null;
/**
* Internal pointer for current builder id.
*
* @var null|string Builder id.
*/
public static $current_builder = null;
/**
* Holds the control id.
*
* @since 1.0.0
* @access protected
* @var string $control_id
*/
protected $control_id;
/**
* Holds the panel id.
*
* @since 1.0.0
* @access protected
* @var string $panel
*/
protected $panel;
/**
* Holds the section id.
*
* @since 1.0.0
* @access protected
* @var string $section
*/
protected $section;
/**
* Holds the title.
*
* @since 1.0.0
* @access protected
* @var string $title
*/
protected $title;
/**
* Holds the description.
*
* @since 1.0.1
* @access protected
* @var string $description
*/
protected $description;
/**
* A list of panel keys to be removed.
*
* @since 1.0.0
* @access protected
* @var array $remove_panels
*/
protected $remove_panels = [];
/**
* A list of section keys to be removed.
*
* @since 1.0.0
* @access protected
* @var array $remove_sections
*/
protected $remove_sections = [];
/**
* A list of supported devices and labels.
*
* @since 1.0.0
* @access protected
* @var array $devices
*/
protected $devices = [
'desktop' => 'Desktop',
'mobile' => 'Mobile',
];
/**
* A list of builder components.
*
* @since 1.0.0
* @access protected
* @var array $builder_components
*/
protected $builder_components = array();
/**
* Abstract_Builder constructor.
*
* @since 1.0.0
* @access public
*/
public function __construct() {
$this->set_property( 'control_id', 'hfg_' . $this->get_id() . '_layout' );
$this->set_property( 'panel', 'hfg_' . $this->get_id() );
$this->set_property( 'section', $this->control_id . '_section' );
$this->init();
add_action( 'hfg_' . $this->get_id() . '_render', [ $this, 'load_template' ] );
$this->define_builder_settings();
foreach ( $this->get_rows() as $row_id => $row_name ) {
$this->define_row_settings( $row_id );
}
}
/**
* Method to set protected properties for class.
*
* @param string $key The property key name.
* @param string $value The property value.
*
* @return bool
* @since 1.0.0
* @access protected
*/
protected function set_property( $key = '', $value = '' ) {
if ( ! property_exists( $this, $key ) ) {
return false;
}
$this->$key = $value;
return true;
}
/**
* Define builder settings.
*/
public function define_builder_settings() {
SettingsManager::get_instance()->add(
[
'id' => $this->control_id,
'group' => $this->control_id,
'noformat' => true,
'transport' => 'post' . $this->get_id(),
'sanitize_callback' => [ $this, 'sanitize_json' ],
'default' => '',
'label' => '',
'type' => 'text',
'section' => $this->section,
]
);
do_action( 'hfg_row_settings', $this->get_id(), $this->control_id );
}
/**
* Used to define the rows in the builder sections.
*
* @return array Rows array.
*/
abstract protected function get_rows();
/**
* Define Row settings.
*
* @param string $row_id Row id.
*/
public function define_row_settings( $row_id ) {
$row_setting_id = $this->control_id . '_' . $row_id;
SettingsManager::get_instance()->add(
[
'id' => $row_setting_id,
'transport' => 'postMessage',
'sanitize_callback' => array( $this, 'sanitize_json' ),
'default' => '',
]
);
if ( $row_id !== 'sidebar' ) {
SettingsManager::get_instance()->add(
[
'id' => self::LAYOUT_SETTING,
'group' => $row_setting_id,
'tab' => SettingsManager::TAB_LAYOUT,
'label' => __( 'Layout', 'neve' ),
'type' => 'select',
'section' => $row_setting_id,
'options' => [
'choices' => [
'layout-full-contained' => __( 'Full Width', 'neve' ) . ' - ' . __( 'Contained', 'neve' ),
'layout-fullwidth' => __( 'Full Width', 'neve' ),
'layout-contained' => __( 'Contained', 'neve' ),
],
],
'transport' => 'post' . $row_setting_id,
'sanitize_callback' => 'wp_filter_nohtml_kses',
'default' => 'layout-full-contained',
]
);
SettingsManager::get_instance()->add(
[
'id' => self::HEIGHT_SETTING,
'group' => $row_setting_id,
'tab' => SettingsManager::TAB_STYLE,
'section' => $row_setting_id,
'label' => __( 'Row height (px)', 'neve' ),
'type' => '\Neve\Customizer\Controls\Range',
'options' => [
'type' => 'range-value',
'media_query' => true,
'step' => 1,
'input_attr' => [
'mobile' => [
'min' => 0,
'max' => 350,
'default' => 0,
],
'tablet' => [
'min' => 0,
'max' => 350,
'default' => 0,
],
'desktop' => [
'min' => 0,
'max' => 350,
'default' => 0,
],
],
],
'transport' => 'post' . $row_setting_id,
'sanitize_callback' => array( $this, 'sanitize_responsive_int_json' ),
'default' => '{ "mobile": "0", "tablet": "0", "desktop": "0" }',
]
);
}
SettingsManager::get_instance()->add(
[
'id' => self::SKIN_SETTING,
'group' => $row_setting_id,
'tab' => SettingsManager::TAB_STYLE,
'label' => __( 'Skin Mode', 'neve' ),
'section' => $row_setting_id,
'type' => '\Neve\Customizer\Controls\Radio_Image',
'options' => [
'choices' => [
'light-mode' => array(
'url' => Settings\Config::get_url() . '/assets/images/customizer/text_mode_dark.svg',
'name' => '',
),
'dark-mode' => array(
'url' => Settings\Config::get_url() . '/assets/images/customizer/text_mode_light.svg',
'name' => '',
),
],
],
'transport' => 'post' . $row_setting_id,
'sanitize_callback' => 'wp_filter_nohtml_kses',
'default' => 'light-mode',
]
);
do_action( 'hfg_row_settings', $this->get_id(), $row_id, $row_setting_id );
}
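/*
 * Illustrative sketch, not part of the original class: external code can hook the
 * row-settings action fired above to register extra controls for a row. The callback
 * below is hypothetical.
 *
 *     add_action(
 *         'hfg_row_settings',
 *         function ( $builder_id, $row_id, $row_setting_id ) {
 *             // e.g. SettingsManager::get_instance()->add( [ ... ] ) scoped to $row_setting_id.
 *         },
 *         10,
 *         3
 *     );
 */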
/**
* Returns current builder id.
*
* @return string|null Builder id.
*/
public static function get_current_builder() {
return self::$current_builder;
}
/**
* Define templates used to loading the builder.
*/
abstract public function load_template();
/**
* Method to get protected properties for class.
*
* @param string $key The property key name.
*
* @return mixed
* @since 1.0.0
* @access protected
*/
public function get_property( $key = '' ) {
if ( ! property_exists( $this, $key ) ) {
return false;
}
return $this->$key;
}
/**
* Register builder scripts
*
* @since 1.0.0
* @access public
*/
public function scripts() {
wp_add_inline_style( 'hfg-customizer-control', $this->inline_builder_styles() );
}
/**
* Returns a string of css rules.
*
* @return string
* @since 1.0.0
* @access protected
*/
protected function inline_builder_styles() {
$style = '';
$style_array = [];
$rows = $this->get_rows();
if ( ! empty( $rows ) ) {
foreach ( $rows as $row_id => $row_label ) {
$style_array[ '#accordion-section-' . $this->control_id . '_' . $row_id ] = array(
'display' => 'none !important',
);
}
}
return $style . $this->css_array_to_css( $style_array );
}
/**
* Called to register component controls.
*
* @param WP_Customize_Manager $wp_customize The Customize Manager.
*
* @return WP_Customize_Manager
* @since 1.0.0
* @access public
*/
public function customize_register( WP_Customize_Manager $wp_customize ) {
/**
* An instance of Component.
*
* @var Component $component
*/
foreach ( $this->builder_components as $component ) {
$component->customize_register( $wp_customize );
}
if ( null !== $wp_customize->get_panel( $this->panel ) ) {
return $wp_customize;
}
$title = ( isset( $this->title ) && ! empty( $this->title ) )
? $this->title
: __( 'Header', 'neve' );
$description = ( isset( $this->description ) && ! empty( $this->description ) )
? $this->description
: '';
$wp_customize->add_panel(
$this->panel,
array(
'priority' => 25,
'capability' => 'edit_theme_options',
'theme_supports' => Settings\Config::get_support(),
'title' => $title,
'description' => $description,
)
);
$wp_customize->add_section(
$this->section,
array(
'title' => $title,
'priority' => 299,
'panel' => $this->panel,
)
);
Settings\Manager::get_instance()->load( $this->control_id, $wp_customize );
$this->add_rows_controls( $wp_customize );
$wp_customize->selective_refresh->add_partial(
$this->control_id . '_partial',
array(
'selector' => '.' . $this->panel,
'settings' => Settings\Manager::get_instance()->get_transport_group( $this->get_id() ),
'render_callback' => array( $this, 'render' ),
)
);
return $wp_customize;
}
/**
* Adds row controls.
*
* @param WP_Customize_Manager $wp_customize The Customize Manager.
*
* @return WP_Customize_Manager|null
* @since 1.0.0
* @access protected
*/
protected function add_rows_controls( $wp_customize ) {
$rows = $this->get_rows();
if ( empty( $rows ) ) {
return null;
}
foreach ( $rows as $row_id => $row ) {
$row_setting_id = $this->control_id . '_' . $row_id;
$title = $row;
$description = '';
if ( is_array( $row ) ) {
$title = ( isset( $row['title'] ) && ! empty( $row['title'] ) )
? $row['title']
: __( 'Section Content', 'neve' );
$description = ( isset( $row['description'] ) && ! empty( $row['description'] ) )
? $row['description']
: $description;
}
$wp_customize->add_section(
$row_setting_id,
array(
'title' => $title,
'description' => $description,
'description_hidden' => ( $description !== '' ),
'priority' => 100,
'panel' => $this->panel,
)
);
Settings\Manager::get_instance()->load( $row_setting_id, $wp_customize );
$wp_customize->selective_refresh->add_partial(
$row_setting_id . '_partial',
array(
'selector' => '.' . $this->panel,
'settings' => Settings\Manager::get_instance()->get_transport_group( $row_setting_id ),
'render_callback' => array( $this, 'render' ),
)
);
}
return $wp_customize;
}
/**
* Return current device in the loop.
*
* @return null|string Current device.
*/
public function get_current_device() {
return self::$current_device;
}
/**
* Return current row in the loop.
*
* @return null|string Current row.
*/
public function get_current_row_index() {
return self::$current_row;
}
/**
* Render markup for builder.
*/
public function render() {
$layout = $this->get_layout_data();
self::$current_builder = $this->get_id();
if ( is_customize_preview() ) {
$style = $this->css_array_to_css( $this->add_style() );
echo '<style type="text/css">' . $style . '</style>';// WPCS: XSS OK.
}
foreach ( $layout as $device_name => $device ) {
if ( empty( $device ) ) {
continue;
}
self::$current_device = $device_name;
$this->render_device( $device_name, $device );
}
}
/**
* Return builder data.
*
* @return array Builder data.
*/
public function get_layout_data() {
return wp_parse_args( json_decode( SettingsManager::get_instance()->get( 'hfg_' . $this->get_id() . '_layout' ), true ), array_fill_keys( array_keys( $this->devices ), array_fill_keys( array_keys( $this->get_rows() ), [] ) ) );
}
/**
* Method to add Builder css styles.
*
* @param array $css_array An array containing css rules.
*
* @return array
* @since 1.0.0
* @access public
*/
public function add_style( array $css_array = array() ) {
$rows = $this->get_rows();
if ( ! empty( $rows ) ) {
foreach ( $rows as $row_index => $row_label ) {
$css_array = $this->add_row_style( $row_index, $css_array );
}
}
/**
* An instance of Component.
*
* @var Abstract_Component $component
*/
foreach ( $this->builder_components as $component ) {
$component_css_array = $component->add_style( $css_array );
$css_array = $this->array_merge_recursive_distinct( $css_array, $component_css_array );
}
return $css_array;
}
/**
* Method to generate css array for each row.
*
* @param string $row_index The row index.
* @param array $css_array The css array.
*
* @return array
* @since 1.0.0
* @access private
*/
private function add_row_style( $row_index, $css_array = array() ) {
$layout_height = json_decode( get_theme_mod( $this->control_id . '_' . $row_index . '_height', '{ desktop: 0, tablet: 0, mobile: 0 }' ), true );
$selector = '.' . $this->get_id() . '-' . $row_index . '-inner';
if ( isset( $layout_height['mobile'] ) ) {
$layout_height['mobile'] = ( $layout_height['mobile'] > 0 ) ? $layout_height['mobile'] . 'px' : 'auto';
$css_array[' @media (max-width: 576px)'][ $selector ] = array(
'height' => $layout_height['mobile'],
);
}
if ( isset( $layout_height['tablet'] ) ) {
$layout_height['tablet'] = ( $layout_height['tablet'] > 0 ) ? $layout_height['tablet'] . 'px' : 'auto';
$css_array[' @media (min-width: 576px)'][ $selector ] = array(
'height' => $layout_height['tablet'],
);
}
if ( isset( $layout_height['desktop'] ) ) {
$layout_height['desktop'] = ( $layout_height['desktop'] > 0 ) ? $layout_height['desktop'] . 'px' : 'auto';
$css_array[' @media (min-width: 961px)'][ $selector ] = array(
'height' => $layout_height['desktop'],
);
}
return $css_array;
}
/**
* Render device markup.
*
* @param string $device_name Device id.
* @param array $device_details Device meta.
*/
public function render_device( $device_name, $device_details ) {
foreach ( $device_details as $index => $row ) {
if ( empty( $row ) ) {
continue;
}
self::$current_row = $index;
$this->render_row( $device_name, $index, $row );
}
}
/**
* Render components in the row.
*
* @param null|string $device Device id.
* @param null|array $row Row details.
*/
public function render_components( $device = null, $row = null ) {
$row_index = 0;
if ( $device === null && $row === null ) {
$device = self::$current_device;
$row_index = self::$current_row;
}
$data = $this->get_layout_data()[ $device ][ $row_index ];
$max_columns = 12;
$last_item = null;
usort(
$data,
function ( $item1, $item2 ) {
if ( $item1['x'] == $item2['x'] ) {
return 0;
}
return $item1['x'] < $item2['x'] ? - 1 : 1;
}
);
$collection = new \CachingIterator(
new \ArrayIterator(
$data
),
\CachingIterator::TOSTRING_USE_CURRENT
);
foreach ( $collection as $component_location ) {
/**
* An instance of Abstract_Component
*
* @var Abstract_Component $component
*/
if ( ! isset( $this->builder_components[ $component_location['id'] ] ) ) {
continue;
}
$component = $this->builder_components[ $component_location['id'] ];
$x = intval( $component_location['x'] );
$width = intval( $component_location['width'] );
$align = SettingsManager::get_instance()->get( $component_location['id'] . '_' . Abstract_Component::ALIGNMENT_ID, null );
if ( ! $collection->hasNext() && ( $x + $width < $max_columns ) ) {
$width += $max_columns - ( $x + $width );
}
if ( $row_index === 'sidebar' ) {
$width = 12;
}
$classes = [ 'builder-item' ];
$classes[] = 'col-' . $width . ' col-md-' . $width . ' col-sm-' . $width;
$classes[] = 'hfg-item-' . $align;
if ( $last_item === null ) {
$classes[] = 'hfg-item-first';
}
if ( ! $collection->hasNext() ) {
$classes[] = 'hfg-item-last';
}
if ( $row_index !== 'sidebar' ) {
if ( $x > 0 && $last_item !== null ) {
$origin = intval( $last_item['width'] ) + intval( $last_item['x'] );
if ( ( $x - $origin ) > 0 ) {
$x = $x - $origin;
$classes[] = 'offset-' . $x;
}
} elseif ( $x > 0 ) {
$classes[] = 'offset-' . $x;
}
}
$component->current_x = $x;
$component->current_width = $width;
self::$current_component = $component_location['id'];
echo sprintf( '<div class="%s">', esc_attr( join( ' ', $classes ) ) );
$component->render();
echo '</div>';
$last_item = $component_location;
}
}
/**
* Register a new component for builder.
*
* @param mixed $component_to_add A component.
*
* @return bool
* @since 1.0.0
* @access public
*/
public function register_component( $component_to_add ) {
if ( ! class_exists( $component_to_add ) || ! in_array( 'HFG\Core\Interfaces\Component', class_implements( $component_to_add ) ) ) {
return false;
}
/**
* An instance of Component.
*
* @var Abstract_Component $component
*/
$component = new $component_to_add( $this->panel );
$this->builder_components[ $component->get_id() ] = $component;
$component->assign_builder( $this->get_id() );
return true;
}
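/*
 * Illustrative sketch with a hypothetical class name, not part of the original class:
 * a concrete builder usually registers its components by fully qualified class name,
 * and registration is rejected unless the class implements HFG\Core\Interfaces\Component.
 *
 *     $this->register_component( 'HFG\Core\Components\My_Component' );
 */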
/**
* Get a component from builder.
*
* @param string|null $id The id of the component.
*
* @return Abstract_Component
* @since 1.0.0
* @access public
*/
public function get_component( $id = null ) {
if ( $id === null ) {
$id = ( self::$current_component === null ) ? Abstract_Component::$current_component : self::$current_component;
}
return $this->builder_components[ $id ];
}
/**
* Returns the builder components.
*
* @return array
* @since 1.0.0
* @access public
*/
public function get_components() {
return $this->builder_components;
}
/**
* A representation of the builder as array.
*
* @return array
* @since 1.0.0
* @access public
*/
final public function get_builder() {
return array(
'id' => $this->control_id,
'control_id' => $this->control_id,
'panel' => $this->panel,
'section' => $this->section,
'title' => $this->title,
'devices' => $this->devices,
'items' => $this->get_components_settings(),
'rows' => $this->get_rows(),
);
}
/**
* Returns the components settings.
*
* @return array
* @since 1.0.0
* @access public
*/
public function get_components_settings() {
$components_settings = array();
/**
* An instance of Component.
*
* @var Component $component
*/
foreach ( $this->builder_components as $component ) {
$components_settings[ $component->get_id() ] = $component->get_settings();
}
return $components_settings;
}
}
| 1 | 19,233 | action should use a prefix of `hfg` rather than `neve` as we plan to bootstrap this as a standalone library. Moreover, previously filter/actions used in this library was using the same pattern. | Codeinwp-neve | php |
@@ -158,6 +158,8 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Https.Internal
if ((_options.HttpProtocols & HttpProtocols.Http2) != 0)
{
sslOptions.ApplicationProtocols.Add(SslApplicationProtocol.Http2);
+ // https://tools.ietf.org/html/rfc7540#section-9.2.1
+ sslOptions.AllowRenegotiation = false;
}
if ((_options.HttpProtocols & HttpProtocols.Http1) != 0) | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Net.Security;
using System.Security.Authentication;
using System.Security.Cryptography.X509Certificates;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Connections;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.AspNetCore.Server.Kestrel.Core;
using Microsoft.AspNetCore.Server.Kestrel.Core.Adapter.Internal;
using Microsoft.AspNetCore.Server.Kestrel.Core.Features;
using Microsoft.Extensions.Logging;
namespace Microsoft.AspNetCore.Server.Kestrel.Https.Internal
{
public class HttpsConnectionAdapter : IConnectionAdapter
{
private static readonly ClosedAdaptedConnection _closedAdaptedConnection = new ClosedAdaptedConnection();
private readonly HttpsConnectionAdapterOptions _options;
private readonly X509Certificate2 _serverCertificate;
private readonly Func<ConnectionContext, string, X509Certificate2> _serverCertificateSelector;
private readonly ILogger _logger;
public HttpsConnectionAdapter(HttpsConnectionAdapterOptions options)
: this(options, loggerFactory: null)
{
}
public HttpsConnectionAdapter(HttpsConnectionAdapterOptions options, ILoggerFactory loggerFactory)
{
if (options == null)
{
throw new ArgumentNullException(nameof(options));
}
// capture the certificate now so it can't be switched after validation
_serverCertificate = options.ServerCertificate;
_serverCertificateSelector = options.ServerCertificateSelector;
if (_serverCertificate == null && _serverCertificateSelector == null)
{
throw new ArgumentException(CoreStrings.ServerCertificateRequired, nameof(options));
}
// If a selector is provided then ignore the cert, it may be a default cert.
if (_serverCertificateSelector != null)
{
// SslStream doesn't allow both.
_serverCertificate = null;
}
else
{
EnsureCertificateIsAllowedForServerAuth(_serverCertificate);
}
_options = options;
_logger = loggerFactory?.CreateLogger(nameof(HttpsConnectionAdapter));
}
public bool IsHttps => true;
public Task<IAdaptedConnection> OnConnectionAsync(ConnectionAdapterContext context)
{
// Don't trust SslStream not to block.
return Task.Run(() => InnerOnConnectionAsync(context));
}
private async Task<IAdaptedConnection> InnerOnConnectionAsync(ConnectionAdapterContext context)
{
SslStream sslStream;
bool certificateRequired;
var feature = new TlsConnectionFeature();
context.Features.Set<ITlsConnectionFeature>(feature);
if (_options.ClientCertificateMode == ClientCertificateMode.NoCertificate)
{
sslStream = new SslStream(context.ConnectionStream);
certificateRequired = false;
}
else
{
sslStream = new SslStream(context.ConnectionStream,
leaveInnerStreamOpen: false,
userCertificateValidationCallback: (sender, certificate, chain, sslPolicyErrors) =>
{
if (certificate == null)
{
return _options.ClientCertificateMode != ClientCertificateMode.RequireCertificate;
}
if (_options.ClientCertificateValidation == null)
{
if (sslPolicyErrors != SslPolicyErrors.None)
{
return false;
}
}
var certificate2 = ConvertToX509Certificate2(certificate);
if (certificate2 == null)
{
return false;
}
if (_options.ClientCertificateValidation != null)
{
if (!_options.ClientCertificateValidation(certificate2, chain, sslPolicyErrors))
{
return false;
}
}
return true;
});
certificateRequired = true;
}
var timeoutFeature = context.Features.Get<IConnectionTimeoutFeature>();
timeoutFeature.SetTimeout(_options.HandshakeTimeout);
try
{
#if NETCOREAPP2_1
// Adapt to the SslStream signature
ServerCertificateSelectionCallback selector = null;
if (_serverCertificateSelector != null)
{
selector = (sender, name) =>
{
context.Features.Set(sslStream);
var cert = _serverCertificateSelector(context.ConnectionContext, name);
if (cert != null)
{
EnsureCertificateIsAllowedForServerAuth(cert);
}
return cert;
};
}
var sslOptions = new SslServerAuthenticationOptions()
{
ServerCertificate = _serverCertificate,
ServerCertificateSelectionCallback = selector,
ClientCertificateRequired = certificateRequired,
EnabledSslProtocols = _options.SslProtocols,
CertificateRevocationCheckMode = _options.CheckCertificateRevocation ? X509RevocationMode.Online : X509RevocationMode.NoCheck,
ApplicationProtocols = new List<SslApplicationProtocol>()
};
// This is order sensitive
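// Note (added for clarity): ALPN advertises the server's protocols in preference order,
// so HTTP/2 is added ahead of HTTP/1.1; a client offering both is then negotiated onto HTTP/2.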
if ((_options.HttpProtocols & HttpProtocols.Http2) != 0)
{
sslOptions.ApplicationProtocols.Add(SslApplicationProtocol.Http2);
}
if ((_options.HttpProtocols & HttpProtocols.Http1) != 0)
{
sslOptions.ApplicationProtocols.Add(SslApplicationProtocol.Http11);
}
await sslStream.AuthenticateAsServerAsync(sslOptions, CancellationToken.None);
#elif NETSTANDARD2_0 // No ALPN support
var serverCert = _serverCertificate;
if (_serverCertificateSelector != null)
{
context.Features.Set(sslStream);
serverCert = _serverCertificateSelector(context.ConnectionContext, null);
if (serverCert != null)
{
EnsureCertificateIsAllowedForServerAuth(serverCert);
}
}
await sslStream.AuthenticateAsServerAsync(serverCert, certificateRequired,
_options.SslProtocols, _options.CheckCertificateRevocation);
#else
#error TFMs need to be updated
#endif
}
catch (OperationCanceledException)
{
_logger?.LogDebug(2, CoreStrings.AuthenticationTimedOut);
sslStream.Dispose();
return _closedAdaptedConnection;
}
catch (Exception ex) when (ex is IOException || ex is AuthenticationException)
{
_logger?.LogDebug(1, ex, CoreStrings.AuthenticationFailed);
sslStream.Dispose();
return _closedAdaptedConnection;
}
finally
{
timeoutFeature.CancelTimeout();
}
#if NETCOREAPP2_1
feature.ApplicationProtocol = sslStream.NegotiatedApplicationProtocol.Protocol;
context.Features.Set<ITlsApplicationProtocolFeature>(feature);
#elif NETSTANDARD2_0 // No ALPN support
#else
#error TFMs need to be updated
#endif
feature.ClientCertificate = ConvertToX509Certificate2(sslStream.RemoteCertificate);
return new HttpsAdaptedConnection(sslStream);
}
private static void EnsureCertificateIsAllowedForServerAuth(X509Certificate2 certificate)
{
if (!CertificateLoader.IsCertificateAllowedForServerAuth(certificate))
{
throw new InvalidOperationException(CoreStrings.FormatInvalidServerCertificateEku(certificate.Thumbprint));
}
}
private static X509Certificate2 ConvertToX509Certificate2(X509Certificate certificate)
{
if (certificate == null)
{
return null;
}
if (certificate is X509Certificate2 cert2)
{
return cert2;
}
return new X509Certificate2(certificate);
}
private class HttpsAdaptedConnection : IAdaptedConnection
{
private readonly SslStream _sslStream;
public HttpsAdaptedConnection(SslStream sslStream)
{
_sslStream = sslStream;
}
public Stream ConnectionStream => _sslStream;
public void Dispose()
{
_sslStream.Dispose();
}
}
private class ClosedAdaptedConnection : IAdaptedConnection
{
public Stream ConnectionStream { get; } = new ClosedStream();
public void Dispose()
{
}
}
}
}
| 1 | 15,878 | I'm halfway tempted to disable this for all https connections. Do you know of any clients that actually renegotiate for any reason? | aspnet-KestrelHttpServer | .cs |
@@ -102,7 +102,7 @@ class presence_of_all_elements_located(object):
def __call__(self, driver):
return _find_elements(driver, self.locator)
-class visibility_of_all_elements_located(object):
+class visibility_of_any_elements_located(object):
""" An expectation for checking that there is at least one element visible
on a web page.
locator is used to find the element | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import NoAlertPresentException
"""
* Canned "Expected Conditions" which are generally useful within webdriver
* tests.
"""
class title_is(object):
"""An expectation for checking the title of a page.
title is the expected title, which must be an exact match
returns True if the title matches, false otherwise."""
def __init__(self, title):
self.title = title
def __call__(self, driver):
return self.title == driver.title
class title_contains(object):
""" An expectation for checking that the title contains a case-sensitive
substring. title is the fragment of title expected
returns True when the title matches, False otherwise
"""
def __init__(self, title):
self.title = title
def __call__(self, driver):
return self.title in driver.title
class presence_of_element_located(object):
""" An expectation for checking that an element is present on the DOM
of a page. This does not necessarily mean that the element is visible.
locator - used to find the element
returns the WebElement once it is located
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return _find_element(driver, self.locator)
class visibility_of_element_located(object):
""" An expectation for checking that an element is present on the DOM of a
page and visible. Visibility means that the element is not only displayed
but also has a height and width that is greater than 0.
locator - used to find the element
returns the WebElement once it is located and visible
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
return _element_if_visible(_find_element(driver, self.locator))
except StaleElementReferenceException:
return False
class visibility_of(object):
""" An expectation for checking that an element, known to be present on the
DOM of a page, is visible. Visibility means that the element is not only
displayed but also has a height and width that is greater than 0.
element is the WebElement
returns the (same) WebElement once it is visible
"""
def __init__(self, element):
self.element = element
def __call__(self, ignored):
return _element_if_visible(self.element)
def _element_if_visible(element, visibility=True):
return element if element.is_displayed() == visibility else False
class presence_of_all_elements_located(object):
""" An expectation for checking that there is at least one element present
on a web page.
locator is used to find the element
returns the list of WebElements once they are located
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return _find_elements(driver, self.locator)
class visibility_of_all_elements_located(object):
""" An expectation for checking that there is at least one element visible
on a web page.
locator is used to find the element
returns the list of WebElements once they are located
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return [element for element in _find_elements(driver, self.locator) if _element_if_visible(element)]
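# Illustrative usage sketch, not part of this module: expectation objects like the ones
# above are normally handed to WebDriverWait; the locator below is hypothetical.
#
#     from selenium.webdriver.common.by import By
#     from selenium.webdriver.support.ui import WebDriverWait
#
#     elements = WebDriverWait(driver, 10).until(
#         visibility_of_all_elements_located((By.CSS_SELECTOR, ".result")))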
class text_to_be_present_in_element(object):
""" An expectation for checking if the given text is present in the
specified element.
locator, text
"""
def __init__(self, locator, text_):
self.locator = locator
self.text = text_
def __call__(self, driver):
try:
element_text = _find_element(driver, self.locator).text
return self.text in element_text
except StaleElementReferenceException:
return False
class text_to_be_present_in_element_value(object):
"""
An expectation for checking if the given text is present in the element's
value attribute.
locator, text
"""
def __init__(self, locator, text_):
self.locator = locator
self.text = text_
def __call__(self, driver):
try:
element_text = _find_element(driver,
self.locator).get_attribute("value")
if element_text:
return self.text in element_text
else:
return False
except StaleElementReferenceException:
return False
class frame_to_be_available_and_switch_to_it(object):
""" An expectation for checking whether the given frame is available to
switch to. If the frame is available it switches the given driver to the
specified frame.
"""
def __init__(self, locator):
self.frame_locator = locator
def __call__(self, driver):
try:
if isinstance(self.frame_locator, tuple):
driver.switch_to.frame(_find_element(driver,
self.frame_locator))
else:
driver.switch_to.frame(self.frame_locator)
return True
except NoSuchFrameException:
return False
class invisibility_of_element_located(object):
""" An Expectation for checking that an element is either invisible or not
present on the DOM.
locator used to find the element
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
return _element_if_visible(_find_element(driver, self.locator), False)
except (NoSuchElementException, StaleElementReferenceException):
# In the case of NoSuchElement, returns true because the element is
# not present in DOM. The try block checks if the element is present
# but is invisible.
# In the case of StaleElementReference, returns true because stale
# element reference implies that element is no longer visible.
return True
class element_to_be_clickable(object):
""" An Expectation for checking an element is visible and enabled such that
you can click it."""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
element = visibility_of_element_located(self.locator)(driver)
if element and element.is_enabled():
return element
else:
return False
class staleness_of(object):
""" Wait until an element is no longer attached to the DOM.
element is the element to wait for.
returns False if the element is still attached to the DOM, true otherwise.
"""
def __init__(self, element):
self.element = element
def __call__(self, ignored):
try:
# Calling any method forces a staleness check
self.element.is_enabled()
return False
except StaleElementReferenceException as expected:
return True
class element_to_be_selected(object):
""" An expectation for checking the selection is selected.
element is WebElement object
"""
def __init__(self, element):
self.element = element
def __call__(self, ignored):
return self.element.is_selected()
class element_located_to_be_selected(object):
"""An expectation for the element to be located is selected.
locator is a tuple of (by, path)"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return _find_element(driver, self.locator).is_selected()
class element_selection_state_to_be(object):
""" An expectation for checking if the given element is selected.
element is WebElement object
is_selected is a Boolean."
"""
def __init__(self, element, is_selected):
self.element = element
self.is_selected = is_selected
def __call__(self, ignored):
return self.element.is_selected() == self.is_selected
class element_located_selection_state_to_be(object):
""" An expectation to locate an element and check if the selection state
specified is in that state.
locator is a tuple of (by, path)
is_selected is a boolean
"""
def __init__(self, locator, is_selected):
self.locator = locator
self.is_selected = is_selected
def __call__(self, driver):
try:
element = _find_element(driver, self.locator)
return element.is_selected() == self.is_selected
except StaleElementReferenceException:
return False
class alert_is_present(object):
""" Expect an alert to be present."""
def __init__(self):
pass
def __call__(self, driver):
try:
alert = driver.switch_to.alert
alert.text
return alert
except NoAlertPresentException:
return False
def _find_element(driver, by):
"""Looks up an element. Logs and re-raises ``WebDriverException``
if thrown."""
try:
return driver.find_element(*by)
except NoSuchElementException as e:
raise e
except WebDriverException as e:
raise e
def _find_elements(driver, by):
try:
return driver.find_elements(*by)
except WebDriverException as e:
raise e
| 1 | 13,212 | shouldn't **call** return a boolean? | SeleniumHQ-selenium | js |
@@ -25,9 +25,8 @@ import (
)
func ExampleOpenCollection() {
- // This example is used in https://gocloud.dev/howto/docstore#dynamodb-ctor.
-
- // import _ "gocloud.dev/docstore/awsdynamodb"
+ // PRAGMA(gocloud.dev): Package this example for gocloud.dev.
+ // PRAGMA(gocloud.dev): Add a blank import: _ "gocloud.dev/docstore/awsdynamodb"
sess, err := session.NewSession()
if err != nil { | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package awsdynamodb_test
import (
"context"
"log"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"gocloud.dev/docstore"
"gocloud.dev/docstore/awsdynamodb"
)
func ExampleOpenCollection() {
// This example is used in https://gocloud.dev/howto/docstore#dynamodb-ctor.
// import _ "gocloud.dev/docstore/awsdynamodb"
sess, err := session.NewSession()
if err != nil {
log.Fatal(err)
}
coll, err := awsdynamodb.OpenCollection(
dynamodb.New(sess), "docstore-test", "partitionKeyField", "", nil)
if err != nil {
log.Fatal(err)
}
defer coll.Close()
}
func Example_openCollectionFromURL() {
// This example is used in https://gocloud.dev/howto/docstore#dynamodb.
// import _ "gocloud.dev/docstore/awsdynamodb"
// Variables set up elsewhere:
ctx := context.Background()
// docstore.OpenCollection creates a *docstore.Collection from a URL.
coll, err := docstore.OpenCollection(ctx, "dynamodb://my-table?partition_key=name")
if err != nil {
log.Fatal(err)
}
defer coll.Close()
}
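// Illustrative sketch, not part of the original examples: once a collection is open it is
// used through the portable docstore API. The Player type and its fields are hypothetical,
// and the stored document must carry the partition key field configured for the table.
//
//	type Player struct {
//		Name  string
//		Score int
//	}
//	err = coll.Put(ctx, &Player{Name: "Ann", Score: 42})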
| 1 | 19,587 | I don't think you need the blank import here. This one uses awsdynamodb directly. | google-go-cloud | go |
@@ -121,11 +121,11 @@ public abstract class DeleteFilter<T> {
return applyEqDeletes(applyPosDeletes(records));
}
- private List<Predicate<T>> applyEqDeletes() {
- List<Predicate<T>> isInDeleteSets = Lists.newArrayList();
+ private Predicate<T> buildEqDeletePredicate() {
if (eqDeletes.isEmpty()) {
- return isInDeleteSets;
+ return null;
}
+ Predicate<T> isDeleted = t -> false;
Multimap<Set<Integer>, DeleteFile> filesByDeleteIds = Multimaps.newMultimap(Maps.newHashMap(), Lists::newArrayList);
for (DeleteFile delete : eqDeletes) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.data;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
import org.apache.iceberg.Accessor;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DeleteFile;
import org.apache.iceberg.FileContent;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.MetadataColumns;
import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.avro.Avro;
import org.apache.iceberg.data.avro.DataReader;
import org.apache.iceberg.data.orc.GenericOrcReader;
import org.apache.iceberg.data.parquet.GenericParquetReaders;
import org.apache.iceberg.deletes.Deletes;
import org.apache.iceberg.deletes.PositionDeleteIndex;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.orc.ORC;
import org.apache.iceberg.parquet.Parquet;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.relocated.com.google.common.collect.Multimap;
import org.apache.iceberg.relocated.com.google.common.collect.Multimaps;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.Filter;
import org.apache.iceberg.util.StructLikeSet;
import org.apache.iceberg.util.StructProjection;
public abstract class DeleteFilter<T> {
private static final long DEFAULT_SET_FILTER_THRESHOLD = 100_000L;
private static final Schema POS_DELETE_SCHEMA = new Schema(
MetadataColumns.DELETE_FILE_PATH,
MetadataColumns.DELETE_FILE_POS);
private final long setFilterThreshold;
private final DataFile dataFile;
private final List<DeleteFile> posDeletes;
private final List<DeleteFile> eqDeletes;
private final Schema requiredSchema;
private final Accessor<StructLike> posAccessor;
private PositionDeleteIndex deleteRowPositions = null;
protected DeleteFilter(FileScanTask task, Schema tableSchema, Schema requestedSchema) {
this.setFilterThreshold = DEFAULT_SET_FILTER_THRESHOLD;
this.dataFile = task.file();
ImmutableList.Builder<DeleteFile> posDeleteBuilder = ImmutableList.builder();
ImmutableList.Builder<DeleteFile> eqDeleteBuilder = ImmutableList.builder();
for (DeleteFile delete : task.deletes()) {
switch (delete.content()) {
case POSITION_DELETES:
posDeleteBuilder.add(delete);
break;
case EQUALITY_DELETES:
eqDeleteBuilder.add(delete);
break;
default:
throw new UnsupportedOperationException("Unknown delete file content: " + delete.content());
}
}
this.posDeletes = posDeleteBuilder.build();
this.eqDeletes = eqDeleteBuilder.build();
this.requiredSchema = fileProjection(tableSchema, requestedSchema, posDeletes, eqDeletes);
this.posAccessor = requiredSchema.accessorForField(MetadataColumns.ROW_POSITION.fieldId());
}
public Schema requiredSchema() {
return requiredSchema;
}
public boolean hasPosDeletes() {
return !posDeletes.isEmpty();
}
Accessor<StructLike> posAccessor() {
return posAccessor;
}
protected abstract StructLike asStructLike(T record);
protected abstract InputFile getInputFile(String location);
protected long pos(T record) {
return (Long) posAccessor.get(asStructLike(record));
}
public CloseableIterable<T> filter(CloseableIterable<T> records) {
return applyEqDeletes(applyPosDeletes(records));
}
private List<Predicate<T>> applyEqDeletes() {
List<Predicate<T>> isInDeleteSets = Lists.newArrayList();
if (eqDeletes.isEmpty()) {
return isInDeleteSets;
}
Multimap<Set<Integer>, DeleteFile> filesByDeleteIds = Multimaps.newMultimap(Maps.newHashMap(), Lists::newArrayList);
for (DeleteFile delete : eqDeletes) {
filesByDeleteIds.put(Sets.newHashSet(delete.equalityFieldIds()), delete);
}
for (Map.Entry<Set<Integer>, Collection<DeleteFile>> entry : filesByDeleteIds.asMap().entrySet()) {
Set<Integer> ids = entry.getKey();
Iterable<DeleteFile> deletes = entry.getValue();
Schema deleteSchema = TypeUtil.select(requiredSchema, ids);
// a projection to select and reorder fields of the file schema to match the delete rows
StructProjection projectRow = StructProjection.create(requiredSchema, deleteSchema);
Iterable<CloseableIterable<Record>> deleteRecords = Iterables.transform(deletes,
delete -> openDeletes(delete, deleteSchema));
// copy the delete records because they will be held in a set
CloseableIterable<Record> records = CloseableIterable.transform(
CloseableIterable.concat(deleteRecords), Record::copy);
StructLikeSet deleteSet = Deletes.toEqualitySet(
CloseableIterable.transform(
records, record -> new InternalRecordWrapper(deleteSchema.asStruct()).wrap(record)),
deleteSchema.asStruct());
Predicate<T> isInDeleteSet = record -> deleteSet.contains(projectRow.wrap(asStructLike(record)));
isInDeleteSets.add(isInDeleteSet);
}
return isInDeleteSets;
}
public CloseableIterable<T> findEqualityDeleteRows(CloseableIterable<T> records) {
// Predicate to test whether a row has been deleted by equality deletions.
Predicate<T> deletedRows = applyEqDeletes().stream()
.reduce(Predicate::or)
.orElse(t -> false);
Filter<T> deletedRowsFilter = new Filter<T>() {
@Override
protected boolean shouldKeep(T item) {
return deletedRows.test(item);
}
};
return deletedRowsFilter.filter(records);
}
private CloseableIterable<T> applyEqDeletes(CloseableIterable<T> records) {
// Predicate to test whether a row should be visible to user after applying equality deletions.
Predicate<T> remainingRows = applyEqDeletes().stream()
.map(Predicate::negate)
.reduce(Predicate::and)
.orElse(t -> true);
Filter<T> remainingRowsFilter = new Filter<T>() {
@Override
protected boolean shouldKeep(T item) {
return remainingRows.test(item);
}
};
return remainingRowsFilter.filter(records);
}
public PositionDeleteIndex deletedRowPositions() {
if (posDeletes.isEmpty()) {
return null;
}
if (deleteRowPositions == null) {
List<CloseableIterable<Record>> deletes = Lists.transform(posDeletes, this::openPosDeletes);
deleteRowPositions = Deletes.toPositionBitmap(dataFile.path(), deletes);
}
return deleteRowPositions;
}
private CloseableIterable<T> applyPosDeletes(CloseableIterable<T> records) {
if (posDeletes.isEmpty()) {
return records;
}
List<CloseableIterable<Record>> deletes = Lists.transform(posDeletes, this::openPosDeletes);
// if there are fewer deletes than a reasonable number to keep in memory, use a set
if (posDeletes.stream().mapToLong(DeleteFile::recordCount).sum() < setFilterThreshold) {
return Deletes.filter(
records, this::pos,
Deletes.toPositionSet(dataFile.path(), CloseableIterable.concat(deletes)));
}
return Deletes.streamingFilter(records, this::pos, Deletes.deletePositions(dataFile.path(), deletes));
}
private CloseableIterable<Record> openPosDeletes(DeleteFile file) {
return openDeletes(file, POS_DELETE_SCHEMA);
}
private CloseableIterable<Record> openDeletes(DeleteFile deleteFile, Schema deleteSchema) {
InputFile input = getInputFile(deleteFile.path().toString());
switch (deleteFile.format()) {
case AVRO:
return Avro.read(input)
.project(deleteSchema)
.reuseContainers()
.createReaderFunc(DataReader::create)
.build();
case PARQUET:
Parquet.ReadBuilder builder = Parquet.read(input)
.project(deleteSchema)
.reuseContainers()
.createReaderFunc(fileSchema -> GenericParquetReaders.buildReader(deleteSchema, fileSchema));
if (deleteFile.content() == FileContent.POSITION_DELETES) {
builder.filter(Expressions.equal(MetadataColumns.DELETE_FILE_PATH.name(), dataFile.path()));
}
return builder.build();
case ORC:
// Reusing containers is automatic for ORC. No need to set 'reuseContainers' here.
ORC.ReadBuilder orcBuilder = ORC.read(input)
.project(deleteSchema)
.createReaderFunc(fileSchema -> GenericOrcReader.buildReader(deleteSchema, fileSchema));
if (deleteFile.content() == FileContent.POSITION_DELETES) {
orcBuilder.filter(Expressions.equal(MetadataColumns.DELETE_FILE_PATH.name(), dataFile.path()));
}
return orcBuilder.build();
default:
throw new UnsupportedOperationException(String.format(
"Cannot read deletes, %s is not a supported format: %s", deleteFile.format().name(), deleteFile.path()));
}
}
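  // Computes the schema used to read the data file: the requested columns plus any extra
  // columns needed to apply the deletes (equality delete fields, _pos and _deleted).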
private static Schema fileProjection(Schema tableSchema, Schema requestedSchema,
List<DeleteFile> posDeletes, List<DeleteFile> eqDeletes) {
if (posDeletes.isEmpty() && eqDeletes.isEmpty()) {
return requestedSchema;
}
Set<Integer> requiredIds = Sets.newLinkedHashSet();
if (!posDeletes.isEmpty()) {
requiredIds.add(MetadataColumns.ROW_POSITION.fieldId());
}
for (DeleteFile eqDelete : eqDeletes) {
requiredIds.addAll(eqDelete.equalityFieldIds());
}
requiredIds.add(MetadataColumns.IS_DELETED.fieldId());
Set<Integer> missingIds = Sets.newLinkedHashSet(
Sets.difference(requiredIds, TypeUtil.getProjectedIds(requestedSchema)));
if (missingIds.isEmpty()) {
return requestedSchema;
}
// TODO: support adding nested columns. this will currently fail when finding nested columns to add
List<Types.NestedField> columns = Lists.newArrayList(requestedSchema.columns());
for (int fieldId : missingIds) {
if (fieldId == MetadataColumns.ROW_POSITION.fieldId() || fieldId == MetadataColumns.IS_DELETED.fieldId()) {
continue; // add _pos and _deleted at the end
}
Types.NestedField field = tableSchema.asStruct().field(fieldId);
Preconditions.checkArgument(field != null, "Cannot find required field for ID %s", fieldId);
columns.add(field);
}
if (missingIds.contains(MetadataColumns.ROW_POSITION.fieldId())) {
columns.add(MetadataColumns.ROW_POSITION);
}
if (missingIds.contains(MetadataColumns.IS_DELETED.fieldId())) {
columns.add(MetadataColumns.IS_DELETED);
}
return new Schema(columns);
}
}
| 1 | 35,480 | I think this should be initialized to `null` instead of a predicate. There is no need to run an extra predicate (with an extra method dispatch for each row in a data file. That's a tight loop so we should do more work here to avoid it. Instead of using `isDeleted.or`, this should test whether `isDeleted` is `null` and either initialize `isDeleted` or call `isDeleted.or`. | apache-iceberg | java |
@@ -246,6 +246,15 @@ def __build_clangsa_config_handler(args, context):
config_handler.compiler_sysroot = context.compiler_sysroot
config_handler.system_includes = context.extra_system_includes
config_handler.includes = context.extra_includes
+
+ if 'ctu_phases' in args:
+ config_handler.ctu_dir = os.path.join(args.output_path,
+ args.ctu_dir)
+ config_handler.ctu_in_memory = args.ctu_in_memory
+ config_handler.log_file = args.logfile
+ config_handler.path_env_extra = context.path_env_extra
+ config_handler.ld_lib_path_extra = context.ld_lib_path_extra
+
try:
with open(args.clangsa_args_cfg_file, 'rb') as sa_cfg:
config_handler.analyzer_extra_arguments = \ | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
Supported analyzer types.
"""
import os
import platform
import re
from libcodechecker.analyze import analyzer_env
from libcodechecker.analyze import host_check
from libcodechecker.analyze.analyzers import analyzer_clang_tidy
from libcodechecker.analyze.analyzers import analyzer_clangsa
from libcodechecker.analyze.analyzers import config_handler_clang_tidy
from libcodechecker.analyze.analyzers import config_handler_clangsa
from libcodechecker.analyze.analyzers import result_handler_base
from libcodechecker.analyze.analyzers import result_handler_clang_tidy
from libcodechecker.analyze.analyzers import result_handler_plist_to_db
from libcodechecker.analyze.analyzers import result_handler_plist_to_stdout
from libcodechecker.logger import LoggerFactory
LOG = LoggerFactory.get_new_logger('ANALYZER TYPES')
CLANG_SA = 'clangsa'
CLANG_TIDY = 'clang-tidy'
supported_analyzers = {CLANG_SA, CLANG_TIDY}
def is_sa_checker_name(checker_name):
"""
Match for Clang Static analyzer names like:
- unix
- unix.Malloc
- security.insecureAPI
- security.insecureAPI.gets
"""
# No '-' is allowed in the checker name.
sa_checker_name = r'^[^-]+$'
ptn = re.compile(sa_checker_name)
return ptn.match(checker_name) is not None
def is_tidy_checker_name(checker_name):
"""
Match for Clang Tidy analyzer names like:
-*
modernize-*
clang-diagnostic-*
cert-fio38-c
google-global-names-in-headers
"""
# Must contain at least one '-'.
tidy_checker_name = r'^(?=.*[\-]).+$'
ptn = re.compile(tidy_checker_name)
return ptn.match(checker_name) is not None
def check_supported_analyzers(analyzers, context):
"""
Checks the given analyzers in the current context for their executability
and support in CodeChecker.
This method also updates the given context.analyzer_binaries if the
context's configuration is bogus but had been resolved.
:return: (enabled, failed) where enabled is a list of analyzer names
             and failed is a list of (analyzer, reason) tuples.
"""
check_env = analyzer_env.get_check_env(context.path_env_extra,
context.ld_lib_path_extra)
analyzer_binaries = context.analyzer_binaries
enabled_analyzers = set()
failed_analyzers = set()
for analyzer_name in analyzers:
if analyzer_name not in supported_analyzers:
failed_analyzers.add((analyzer_name,
"Analyzer unsupported by CodeChecker."))
# Get the compiler binary to check if it can run.
available_analyzer = True
analyzer_bin = analyzer_binaries.get(analyzer_name)
if not analyzer_bin:
failed_analyzers.add((analyzer_name,
"Failed to detect analyzer binary."))
available_analyzer = False
elif not os.path.isabs(analyzer_bin):
# If the analyzer is not in an absolute path, try to find it...
if analyzer_name == CLANG_SA:
found_bin = analyzer_clangsa.ClangSA. \
resolve_missing_binary(analyzer_bin, check_env)
elif analyzer_name == CLANG_TIDY:
found_bin = analyzer_clang_tidy.ClangTidy. \
resolve_missing_binary(analyzer_bin, check_env)
# found_bin is an absolute path, an executable in one of the
# PATH folders.
# If found_bin is the same as the original binary, ie., normally
# calling the binary without any search would have resulted in
# the same binary being called, it's NOT a "not found".
if found_bin and os.path.basename(found_bin) != analyzer_bin:
LOG.debug("Configured binary '{0}' for analyzer '{1}' was "
"not found, but environment PATH contains '{2}'."
.format(analyzer_bin, analyzer_name, found_bin))
context.analyzer_binaries[analyzer_name] = found_bin
if not found_bin or \
not host_check.check_clang(found_bin, check_env):
# If analyzer_bin is not False here, the resolver found one.
failed_analyzers.add((analyzer_name,
"Couldn't run analyzer binary."))
available_analyzer = False
elif not host_check.check_clang(analyzer_bin, check_env):
# Analyzers unavailable under absolute paths are deliberately a
# configuration problem.
failed_analyzers.add((analyzer_name,
"Cannot execute analyzer binary."))
available_analyzer = False
if available_analyzer:
enabled_analyzers.add(analyzer_name)
return enabled_analyzers, failed_analyzers
def construct_analyzer_type(analyzer_type, config_handler, buildaction):
"""
Construct a specific analyzer based on the type.
"""
LOG.debug_analyzer('Constructing ' + analyzer_type + ' analyzer')
if analyzer_type == CLANG_SA:
analyzer = analyzer_clangsa.ClangSA(config_handler,
buildaction)
return analyzer
elif analyzer_type == CLANG_TIDY:
analyzer = analyzer_clang_tidy.ClangTidy(config_handler,
buildaction)
return analyzer
else:
LOG.error('Unsupported analyzer type: ' + analyzer_type)
return None
def construct_analyzer(buildaction,
analyzer_config_map):
"""
Construct an analyzer.
"""
try:
analyzer_type = buildaction.analyzer_type
# Get the proper config handler for this analyzer type.
config_handler = analyzer_config_map.get(analyzer_type)
analyzer = construct_analyzer_type(analyzer_type,
config_handler,
buildaction)
return analyzer
except Exception as ex:
LOG.debug_analyzer(ex)
return None
def initialize_checkers(config_handler,
checkers,
default_checkers=None,
cmdline_checkers=None,
enable_all=False):
"""
Initializes the checker list for the specified config handler based
    on the given defaults, commandline arguments and analyzer-retrieved
checker list.
"""
# By default disable all checkers.
for checker_name, description in checkers:
config_handler.add_checker(checker_name, False, description)
# Set default enabled or disabled checkers, retrieved from a config file.
if default_checkers:
for checker in default_checkers:
for checker_name, enabled in checker.items():
if enabled:
config_handler.enable_checker(checker_name)
else:
config_handler.disable_checker(checker_name)
# If enable_all is given, almost all checkers should be enabled.
if enable_all:
for checker_name, enabled in checkers:
if not checker_name.startswith("alpha.") and \
not checker_name.startswith("debug.") and \
not checker_name.startswith("osx."):
# There are a few exceptions, though, which still need to
# be manually enabled by the user: alpha and debug.
config_handler.enable_checker(checker_name)
if checker_name.startswith("osx.") and \
platform.system() == 'Darwin':
# OSX checkers are only enable-all'd if we are on OSX.
config_handler.enable_checker(checker_name)
# Set user defined enabled or disabled checkers from the command line.
if cmdline_checkers:
for checker_name, enabled in cmdline_checkers:
if enabled:
config_handler.enable_checker(checker_name)
else:
config_handler.disable_checker(checker_name)
def __replace_env_var(cfg_file):
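    """
    Return a match handler for re.sub() which substitutes a $(VAR) pattern
    with the value of the VAR environment variable, or '' if it is not set.
    """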
def replacer(matchobj):
env_var = matchobj.group(1)
if matchobj.group(1) not in os.environ:
LOG.error(env_var + ' environment variable not set in ' + cfg_file)
return ''
return os.environ[env_var]
return replacer
def __build_clangsa_config_handler(args, context):
"""
Build the config handler for clang static analyzer.
Handle config options from the command line and config files.
"""
config_handler = config_handler_clangsa.ClangSAConfigHandler()
config_handler.analyzer_plugins_dir = context.checker_plugin
config_handler.analyzer_binary = context.analyzer_binaries.get(CLANG_SA)
config_handler.compiler_resource_dir = context.compiler_resource_dir
config_handler.compiler_sysroot = context.compiler_sysroot
config_handler.system_includes = context.extra_system_includes
config_handler.includes = context.extra_includes
try:
with open(args.clangsa_args_cfg_file, 'rb') as sa_cfg:
config_handler.analyzer_extra_arguments = \
re.sub('\$\((.*?)\)',
__replace_env_var(args.clangsa_args_cfg_file),
sa_cfg.read().strip())
except IOError as ioerr:
LOG.debug_analyzer(ioerr)
except AttributeError as aerr:
# No clangsa arguments file was given in the command line.
LOG.debug_analyzer(aerr)
analyzer = construct_analyzer_type(CLANG_SA, config_handler, None)
check_env = analyzer_env.get_check_env(context.path_env_extra,
context.ld_lib_path_extra)
checkers = analyzer.get_analyzer_checkers(config_handler, check_env)
# Read clang-sa checkers from the config file.
clang_sa_checkers = context.default_checkers_config.get(CLANG_SA +
'_checkers')
try:
cmdline_checkers = args.ordered_checkers
except AttributeError:
LOG.debug_analyzer('No checkers were defined in '
'the command line for ' + CLANG_SA)
cmdline_checkers = None
initialize_checkers(config_handler,
checkers,
clang_sa_checkers,
cmdline_checkers,
'enable_all' in args and args.enable_all)
return config_handler
def __build_clang_tidy_config_handler(args, context):
"""
Build the config handler for clang tidy analyzer.
Handle config options from the command line and config files.
"""
config_handler = config_handler_clang_tidy.ClangTidyConfigHandler()
config_handler.analyzer_binary = context.analyzer_binaries.get(CLANG_TIDY)
config_handler.compiler_resource_dir = context.compiler_resource_dir
config_handler.compiler_sysroot = context.compiler_sysroot
config_handler.system_includes = context.extra_system_includes
config_handler.includes = context.extra_includes
try:
with open(args.tidy_args_cfg_file, 'rb') as tidy_cfg:
config_handler.analyzer_extra_arguments = \
re.sub('\$\((.*?)\)', __replace_env_var,
tidy_cfg.read().strip())
except IOError as ioerr:
LOG.debug_analyzer(ioerr)
except AttributeError as aerr:
# No clang tidy arguments file was given in the command line.
LOG.debug_analyzer(aerr)
analyzer = construct_analyzer_type(CLANG_TIDY, config_handler, None)
check_env = analyzer_env.get_check_env(context.path_env_extra,
context.ld_lib_path_extra)
checkers = analyzer.get_analyzer_checkers(config_handler, check_env)
# Read clang-tidy checkers from the config file.
clang_tidy_checkers = context.default_checkers_config.get(CLANG_TIDY +
'_checkers')
try:
cmdline_checkers = args.ordered_checkers
except AttributeError:
LOG.debug_analyzer('No checkers were defined in '
'the command line for ' +
CLANG_TIDY)
cmdline_checkers = None
initialize_checkers(config_handler,
checkers,
clang_tidy_checkers,
cmdline_checkers,
'enable_all' in args and args.enable_all)
return config_handler
def build_config_handlers(args, context, enabled_analyzers):
"""
Handle config from command line or from config file if no command line
config is given.
    The supported command-line config format is JSON; Clang-Tidy also supports
    YAML, but no standard library for YAML parsing is available in Python.
"""
analyzer_config_map = {}
for ea in enabled_analyzers:
if ea == CLANG_SA:
config_handler = __build_clangsa_config_handler(args, context)
elif ea == CLANG_TIDY:
config_handler = __build_clang_tidy_config_handler(args, context)
else:
LOG.debug("Unhandled analyzer: " + str(ea))
analyzer_config_map[ea] = config_handler
return analyzer_config_map
def construct_analyze_handler(buildaction,
report_output,
severity_map,
skiplist_handler):
"""
Construct an empty (base) ResultHandler which is capable of returning
analyzer worker statuses to the caller method, but does not provide
actual parsing and processing of results, instead only saves the analysis
results.
"""
if buildaction.analyzer_type not in supported_analyzers:
return None
if buildaction.analyzer_type == CLANG_SA:
res_handler = result_handler_base.ResultHandler(buildaction,
report_output)
elif buildaction.analyzer_type == CLANG_TIDY:
res_handler = result_handler_clang_tidy.ClangTidyPlistToFile(
buildaction, report_output)
res_handler.severity_map = severity_map
res_handler.skiplist_handler = skiplist_handler
return res_handler
def construct_parse_handler(buildaction,
output,
severity_map,
suppress_handler,
print_steps):
"""
Construct a result handler for parsing results in a human-readable format.
"""
if buildaction.analyzer_type not in supported_analyzers:
return None
if buildaction.analyzer_type == CLANG_SA:
res_handler = result_handler_plist_to_stdout.PlistToStdout(
buildaction,
output,
None)
res_handler.print_steps = print_steps
elif buildaction.analyzer_type == CLANG_TIDY:
res_handler = result_handler_clang_tidy.ClangTidyPlistToStdout(
buildaction,
output,
None)
res_handler.severity_map = severity_map
res_handler.suppress_handler = suppress_handler
return res_handler
def construct_store_handler(buildaction,
run_id,
severity_map):
"""
Construct a result handler for storing results in a database.
"""
res_handler = result_handler_plist_to_db.PlistToDB(
buildaction,
None,
run_id)
res_handler.severity_map = severity_map
return res_handler
| 1 | 7,188 | What is this and the next entry in config_handler used for? | Ericsson-codechecker | c |
@@ -244,3 +244,9 @@ func NewWithDefault(keyFilePath string, certFilePath string) (m upstreamca.Upstr
return m, err
}
+
+func NewEmpty () (m upstreamca.UpstreamCa) {
+ return &memoryPlugin{
+ mtx:&sync.RWMutex{},
+ }
+} | 1 | package pkg
import (
"crypto/ecdsa"
"crypto/rand"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"math/big"
"net/url"
"sync"
"sync/atomic"
"time"
"github.com/hashicorp/hcl"
"github.com/spiffe/go-spiffe/uri"
"github.com/spiffe/spire/pkg/common/plugin"
common "github.com/spiffe/spire/pkg/common/plugin"
iface "github.com/spiffe/spire/pkg/common/plugin"
"github.com/spiffe/spire/pkg/server/upstreamca"
"log"
)
var (
pluginInfo = sriplugin.GetPluginInfoResponse{
Description: "",
DateCreated: "",
Version: "",
Author: "",
Company: "",
}
)
type configuration struct {
TTL string `hcl:"ttl" json:"ttl"` // time to live for generated certs
TrustDomain string `hcl:"trust_domain" json:"trust_domain"`
CertFilePath string `hcl:"cert_file_path" json:"cert_file_path"`
KeyFilePath string `hcl:"key_file_path" json:"key_file_path"`
}
type memoryPlugin struct {
config *configuration
key *ecdsa.PrivateKey
cert *x509.Certificate
serial int64
mtx *sync.RWMutex
}
func (m *memoryPlugin) Configure(req *common.ConfigureRequest) (*common.ConfigureResponse, error) {
log.Print("Starting Configure")
resp := &sriplugin.ConfigureResponse{}
// Parse HCL config payload into config struct
config := &configuration{}
hclTree, err := hcl.Parse(req.Configuration)
if err != nil {
resp.ErrorList = []string{err.Error()}
return resp, err
}
err = hcl.DecodeObject(&config, hclTree)
if err != nil {
resp.ErrorList = []string{err.Error()}
return resp, err
}
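	// Load and parse the PEM-encoded EC private key used to sign certificates for incoming CSRs.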
keyPEM, err := ioutil.ReadFile(config.KeyFilePath)
if err != nil {
return nil, fmt.Errorf("Could not read %s: %s", config.KeyFilePath, err)
}
block, rest := pem.Decode(keyPEM)
if block == nil {
return nil, errors.New("Invalid cert format")
}
if len(rest) > 0 {
return nil, errors.New("Invalid cert format: too many certs")
}
key, err := x509.ParseECPrivateKey(block.Bytes)
if err != nil {
return nil, err
}
certPEM, err := ioutil.ReadFile(config.CertFilePath)
if err != nil {
return nil, fmt.Errorf("Could not read %s: %s", config.CertFilePath, err)
}
block, rest = pem.Decode(certPEM)
if block == nil {
return nil, errors.New("Invalid cert format")
}
if len(rest) > 0 {
return nil, errors.New("Invalid cert format: too many certs")
}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return nil, err
	}
// Set local vars from config struct
m.mtx.Lock()
defer m.mtx.Unlock()
m.config = &configuration{}
m.config.TrustDomain = config.TrustDomain
m.config.TTL = config.TTL
m.config.KeyFilePath = config.KeyFilePath
m.config.CertFilePath = config.CertFilePath
m.cert = cert
m.key = key
log.Print("Plugin successfully configured")
return &common.ConfigureResponse{}, nil
}
func (*memoryPlugin) GetPluginInfo(req *sriplugin.GetPluginInfoRequest) (*sriplugin.GetPluginInfoResponse, error) {
log.Print("Getting plugin information")
return &sriplugin.GetPluginInfoResponse{}, nil
}
func (m *memoryPlugin) SubmitCSR(request *upstreamca.SubmitCSRRequest) (*upstreamca.SubmitCSRResponse, error) {
m.mtx.RLock()
defer m.mtx.RUnlock()
log.Print("Starting SubmitCSR")
if m.cert == nil {
return nil, errors.New("Invalid state: no cert")
}
if m.key == nil {
return nil, errors.New("Invalid state: no key")
}
csr, err := ParseSpiffeCsr(request.Csr, m.config.TrustDomain)
if err != nil {
return nil, err
}
serial := atomic.AddInt64(&m.serial, 1)
now := time.Now()
expiry, err := time.ParseDuration(m.config.TTL)
if err != nil {
return nil, fmt.Errorf("Unable to parse TTL: %s", err)
}
template := x509.Certificate{
ExtraExtensions: csr.Extensions,
Subject: csr.Subject,
Issuer: m.cert.Subject,
SerialNumber: big.NewInt(serial),
NotBefore: now,
NotAfter: now.Add(expiry),
KeyUsage: x509.KeyUsageDigitalSignature |
x509.KeyUsageCertSign |
x509.KeyUsageCRLSign,
BasicConstraintsValid: true,
IsCA: true,
}
cert, err := x509.CreateCertificate(rand.Reader,
&template, m.cert, csr.PublicKey, m.key)
if err != nil {
return nil, err
}
log.Print("Successfully created certificate")
return &upstreamca.SubmitCSRResponse{
Cert: cert,
UpstreamTrustBundle: m.cert.Raw,
}, nil
}
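// ParseSpiffeCsr parses a DER-encoded CSR, verifies its signature and checks that it carries
// exactly one URI SAN: a spiffe:// ID belonging to the given trust domain.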
func ParseSpiffeCsr(csrDER []byte, trustDomain string) (csr *x509.CertificateRequest, err error) {
csr, err = x509.ParseCertificateRequest(csrDER)
if err != nil {
return nil, err
}
err = csr.CheckSignature()
if err != nil {
return nil, errors.New("Failed to check certificate request signature: " + err.Error())
}
urinames, err := uri.GetURINamesFromExtensions(&csr.Extensions)
if err != nil {
return nil, err
}
if len(urinames) != 1 {
return nil, errors.New("The CSR must have exactly one URI SAN")
}
csrSpiffeID, err := url.Parse(urinames[0])
if err != nil {
return nil, err
}
log.Printf("Parsing CSR with SPIFFE ID: '%v'", csrSpiffeID.String())
if csrSpiffeID.Scheme != "spiffe" {
return nil, fmt.Errorf("SPIFFE ID '%v' is not prefixed with the spiffe:// scheme.", csrSpiffeID)
}
if csrSpiffeID.Host != trustDomain {
return nil, fmt.Errorf("The SPIFFE ID '%v' does not reside in the trust domain '%v'.", urinames[0], trustDomain)
}
return csr, nil
}
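// NewWithDefault returns a memoryPlugin configured with the given key and certificate paths,
// the "localhost" trust domain and a 1h certificate TTL.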
func NewWithDefault(keyFilePath string, certFilePath string) (m upstreamca.UpstreamCa, err error) {
config := configuration{
TrustDomain: "localhost",
KeyFilePath: keyFilePath,
CertFilePath: certFilePath,
TTL: "1h",
}
jsonConfig, err := json.Marshal(config)
pluginConfig := &iface.ConfigureRequest{
Configuration: string(jsonConfig),
}
m = &memoryPlugin{
mtx: &sync.RWMutex{},
}
_, err = m.Configure(pluginConfig)
return m, err
}
| 1 | 8,525 | will your editor integrate `gofmt`, `goimports`, etc...? | spiffe-spire | go |
@@ -78,7 +78,12 @@ public class ProductPhotosFragment extends BaseFragment implements ImagesAdapter
public void onViewCreated(@NonNull View view, @Nullable Bundle savedInstanceState) {
super.onViewCreated(view, savedInstanceState);
Intent intent = getActivity().getIntent();
- final State state = (State) intent.getExtras().getSerializable("state");
+ State state;
+ if(intent!=null && intent.getExtras()!=null && intent.getExtras().getSerializable("state")!=null){
+ state = (State) intent.getExtras().getSerializable("state");
+ }else{
+ state = ProductFragment.mState;
+ }
product = state.getProduct();
mFragment = this;
// initialize the arraylist | 1 | package openfoodfacts.github.scrachx.openfood.fragments;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.net.Uri;
import android.os.Bundle;
import android.provider.MediaStore;
import android.provider.Settings;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.support.v4.app.ActivityCompat;
import android.support.v4.app.Fragment;
import android.support.v4.content.ContextCompat;
import android.support.v7.widget.GridLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageButton;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import butterknife.BindView;
import okhttp3.MediaType;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.ResponseBody;
import openfoodfacts.github.scrachx.openfood.R;
import openfoodfacts.github.scrachx.openfood.models.Product;
import openfoodfacts.github.scrachx.openfood.models.ProductImage;
import openfoodfacts.github.scrachx.openfood.models.ProductImageField;
import openfoodfacts.github.scrachx.openfood.models.State;
import openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIClient;
import openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIService;
import openfoodfacts.github.scrachx.openfood.views.FullScreenImage;
import openfoodfacts.github.scrachx.openfood.views.adapters.ImagesAdapter;
/**
* @author prajwalm
*/
public class ProductPhotosFragment extends BaseFragment implements ImagesAdapter.OnImageClickInterface {
private OpenFoodAPIClient openFoodAPIClient;
private Product product;
private ProductPhotosFragment mFragment;
    // An ArrayList to store the image names
private ArrayList<String> imageNames;
private RecyclerView imagesRecycler;
private ImagesAdapter adapter;
private HashMap<String, String> imgMap;
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
openFoodAPIClient = new OpenFoodAPIClient(getActivity());
return createView(inflater, container, R.layout.fragment_product_photos);
}
@Override
public void onViewCreated(@NonNull View view, @Nullable Bundle savedInstanceState) {
super.onViewCreated(view, savedInstanceState);
Intent intent = getActivity().getIntent();
final State state = (State) intent.getExtras().getSerializable("state");
product = state.getProduct();
mFragment = this;
// initialize the arraylist
imageNames = new ArrayList<>();
imagesRecycler = view.findViewById(R.id.images_recycler);
imgMap = new HashMap<>();
openFoodAPIClient.getImages(product.getCode(), new OpenFoodAPIClient.OnImagesCallback() {
@Override
public void onImageResponse(boolean value, String response) {
if (value && response != null) {
Log.i("response", response);
// a json object referring to base json object
JSONObject jsonObject = null;
try {
jsonObject = new JSONObject(response);
} catch (JSONException e) {
e.printStackTrace();
}
// a json object referring to images
JSONObject images = null;
try {
images = jsonObject.getJSONObject("product").getJSONObject("images");
} catch (JSONException e) {
e.printStackTrace();
}
if (images != null) {
// loop through all the image names and store them in a array list
for (int i = 0; i < images.names().length(); i++) {
try {
                                // Skip images whose names contain nutrients, ingredients or other,
                                // as they are duplicates and do not load well.
if (images.names().getString(i).contains("n") ||
images.names().getString(i).contains("f") ||
images.names().getString(i).contains("i") ||
images.names().getString(i).contains("o")) {
continue;
}
imageNames.add(images.names().getString(i));
} catch (JSONException e) {
e.printStackTrace();
}
}
}
//Check if user is logged in
SharedPreferences preferences = getActivity().getSharedPreferences("login", 0);
String login = preferences.getString("user", null);
if (login != null) {
adapter = new ImagesAdapter(getContext(), imageNames, product.getCode(), ProductPhotosFragment.this::onImageClick, product, true);
} else {
adapter = new ImagesAdapter(getContext(), imageNames, product.getCode(), ProductPhotosFragment.this::onImageClick, product, false);
}
imagesRecycler.setAdapter(adapter);
imagesRecycler.setLayoutManager(new GridLayoutManager(getContext(), 3));
}
}
});
}
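    // Opens the given image URL in the FullScreenImage activity.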
public void openFullScreen(String mUrlImage) {
if (mUrlImage != null) {
Intent intent = new Intent(getContext(), FullScreenImage.class);
Bundle bundle = new Bundle();
bundle.putString("imageurl", mUrlImage);
intent.putExtras(bundle);
startActivity(intent);
}
}
@Override
public void onImageClick(int position) {
String baseUrlString = "https://static.openfoodfacts.org/images/products/";
String barcodePattern = product.getCode();
if (barcodePattern.length() > 8) {
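            // Barcodes longer than 8 digits are split into the xxx/xxx/xxx/... folder layout
            // used for product images on static.openfoodfacts.org.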
barcodePattern = new StringBuilder(product.getCode())
.insert(3, "/")
.insert(7, "/")
.insert(11, "/")
.toString();
}
String finalUrlString = baseUrlString + barcodePattern + "/" + imageNames.get(position) + ".jpg";
imgMap.put("imgid", imageNames.get(position));
imgMap.put("id", ProductImageField.OTHER.toString() + '_' + product.getLang());
openFullScreen(finalUrlString);
}
}
| 1 | 66,339 | looks like the code is not properly formatted. for example here are some missing spaces in **if** command. please take care and reformat the code using default android formatting. | openfoodfacts-openfoodfacts-androidapp | java |
@@ -218,6 +218,15 @@ def __add_filtering_arguments(parser, defaults=None, diff_mode=False):
default=init_default('severity'),
help="Filter results by severities.")
+ f_group.add_argument('--bug-path-length',
+ type=str,
+ dest='bug_path_length',
+ default=argparse.SUPPRESS,
+ help="Filter results by bug path length. This has "
+ "the following format: <minimum_bug_path_length>"
+ ":<maximum_bug_path_length>. Valid values are: "
+ "\"4:10\", \"4:\", \":10\"")
+
f_group.add_argument('--tag',
nargs='*',
dest="tag", | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
The CodeChecker command-line client can be used to view information about
analysis reports found on a running viewer 'server' from the command line.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import argparse
import getpass
import datetime
import sys
from codechecker_client import cmd_line_client
from codechecker_client import product_client
from codechecker_client import source_component_client, token_client
from codechecker_common import logger
from codechecker_common import output_formatters
from codechecker_common import util
DEFAULT_FILTER_VALUES = {
'review_status': ['unreviewed', 'confirmed'],
'detection_status': ['new', 'reopened', 'unresolved'],
'uniqueing': 'off'
}
class NewLineDefaultHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
def _split_lines(self, text, width):
"""
Split a multi line string into multiple lines and wraps those lines so
every line is at most 'width' character long.
"""
lines = []
for line in text.splitlines():
w_lines = argparse.HelpFormatter._split_lines(self, line, width)
for w_line in w_lines:
lines.append(w_line)
return lines
def valid_time(t):
"""
Constructs a datetime from a 'year:month:day:hour:minute:second'-formatted
string.
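    Missing components default to zero, so e.g. '2018:06:01' yields
    2018-06-01 00:00:00.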
"""
try:
parts = map(int, t.split(':'))
parts = parts + [0] * (6 - len(parts))
year, month, day, hour, minute, second = parts
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError as ex:
raise argparse.ArgumentTypeError(ex)
def get_argparser_ctor_args():
"""
This method returns a dict containing the kwargs for constructing an
argparse.ArgumentParser (either directly or as a subparser).
"""
return {
'prog': 'CodeChecker cmd',
'formatter_class': argparse.ArgumentDefaultsHelpFormatter,
# Description is shown when the command's help is queried directly
'description': "The command-line client is used to connect to a "
"running 'CodeChecker server' (either remote or "
"local) and quickly inspect analysis results, such as "
"runs, individual defect reports, compare analyses, "
"etc. Please see the invidual subcommands for further "
"details.",
# Help is shown when the "parent" CodeChecker command lists the
# individual subcommands.
'help': "View analysis results on a running server from the "
"command line.",
}
def __add_common_arguments(parser,
needs_product_url=True,
has_matrix_output=False,
allow_html_output=False):
"""
Add some common arguments, like server address and verbosity, to parser.
"""
common_group = parser.add_argument_group('common arguments')
if needs_product_url is None:
# Explicitly not add anything, the command does not connect to a
# server.
pass
elif needs_product_url:
# Command connects to a product on a server.
common_group.add_argument('--url',
type=str,
metavar='PRODUCT_URL',
dest="product_url",
default="localhost:8001/Default",
required=False,
help="The URL of the product which will be "
"accessed by the client, in the "
"format of"
" '[http[s]://]host:port/Endpoint'.")
else:
# Command connects to a server directly.
common_group.add_argument('--url',
type=str,
metavar='SERVER_URL',
dest="server_url",
default="localhost:8001",
required=False,
help="The URL of the server to access, "
"in the format of"
" '[http[s]://]host:port'.")
if has_matrix_output:
output_formats = ["plaintext"] + output_formatters.USER_FORMATS
if allow_html_output:
output_formats += ["html"]
common_group.add_argument('-o', '--output',
dest="output_format",
required=False,
# TODO: 'plaintext' only kept for legacy.
default="plaintext",
choices=output_formats,
help="The output format to use in showing "
"the data.")
if allow_html_output:
common_group.add_argument('-e', '--export-dir',
dest="export_dir",
default=argparse.SUPPRESS,
help="Store the output in the given"
"folder.")
common_group.add_argument('-c', '--clean',
dest="clean",
required=False,
action='store_true',
default=argparse.SUPPRESS,
help="Delete output results stored in"
"the output directory. (By "
"default, it would keep output "
"files and overwrites only those "
"that contain any reports).")
logger.add_verbose_arguments(common_group)
def __add_filtering_arguments(parser, defaults=None, diff_mode=False):
"""
Add some common filtering arguments to the given parser.
"""
def init_default(dest):
return defaults[dest] if defaults and dest in defaults \
else argparse.SUPPRESS
f_group = parser.add_argument_group('filter arguments')
warn_diff_mode = ""
if diff_mode:
warn_diff_mode = " This can be used only if basename or newname is " \
"a run name (on the remote server)."
f_group.add_argument('--uniqueing',
dest="uniqueing",
required=False,
default=init_default('uniqueing'),
choices=['on', 'off'],
help="The same bug may appear several times if it is "
"found on different execution paths, i.e. "
"through different function calls. By turning "
"on uniqueing a report appears only once even "
"if it is found on several paths.")
f_group.add_argument('--report-hash',
nargs='*',
dest="report_hash",
metavar='REPORT_HASH',
default=init_default('report_hash'),
help="Filter results by report hashes.")
f_group.add_argument('--review-status',
nargs='*',
dest="review_status",
metavar='REVIEW_STATUS',
default=init_default('review_status'),
help="Filter results by review statuses." +
warn_diff_mode)
f_group.add_argument('--detection-status',
nargs='*',
dest="detection_status",
metavar='DETECTION_STATUS',
default=init_default('detection_status'),
help="Filter results by detection statuses." +
warn_diff_mode)
f_group.add_argument('--severity',
nargs='*',
dest="severity",
metavar='SEVERITY',
default=init_default('severity'),
help="Filter results by severities.")
f_group.add_argument('--tag',
nargs='*',
dest="tag",
metavar='TAG',
default=init_default('tag'),
help="Filter results by version tag names." +
warn_diff_mode)
f_group.add_argument('--file',
nargs='*',
dest="file_path",
metavar='FILE_PATH',
default=init_default('file_path'),
help="Filter results by file path. "
"The file path can contain multiple * "
"quantifiers which matches any number of "
"characters (zero or more). So if you have "
"/a/x.cpp and /a/y.cpp then \"/a/*.cpp\" "
"selects both.")
f_group.add_argument('--checker-name',
nargs='*',
dest="checker_name",
metavar='CHECKER_NAME',
default=init_default('checker_name'),
help="Filter results by checker names. "
"The checker name can contain multiple * "
"quantifiers which matches any number of "
"characters (zero or more). So for example "
"\"*DeadStores\" will matches "
"\"deadcode.DeadStores\"")
f_group.add_argument('--checker-msg',
nargs='*',
dest="checker_msg",
metavar='CHECKER_MSG',
default=init_default('checker_msg'),
help="Filter results by checker messages."
"The checker message can contain multiple * "
"quantifiers which matches any number of "
"characters (zero or more).")
f_group.add_argument('--component',
nargs='*',
dest="component",
metavar='COMPONENT',
default=argparse.SUPPRESS,
help="Filter results by source components." +
warn_diff_mode)
f_group.add_argument('--detected-at',
type=valid_time,
dest="detected_at",
metavar='TIMESTAMP',
default=argparse.SUPPRESS,
help="Filter results by detection date. The format "
" of TIMESTAMP is "
"'year:month:day:hour:minute:second' (the "
"\"time\" part can be omitted, in which case "
"midnight (00:00:00) is used).")
f_group.add_argument('--fixed-at',
type=valid_time,
dest="fixed_at",
metavar='TIMESTAMP',
default=argparse.SUPPRESS,
help="Filter results by fix date. The format "
" of TIMESTAMP is "
"'year:month:day:hour:minute:second' (the "
"\"time\" part can be omitted, in which case "
"midnight (00:00:00) is used).")
f_group.add_argument('-s', '--suppressed',
default=argparse.SUPPRESS,
dest="suppressed",
action='store_true',
help="DEPRECATED. Use the '--filter' option to get "
"false positive (suppressed) results. Show only "
"suppressed results instead of only "
"unsuppressed ones.")
f_group.add_argument('--filter',
type=str,
dest='filter',
default=argparse.SUPPRESS,
help="DEPRECATED. Filter results. Use separated "
"filter options to filter the results. The "
"filter string has the following format: "
"[<SEVERITIES>]:[<CHECKER_NAMES>]:"
"[<FILE_PATHS>]:[<DETECTION_STATUSES>]:"
"[<REVIEW_STATUSES>] where severites, "
"checker_names, file_paths, detection_statuses, "
"review_statuses should be a comma separated "
"list, e.g.: \"high,medium:unix,core:*.cpp,*.h:"
"new,unresolved:false_positive,intentional\"")
def __register_results(parser):
"""
Add argparse subcommand parser for the "list analysis results" action.
"""
parser.add_argument(type=str,
dest="name",
metavar='RUN_NAMES',
help="Names of the analysis runs to show result "
"summaries of. This has the following format: "
"<run_name_1>:<run_name_2>:<run_name_3> "
"where run names can contain * quantifiers which "
"matches any number of characters (zero or "
"more). So if you have run_1_a_name, "
"run_2_b_name, run_2_c_name, run_3_d_name then "
"\"run_2*:run_3_d_name\" selects the last three "
"runs. Use 'CodeChecker cmd runs' to get the "
"available runs.")
parser.add_argument('--details',
dest="details",
action="store_true",
required=False,
default=argparse.SUPPRESS,
help="Get report details for reports such as bug path "
"events, bug report points etc.")
__add_filtering_arguments(parser, DEFAULT_FILTER_VALUES)
def __register_diff(parser):
"""
Add argparse subcommand parser for the "diff results" action.
"""
parser.add_argument('-b', '--basename',
type=str,
dest="basename",
metavar='BASE_RUN',
required=True,
default=argparse.SUPPRESS,
help="The 'base' (left) side of the difference: this "
"analysis run is used as the initial state in "
"the comparison. The parameter can be a run name "
"(on the remote server) or a local report "
"directory (result of the analyze command). In "
"case of run name the the basename can contain * "
"quantifiers which matches any number of "
"characters (zero or more). So if you have "
"run-a-1, run-a-2 and run-b-1 "
"then \"run-a*\" selects the first two.")
parser.add_argument('-n', '--newname',
type=str,
dest="newname",
metavar='NEW_RUN',
required=True,
default=argparse.SUPPRESS,
help="The 'new' (right) side of the difference: this "
"analysis run is compared to the -b/--basename "
"run. The parameter can be a run name "
"(on the remote server) or a local "
"report directory "
"(result of the analyze command). In case of run "
"name the newname can contain * quantifiers "
"which matches any number of characters "
"(zero or more). So if you have "
"run-a-1, run-a-2 and run-b-1 "
"then \"run-a*\" selects the first two.")
__add_filtering_arguments(parser, DEFAULT_FILTER_VALUES, True)
group = parser.add_argument_group("comparison modes")
group = group.add_mutually_exclusive_group(required=True)
group.add_argument('--new',
dest="new",
default=argparse.SUPPRESS,
action='store_true',
help="Show results that didn't exist in the 'base' "
"but appear in the 'new' run.")
group.add_argument('--resolved',
dest="resolved",
default=argparse.SUPPRESS,
action='store_true',
help="Show results that existed in the 'base' but "
"disappeared from the 'new' run.")
group.add_argument('--unresolved',
dest="unresolved",
default=argparse.SUPPRESS,
action='store_true',
help="Show results that appear in both the 'base' and "
"the 'new' run.")
def __handle(args):
"""Custom handler for 'diff' so custom error messages can be
printed without having to capture 'parser' in main."""
output_dir = ['-e', '--export-dir']
if args.output_format == 'html' and \
not any(util.arg_match(output_dir, sys.argv[1:])):
parser.error("argument --output html: not allowed without "
"argument --export-dir")
cmd_line_client.handle_diff_results(args)
parser.set_defaults(func=__handle)
def __register_sum(parser):
"""
Add argparse subcommand parser for the "list result count by checker"
action.
"""
name_group = parser.add_mutually_exclusive_group(required=True)
name_group.add_argument('-n', '--name',
type=str,
nargs='+',
dest="names",
metavar='RUN_NAME',
default=argparse.SUPPRESS,
help="Names of the analysis runs to show result "
"count breakdown for. This has the following "
"format: <run_name_1>:<run_name_2>:"
"<run_name_3> where run names can contain "
"multiple * quantifiers which matches any "
"number of characters (zero or more). So if "
"you have run_1_a_name, run_2_b_name, "
"run_2_c_name, run_3_d_name then "
"\"run_2*:run_3_d_name\" selects the last "
"three runs. Use 'CodeChecker cmd runs' to "
"get the available runs.")
name_group.add_argument('-a', '--all',
dest="all_results",
action='store_true',
default=argparse.SUPPRESS,
help="Show breakdown for all analysis runs.")
parser.add_argument('--disable-unique',
dest="disable_unique",
action='store_true',
default=argparse.SUPPRESS,
help="DEPRECATED. Use the '--uniqueing' option to "
"get uniqueing results. List all bugs even if "
"these end up in the same bug location, but "
"reached through different paths. By uniqueing "
"the bugs a report will be appeared only once "
"even if it is found on several paths.")
default_filter_values = DEFAULT_FILTER_VALUES
default_filter_values['uniqueing'] = 'on'
__add_filtering_arguments(parser, default_filter_values)
def __register_delete(parser):
"""
Add argparse subcommand parser for the "delete runs" action.
"""
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-n', '--name',
type=str,
nargs='+',
dest="name",
metavar='RUN_NAME',
default=argparse.SUPPRESS,
help="Full name(s) of the analysis run or runs to "
"delete.")
group.add_argument('--all-before-run',
type=str,
dest="all_before_run",
metavar='RUN_NAME',
default=argparse.SUPPRESS,
help="Delete all runs that were stored to the server "
"BEFORE the specified one.")
group.add_argument('--all-after-run',
type=str,
dest="all_after_run",
metavar='RUN_NAME',
default=argparse.SUPPRESS,
help="Delete all runs that were stored to the server "
"AFTER the specified one.")
group.add_argument('--all-after-time',
type=valid_time,
dest="all_after_time",
metavar='TIMESTAMP',
default=argparse.SUPPRESS,
help="Delete all analysis runs that were stored to the "
"server AFTER the given timestamp. The format of "
"TIMESTAMP is "
"'year:month:day:hour:minute:second' (the "
"\"time\" part can be omitted, in which case "
"midnight (00:00:00) is used).")
group.add_argument('--all-before-time',
type=valid_time,
dest="all_before_time",
metavar='TIMESTAMP',
default=argparse.SUPPRESS,
help="Delete all analysis runs that were stored to the "
"server BEFORE the given timestamp. The format of "
"TIMESTAMP is "
"'year:month:day:hour:minute:second' (the "
"\"time\" part can be omitted, in which case "
"midnight (00:00:00) is used).")
def __register_suppress(parser):
"""
Add argparse subcommand parser for the "suppress file management" action.
"""
parser.add_argument(type=str,
dest="name",
metavar='RUN_NAME',
default=argparse.SUPPRESS,
help="Name of the analysis run to suppress or "
"unsuppress a report in.")
parser.add_argument('-f', '--force',
dest="force",
action='store_true',
default=argparse.SUPPRESS,
help="Enable suppression of already suppressed "
"reports.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-i', '--import',
type=str,
dest="input",
metavar='SUPPRESS_FILE',
default=argparse.SUPPRESS,
help="Import suppression from the suppress file into "
"the database.")
def __register_products(parser):
"""
Add argparse subcommand parser for the "product management" action.
"""
def __register_add(parser):
"""
Add argparse subcommand parser for the "add new product" action.
"""
parser.add_argument("endpoint",
type=str,
metavar='ENDPOINT',
default=argparse.SUPPRESS,
help="The URL endpoint where clients can access "
"the analysis results for this product.")
parser.add_argument('-n', '--name',
type=str,
dest="display_name",
default=argparse.SUPPRESS,
required=False,
help="A custom display name for the product, "
"which will be shown in the viewer. This "
"is purely for decoration and user "
"experience, program calls use the "
"<ENDPOINT>.")
parser.add_argument('--description',
type=str,
dest="description",
default=argparse.SUPPRESS,
required=False,
help="A custom textual description to be shown "
"alongside the product.")
dbmodes = parser.add_argument_group(
"database arguments",
"NOTE: These database arguments are relative to the server "
"machine, as it is the server which will make the database "
"connection.")
dbmodes = dbmodes.add_mutually_exclusive_group(required=False)
SQLITE_PRODUCT_ENDPOINT_DEFAULT_VAR = '<ENDPOINT>.sqlite'
dbmodes.add_argument('--sqlite',
type=str,
dest="sqlite",
metavar='SQLITE_FILE',
default=SQLITE_PRODUCT_ENDPOINT_DEFAULT_VAR,
required=False,
help="Path of the SQLite database file to use. "
"Not absolute paths will be relative to "
"the server's <CONFIG_DIRECTORY>.")
dbmodes.add_argument('--postgresql',
dest="postgresql",
action='store_true',
required=False,
default=argparse.SUPPRESS,
help="Specifies that a PostgreSQL database is "
"to be used instead of SQLite. See the "
"\"PostgreSQL arguments\" section on how "
"to configure the database connection.")
PGSQL_PRODUCT_ENDPOINT_DEFAULT_VAR = '<ENDPOINT>'
pgsql = parser.add_argument_group(
"PostgreSQL arguments",
"Values of these arguments are ignored, unless '--postgresql' is "
"specified! The database specified here must exist, and be "
"connectible by the server.")
# TODO: --dbSOMETHING arguments are kept to not break interface from
# old command. Database using commands such as "CodeChecker store" no
# longer supports these --- it would be ideal to break and remove args
# with this style and only keep --db-SOMETHING.
pgsql.add_argument('--dbaddress', '--db-host',
type=str,
dest="dbaddress",
default="localhost",
required=False,
help="Database server address.")
pgsql.add_argument('--dbport', '--db-port',
type=int,
dest="dbport",
default=5432,
required=False,
help="Database server port.")
pgsql.add_argument('--dbusername', '--db-username',
type=str,
dest="dbusername",
default=PGSQL_PRODUCT_ENDPOINT_DEFAULT_VAR,
required=False,
help="Username to use for connection.")
pgsql.add_argument('--dbpassword', '--db-password',
type=str,
dest="dbpassword",
default="",
required=False,
help="Password to use for authenticating the "
"connection.")
pgsql.add_argument('--dbname', '--db-name',
type=str,
dest="dbname",
default=PGSQL_PRODUCT_ENDPOINT_DEFAULT_VAR,
required=False,
help="Name of the database to use.")
def __handle(args):
"""Custom handler for 'add' so custom error messages can be
printed without having to capture 'parser' in main."""
def arg_match(options):
"""Checks and selects the option string specified in 'options'
that are present in the invocation argv."""
matched_args = []
for option in options:
if any([arg if option.startswith(arg) else None
for arg in sys.argv[1:]]):
matched_args.append(option)
continue
return matched_args
# See if there is a "PostgreSQL argument" specified in the
# invocation without '--postgresql' being there. There is no way
# to distinguish a default argument and a deliberately specified
# argument without inspecting sys.argv.
options = ['--dbaddress', '--dbport', '--dbusername', '--dbname',
'--db-host', '--db-port', '--db-username', '--db-name']
psql_args_matching = arg_match(options)
if any(psql_args_matching) and \
'postgresql' not in args:
first_matching_arg = next(iter([match for match
in psql_args_matching]))
parser.error("argument {0}: not allowed without argument "
"--postgresql".format(first_matching_arg))
# parser.error() terminates with return code 2.
# Some arguments get a dynamic default value that depends on the
# value of another argument.
if args.sqlite == SQLITE_PRODUCT_ENDPOINT_DEFAULT_VAR:
args.sqlite = args.endpoint + '.sqlite'
if args.dbusername == PGSQL_PRODUCT_ENDPOINT_DEFAULT_VAR:
args.dbusername = args.endpoint
if args.dbname == PGSQL_PRODUCT_ENDPOINT_DEFAULT_VAR:
args.dbname = args.endpoint
if 'postgresql' not in args:
# The --db-SOMETHING arguments are irrelevant if --postgresql
# is not used.
delattr(args, 'dbaddress')
delattr(args, 'dbport')
delattr(args, 'dbusername')
delattr(args, 'dbpassword')
delattr(args, 'dbname')
else:
# If --postgresql is given, --sqlite is useless.
delattr(args, 'sqlite')
# If everything is fine, do call the handler for the subcommand.
product_client.handle_add_product(args)
parser.set_defaults(func=__handle)
def __register_del(parser):
"""
Add argparse subcommand parser for the "delete product" action.
"""
parser.add_argument("endpoint",
type=str,
metavar='ENDPOINT',
default=argparse.SUPPRESS,
help="The URL endpoint where clients can access "
"the analysis results for the product.")
subcommands = parser.add_subparsers(title='available actions')
# Create handlers for individual subcommands.
list_p = subcommands.add_parser(
'list',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="List the name and basic information about products "
"added to the server.",
help="List products available on the server.")
list_p.set_defaults(func=product_client.handle_list_products)
__add_common_arguments(list_p,
needs_product_url=False, has_matrix_output=True)
add = subcommands.add_parser(
'add',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Create a new product to be managed by the server by "
"providing the product's details and database connection.",
help="Register a new product to the server.")
__register_add(add)
__add_common_arguments(add, needs_product_url=False)
del_p = subcommands.add_parser(
'del',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Removes the specified product from the list of products "
"managed by the server. NOTE: This only removes the "
"association and disconnects the server from the "
"database -- NO actual ANALYSIS DATA is REMOVED. "
"Configuration, such as access control, however, WILL BE "
"LOST!",
help="Delete a product from the server's products.")
__register_del(del_p)
del_p.set_defaults(func=product_client.handle_del_product)
__add_common_arguments(del_p, needs_product_url=False)
def __register_source_components(parser):
"""
Add argparse subcommand parser for the "source component management"
action.
"""
def __register_add(parser):
parser.add_argument("name",
type=str,
metavar='NAME',
default=argparse.SUPPRESS,
help="Unique name of the source component.")
parser.add_argument('--description',
type=str,
dest="description",
default=argparse.SUPPRESS,
required=False,
help="A custom textual description to be shown "
"alongside the source component.")
parser.add_argument('-i', '--import',
type=str,
dest="component_file",
metavar='COMPONENT_FILE',
default=argparse.SUPPRESS,
required=True,
help="Path to the source component file which "
"contains multiple file paths. Each file "
"path should start with a '+' or '-' sign. "
"Results will be listed only from paths with "
"a '+' sign. "
"Results will not be listed from paths with "
"a '-' sign. Let's assume there are three "
"directories: test_files, test_data and "
"test_config. In the given example only the "
"results from the test_files and test_data "
"directories will be listed.\n"
"E.g.: \n"
" +*/test*/*\n"
" -*/test_dat*/*\n"
"Please see the User guide for more "
"information.")
def __register_del(parser):
"""
Add argparse subcommand parser for the "del component" action.
"""
parser.add_argument("name",
type=str,
metavar='NAME',
default=argparse.SUPPRESS,
help="Name of the source component name which "
"will be removed.")
subcommands = parser.add_subparsers(title='available actions')
# Create handlers for individual subcommands.
list_components = subcommands.add_parser(
'list',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="List the name and basic information about source "
"component added to the server.",
help="List source components available on the server.")
list_components.set_defaults(
func=source_component_client.handle_list_components)
__add_common_arguments(list_components, has_matrix_output=True)
add = subcommands.add_parser(
'add',
formatter_class=NewLineDefaultHelpFormatter,
description="Creates a new source component or updates an existing "
"one.",
help="Creates/updates a source component.")
__register_add(add)
add.set_defaults(func=source_component_client.handle_add_component)
__add_common_arguments(add)
del_c = subcommands.add_parser(
'del',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Removes the specified source component.",
help="Delete a source component from the server.")
__register_del(del_c)
del_c.set_defaults(func=source_component_client.handle_del_component)
__add_common_arguments(del_c)
def __register_login(parser):
"""
Add argparse subcommand parser for the "handle authentication" action.
"""
parser.add_argument(type=str,
dest="username",
metavar='USERNAME',
nargs='?',
default=getpass.getuser(),
help="The username to authenticate with.")
parser.add_argument('-d', '--deactivate', '--logout',
dest="logout",
action='store_true',
default=argparse.SUPPRESS,
help="Send a logout request to end your privileged "
"session.")
def __register_runs(parser):
"""
Add argparse subcommand parser for the "list runs by run name" action.
"""
parser.add_argument('-n', '--name',
type=str,
nargs='*',
dest="names",
metavar='RUN_NAME',
default=argparse.SUPPRESS,
required=False,
help="Names of the analysis runs. If this argument is "
"not supplied it will show all runs. This has "
"the following format: \"<run_name_1> "
"<run_name_2> <run_name_3>\" where run names can "
"contain multiple * quantifiers which matches "
"any number of characters (zero or more). So if "
"you have run_1_a_name, run_2_b_name, "
"run_2_c_name, run_3_d_name then \"run_2* "
"run_3_d_name\" shows the last three runs.")
def __register_run_histories(parser):
"""
Add argparse subcommand parser for the "list run histories by run name"
action.
"""
parser.add_argument('-n', '--name',
type=str,
nargs='*',
dest="names",
metavar='RUN_NAME',
default=argparse.SUPPRESS,
required=False,
help="Names of the analysis runs to show history for. "
"If this argument is not supplied it will show "
"the history for all runs. This has the "
"following format: \"<run_name_1> <run_name_2> "
"<run_name_3>\" where run names can contain "
"multiple * quantifiers which matches any number "
"of characters (zero or more). So if you have "
"run_1_a_name, run_2_b_name, run_2_c_name, "
"run_3_d_name then \"run_2* run_3_d_name\" shows "
"history for the last three runs. Use "
"'CodeChecker cmd runs' to get the available "
"runs.")
def __register_token(parser):
"""
Add argparse subcommand parser for the "handle token" action.
"""
def __register_new(parser):
parser.add_argument("--description",
type=str,
metavar='DESCRIPTION',
default=argparse.SUPPRESS,
required=False,
help="A custom textual description to be shown "
"alongside the token.")
def __register_del(parser):
"""
Add argparse subcommand parser for the "del token" action.
"""
parser.add_argument("token",
type=str,
metavar='TOKEN',
default=argparse.SUPPRESS,
help="Personal access token which will be "
"deleted.")
subcommands = parser.add_subparsers(title='available actions')
# Create handlers for individual subcommands.
list_tokens = subcommands.add_parser(
'list',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="List the available personal access tokens.",
help="List tokens available on the server.")
list_tokens.set_defaults(
func=token_client.handle_list_tokens)
__add_common_arguments(list_tokens, has_matrix_output=True,
needs_product_url=False)
new_t = subcommands.add_parser(
'new',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Creating a new personal access token.",
help="Creates a new personal access token.")
__register_new(new_t)
new_t.set_defaults(func=token_client.handle_add_token)
__add_common_arguments(new_t, needs_product_url=False)
del_t = subcommands.add_parser(
'del',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Removes the specified access token.",
help="Deletes a token from the server.")
__register_del(del_t)
del_t.set_defaults(func=token_client.handle_del_token)
__add_common_arguments(del_t, needs_product_url=False)
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
subcommands = parser.add_subparsers(title='available actions')
# Create handlers for individual subcommands.
runs = subcommands.add_parser(
'runs',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="List the analysis runs available on the server.",
help="List the available analysis runs.")
__register_runs(runs)
runs.set_defaults(func=cmd_line_client.handle_list_runs)
__add_common_arguments(runs, has_matrix_output=True)
run_histories = subcommands.add_parser(
'history',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Show run history for some analysis runs.",
help="Show run history of multiple runs.")
__register_run_histories(run_histories)
run_histories.set_defaults(func=cmd_line_client.handle_list_run_histories)
__add_common_arguments(run_histories, has_matrix_output=True)
results = subcommands.add_parser(
'results',
formatter_class=util.RawDescriptionDefaultHelpFormatter,
description="Show the individual analysis reports' summary.",
help="List analysis result (finding) summary for a given run.",
epilog='''Example scenario: List analysis results
------------------------------------------------
Get analysis results for a run:
CodeChecker cmd results my_run
Get analysis results for multiple runs:
CodeChecker cmd results "my_run1:my_run2"
Get analysis results by using regex:
CodeChecker cmd results "my_run*"
Get analysis results for a run and filter the analysis results:
CodeChecker cmd results my_run --severity critical high medium \\
--file "/home/username/my_project/*"
CodeChecker cmd results my_run --review-status confirmed unreviewed \\
--component my_component_name''')
__register_results(results)
results.set_defaults(func=cmd_line_client.handle_list_results)
__add_common_arguments(results, has_matrix_output=True)
diff = subcommands.add_parser(
'diff',
formatter_class=util.RawDescriptionDefaultHelpFormatter,
description="Compare two analysis runs to show the results that "
"differ between the two.",
help="Compare two analysis runs and show the difference.",
epilog='''Example scenario: Compare multiple analysis runs
------------------------------------------------
Compare two runs and show results that didn't exist in the 'run1' but appear in
the 'run2' run:
CodeChecker cmd diff -b run1 -n run2 --new
Compare a remote run with a local report directory and show results that didn't
exist in the remote run 'run1' but appear in the local report directory:
CodeChecker cmd diff -b run1 -n /my_report_dir --new
Compare two runs and show results that exist in both runs and filter results
by multiple severity values:
CodeChecker cmd diff -b run1 -n run2 --unresolved --severity high medium'''
)
__register_diff(diff)
__add_common_arguments(diff, has_matrix_output=True,
allow_html_output=True)
sum_p = subcommands.add_parser(
'sum',
formatter_class=util.RawDescriptionDefaultHelpFormatter,
description="Show checker statistics for some analysis runs.",
help="Show statistics of checkers.",
epilog='''Example scenario: Get checker statistics
------------------------------------------------
Get statistics for a run:
CodeChecker cmd sum -n my_run
Get statistics for all runs filtered by multiple checker names:
CodeChecker cmd sum --all --checker-name "core.*" "deadcode.*"
Get statistics for all runs and only for severity 'high':
CodeChecker cmd sum --all --severity "high"''')
__register_sum(sum_p)
sum_p.set_defaults(func=cmd_line_client.handle_list_result_types)
__add_common_arguments(sum_p, has_matrix_output=True)
token = subcommands.add_parser(
'token',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Access subcommands related to configuring personal "
"access tokens managed by a CodeChecker server. Please "
"see the individual subcommands for details.",
help="Access subcommands related to configuring personal access "
"tokens managed by a CodeChecker server.")
__register_token(token)
del_p = subcommands.add_parser(
'del',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Remove analysis runs from the server based on some "
"criteria. NOTE! When a run is deleted, ALL associated "
"information is permanently lost!",
help="Delete analysis runs.")
__register_delete(del_p)
del_p.set_defaults(func=cmd_line_client.handle_remove_run_results)
__add_common_arguments(del_p)
suppress = subcommands.add_parser(
'suppress',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Imports suppressions from a suppress file to a "
"CodeChecker server.",
help="Manage and import suppressions of a CodeChecker server.")
__register_suppress(suppress)
suppress.set_defaults(func=cmd_line_client.handle_suppress)
__add_common_arguments(suppress)
products = subcommands.add_parser(
'products',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="CodeChecker organises its databases into products. "
"Each product has an individually configured database "
"which stores the analysis results. These subcommands "
"are used to manage the products configured by the "
"server. Please see the individual subcommands for "
"details.",
epilog="Most of these commands require authentication and "
"appropriate access rights. Please see 'CodeChecker cmd "
"login' to authenticate.",
help="Access subcommands related to configuring the products managed "
"by a CodeChecker server.")
__register_products(products)
__add_common_arguments(products, needs_product_url=None)
components = subcommands.add_parser(
'components',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Source components are named collection of directories "
"specified as directory filter.",
help="Access subcommands related to configuring the source components "
"managed by a CodeChecker server.")
__register_source_components(components)
__add_common_arguments(components)
login = subcommands.add_parser(
'login',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Certain CodeChecker servers can require elevated "
"privileges to access analysis results. In such cases "
"it is mandatory to authenticate to the server. This "
"action is used to perform an authentication in the "
"command-line.",
help="Authenticate into CodeChecker servers that require privileges.")
__register_login(login)
login.set_defaults(func=cmd_line_client.handle_login)
__add_common_arguments(login, needs_product_url=False)
# 'cmd' does not have a main() method in itself, as individual subcommands are
# handled later on separately.
| 1 | 10,601 | Please add some valid examples to help message like `"4:10"`, `"4:"`, `":10"` | Ericsson-codechecker | c |
@@ -490,7 +490,10 @@ EOS
# end
def its(attribute, &block)
RSpec.deprecate("Use of rspec-core's `its` method", :replacement => 'the rspec-its gem')
- describe(attribute) do
+ description = attribute
+ description = description.to_s if Symbol === description
+
+ describe(description) do
if Array === attribute
let(:__its_subject) { subject[*attribute] }
else | 1 | module RSpec
module Core
module MemoizedHelpers
# @note `subject` was contributed by Joe Ferris to support the one-liner
# syntax embraced by shoulda matchers:
#
# describe Widget do
# it { is_expected.to validate_presence_of(:name) }
# # or
# it { should validate_presence_of(:name) }
# end
#
# While the examples below demonstrate how to use `subject`
# explicitly in examples, we recommend that you define a method with
# an intention revealing name instead.
#
# @example
#
# # explicit declaration of subject
# describe Person do
# subject { Person.new(:birthdate => 19.years.ago) }
# it "should be eligible to vote" do
# subject.should be_eligible_to_vote
# # ^ ^ explicit reference to subject not recommended
# end
# end
#
# # implicit subject => { Person.new }
# describe Person do
# it "should be eligible to vote" do
# subject.should be_eligible_to_vote
# # ^ ^ explicit reference to subject not recommended
# end
# end
#
# # one-liner syntax - expectation is set on the subject
# describe Person do
# it { is_expected.to be_eligible_to_vote }
# # or
# it { should be_eligible_to_vote }
# end
#
# @note Because `subject` is designed to create state that is reset between
# each example, and `before(:all)` is designed to setup state that is
# shared across _all_ examples in an example group, `subject` is _not_
# intended to be used in a `before(:all)` hook. RSpec 2.13.1 prints
# a warning when you reference a `subject` from `before(:all)` and we plan
# to have it raise an error in RSpec 3.
#
# @see #should
def subject
__memoized.fetch(:subject) do
__memoized[:subject] = begin
described = described_class || self.class.description
Class === described ? described.new : described
end
end
end
# When `should` is called with no explicit receiver, the call is
# delegated to the object returned by `subject`. Combined with an
# implicit subject this supports very concise expressions.
#
# @example
#
# describe Person do
# it { should be_eligible_to_vote }
# end
#
# @see #subject
# @see #is_expected
#
# @note This only works if you are using rspec-expectations.
# @note If you are using RSpec's newer expect-based syntax you may
# want to use `is_expected.to` instead of `should`.
def should(matcher=nil, message=nil)
RSpec::Expectations::PositiveExpectationHandler.handle_matcher(subject, matcher, message)
end
# Just like `should`, `should_not` delegates to the subject (implicit or
# explicit) of the example group.
#
# @example
#
# describe Person do
# it { should_not be_eligible_to_vote }
# end
#
# @see #subject
# @see #is_expected
#
# @note This only works if you are using rspec-expectations.
# @note If you are using RSpec's newer expect-based syntax you may
# want to use `is_expected.to_not` instead of `should_not`.
def should_not(matcher=nil, message=nil)
RSpec::Expectations::NegativeExpectationHandler.handle_matcher(subject, matcher, message)
end
# Wraps the `subject` in `expect` to make it the target of an expectation.
# Designed to read nicely for one-liners.
#
# @example
#
# describe [1, 2, 3] do
# it { is_expected.to be_an Array }
# it { is_expected.not_to include 4 }
# end
#
# @see #subject
# @see #should
# @see #should_not
#
# @note This only works if you are using rspec-expectations.
def is_expected
expect(subject)
end
private
# @private
def __memoized
@__memoized ||= {}
end
# Used internally to customize the behavior of the
# memoized hash when used in a `before(:all)` hook.
#
# @private
class AllHookMemoizedHash
def self.isolate_for_all_hook(example_group_instance)
hash_type = self
example_group_instance.instance_eval do
@__memoized = hash_type.new(example_group_instance)
begin
yield
ensure
@__memoized.preserve_accessed_lets
@__memoized = nil
end
end
end
def initialize(example_group_instance)
@example_group_instance = example_group_instance
@hash = {}
end
def fetch(key, &block)
description = if key == :subject
"subject"
else
"let declaration `#{key}`"
end
::RSpec.warn_deprecation <<-EOS
DEPRECATION: #{description} accessed in #{article} #{hook_expression} hook at:
#{CallerFilter.first_non_rspec_line}
`let` and `subject` declarations are not intended to be called
in #{article} #{hook_expression} hook, as they exist to define state that
is reset between each example, while #{hook_expression} exists to
#{hook_intention}.
This is deprecated behavior that will not be supported in RSpec 3.
EOS
@hash.fetch(key, &block)
end
def []=(key, value)
@hash[key] = value
end
def preserve_accessed_lets
hash = @hash
@example_group_instance.class.class_eval do
hash.each do |key, value|
undef_method(key) if method_defined?(key)
define_method(key) { value }
end
end
end
class Before < self
def hook_expression
"`before(:all)`"
end
def article
"a"
end
def hook_intention
"define state that is shared across examples in an example group"
end
end
class After < self
def hook_expression
"`after(:all)`"
end
def article
"an"
end
def hook_intention
"cleanup state that is shared across examples in an example group"
end
end
end
def self.included(mod)
mod.extend(ClassMethods)
end
module ClassMethods
# Generates a method whose return value is memoized after the first
# call. Useful for reducing duplication between examples that assign
# values to the same local variable.
#
# @note `let` _can_ enhance readability when used sparingly (1,2, or
# maybe 3 declarations) in any given example group, but that can
# quickly degrade with overuse. YMMV.
#
# @note `let` uses an `||=` conditional that has the potential to
# behave in surprising ways in examples that spawn separate threads,
# though we have yet to see this in practice. You've been warned.
#
# @note Because `let` is designed to create state that is reset between
# each example, and `before(:all)` is designed to setup state that is
# shared across _all_ examples in an example group, `let` is _not_
# intended to be used in a `before(:all)` hook. RSpec 2.13.1 prints
# a warning when you reference a `let` from `before(:all)` and we plan
# to have it raise an error in RSpec 3.
#
# @example
#
# describe Thing do
# let(:thing) { Thing.new }
#
# it "does something" do
# # first invocation, executes block, memoizes and returns result
# thing.do_something
#
# # second invocation, returns the memoized value
# thing.should be_something
# end
# end
def let(name, &block)
# We have to pass the block directly to `define_method` to
# allow it to use method constructs like `super` and `return`.
raise "#let or #subject called without a block" if block.nil?
MemoizedHelpers.module_for(self).send(:define_method, name, &block)
# Apply the memoization. The method has been defined in an ancestor
# module so we can use `super` here to get the value.
if block.arity == 1
define_method(name) { __memoized.fetch(name) { |k| __memoized[k] = super(RSpec.current_example, &nil) } }
else
define_method(name) { __memoized.fetch(name) { |k| __memoized[k] = super(&nil) } }
end
end
# Just like `let`, except the block is invoked by an implicit `before`
# hook. This serves a dual purpose of setting up state and providing a
# memoized reference to that state.
#
# @example
#
# class Thing
# def self.count
# @count ||= 0
# end
#
# def self.count=(val)
# @count += val
# end
#
# def self.reset_count
# @count = 0
# end
#
# def initialize
# self.class.count += 1
# end
# end
#
# describe Thing do
# after(:each) { Thing.reset_count }
#
# context "using let" do
# let(:thing) { Thing.new }
#
# it "is not invoked implicitly" do
# Thing.count.should eq(0)
# end
#
# it "can be invoked explicitly" do
# thing
# Thing.count.should eq(1)
# end
# end
#
# context "using let!" do
# let!(:thing) { Thing.new }
#
# it "is invoked implicitly" do
# Thing.count.should eq(1)
# end
#
# it "returns memoized version on first invocation" do
# thing
# Thing.count.should eq(1)
# end
# end
# end
def let!(name, &block)
let(name, &block)
before { __send__(name) }
end
# Declares a `subject` for an example group which can then be wrapped
# with `expect` using `is_expected` to make it the target of an expectation
# in a concise, one-line example.
#
# Given a `name`, defines a method with that name which returns the
# `subject`. This lets you declare the subject once and access it
# implicitly in one-liners and explicitly using an intention revealing
# name.
#
# @param [String,Symbol] name used to define an accessor with an
# intention revealing name
# @param block defines the value to be returned by `subject` in examples
#
# @example
#
# describe CheckingAccount, "with $50" do
# subject { CheckingAccount.new(Money.new(50, :USD)) }
# it { is_expected.to have_a_balance_of(Money.new(50, :USD)) }
# it { is_expected.not_to be_overdrawn }
# end
#
# describe CheckingAccount, "with a non-zero starting balance" do
# subject(:account) { CheckingAccount.new(Money.new(50, :USD)) }
# it { is_expected.not_to be_overdrawn }
# it "has a balance equal to the starting balance" do
# account.balance.should eq(Money.new(50, :USD))
# end
# end
#
# @see MemoizedHelpers#should
def subject(name=nil, &block)
if name
let(name, &block)
alias_method :subject, name
self::NamedSubjectPreventSuper.send(:define_method, name) do
raise NotImplementedError, "`super` in named subjects is not supported"
end
else
let(:subject, &block)
end
end
# Just like `subject`, except the block is invoked by an implicit `before`
# hook. This serves a dual purpose of setting up state and providing a
# memoized reference to that state.
#
# @example
#
# class Thing
# def self.count
# @count ||= 0
# end
#
# def self.count=(val)
# @count += val
# end
#
# def self.reset_count
# @count = 0
# end
#
# def initialize
# self.class.count += 1
# end
# end
#
# describe Thing do
# after(:each) { Thing.reset_count }
#
# context "using subject" do
# subject { Thing.new }
#
# it "is not invoked implicitly" do
# Thing.count.should eq(0)
# end
#
# it "can be invoked explicitly" do
# subject
# Thing.count.should eq(1)
# end
# end
#
# context "using subject!" do
# subject!(:thing) { Thing.new }
#
# it "is invoked implicitly" do
# Thing.count.should eq(1)
# end
#
# it "returns memoized version on first invocation" do
# subject
# Thing.count.should eq(1)
# end
# end
# end
def subject!(name=nil, &block)
subject(name, &block)
before { subject }
end
# Creates a nested example group named by the submitted `attribute`,
# and then generates an example using the submitted block.
#
# @example
#
# # This ...
# describe Array do
# its(:size) { should eq(0) }
# end
#
# # ... generates the same runtime structure as this:
# describe Array do
# describe "size" do
# it "should eq(0)" do
# subject.size.should eq(0)
# end
# end
# end
#
# The attribute can be a `Symbol` or a `String`. Given a `String`
# with dots, the result is as though you concatenated that `String`
# onto the subject in an expression.
#
# @example
#
# describe Person do
# subject do
# Person.new.tap do |person|
# person.phone_numbers << "555-1212"
# end
# end
#
# its("phone_numbers.first") { should eq("555-1212") }
# end
#
# When the subject is a `Hash`, you can refer to the Hash keys by
# specifying a `Symbol` or `String` in an array.
#
# @example
#
# describe "a configuration Hash" do
# subject do
# { :max_users => 3,
# 'admin' => :all_permissions }
# end
#
# its([:max_users]) { should eq(3) }
# its(['admin']) { should eq(:all_permissions) }
#
      #   # You can still access its regular methods this way:
# its(:keys) { should include(:max_users) }
# its(:count) { should eq(2) }
# end
#
# Note that this method does not modify `subject` in any way, so if you
# refer to `subject` in `let` or `before` blocks, you're still
# referring to the outer subject.
#
# @example
#
# describe Person do
# subject { Person.new }
# before { subject.age = 25 }
# its(:age) { should eq(25) }
# end
def its(attribute, &block)
RSpec.deprecate("Use of rspec-core's `its` method", :replacement => 'the rspec-its gem')
describe(attribute) do
if Array === attribute
let(:__its_subject) { subject[*attribute] }
else
let(:__its_subject) do
attribute_chain = attribute.to_s.split('.')
attribute_chain.inject(subject) do |inner_subject, attr|
inner_subject.send(attr)
end
end
end
def should(matcher=nil, message=nil)
RSpec::Expectations::PositiveExpectationHandler.handle_matcher(__its_subject, matcher, message)
end
def should_not(matcher=nil, message=nil)
RSpec::Expectations::NegativeExpectationHandler.handle_matcher(__its_subject, matcher, message)
end
example(&block)
end
end
end
# @api private
#
# Gets the LetDefinitions module. The module is mixed into
# the example group and is used to hold all let definitions.
# This is done so that the block passed to `let` can be
# forwarded directly on to `define_method`, so that all method
# constructs (including `super` and `return`) can be used in
# a `let` block.
#
# The memoization is provided by a method definition on the
# example group that supers to the LetDefinitions definition
# in order to get the value to memoize.
def self.module_for(example_group)
get_constant_or_yield(example_group, :LetDefinitions) do
mod = Module.new do
include Module.new {
example_group.const_set(:NamedSubjectPreventSuper, self)
}
end
example_group.const_set(:LetDefinitions, mod)
mod
end
end
# @api private
def self.define_helpers_on(example_group)
example_group.send(:include, module_for(example_group))
end
if Module.method(:const_defined?).arity == 1 # for 1.8
# @api private
#
# Gets the named constant or yields.
# On 1.8, const_defined? / const_get do not take into
# account the inheritance hierarchy.
def self.get_constant_or_yield(example_group, name)
if example_group.const_defined?(name)
example_group.const_get(name)
else
yield
end
end
else
# @api private
#
# Gets the named constant or yields.
# On 1.9, const_defined? / const_get take into account the
# the inheritance by default, and accept an argument to
# disable this behavior. It's important that we don't
# consider inheritance here; each example group level that
# uses a `let` should get its own `LetDefinitions` module.
def self.get_constant_or_yield(example_group, name)
if example_group.const_defined?(name, (check_ancestors = false))
example_group.const_get(name, check_ancestors)
else
yield
end
end
end
end
end
end
| 1 | 12,189 | Is it only Symbols we're worried about converting? Is it not safe to just call `to_s` anyway? | rspec-rspec-core | rb |
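The reviewer's question above turns on what `attribute` can be by the time `its` receives it: per the docs in this file, a Symbol, a dotted String, or an Array used for Hash access. Below is a minimal standalone Ruby sketch (not taken from rspec-core) of where an unconditional `to_s` stops being a no-op; that Arrays would be turned into their inspect string is one plausible reason for the `Symbol === description` guard in the patch, though the thread itself does not say so.

```ruby
# Standalone illustration of the three attribute forms the `its` docs accept.
symbol_attr = :size
string_attr = "phone_numbers.first"
array_attr  = [:max_users]

symbol_attr.to_s  # => "size"                 (the case the patch converts)
string_attr.to_s  # => "phone_numbers.first"  (to_s is a no-op on a String)
array_attr.to_s   # => "[:max_users]"         (Array#to_s is its inspect form)

# The guarded conversion from the patch leaves Strings and Arrays untouched:
description = array_attr
description = description.to_s if Symbol === description
description       # => [:max_users]
```

Whether the Array case actually matters to `describe` is exactly what the reviewer is probing; the sketch only marks where a blanket `to_s` would change the value being passed on.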
@@ -15,9 +15,6 @@ import (
"github.com/stretchr/testify/require"
)
-// BlockTimeTest is the block time used by workers during testing
-const BlockTimeTest = time.Second
-
// MineDelayTest is the mining delay used by schedulers during testing
const MineDelayTest = time.Millisecond * 500
| 1 | package mining
import (
"context"
"sync"
"time"
"gx/ipfs/QmcmpX42gtDv1fz24kau4wjS9hfwWj5VexWBKgGnWzsyag/go-ipfs-blockstore"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/state"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// BlockTimeTest is the block time used by workers during testing
const BlockTimeTest = time.Second
// MineDelayTest is the mining delay used by schedulers during testing
const MineDelayTest = time.Millisecond * 500
// MockScheduler is a mock Scheduler.
type MockScheduler struct {
mock.Mock
}
// Start is the MockScheduler's Start function.
func (s *MockScheduler) Start(ctx context.Context) (<-chan Output, *sync.WaitGroup) {
args := s.Called(ctx)
return args.Get(0).(<-chan Output), args.Get(1).(*sync.WaitGroup)
}
// TestWorker is a worker with a customizable work function to facilitate
// easy testing.
type TestWorker struct {
WorkFunc func(context.Context, consensus.TipSet, int, chan<- Output)
}
// Mine is the TestWorker's Work function. It simply calls the WorkFunc
// field.
func (w *TestWorker) Mine(ctx context.Context, ts consensus.TipSet, nullBlkCount int, outCh chan<- Output) {
if w.WorkFunc == nil {
panic("must set MutableTestWorker's WorkFunc before calling Work")
}
w.WorkFunc(ctx, ts, nullBlkCount, outCh)
}
// NewTestWorkerWithDeps creates a worker that calls the provided input
// function when Mine() is called.
func NewTestWorkerWithDeps(f func(context.Context, consensus.TipSet, int, chan<- Output)) *TestWorker {
return &TestWorker{
WorkFunc: f,
}
}
// MakeEchoMine returns a test worker function that itself returns the first
// block of the input tipset as output.
func MakeEchoMine(require *require.Assertions) func(context.Context, consensus.TipSet, int, chan<- Output) {
echoMine := func(c context.Context, ts consensus.TipSet, nullBlkCount int, outCh chan<- Output) {
require.NotEqual(0, len(ts))
b := ts.ToSlice()[0]
select {
case outCh <- Output{NewBlock: b}:
case <-c.Done():
}
}
return echoMine
}
const (
// ChannelClosed is returned by the Receive*Ch helper functions to indicate
	// the channel is closed.
ChannelClosed = iota
// ChannelEmpty indicates an empty channel.
ChannelEmpty
// ChannelReceivedValue indicates the channel held a value, which has been
// received.
ChannelReceivedValue
)
// ReceiveInCh returns the channel status.
/*func ReceiveInCh(ch <-chan Input) int {
select {
case _, ok := <-ch:
if ok {
return ChannelReceivedValue
}
return ChannelClosed
default:
return ChannelEmpty
}
}*/
// ReceiveOutCh returns the channel status.
func ReceiveOutCh(ch <-chan Output) int {
select {
case _, ok := <-ch:
if ok {
return ChannelReceivedValue
}
return ChannelClosed
default:
return ChannelEmpty
}
}
// FifthTestView is an implementation of the powertable view used for testing mining
// wherein each miner has 1/5 power.
type FifthTestView struct{}
var _ consensus.PowerTableView = &FifthTestView{}
// Total always returns 5.
func (tv *FifthTestView) Total(ctx context.Context, st state.Tree, bstore blockstore.Blockstore) (uint64, error) {
return uint64(5), nil
}
// Miner always returns 1.
func (tv *FifthTestView) Miner(ctx context.Context, st state.Tree, bstore blockstore.Blockstore, mAddr address.Address) (uint64, error) {
return uint64(1), nil
}
// HasPower always returns true.
func (tv *FifthTestView) HasPower(ctx context.Context, st state.Tree, bstore blockstore.Blockstore, mAddr address.Address) bool {
return true
}
| 1 | 14,705 | Note for those who come across this later: It was moved to `testhelpers.mining.go` so that `testhelpers.NewDaemon` and the `mining/worker_test.go` can share it. | filecoin-project-venus | go |
@@ -12,7 +12,7 @@ class Practice
end
def incomplete_trails
- trails.select(&:incomplete?)
+ trails.select(&:incomplete?).partition(&:in_progress?).flatten
end
private | 1 | class Practice
def initialize(user)
@user = user
end
def has_completed_trails?
completed_trails.any?
end
def just_finished_trails
trails.select(&:just_finished?)
end
def incomplete_trails
trails.select(&:incomplete?)
end
private
attr_reader :user
def trails
Trail.
most_recent_published.
map { |trail| TrailWithProgress.new(trail, user: user) }
end
def completed_trails
trails.select(&:complete?)
end
end
| 1 | 14,347 | What about `sort_by(&:in_progress?)`? Maybe with a `.reverse` thrown in? | thoughtbot-upcase | rb |
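To make the trade-off behind this suggestion concrete, here is a small standalone Ruby sketch; the `Trail` struct is a stand-in for the `TrailWithProgress` objects above, not real upcase code. Two details are worth noting: calling `sort_by(&:in_progress?)` on bare booleans raises, because `true` and `false` do not implement `<=>`, so a sort-based variant needs a comparable key; and `partition` preserves the original relative order within each group, which Ruby's `sort_by` does not guarantee.

```ruby
# Stand-in objects; the real code operates on TrailWithProgress instances.
Trail = Struct.new(:name, :in_progress) do
  def in_progress?
    in_progress
  end
end

trails = [
  Trail.new("ruby",  false),
  Trail.new("rails", true),
  Trail.new("git",   false),
  Trail.new("vim",   true),
]

# Approach from the patch: in-progress trails first, original order kept
# within each group.
by_partition = trails.partition(&:in_progress?).flatten
by_partition.map(&:name)  # => ["rails", "vim", "ruby", "git"]

# trails.sort_by(&:in_progress?) raises ArgumentError
# ("comparison of TrueClass with FalseClass failed"), so the sort-based
# variant needs a comparable key rather than a bare boolean:
by_sort = trails.sort_by { |trail| trail.in_progress? ? 0 : 1 }
by_sort.map(&:name)       # => in-progress trails first, but their relative
                          #    order within each group is not guaranteed
```

With a key that already puts in-progress trails first, no `.reverse` is needed; the choice between the two mostly comes down to whether stable ordering within each group matters.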
@@ -205,7 +205,6 @@ const (
FlagTLSKeyPath = "tls_key_path"
FlagTLSCaPath = "tls_ca_path"
FlagTLSEnableHostVerification = "tls_enable_host_verification"
- FlagGRPC = "grpc"
)
var flagsForExecution = []cli.Flag{ | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cli
import "github.com/urfave/cli"
// Flags used to specify cli command line arguments
const (
FlagUsername = "username"
FlagPassword = "password"
FlagKeyspace = "keyspace"
FlagAddress = "address"
FlagAddressWithAlias = FlagAddress + ", ad"
FlagHistoryAddress = "history_address"
FlagDBAddress = "db_address"
FlagDBPort = "db_port"
FlagHistoryAddressWithAlias = FlagHistoryAddress + ", had"
FlagDomainID = "domain_id"
FlagDomain = "domain"
FlagDomainWithAlias = FlagDomain + ", do"
FlagShardID = "shard_id"
FlagShardIDWithAlias = FlagShardID + ", sid"
FlagWorkflowID = "workflow_id"
FlagWorkflowIDWithAlias = FlagWorkflowID + ", wid, w"
FlagRunID = "run_id"
FlagTreeID = "tree_id"
FlagBranchID = "branch_id"
FlagNumberOfShards = "number_of_shards"
FlagRunIDWithAlias = FlagRunID + ", rid, r"
FlagTargetCluster = "target_cluster"
FlagMinEventID = "min_event_id"
FlagMaxEventID = "max_event_id"
FlagTaskList = "tasklist"
FlagTaskListWithAlias = FlagTaskList + ", tl"
FlagTaskListType = "tasklisttype"
FlagTaskListTypeWithAlias = FlagTaskListType + ", tlt"
FlagWorkflowIDReusePolicy = "workflowidreusepolicy"
FlagWorkflowIDReusePolicyAlias = FlagWorkflowIDReusePolicy + ", wrp"
FlagCronSchedule = "cron"
FlagWorkflowType = "workflow_type"
FlagWorkflowTypeWithAlias = FlagWorkflowType + ", wt"
FlagWorkflowStatus = "status"
FlagWorkflowStatusWithAlias = FlagWorkflowStatus + ", s"
FlagExecutionTimeout = "execution_timeout"
FlagExecutionTimeoutWithAlias = FlagExecutionTimeout + ", et"
FlagDecisionTimeout = "decision_timeout"
FlagDecisionTimeoutWithAlias = FlagDecisionTimeout + ", dt"
FlagContextTimeout = "context_timeout"
FlagContextTimeoutWithAlias = FlagContextTimeout + ", ct"
FlagInput = "input"
FlagInputWithAlias = FlagInput + ", i"
FlagInputFile = "input_file"
FlagInputFileWithAlias = FlagInputFile + ", if"
FlagExcludeFile = "exclude_file"
FlagInputSeparator = "input_separator"
FlagParallism = "input_parallism"
FlagSkipCurrent = "skip_current_open"
FlagInputTopic = "input_topic"
FlagInputTopicWithAlias = FlagInputTopic + ", it"
FlagHostFile = "host_file"
FlagCluster = "cluster"
FlagInputCluster = "input_cluster"
FlagStartOffset = "start_offset"
FlagTopic = "topic"
FlagGroup = "group"
FlagResult = "result"
FlagIdentity = "identity"
FlagDetail = "detail"
FlagReason = "reason"
FlagReasonWithAlias = FlagReason + ", re"
FlagOpen = "open"
FlagOpenWithAlias = FlagOpen + ", op"
FlagMore = "more"
FlagMoreWithAlias = FlagMore + ", m"
FlagAll = "all"
FlagAllWithAlias = FlagAll + ", a"
FlagPageSize = "pagesize"
FlagPageSizeWithAlias = FlagPageSize + ", ps"
FlagEarliestTime = "earliest_time"
FlagEarliestTimeWithAlias = FlagEarliestTime + ", et"
FlagLatestTime = "latest_time"
FlagLatestTimeWithAlias = FlagLatestTime + ", lt"
FlagPrintEventVersion = "print_event_version"
FlagPrintEventVersionWithAlias = FlagPrintEventVersion + ", pev"
FlagPrintFullyDetail = "print_full"
FlagPrintFullyDetailWithAlias = FlagPrintFullyDetail + ", pf"
FlagPrintRawTime = "print_raw_time"
FlagPrintRawTimeWithAlias = FlagPrintRawTime + ", prt"
FlagPrintRaw = "print_raw"
FlagPrintRawWithAlias = FlagPrintRaw + ", praw"
FlagPrintDateTime = "print_datetime"
FlagPrintDateTimeWithAlias = FlagPrintDateTime + ", pdt"
FlagPrintMemo = "print_memo"
FlagPrintMemoWithAlias = FlagPrintMemo + ", pme"
FlagPrintSearchAttr = "print_search_attr"
FlagPrintSearchAttrWithAlias = FlagPrintSearchAttr + ", psa"
FlagPrintJSON = "print_json"
FlagPrintJSONWithAlias = FlagPrintJSON + ", pjson"
FlagDescription = "description"
FlagDescriptionWithAlias = FlagDescription + ", desc"
FlagOwnerEmail = "owner_email"
FlagOwnerEmailWithAlias = FlagOwnerEmail + ", oe"
FlagRetentionDays = "retention"
FlagRetentionDaysWithAlias = FlagRetentionDays + ", rd"
FlagEmitMetric = "emit_metric"
FlagEmitMetricWithAlias = FlagEmitMetric + ", em"
FlagHistoryArchivalStatus = "history_archival_status"
FlagHistoryArchivalStatusWithAlias = FlagHistoryArchivalStatus + ", has"
FlagHistoryArchivalURI = "history_uri"
FlagHistoryArchivalURIWithAlias = FlagHistoryArchivalURI + ", huri"
FlagVisibilityArchivalStatus = "visibility_archival_status"
FlagVisibilityArchivalStatusWithAlias = FlagVisibilityArchivalStatus + ", vas"
FlagVisibilityArchivalURI = "visibility_uri"
FlagVisibilityArchivalURIWithAlias = FlagVisibilityArchivalURI + ", vuri"
FlagName = "name"
FlagNameWithAlias = FlagName + ", n"
FlagOutputFilename = "output_filename"
FlagOutputFilenameWithAlias = FlagOutputFilename + ", of"
FlagOutputFormat = "output"
FlagQueryType = "query_type"
FlagQueryTypeWithAlias = FlagQueryType + ", qt"
FlagQueryRejectCondition = "query_reject_condition"
FlagQueryRejectConditionWithAlias = FlagQueryRejectCondition + ", qrc"
FlagQueryConsistencyLevel = "query_consistency_level"
FlagQueryConsistencyLevelWithAlias = FlagQueryConsistencyLevel + ", qcl"
FlagShowDetail = "show_detail"
FlagShowDetailWithAlias = FlagShowDetail + ", sd"
FlagActiveClusterName = "active_cluster"
FlagActiveClusterNameWithAlias = FlagActiveClusterName + ", ac"
FlagClusters = "clusters"
FlagClustersWithAlias = FlagClusters + ", cl"
FlagIsGlobalDomain = "global_domain"
FlagIsGlobalDomainWithAlias = FlagIsGlobalDomain + ", gd"
FlagDomainData = "domain_data"
FlagDomainDataWithAlias = FlagDomainData + ", dmd"
FlagEventID = "event_id"
FlagEventIDWithAlias = FlagEventID + ", eid"
FlagActivityID = "activity_id"
FlagActivityIDWithAlias = FlagActivityID + ", aid"
FlagMaxFieldLength = "max_field_length"
FlagMaxFieldLengthWithAlias = FlagMaxFieldLength + ", maxl"
FlagSecurityToken = "security_token"
FlagSecurityTokenWithAlias = FlagSecurityToken + ", st"
FlagSkipErrorMode = "skip_errors"
FlagSkipErrorModeWithAlias = FlagSkipErrorMode + ", serr"
FlagHeadersMode = "headers"
FlagHeadersModeWithAlias = FlagHeadersMode + ", he"
FlagMessageType = "message_type"
FlagMessageTypeWithAlias = FlagMessageType + ", mt"
FlagURL = "url"
FlagMuttleyDestination = "muttely_destination"
FlagMuttleyDestinationWithAlias = FlagMuttleyDestination + ", muttley"
FlagIndex = "index"
FlagBatchSize = "batch_size"
FlagBatchSizeWithAlias = FlagBatchSize + ", bs"
FlagMemoKey = "memo_key"
FlagMemo = "memo"
FlagMemoFile = "memo_file"
FlagSearchAttributesKey = "search_attr_key"
FlagSearchAttributesVal = "search_attr_value"
FlagSearchAttributesType = "search_attr_type"
FlagAddBadBinary = "add_bad_binary"
FlagRemoveBadBinary = "remove_bad_binary"
FlagResetType = "reset_type"
FlagResetPointsOnly = "reset_points_only"
FlagResetBadBinaryChecksum = "reset_bad_binary_checksum"
FlagListQuery = "query"
FlagListQueryWithAlias = FlagListQuery + ", q"
FlagBatchType = "batch_type"
FlagBatchTypeWithAlias = FlagBatchType + ", bt"
FlagSignalName = "signal_name"
FlagSignalNameWithAlias = FlagSignalName + ", sig"
FlagRemoveTaskID = "task_id"
FlagRemoveTypeID = "type_id"
FlagRPS = "rps"
FlagJobID = "job_id"
FlagJobIDWithAlias = FlagJobID + ", jid"
FlagYes = "yes"
FlagServiceConfigDir = "service_config_dir"
FlagServiceConfigDirWithAlias = FlagServiceConfigDir + ", scd"
FlagServiceEnv = "service_env"
FlagServiceEnvWithAlias = FlagServiceEnv + ", se"
FlagServiceZone = "service_zone"
FlagServiceZoneWithAlias = FlagServiceZone + ", sz"
FlagEnableTLS = "tls"
FlagTLSCertPath = "tls_cert_path"
FlagTLSKeyPath = "tls_key_path"
FlagTLSCaPath = "tls_ca_path"
FlagTLSEnableHostVerification = "tls_enable_host_verification"
FlagGRPC = "grpc"
)
var flagsForExecution = []cli.Flag{
cli.StringFlag{
Name: FlagWorkflowIDWithAlias,
Usage: "WorkflowID",
},
cli.StringFlag{
Name: FlagRunIDWithAlias,
Usage: "RunID",
},
}
func getFlagsForShow() []cli.Flag {
return append(flagsForExecution, getFlagsForShowID()...)
}
func getFlagsForShowID() []cli.Flag {
return []cli.Flag{
cli.BoolFlag{
Name: FlagPrintDateTimeWithAlias,
Usage: "Print timestamp",
},
cli.BoolFlag{
Name: FlagPrintRawTimeWithAlias,
Usage: "Print raw timestamp",
},
cli.StringFlag{
Name: FlagOutputFilenameWithAlias,
Usage: "Serialize history event to a file",
},
cli.BoolFlag{
Name: FlagPrintFullyDetailWithAlias,
Usage: "Print fully event detail",
},
cli.BoolFlag{
Name: FlagPrintEventVersionWithAlias,
Usage: "Print event version",
},
cli.IntFlag{
Name: FlagEventIDWithAlias,
Usage: "Print specific event details",
},
cli.IntFlag{
Name: FlagMaxFieldLengthWithAlias,
Usage: "Maximum length for each attribute field",
Value: defaultMaxFieldLength,
},
cli.BoolFlag{
Name: FlagResetPointsOnly,
Usage: "Only show events that are eligible for reset",
},
}
}
func getFlagsForStart() []cli.Flag {
return []cli.Flag{
cli.StringFlag{
Name: FlagTaskListWithAlias,
Usage: "TaskList",
},
cli.StringFlag{
Name: FlagWorkflowIDWithAlias,
Usage: "WorkflowID",
},
cli.StringFlag{
Name: FlagWorkflowTypeWithAlias,
Usage: "WorkflowTypeName",
},
cli.IntFlag{
Name: FlagExecutionTimeoutWithAlias,
Usage: "Execution start to close timeout in seconds",
},
cli.IntFlag{
Name: FlagDecisionTimeoutWithAlias,
Value: defaultDecisionTimeoutInSeconds,
Usage: "Decision task start to close timeout in seconds",
},
cli.StringFlag{
Name: FlagCronSchedule,
Usage: "Optional cron schedule for the workflow. Cron spec is as following: \n" +
"\t┌───────────── minute (0 - 59) \n" +
"\t│ ┌───────────── hour (0 - 23) \n" +
"\t│ │ ┌───────────── day of the month (1 - 31) \n" +
"\t│ │ │ ┌───────────── month (1 - 12) \n" +
"\t│ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday) \n" +
"\t│ │ │ │ │ \n" +
"\t* * * * *",
},
cli.IntFlag{
Name: FlagWorkflowIDReusePolicyAlias,
Usage: "Optional input to configure if the same workflow ID is allow to use for new workflow execution. " +
"Available options: 0: AllowDuplicateFailedOnly, 1: AllowDuplicate, 2: RejectDuplicate",
},
cli.StringFlag{
Name: FlagInputWithAlias,
Usage: "Optional input for the workflow, in JSON format. If there are multiple parameters, concatenate them and separate by space.",
},
cli.StringFlag{
Name: FlagInputFileWithAlias,
Usage: "Optional input for the workflow from JSON file. If there are multiple JSON, concatenate them and separate by space or newline. " +
"Input from file will be overwrite by input from command line",
},
cli.StringFlag{
Name: FlagMemoKey,
Usage: "Optional key of memo. If there are multiple keys, concatenate them and separate by space",
},
cli.StringFlag{
Name: FlagMemo,
Usage: "Optional info that can be showed when list workflow, in JSON format. If there are multiple JSON, concatenate them and separate by space. " +
"The order must be same as memo_key",
},
cli.StringFlag{
Name: FlagMemoFile,
Usage: "Optional info that can be listed in list workflow, from JSON format file. If there are multiple JSON, concatenate them and separate by space or newline. " +
"The order must be same as memo_key",
},
cli.StringFlag{
Name: FlagSearchAttributesKey,
Usage: "Optional search attributes keys that can be be used in list query. If there are multiple keys, concatenate them and separate by |. " +
"Use 'cluster get-search-attr' cmd to list legal keys.",
},
cli.StringFlag{
Name: FlagSearchAttributesVal,
Usage: "Optional search attributes value that can be be used in list query. If there are multiple keys, concatenate them and separate by |. " +
"If value is array, use json array like [\"a\",\"b\"], [1,2], [\"true\",\"false\"], [\"2019-06-07T17:16:34-08:00\",\"2019-06-07T18:16:34-08:00\"]. " +
"Use 'cluster get-search-attr' cmd to list legal keys and value types",
},
}
}
func getFlagsForRun() []cli.Flag {
flagsForRun := []cli.Flag{
cli.BoolFlag{
Name: FlagShowDetailWithAlias,
Usage: "Show event details",
},
cli.IntFlag{
Name: FlagMaxFieldLengthWithAlias,
Usage: "Maximum length for each attribute field",
},
}
flagsForRun = append(getFlagsForStart(), flagsForRun...)
return flagsForRun
}
func getCommonFlagsForVisibility() []cli.Flag {
return []cli.Flag{
cli.BoolFlag{
Name: FlagPrintRawTimeWithAlias,
Usage: "Print raw timestamp",
},
cli.BoolFlag{
Name: FlagPrintDateTimeWithAlias,
Usage: "Print full date time in '2006-01-02T15:04:05Z07:00' format",
},
cli.BoolFlag{
Name: FlagPrintMemoWithAlias,
Usage: "Print memo",
},
cli.BoolFlag{
Name: FlagPrintSearchAttrWithAlias,
Usage: "Print search attributes",
},
cli.BoolFlag{
Name: FlagPrintFullyDetailWithAlias,
Usage: "Print full message without table format",
},
cli.BoolFlag{
Name: FlagPrintJSONWithAlias,
Usage: "Print in raw json format",
},
}
}
func getFlagsForList() []cli.Flag {
flagsForList := []cli.Flag{
cli.BoolFlag{
Name: FlagMoreWithAlias,
Usage: "List more pages, default is to list one page of default page size 10",
},
cli.IntFlag{
Name: FlagPageSizeWithAlias,
Value: 10,
Usage: "Result page size",
},
}
flagsForList = append(getFlagsForListAll(), flagsForList...)
return flagsForList
}
func getFlagsForListAll() []cli.Flag {
flagsForListAll := []cli.Flag{
cli.BoolFlag{
Name: FlagOpenWithAlias,
Usage: "List for open workflow executions, default is to list for closed ones",
},
cli.StringFlag{
Name: FlagEarliestTimeWithAlias,
Usage: "EarliestTime of start time, supported formats are '2006-01-02T15:04:05+07:00' and raw UnixNano",
},
cli.StringFlag{
Name: FlagLatestTimeWithAlias,
Usage: "LatestTime of start time, supported formats are '2006-01-02T15:04:05+07:00' and raw UnixNano",
},
cli.StringFlag{
Name: FlagWorkflowIDWithAlias,
Usage: "WorkflowID",
},
cli.StringFlag{
Name: FlagWorkflowTypeWithAlias,
Usage: "WorkflowTypeName",
},
cli.StringFlag{
Name: FlagWorkflowStatusWithAlias,
Usage: "Closed workflow status [completed, failed, canceled, terminated, continuedasnew, timedout]",
},
cli.StringFlag{
Name: FlagListQueryWithAlias,
Usage: "Optional SQL like query for use of search attributes. NOTE: using query will ignore all other filter flags including: " +
"[open, earliest_time, latest_time, workflow_id, workflow_type]",
},
}
flagsForListAll = append(getCommonFlagsForVisibility(), flagsForListAll...)
return flagsForListAll
}
func getFlagsForScan() []cli.Flag {
flagsForScan := []cli.Flag{
cli.IntFlag{
Name: FlagPageSizeWithAlias,
Value: 2000,
Usage: "Page size for each Scan API call",
},
cli.StringFlag{
Name: FlagListQueryWithAlias,
Usage: "Optional SQL like query",
},
}
flagsForScan = append(getCommonFlagsForVisibility(), flagsForScan...)
return flagsForScan
}
func getFlagsForListArchived() []cli.Flag {
flagsForListArchived := []cli.Flag{
cli.StringFlag{
Name: FlagListQueryWithAlias,
Usage: "SQL like query. Please check the documentation of the visibility archiver used by your domain for detailed instructions",
},
cli.IntFlag{
Name: FlagPageSizeWithAlias,
Value: 100,
Usage: "Count of visibility records included in a single page, default to 100",
},
cli.BoolFlag{
Name: FlagAllWithAlias,
Usage: "List all pages",
},
}
flagsForListArchived = append(getCommonFlagsForVisibility(), flagsForListArchived...)
return flagsForListArchived
}
func getFlagsForCount() []cli.Flag {
return []cli.Flag{
cli.StringFlag{
Name: FlagListQueryWithAlias,
Usage: "Optional SQL like query. e.g count all open workflows 'CloseTime = missing'; 'WorkflowType=\"wtype\" and CloseTime > 0'",
},
}
}
func getFlagsForQuery() []cli.Flag {
return []cli.Flag{
cli.StringFlag{
Name: FlagWorkflowIDWithAlias,
Usage: "WorkflowID",
},
cli.StringFlag{
Name: FlagRunIDWithAlias,
Usage: "RunID",
},
cli.StringFlag{
Name: FlagQueryTypeWithAlias,
Usage: "The query type you want to run",
},
cli.StringFlag{
Name: FlagInputWithAlias,
Usage: "Optional input for the query, in JSON format. If there are multiple parameters, concatenate them and separate by space.",
},
cli.StringFlag{
Name: FlagInputFileWithAlias,
Usage: "Optional input for the query from JSON file. If there are multiple JSON, concatenate them and separate by space or newline. " +
"Input from file will be overwrite by input from command line",
},
cli.StringFlag{
Name: FlagQueryRejectConditionWithAlias,
Usage: "Optional flag to reject queries based on workflow state. Valid values are \"not_open\" and \"not_completed_cleanly\"",
},
cli.StringFlag{
Name: FlagQueryConsistencyLevelWithAlias,
Usage: "Optional flag to set query consistency level. Valid values are \"eventual\" and \"strong\"",
},
}
}
// all flags of query except QueryType
func getFlagsForStack() []cli.Flag {
flags := getFlagsForQuery()
for i := 0; i < len(flags); i++ {
if flags[i].GetName() == FlagQueryTypeWithAlias {
return append(flags[:i], flags[i+1:]...)
}
}
return flags
}
func getFlagsForDescribe() []cli.Flag {
return append(flagsForExecution, getFlagsForDescribeID()...)
}
func getFlagsForDescribeID() []cli.Flag {
return []cli.Flag{
cli.BoolFlag{
Name: FlagPrintRawWithAlias,
Usage: "Print properties as they are stored",
},
cli.BoolFlag{
Name: FlagResetPointsOnly,
Usage: "Only show auto-reset points",
},
}
}
func getFlagsForObserve() []cli.Flag {
return append(flagsForExecution, getFlagsForObserveID()...)
}
func getFlagsForObserveID() []cli.Flag {
return []cli.Flag{
cli.BoolFlag{
Name: FlagShowDetailWithAlias,
Usage: "Optional show event details",
},
cli.IntFlag{
Name: FlagMaxFieldLengthWithAlias,
Usage: "Optional maximum length for each attribute field when show details",
},
}
}
| 1 | 9,215 | Flag is removed because it is only gRPC now. | temporalio-temporal | go |
@@ -38,6 +38,17 @@ class Role(base.Base):
.. option:: molecule init role --role-name foo
Initialize a new role.
+
+ .. program:: molecule init role --role-name foo --template path
+
+ .. option:: molecule init role --role-name foo --template path
+
+ Initialize a new role using a local *cookiecutter* template. This
+ allows the customization of a role while still using the default
+ generated ``molecule`` folder. This is similar to an
+ ``ansible-galaxy init`` skeleton. Please refer to the ``init scenario``
+ command in order to customise the default generated ``molecule``
+ folder.
"""
def __init__(self, command_args): | 1 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import click
from molecule import config
from molecule import logger
from molecule import util
from molecule.command import base as command_base
from molecule.command.init import base
LOG = logger.get_logger(__name__)
class Role(base.Base):
"""
.. program:: molecule init role --role-name foo
.. option:: molecule init role --role-name foo
Initialize a new role.
"""
def __init__(self, command_args):
self._command_args = command_args
def execute(self):
"""
Execute the actions necessary to perform a `molecule init role` and
returns None.
:return: None
"""
role_name = self._command_args['role_name']
role_directory = os.getcwd()
msg = 'Initializing new role {}...'.format(role_name)
LOG.info(msg)
if os.path.isdir(role_name):
msg = ('The directory {} exists. '
'Cannot create new role.').format(role_name)
util.sysexit_with_message(msg)
self._process_templates('role', self._command_args, role_directory)
scenario_base_directory = os.path.join(role_directory, role_name)
templates = [
'scenario/driver/{driver_name}'.format(**self._command_args),
'scenario/verifier/{verifier_name}'.format(**self._command_args),
]
for template in templates:
self._process_templates(template, self._command_args,
scenario_base_directory)
self._process_templates('molecule', self._command_args, role_directory)
role_directory = os.path.join(role_directory, role_name)
msg = 'Initialized role in {} successfully.'.format(role_directory)
LOG.success(msg)
@click.command()
@click.pass_context
@click.option(
'--dependency-name',
type=click.Choice(['galaxy']),
default='galaxy',
help='Name of dependency to initialize. (galaxy)')
@click.option(
'--driver-name',
'-d',
type=click.Choice(config.molecule_drivers()),
default='docker',
help='Name of driver to initialize. (docker)')
@click.option(
'--lint-name',
type=click.Choice(['yamllint']),
default='yamllint',
help='Name of lint to initialize. (yamllint)')
@click.option(
'--provisioner-name',
type=click.Choice(['ansible']),
default='ansible',
help='Name of provisioner to initialize. (ansible)')
@click.option(
'--role-name', '-r', required=True, help='Name of the role to create.')
@click.option(
'--verifier-name',
type=click.Choice(config.molecule_verifiers()),
default='testinfra',
help='Name of verifier to initialize. (testinfra)')
def role(ctx, dependency_name, driver_name, lint_name, provisioner_name,
role_name, verifier_name): # pragma: no cover
""" Initialize a new role for use with Molecule. """
command_args = {
'dependency_name': dependency_name,
'driver_name': driver_name,
'lint_name': lint_name,
'provisioner_name': provisioner_name,
'role_name': role_name,
'scenario_name': command_base.MOLECULE_DEFAULT_SCENARIO_NAME,
'subcommand': __name__,
'verifier_name': verifier_name,
}
if verifier_name == 'inspec':
command_args['verifier_lint_name'] = 'rubocop'
if verifier_name == 'goss':
command_args['verifier_lint_name'] = 'yamllint'
r = Role(command_args)
r.execute()
| 1 | 7,910 | Might be better to rephrase to: Please refer to the ``init scenario`` command in order to generate a custom ``molecule`` scenario. Since you aren't customizing the default scenario since it already exists, right? | ansible-community-molecule | py |
@@ -640,9 +640,17 @@ class StdlibChecker(DeprecatedMixin, BaseChecker):
encoding_arg = utils.get_argument_from_call(
node, position=0, keyword="encoding"
)
+ elif open_module == "pathlib" and node.func.attrname == "write_text":
+ encoding_arg = utils.get_argument_from_call(
+ node, position=1, keyword="encoding"
+ )
+ elif open_module == "pathlib":
+ encoding_arg = utils.get_argument_from_call(
+ node, position=2, keyword="encoding"
+ )
else:
encoding_arg = utils.get_argument_from_call(
- node, position=None, keyword="encoding"
+ node, position=3, keyword="encoding"
)
except utils.NoSuchArgumentError:
self.add_message("unspecified-encoding", node=node) | 1 | # Copyright (c) 2013-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2013-2014 Google, Inc.
# Copyright (c) 2014-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Cosmin Poieana <[email protected]>
# Copyright (c) 2014 Vlad Temian <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Cezar <[email protected]>
# Copyright (c) 2015 Chris Rebert <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016 Jared Garst <[email protected]>
# Copyright (c) 2017 Renat Galimov <[email protected]>
# Copyright (c) 2017 Martin <[email protected]>
# Copyright (c) 2017 Christopher Zurcher <[email protected]>
# Copyright (c) 2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2018 Lucas Cimon <[email protected]>
# Copyright (c) 2018 Banjamin Freeman <[email protected]>
# Copyright (c) 2018 Ioana Tagirta <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Julien Palard <[email protected]>
# Copyright (c) 2019 laike9m <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2019 Robert Schweizer <[email protected]>
# Copyright (c) 2019 fadedDexofan <[email protected]>
# Copyright (c) 2020 Sorin Sbarnea <[email protected]>
# Copyright (c) 2020 Federico Bond <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Copyright (c) 2020 谭九鼎 <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 Yilei "Dolee" Yang <[email protected]>
# Copyright (c) 2021 Matus Valo <[email protected]>
# Copyright (c) 2021 victor <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""Checkers for various standard library functions."""
import sys
from collections.abc import Iterable
from typing import TYPE_CHECKING, Any, Dict, Optional, Set
import astroid
from astroid import nodes
from pylint.checkers import BaseChecker, DeprecatedMixin, utils
from pylint.interfaces import IAstroidChecker
if TYPE_CHECKING:
from pylint.lint import PyLinter
OPEN_FILES_MODE = ("open", "file")
OPEN_FILES_ENCODING = ("open", "read_text", "write_text")
UNITTEST_CASE = "unittest.case"
THREADING_THREAD = "threading.Thread"
COPY_COPY = "copy.copy"
OS_ENVIRON = "os._Environ"
ENV_GETTERS = ("os.getenv",)
SUBPROCESS_POPEN = "subprocess.Popen"
SUBPROCESS_RUN = "subprocess.run"
OPEN_MODULE = {"_io", "pathlib"}
DEBUG_BREAKPOINTS = ("builtins.breakpoint", "sys.breakpointhook", "pdb.set_trace")
DEPRECATED_MODULES = {
(0, 0, 0): {"tkinter.tix", "fpectl"},
(3, 2, 0): {"optparse"},
(3, 4, 0): {"imp"},
(3, 5, 0): {"formatter"},
(3, 6, 0): {"asynchat", "asyncore"},
(3, 7, 0): {"macpath"},
(3, 9, 0): {"lib2to3", "parser", "symbol", "binhex"},
}
DEPRECATED_ARGUMENTS = {
(0, 0, 0): {
"int": ((None, "x"),),
"bool": ((None, "x"),),
"float": ((None, "x"),),
},
(3, 8, 0): {
"asyncio.tasks.sleep": ((None, "loop"),),
"asyncio.tasks.gather": ((None, "loop"),),
"asyncio.tasks.shield": ((None, "loop"),),
"asyncio.tasks.wait_for": ((None, "loop"),),
"asyncio.tasks.wait": ((None, "loop"),),
"asyncio.tasks.as_completed": ((None, "loop"),),
"asyncio.subprocess.create_subprocess_exec": ((None, "loop"),),
"asyncio.subprocess.create_subprocess_shell": ((4, "loop"),),
"gettext.translation": ((5, "codeset"),),
"gettext.install": ((2, "codeset"),),
"functools.partialmethod": ((None, "func"),),
"weakref.finalize": ((None, "func"), (None, "obj")),
"profile.Profile.runcall": ((None, "func"),),
"cProfile.Profile.runcall": ((None, "func"),),
"bdb.Bdb.runcall": ((None, "func"),),
"trace.Trace.runfunc": ((None, "func"),),
"curses.wrapper": ((None, "func"),),
"unittest.case.TestCase.addCleanup": ((None, "function"),),
"concurrent.futures.thread.ThreadPoolExecutor.submit": ((None, "fn"),),
"concurrent.futures.process.ProcessPoolExecutor.submit": ((None, "fn"),),
"contextlib._BaseExitStack.callback": ((None, "callback"),),
"contextlib.AsyncExitStack.push_async_callback": ((None, "callback"),),
"multiprocessing.managers.Server.create": ((None, "c"), (None, "typeid")),
"multiprocessing.managers.SharedMemoryServer.create": (
(None, "c"),
(None, "typeid"),
),
},
(3, 9, 0): {"random.Random.shuffle": ((1, "random"),)},
}
DEPRECATED_DECORATORS = {
(3, 8, 0): {"asyncio.coroutine"},
(3, 3, 0): {
"abc.abstractclassmethod",
"abc.abstractstaticmethod",
"abc.abstractproperty",
},
}
DEPRECATED_METHODS: Dict = {
0: {
"cgi.parse_qs",
"cgi.parse_qsl",
"ctypes.c_buffer",
"distutils.command.register.register.check_metadata",
"distutils.command.sdist.sdist.check_metadata",
"tkinter.Misc.tk_menuBar",
"tkinter.Menu.tk_bindForTraversal",
},
2: {
(2, 6, 0): {
"commands.getstatus",
"os.popen2",
"os.popen3",
"os.popen4",
"macostools.touched",
},
(2, 7, 0): {
"unittest.case.TestCase.assertEquals",
"unittest.case.TestCase.assertNotEquals",
"unittest.case.TestCase.assertAlmostEquals",
"unittest.case.TestCase.assertNotAlmostEquals",
"unittest.case.TestCase.assert_",
"xml.etree.ElementTree.Element.getchildren",
"xml.etree.ElementTree.Element.getiterator",
"xml.etree.ElementTree.XMLParser.getiterator",
"xml.etree.ElementTree.XMLParser.doctype",
},
},
3: {
(3, 0, 0): {
"inspect.getargspec",
"failUnlessEqual",
"assertEquals",
"failIfEqual",
"assertNotEquals",
"failUnlessAlmostEqual",
"assertAlmostEquals",
"failIfAlmostEqual",
"assertNotAlmostEquals",
"failUnless",
"assert_",
"failUnlessRaises",
"failIf",
"assertRaisesRegexp",
"assertRegexpMatches",
"assertNotRegexpMatches",
},
(3, 1, 0): {
"base64.encodestring",
"base64.decodestring",
"ntpath.splitunc",
"os.path.splitunc",
"os.stat_float_times",
},
(3, 2, 0): {
"cgi.escape",
"configparser.RawConfigParser.readfp",
"xml.etree.ElementTree.Element.getchildren",
"xml.etree.ElementTree.Element.getiterator",
"xml.etree.ElementTree.XMLParser.getiterator",
"xml.etree.ElementTree.XMLParser.doctype",
},
(3, 3, 0): {
"inspect.getmoduleinfo",
"logging.warn",
"logging.Logger.warn",
"logging.LoggerAdapter.warn",
"nntplib._NNTPBase.xpath",
"platform.popen",
"sqlite3.OptimizedUnicode",
"time.clock",
},
(3, 4, 0): {
"importlib.find_loader",
"plistlib.readPlist",
"plistlib.writePlist",
"plistlib.readPlistFromBytes",
"plistlib.writePlistToBytes",
},
(3, 4, 4): {"asyncio.tasks.async"},
(3, 5, 0): {
"fractions.gcd",
"inspect.formatargspec",
"inspect.getcallargs",
"platform.linux_distribution",
"platform.dist",
},
(3, 6, 0): {
"importlib._bootstrap_external.FileLoader.load_module",
"_ssl.RAND_pseudo_bytes",
},
(3, 7, 0): {
"sys.set_coroutine_wrapper",
"sys.get_coroutine_wrapper",
"aifc.openfp",
"threading.Thread.isAlive",
"asyncio.Task.current_task",
"asyncio.Task.all_task",
"locale.format",
"ssl.wrap_socket",
"ssl.match_hostname",
"sunau.openfp",
"wave.openfp",
},
(3, 8, 0): {
"gettext.lgettext",
"gettext.ldgettext",
"gettext.lngettext",
"gettext.ldngettext",
"gettext.bind_textdomain_codeset",
"gettext.NullTranslations.output_charset",
"gettext.NullTranslations.set_output_charset",
"threading.Thread.isAlive",
},
(3, 9, 0): {
"binascii.b2a_hqx",
"binascii.a2b_hqx",
"binascii.rlecode_hqx",
"binascii.rledecode_hqx",
},
(3, 10, 0): {
"_sqlite3.enable_shared_cache",
"pathlib.Path.link_to",
"zipimport.zipimporter.load_module",
"zipimport.zipimporter.find_module",
"zipimport.zipimporter.find_loader",
"threading.currentThread",
"threading.activeCount",
"threading.Condition.notifyAll",
"threading.Event.isSet",
"threading.Thread.setName",
"threading.Thread.getName",
"threading.Thread.isDaemon",
"threading.Thread.setDaemon",
"cgi.log",
},
},
}
DEPRECATED_CLASSES = {
(3, 3, 0): {
"importlib.abc": {
"Finder",
},
"pkgutil": {
"ImpImporter",
"ImpLoader",
},
"collections": {
"Awaitable",
"Coroutine",
"AsyncIterable",
"AsyncIterator",
"AsyncGenerator",
"Hashable",
"Iterable",
"Iterator",
"Generator",
"Reversible",
"Sized",
"Container",
"Callable",
"Collection",
"Set",
"MutableSet",
"Mapping",
"MutableMapping",
"MappingView",
"KeysView",
"ItemsView",
"ValuesView",
"Sequence",
"MutableSequence",
"ByteString",
},
},
(3, 9, 0): {
"smtpd": {
"MailmanProxy",
}
},
}
def _check_mode_str(mode):
# check type
if not isinstance(mode, str):
return False
# check syntax
modes = set(mode)
_mode = "rwatb+Ux"
creating = "x" in modes
if modes - set(_mode) or len(mode) > len(modes):
return False
# check logic
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if writing or appending or creating:
return False
reading = True
if text and binary:
return False
total = reading + writing + appending + creating
if total > 1:
return False
if not (reading or writing or appending or creating):
return False
return True
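# A few illustrative classifications (not part of the original checker, shown
# here only to make the mode logic above concrete):
#   _check_mode_str("rb")   -> True   (read, binary)
#   _check_mode_str("w+b")  -> True   (write, update, binary)
#   _check_mode_str("rwx")  -> False  (at most one of r/w/a/x is allowed)
#   _check_mode_str("Uw")   -> False  ("U" cannot be combined with writing)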
class StdlibChecker(DeprecatedMixin, BaseChecker):
__implements__ = (IAstroidChecker,)
name = "stdlib"
msgs = {
"W1501": (
'"%s" is not a valid mode for open.',
"bad-open-mode",
"Python supports: r, w, a[, x] modes with b, +, "
"and U (only with r) options. "
"See https://docs.python.org/2/library/functions.html#open",
),
"W1502": (
"Using datetime.time in a boolean context.",
"boolean-datetime",
"Using datetime.time in a boolean context can hide "
"subtle bugs when the time they represent matches "
"midnight UTC. This behaviour was fixed in Python 3.5. "
"See https://bugs.python.org/issue13936 for reference.",
{"maxversion": (3, 5)},
),
"W1503": (
"Redundant use of %s with constant value %r",
"redundant-unittest-assert",
"The first argument of assertTrue and assertFalse is "
"a condition. If a constant is passed as parameter, that "
"condition will be always true. In this case a warning "
"should be emitted.",
),
"W1505": (
"Using deprecated method %s()",
"deprecated-method",
"The method is marked as deprecated and will be removed in "
"a future version of Python. Consider looking for an "
"alternative in the documentation.",
),
"W1506": (
"threading.Thread needs the target function",
"bad-thread-instantiation",
"The warning is emitted when a threading.Thread class "
"is instantiated without the target function being passed. "
"By default, the first parameter is the group param, not the target param. ",
),
"W1507": (
"Using copy.copy(os.environ). Use os.environ.copy() instead. ",
"shallow-copy-environ",
"os.environ is not a dict object but proxy object, so "
"shallow copy has still effects on original object. "
"See https://bugs.python.org/issue15373 for reference. ",
),
"E1507": (
"%s does not support %s type argument",
"invalid-envvar-value",
"Env manipulation functions support only string type arguments. "
"See https://docs.python.org/3/library/os.html#os.getenv. ",
),
"W1508": (
"%s default type is %s. Expected str or None.",
"invalid-envvar-default",
"Env manipulation functions return None or str values. "
"Supplying anything different as a default may cause bugs. "
"See https://docs.python.org/3/library/os.html#os.getenv. ",
),
"W1509": (
"Using preexec_fn keyword which may be unsafe in the presence "
"of threads",
"subprocess-popen-preexec-fn",
"The preexec_fn parameter is not safe to use in the presence "
"of threads in your application. The child process could "
"deadlock before exec is called. If you must use it, keep it "
"trivial! Minimize the number of libraries you call into."
"https://docs.python.org/3/library/subprocess.html#popen-constructor",
),
"W1510": (
"Using subprocess.run without explicitly set `check` is not recommended.",
"subprocess-run-check",
"The check parameter should always be used with explicitly set "
"`check` keyword to make clear what the error-handling behavior is."
"https://docs.python.org/3/library/subprocess.html#subprocess.run",
),
"W1511": (
"Using deprecated argument %s of method %s()",
"deprecated-argument",
"The argument is marked as deprecated and will be removed in the future.",
),
"W1512": (
"Using deprecated class %s of module %s",
"deprecated-class",
"The class is marked as deprecated and will be removed in the future.",
),
"W1513": (
"Using deprecated decorator %s()",
"deprecated-decorator",
"The decorator is marked as deprecated and will be removed in the future.",
),
"W1514": (
"Using open without explicitly specifying an encoding",
"unspecified-encoding",
"It is better to specify an encoding when opening documents. "
"Using the system default implicitly can create problems on other operating systems. "
"See https://www.python.org/dev/peps/pep-0597/",
),
"W1515": (
"Leaving functions creating breakpoints in production code is not recommended",
"forgotten-debug-statement",
"Calls to breakpoint(), sys.breakpointhook() and pdb.set_trace() should be removed "
"from code that is not actively being debugged.",
),
}
def __init__(
self, linter: Optional["PyLinter"] = None
): # pylint: disable=super-init-not-called # See https://github.com/PyCQA/pylint/issues/4941
BaseChecker.__init__(self, linter)
self._deprecated_methods: Set[Any] = set()
self._deprecated_methods.update(DEPRECATED_METHODS[0])
for since_vers, func_list in DEPRECATED_METHODS[sys.version_info[0]].items():
if since_vers <= sys.version_info:
self._deprecated_methods.update(func_list)
self._deprecated_attributes = {}
for since_vers, func_list in DEPRECATED_ARGUMENTS.items():
if since_vers <= sys.version_info:
self._deprecated_attributes.update(func_list)
self._deprecated_classes = {}
for since_vers, class_list in DEPRECATED_CLASSES.items():
if since_vers <= sys.version_info:
self._deprecated_classes.update(class_list)
self._deprecated_modules = set()
for since_vers, mod_list in DEPRECATED_MODULES.items():
if since_vers <= sys.version_info:
self._deprecated_modules.update(mod_list)
self._deprecated_decorators = set()
for since_vers, decorator_list in DEPRECATED_DECORATORS.items():
if since_vers <= sys.version_info:
self._deprecated_decorators.update(decorator_list)
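    # Illustrative effect of the version gating above (example assumes a
    # Python 3.9 interpreter; not in the original source): the (3, 10, 0)
    # entries such as "pathlib.Path.link_to" are skipped, while (3, 7, 0)
    # entries such as "locale.format" end up in self._deprecated_methods.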
def _check_bad_thread_instantiation(self, node):
if not node.kwargs and not node.keywords and len(node.args) <= 1:
self.add_message("bad-thread-instantiation", node=node)
def _check_for_preexec_fn_in_popen(self, node):
if node.keywords:
for keyword in node.keywords:
if keyword.arg == "preexec_fn":
self.add_message("subprocess-popen-preexec-fn", node=node)
def _check_for_check_kw_in_run(self, node):
kwargs = {keyword.arg for keyword in (node.keywords or ())}
if "check" not in kwargs:
self.add_message("subprocess-run-check", node=node)
def _check_shallow_copy_environ(self, node: nodes.Call) -> None:
arg = utils.get_argument_from_call(node, position=0)
try:
inferred_args = arg.inferred()
except astroid.InferenceError:
return
for inferred in inferred_args:
if inferred.qname() == OS_ENVIRON:
self.add_message("shallow-copy-environ", node=node)
break
@utils.check_messages(
"bad-open-mode",
"redundant-unittest-assert",
"deprecated-method",
"deprecated-argument",
"bad-thread-instantiation",
"shallow-copy-environ",
"invalid-envvar-value",
"invalid-envvar-default",
"subprocess-popen-preexec-fn",
"subprocess-run-check",
"deprecated-class",
"unspecified-encoding",
"forgotten-debug-statement",
)
def visit_call(self, node: nodes.Call) -> None:
"""Visit a Call node."""
self.check_deprecated_class_in_call(node)
for inferred in utils.infer_all(node.func):
if inferred is astroid.Uninferable:
continue
if inferred.root().name in OPEN_MODULE:
if (
isinstance(node.func, nodes.Name)
and node.func.name in OPEN_FILES_MODE
):
self._check_open_mode(node)
if (
isinstance(node.func, nodes.Name)
and node.func.name in OPEN_FILES_ENCODING
or isinstance(node.func, nodes.Attribute)
and node.func.attrname in OPEN_FILES_ENCODING
):
self._check_open_encoded(node, inferred.root().name)
elif inferred.root().name == UNITTEST_CASE:
self._check_redundant_assert(node, inferred)
elif isinstance(inferred, nodes.ClassDef):
if inferred.qname() == THREADING_THREAD:
self._check_bad_thread_instantiation(node)
elif inferred.qname() == SUBPROCESS_POPEN:
self._check_for_preexec_fn_in_popen(node)
elif isinstance(inferred, nodes.FunctionDef):
name = inferred.qname()
if name == COPY_COPY:
self._check_shallow_copy_environ(node)
elif name in ENV_GETTERS:
self._check_env_function(node, inferred)
elif name == SUBPROCESS_RUN:
self._check_for_check_kw_in_run(node)
elif name in DEBUG_BREAKPOINTS:
self.add_message("forgotten-debug-statement", node=node)
self.check_deprecated_method(node, inferred)
@utils.check_messages("boolean-datetime")
def visit_unaryop(self, node: nodes.UnaryOp) -> None:
if node.op == "not":
self._check_datetime(node.operand)
@utils.check_messages("boolean-datetime")
def visit_if(self, node: nodes.If) -> None:
self._check_datetime(node.test)
@utils.check_messages("boolean-datetime")
def visit_ifexp(self, node: nodes.IfExp) -> None:
self._check_datetime(node.test)
@utils.check_messages("boolean-datetime")
def visit_boolop(self, node: nodes.BoolOp) -> None:
for value in node.values:
self._check_datetime(value)
def _check_redundant_assert(self, node, infer):
if (
isinstance(infer, astroid.BoundMethod)
and node.args
and isinstance(node.args[0], nodes.Const)
and infer.name in {"assertTrue", "assertFalse"}
):
self.add_message(
"redundant-unittest-assert",
args=(infer.name, node.args[0].value),
node=node,
)
def _check_datetime(self, node):
"""Check that a datetime was inferred.
If so, emit boolean-datetime warning.
"""
try:
inferred = next(node.infer())
except astroid.InferenceError:
return
if (
isinstance(inferred, astroid.Instance)
and inferred.qname() == "datetime.time"
):
self.add_message("boolean-datetime", node=node)
def _check_open_mode(self, node):
"""Check that the mode argument of an open or file call is valid."""
try:
mode_arg = utils.get_argument_from_call(node, position=1, keyword="mode")
except utils.NoSuchArgumentError:
return
if mode_arg:
mode_arg = utils.safe_infer(mode_arg)
if isinstance(mode_arg, nodes.Const) and not _check_mode_str(
mode_arg.value
):
self.add_message("bad-open-mode", node=node, args=mode_arg.value)
def _check_open_encoded(self, node: nodes.Call, open_module: str) -> None:
"""Check that the encoded argument of an open call is valid."""
mode_arg = None
try:
if open_module == "_io":
mode_arg = utils.get_argument_from_call(
node, position=1, keyword="mode"
)
elif open_module == "pathlib":
mode_arg = utils.get_argument_from_call(
node, position=0, keyword="mode"
)
except utils.NoSuchArgumentError:
pass
if mode_arg:
mode_arg = utils.safe_infer(mode_arg)
if (
not mode_arg
or isinstance(mode_arg, nodes.Const)
and "b" not in mode_arg.value
):
encoding_arg = None
try:
if open_module == "pathlib" and node.func.attrname == "read_text":
encoding_arg = utils.get_argument_from_call(
node, position=0, keyword="encoding"
)
else:
encoding_arg = utils.get_argument_from_call(
node, position=None, keyword="encoding"
)
except utils.NoSuchArgumentError:
self.add_message("unspecified-encoding", node=node)
if encoding_arg:
encoding_arg = utils.safe_infer(encoding_arg)
if isinstance(encoding_arg, nodes.Const) and encoding_arg.value is None:
self.add_message("unspecified-encoding", node=node)
def _check_env_function(self, node, infer):
env_name_kwarg = "key"
env_value_kwarg = "default"
if node.keywords:
kwargs = {keyword.arg: keyword.value for keyword in node.keywords}
else:
kwargs = None
if node.args:
env_name_arg = node.args[0]
elif kwargs and env_name_kwarg in kwargs:
env_name_arg = kwargs[env_name_kwarg]
else:
env_name_arg = None
if env_name_arg:
self._check_invalid_envvar_value(
node=node,
message="invalid-envvar-value",
call_arg=utils.safe_infer(env_name_arg),
infer=infer,
allow_none=False,
)
if len(node.args) == 2:
env_value_arg = node.args[1]
elif kwargs and env_value_kwarg in kwargs:
env_value_arg = kwargs[env_value_kwarg]
else:
env_value_arg = None
if env_value_arg:
self._check_invalid_envvar_value(
node=node,
infer=infer,
message="invalid-envvar-default",
call_arg=utils.safe_infer(env_value_arg),
allow_none=True,
)
def _check_invalid_envvar_value(self, node, infer, message, call_arg, allow_none):
if call_arg in (astroid.Uninferable, None):
return
name = infer.qname()
if isinstance(call_arg, nodes.Const):
emit = False
if call_arg.value is None:
emit = not allow_none
elif not isinstance(call_arg.value, str):
emit = True
if emit:
self.add_message(message, node=node, args=(name, call_arg.pytype()))
else:
self.add_message(message, node=node, args=(name, call_arg.pytype()))
def deprecated_modules(self):
"""Callback returning the deprecated modules."""
return self._deprecated_modules
def deprecated_methods(self):
return self._deprecated_methods
def deprecated_arguments(self, method: str):
return self._deprecated_attributes.get(method, ())
def deprecated_classes(self, module: str):
return self._deprecated_classes.get(module, ())
def deprecated_decorators(self) -> Iterable:
return self._deprecated_decorators
def register(linter: "PyLinter") -> None:
linter.register_checker(StdlibChecker(linter))
| 1 | 20,106 | Shall we merge these `if` for `path lib` and then do the `attrname` one. I'm trying to count `if`-calls and its getting late but I think we can reduce the number checks needed to get into L648 from 3 to 2 if you understand what I mean | PyCQA-pylint | py |
@@ -9,6 +9,7 @@ import (
"errors"
"sync"
+ "github.com/ethersphere/bee/pkg/recovery"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags" | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mock
import (
"context"
"errors"
"sync"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
)
var _ storage.Storer = (*MockStorer)(nil)
type MockStorer struct {
store map[string][]byte
modeSet map[string]storage.ModeSet
modeSetMu sync.Mutex
pinnedAddress []swarm.Address // Stores the pinned address
pinnedCounter []uint64 // and its respective counter. These are stored as slices to preserve the order.
pinSetMu sync.Mutex
subpull []storage.Descriptor
partialInterval bool
validator swarm.ChunkValidator
tags *tags.Tags
morePull chan struct{}
mtx sync.Mutex
quit chan struct{}
}
func WithSubscribePullChunks(chs ...storage.Descriptor) Option {
return optionFunc(func(m *MockStorer) {
m.subpull = make([]storage.Descriptor, len(chs))
for i, v := range chs {
m.subpull[i] = v
}
})
}
func WithPartialInterval(v bool) Option {
return optionFunc(func(m *MockStorer) {
m.partialInterval = v
})
}
func NewStorer(opts ...Option) *MockStorer {
s := &MockStorer{
store: make(map[string][]byte),
modeSet: make(map[string]storage.ModeSet),
modeSetMu: sync.Mutex{},
morePull: make(chan struct{}),
quit: make(chan struct{}),
}
for _, v := range opts {
v.apply(s)
}
return s
}
func NewValidatingStorer(v swarm.ChunkValidator, tags *tags.Tags) *MockStorer {
return &MockStorer{
store: make(map[string][]byte),
modeSet: make(map[string]storage.ModeSet),
modeSetMu: sync.Mutex{},
pinSetMu: sync.Mutex{},
validator: v,
tags: tags,
}
}
func (m *MockStorer) Get(ctx context.Context, mode storage.ModeGet, addr swarm.Address) (ch swarm.Chunk, err error) {
m.mtx.Lock()
defer m.mtx.Unlock()
v, has := m.store[addr.String()]
if !has {
return nil, storage.ErrNotFound
}
return swarm.NewChunk(addr, v), nil
}
func (m *MockStorer) Put(ctx context.Context, mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err error) {
m.mtx.Lock()
defer m.mtx.Unlock()
for _, ch := range chs {
if m.validator != nil {
if !m.validator.Validate(ch) {
return nil, storage.ErrInvalidChunk
}
}
m.store[ch.Address().String()] = ch.Data()
yes, err := m.has(ctx, ch.Address())
if err != nil {
exist = append(exist, false)
continue
}
if yes {
exist = append(exist, true)
} else {
exist = append(exist, false)
}
}
return exist, nil
}
func (m *MockStorer) GetMulti(ctx context.Context, mode storage.ModeGet, addrs ...swarm.Address) (ch []swarm.Chunk, err error) {
panic("not implemented") // TODO: Implement
}
func (m *MockStorer) has(ctx context.Context, addr swarm.Address) (yes bool, err error) {
_, has := m.store[addr.String()]
return has, nil
}
func (m *MockStorer) Has(ctx context.Context, addr swarm.Address) (yes bool, err error) {
m.mtx.Lock()
defer m.mtx.Unlock()
return m.has(ctx, addr)
}
func (m *MockStorer) HasMulti(ctx context.Context, addrs ...swarm.Address) (yes []bool, err error) {
panic("not implemented") // TODO: Implement
}
func (m *MockStorer) Set(ctx context.Context, mode storage.ModeSet, addrs ...swarm.Address) (err error) {
m.modeSetMu.Lock()
m.pinSetMu.Lock()
defer m.modeSetMu.Unlock()
defer m.pinSetMu.Unlock()
for _, addr := range addrs {
m.modeSet[addr.String()] = mode
// if mode is set pin, increment the pin counter
if mode == storage.ModeSetPin {
var found bool
for i, ad := range m.pinnedAddress {
if addr.String() == ad.String() {
m.pinnedCounter[i] = m.pinnedCounter[i] + 1
found = true
}
}
if !found {
m.pinnedAddress = append(m.pinnedAddress, addr)
m.pinnedCounter = append(m.pinnedCounter, uint64(1))
}
}
// if mode is set unpin, decrement the pin counter and remove the address
// once it reaches zero
if mode == storage.ModeSetUnpin {
for i, ad := range m.pinnedAddress {
if addr.String() == ad.String() {
m.pinnedCounter[i] = m.pinnedCounter[i] - 1
if m.pinnedCounter[i] == 0 {
copy(m.pinnedAddress[i:], m.pinnedAddress[i+1:])
m.pinnedAddress[len(m.pinnedAddress)-1] = swarm.NewAddress([]byte{0})
m.pinnedAddress = m.pinnedAddress[:len(m.pinnedAddress)-1]
copy(m.pinnedCounter[i:], m.pinnedCounter[i+1:])
m.pinnedCounter[len(m.pinnedCounter)-1] = uint64(0)
m.pinnedCounter = m.pinnedCounter[:len(m.pinnedCounter)-1]
}
}
}
}
}
return nil
}
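// Illustrative pin-counter behaviour of Set above (comment added for clarity,
// not part of the original mock):
//   Set(ctx, ModeSetPin, a); Set(ctx, ModeSetPin, a) -> PinInfo(a) == 2
//   Set(ctx, ModeSetUnpin, a)                        -> PinInfo(a) == 1
//   Set(ctx, ModeSetUnpin, a)                        -> a removed; PinInfo(a) -> storage.ErrNotFound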
func (m *MockStorer) GetModeSet(addr swarm.Address) (mode storage.ModeSet) {
m.modeSetMu.Lock()
defer m.modeSetMu.Unlock()
if mode, ok := m.modeSet[addr.String()]; ok {
return mode
}
return mode
}
func (m *MockStorer) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
panic("not implemented") // TODO: Implement
}
func (m *MockStorer) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (<-chan storage.Descriptor, <-chan struct{}, func()) {
c := make(chan storage.Descriptor)
done := make(chan struct{})
stop := func() {
close(done)
}
go func() {
defer close(c)
m.mtx.Lock()
for _, ch := range m.subpull {
select {
case c <- ch:
case <-done:
return
case <-ctx.Done():
return
case <-m.quit:
return
}
}
m.mtx.Unlock()
if m.partialInterval {
// block since we're at the top of the bin and waiting for new chunks
select {
case <-done:
return
case <-m.quit:
return
case <-ctx.Done():
return
case <-m.morePull:
}
}
m.mtx.Lock()
defer m.mtx.Unlock()
// iterate on what we have in the iterator
for _, ch := range m.subpull {
select {
case c <- ch:
case <-done:
return
case <-ctx.Done():
return
case <-m.quit:
return
}
}
}()
return c, m.quit, stop
}
func (m *MockStorer) MorePull(d ...storage.Descriptor) {
// clear out what we already have in subpull
m.mtx.Lock()
defer m.mtx.Unlock()
m.subpull = make([]storage.Descriptor, len(d))
for i, v := range d {
m.subpull[i] = v
}
close(m.morePull)
}
func (m *MockStorer) SubscribePush(ctx context.Context) (c <-chan swarm.Chunk, stop func()) {
panic("not implemented") // TODO: Implement
}
func (m *MockStorer) PinnedChunks(ctx context.Context, cursor swarm.Address) (pinnedChunks []*storage.Pinner, err error) {
m.pinSetMu.Lock()
defer m.pinSetMu.Unlock()
for i, addr := range m.pinnedAddress {
pi := &storage.Pinner{
Address: swarm.NewAddress(addr.Bytes()),
PinCounter: m.pinnedCounter[i],
}
pinnedChunks = append(pinnedChunks, pi)
}
if pinnedChunks == nil {
return pinnedChunks, errors.New("pin chunks: leveldb: not found")
}
return pinnedChunks, nil
}
func (m *MockStorer) PinInfo(address swarm.Address) (uint64, error) {
m.pinSetMu.Lock()
defer m.pinSetMu.Unlock()
for i, addr := range m.pinnedAddress {
if addr.String() == address.String() {
return m.pinnedCounter[i], nil
}
}
return 0, storage.ErrNotFound
}
func (m *MockStorer) Close() error {
close(m.quit)
return nil
}
type Option interface {
apply(*MockStorer)
}
type optionFunc func(*MockStorer)
func (f optionFunc) apply(r *MockStorer) { f(r) }
| 1 | 11,678 | why depend on recovery??? | ethersphere-bee | go |
@@ -388,7 +388,7 @@ func initializeDynamicConfig(
// the done channel is used by dynamic config to stop refreshing
// and CLI does not need that, so just close the done channel
- doneChan := make(chan struct{})
+ doneChan := make(chan interface{})
close(doneChan)
dynamicConfigClient, err := dynamicconfig.NewFileBasedClient(
&serviceConfig.DynamicConfigClient, | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cli
import (
"strings"
"time"
"github.com/golang/mock/gomock"
"github.com/uber-go/tally"
"github.com/urfave/cli"
"go.temporal.io/api/workflowservice/v1"
"go.temporal.io/server/common/config"
"go.temporal.io/server/common"
"go.temporal.io/server/common/archiver"
"go.temporal.io/server/common/archiver/provider"
"go.temporal.io/server/common/cluster"
"go.temporal.io/server/common/dynamicconfig"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/namespace"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/persistence/client"
"go.temporal.io/server/common/resolver"
)
const (
dependencyMaxQPS = 100
)
var (
registerNamespaceFlags = []cli.Flag{
cli.StringFlag{
Name: FlagDescriptionWithAlias,
Usage: "Namespace description",
},
cli.StringFlag{
Name: FlagOwnerEmailWithAlias,
Usage: "Owner email",
},
cli.StringFlag{
Name: FlagRetentionDaysWithAlias,
Usage: "Workflow execution retention in days",
},
cli.StringFlag{
Name: FlagActiveClusterNameWithAlias,
Usage: "Active cluster name",
},
cli.StringFlag{
// use StringFlag instead of buggy StringSliceFlag
// TODO when https://github.com/urfave/cli/pull/392 & v2 is released
// consider update urfave/cli
Name: FlagClustersWithAlias,
Usage: "Clusters",
},
cli.StringFlag{
Name: FlagIsGlobalNamespaceWithAlias,
Usage: "Flag to indicate whether namespace is a global namespace",
},
cli.StringFlag{
Name: FlagNamespaceDataWithAlias,
Usage: "Namespace data of key value pairs, in format of k1:v1,k2:v2,k3:v3",
},
cli.StringFlag{
Name: FlagSecurityTokenWithAlias,
Usage: "Optional token for security check",
},
cli.StringFlag{
Name: FlagHistoryArchivalStateWithAlias,
Usage: "Flag to set history archival state, valid values are \"disabled\" and \"enabled\"",
},
cli.StringFlag{
Name: FlagHistoryArchivalURIWithAlias,
Usage: "Optionally specify history archival URI (cannot be changed after first time archival is enabled)",
},
cli.StringFlag{
Name: FlagVisibilityArchivalStateWithAlias,
Usage: "Flag to set visibility archival state, valid values are \"disabled\" and \"enabled\"",
},
cli.StringFlag{
Name: FlagVisibilityArchivalURIWithAlias,
Usage: "Optionally specify visibility archival URI (cannot be changed after first time archival is enabled)",
},
}
updateNamespaceFlags = []cli.Flag{
cli.StringFlag{
Name: FlagDescriptionWithAlias,
Usage: "Namespace description",
},
cli.StringFlag{
Name: FlagOwnerEmailWithAlias,
Usage: "Owner email",
},
cli.StringFlag{
Name: FlagRetentionDaysWithAlias,
Usage: "Workflow execution retention in days",
},
cli.StringFlag{
Name: FlagActiveClusterNameWithAlias,
Usage: "Active cluster name",
},
cli.StringFlag{
// use StringFlag instead of buggy StringSliceFlag
// TODO when https://github.com/urfave/cli/pull/392 & v2 is released
// consider update urfave/cli
Name: FlagClustersWithAlias,
Usage: "Clusters",
},
cli.StringFlag{
Name: FlagNamespaceDataWithAlias,
Usage: "Namespace data of key value pairs, in format of k1:v1,k2:v2,k3:v3 ",
},
cli.StringFlag{
Name: FlagSecurityTokenWithAlias,
Usage: "Optional token for security check",
},
cli.StringFlag{
Name: FlagHistoryArchivalStateWithAlias,
Usage: "Flag to set history archival state, valid values are \"disabled\" and \"enabled\"",
},
cli.StringFlag{
Name: FlagHistoryArchivalURIWithAlias,
Usage: "Optionally specify history archival URI (cannot be changed after first time archival is enabled)",
},
cli.StringFlag{
Name: FlagVisibilityArchivalStateWithAlias,
Usage: "Flag to set visibility archival state, valid values are \"disabled\" and \"enabled\"",
},
cli.StringFlag{
Name: FlagVisibilityArchivalURIWithAlias,
Usage: "Optionally specify visibility archival URI (cannot be changed after first time archival is enabled)",
},
cli.StringFlag{
Name: FlagAddBadBinary,
Usage: "Binary checksum to add for resetting workflow",
},
cli.StringFlag{
Name: FlagRemoveBadBinary,
Usage: "Binary checksum to remove for resetting workflow",
},
cli.StringFlag{
Name: FlagReason,
Usage: "Reason for the operation",
},
}
describeNamespaceFlags = []cli.Flag{
cli.StringFlag{
Name: FlagNamespaceID,
Usage: "Namespace Id (required if not specify namespace)",
},
}
listNamespacesFlags = []cli.Flag{}
adminNamespaceCommonFlags = []cli.Flag{
cli.StringFlag{
Name: FlagServiceConfigDirWithAlias,
Usage: "Required service configuration dir",
},
cli.StringFlag{
Name: FlagServiceEnvWithAlias,
Usage: "Optional service env for loading service configuration",
},
cli.StringFlag{
Name: FlagServiceZoneWithAlias,
Usage: "Optional service zone for loading service configuration",
},
}
adminRegisterNamespaceFlags = append(
registerNamespaceFlags,
adminNamespaceCommonFlags...,
)
adminUpdateNamespaceFlags = append(
updateNamespaceFlags,
adminNamespaceCommonFlags...,
)
adminDescribeNamespaceFlags = append(
updateNamespaceFlags,
adminNamespaceCommonFlags...,
)
)
func initializeFrontendClient(
context *cli.Context,
) workflowservice.WorkflowServiceClient {
return cFactory.FrontendClient(context)
}
func initializeAdminNamespaceHandler(
context *cli.Context,
) namespace.Handler {
configuration := loadConfig(context)
metricsClient := initializeMetricsClient()
logger := log.NewZapLogger(log.BuildZapLogger(configuration.Log))
clusterMetadata := initializeClusterMetadata(
configuration,
logger,
)
metadataMgr := initializeMetadataMgr(
configuration,
clusterMetadata,
metricsClient,
logger,
)
dynamicConfig := initializeDynamicConfig(configuration, logger)
return initializeNamespaceHandler(
logger,
metadataMgr,
clusterMetadata,
initializeArchivalMetadata(configuration, dynamicConfig),
initializeArchivalProvider(configuration, clusterMetadata, metricsClient, logger),
)
}
func loadConfig(
context *cli.Context,
) *config.Config {
env := getEnvironment(context)
zone := getZone(context)
configDir := getConfigDir(context)
var cfg config.Config
err := config.Load(env, configDir, zone, &cfg)
if err != nil {
ErrorAndExit("Unable to load config.", err)
}
return &cfg
}
func initializeNamespaceHandler(
logger log.Logger,
metadataMgr persistence.MetadataManager,
clusterMetadata cluster.Metadata,
archivalMetadata archiver.ArchivalMetadata,
archiverProvider provider.ArchiverProvider,
) namespace.Handler {
return namespace.NewHandler(
namespace.MinRetentionDays,
dynamicconfig.GetIntPropertyFilteredByNamespace(namespace.MaxBadBinaries),
logger,
metadataMgr,
clusterMetadata,
initializeNamespaceReplicator(logger),
archivalMetadata,
archiverProvider,
)
}
func initializeMetadataMgr(
serviceConfig *config.Config,
clusterMetadata cluster.Metadata,
metricsClient metrics.Client,
logger log.Logger,
) persistence.MetadataManager {
pConfig := serviceConfig.Persistence
pConfig.VisibilityConfig = &config.VisibilityConfig{
VisibilityListMaxQPS: dynamicconfig.GetIntPropertyFilteredByNamespace(dependencyMaxQPS),
EnableSampling: dynamicconfig.GetBoolPropertyFn(false), // not used by namespace operation
ESProcessorAckTimeout: dynamicconfig.GetDurationPropertyFn(1 * time.Minute),
}
pFactory := client.NewFactory(
&pConfig,
resolver.NewNoopResolver(),
dynamicconfig.GetIntPropertyFn(dependencyMaxQPS),
nil, // TODO propagate abstract datastore factory from the CLI.
clusterMetadata.GetCurrentClusterName(),
metricsClient,
logger,
)
metadata, err := pFactory.NewMetadataManager()
if err != nil {
ErrorAndExit("Unable to initialize metadata manager.", err)
}
return metadata
}
func initializeClusterMetadata(
serviceConfig *config.Config,
logger log.Logger,
) cluster.Metadata {
clusterMetadata := serviceConfig.ClusterMetadata
return cluster.NewMetadata(
logger,
clusterMetadata.EnableGlobalNamespace,
clusterMetadata.FailoverVersionIncrement,
clusterMetadata.MasterClusterName,
clusterMetadata.CurrentClusterName,
clusterMetadata.ClusterInformation,
)
}
func initializeArchivalMetadata(
serviceConfig *config.Config,
dynamicConfig *dynamicconfig.Collection,
) archiver.ArchivalMetadata {
return archiver.NewArchivalMetadata(
dynamicConfig,
serviceConfig.Archival.History.State,
serviceConfig.Archival.History.EnableRead,
serviceConfig.Archival.Visibility.State,
serviceConfig.Archival.Visibility.EnableRead,
&serviceConfig.NamespaceDefaults.Archival,
)
}
func initializeArchivalProvider(
serviceConfig *config.Config,
clusterMetadata cluster.Metadata,
metricsClient metrics.Client,
logger log.Logger,
) provider.ArchiverProvider {
archiverProvider := provider.NewArchiverProvider(
serviceConfig.Archival.History.Provider,
serviceConfig.Archival.Visibility.Provider,
)
historyArchiverBootstrapContainer := &archiver.HistoryBootstrapContainer{
HistoryV2Manager: nil, // not used
Logger: logger,
MetricsClient: metricsClient,
ClusterMetadata: clusterMetadata,
NamespaceCache: nil, // not used
}
visibilityArchiverBootstrapContainer := &archiver.VisibilityBootstrapContainer{
Logger: logger,
MetricsClient: metricsClient,
ClusterMetadata: clusterMetadata,
NamespaceCache: nil, // not used
}
err := archiverProvider.RegisterBootstrapContainer(
common.FrontendServiceName,
historyArchiverBootstrapContainer,
visibilityArchiverBootstrapContainer,
)
if err != nil {
ErrorAndExit("Error initializing archival provider.", err)
}
return archiverProvider
}
func initializeNamespaceReplicator(
logger log.Logger,
) namespace.Replicator {
namespaceReplicationQueue := &persistence.MockNamespaceReplicationQueue{}
namespaceReplicationQueue.EXPECT().Publish(gomock.Any()).Return(nil)
return namespace.NewNamespaceReplicator(namespaceReplicationQueue, logger)
}
func initializeDynamicConfig(
serviceConfig *config.Config,
logger log.Logger,
) *dynamicconfig.Collection {
// the done channel is used by dynamic config to stop refreshing
// and CLI does not need that, so just close the done channel
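	// (Receives on a closed channel return immediately, so this hands the
	// file-based client an "already stopped" signal. Idiom sketch, added for
	// clarity: done := make(chan struct{}); close(done); <-done // returns at once.
	// This assumes the client only ever reads from the channel.)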
doneChan := make(chan struct{})
close(doneChan)
dynamicConfigClient, err := dynamicconfig.NewFileBasedClient(
&serviceConfig.DynamicConfigClient,
logger,
doneChan,
)
if err != nil {
ErrorAndExit("Error initializing dynamic config.", err)
}
return dynamicconfig.NewCollection(dynamicConfigClient, logger)
}
func initializeMetricsClient() metrics.Client {
return metrics.NewClient(tally.NoopScope, metrics.Common)
}
func getEnvironment(c *cli.Context) string {
return strings.TrimSpace(c.String(FlagServiceEnv))
}
func getZone(c *cli.Context) string {
return strings.TrimSpace(c.String(FlagServiceZone))
}
func getConfigDir(c *cli.Context) string {
dirPath := c.String(FlagServiceConfigDir)
if len(dirPath) == 0 {
ErrorAndExit("Must provide service configuration dir path.", nil)
}
return dirPath
}
| 1 | 11,475 | the done channel only aims to be closed, so `chan struct{}` is better then `chan interface{}` | temporalio-temporal | go |
@@ -196,7 +196,7 @@ func NewGasPriceOracle(cfg *Config) (*GasPriceOracle, error) {
address := cfg.gasPriceOracleAddress
contract, err := bindings.NewGasPriceOracle(address, l2Client)
if err != nil {
- return nil, err
+ return nil, wrapErr(err, "error creating contract binding")
}
// Fetch the current gas price to use as the current price | 1 | package oracle
import (
"context"
"errors"
"fmt"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/go/gas-oracle/bindings"
"github.com/ethereum-optimism/optimism/go/gas-oracle/gasprices"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
var (
// errInvalidSigningKey represents the error when the signing key used
// is not the Owner of the contract and therefore cannot update the gasprice
errInvalidSigningKey = errors.New("invalid signing key")
// errNoChainID represents the error when the chain id is not provided
// and it cannot be remotely fetched
errNoChainID = errors.New("no chain id provided")
// errNoPrivateKey represents the error when the private key is not provided to
// the application
errNoPrivateKey = errors.New("no private key provided")
// errWrongChainID represents the error when the configured chain id is not
// correct
errWrongChainID = errors.New("wrong chain id provided")
// errNoBaseFee represents the error when the base fee is not found on the
// block. This means that the block being queried is pre eip1559
errNoBaseFee = errors.New("base fee not found on block")
)
// GasPriceOracle manages a hot key that can update the L2 Gas Price
type GasPriceOracle struct {
l1ChainID *big.Int
l2ChainID *big.Int
ctx context.Context
stop chan struct{}
contract *bindings.GasPriceOracle
l2Backend DeployContractBackend
l1Backend bind.ContractTransactor
gasPriceUpdater *gasprices.GasPriceUpdater
config *Config
}
// Start runs the GasPriceOracle
func (g *GasPriceOracle) Start() error {
if g.config.l1ChainID == nil {
return fmt.Errorf("layer-one: %w", errNoChainID)
}
if g.config.l2ChainID == nil {
return fmt.Errorf("layer-two: %w", errNoChainID)
}
if g.config.privateKey == nil {
return errNoPrivateKey
}
address := crypto.PubkeyToAddress(g.config.privateKey.PublicKey)
log.Info("Starting Gas Price Oracle", "l1-chain-id", g.l1ChainID,
"l2-chain-id", g.l2ChainID, "address", address.Hex())
price, err := g.contract.GasPrice(&bind.CallOpts{
Context: context.Background(),
})
if err != nil {
return err
}
gasPriceGauge.Update(int64(price.Uint64()))
if g.config.enableL1BaseFee {
go g.BaseFeeLoop()
}
if g.config.enableL2GasPrice {
go g.Loop()
}
return nil
}
func (g *GasPriceOracle) Stop() {
close(g.stop)
}
func (g *GasPriceOracle) Wait() {
<-g.stop
}
// ensure makes sure that the configured private key is the owner
// of the `OVM_GasPriceOracle`. If it is not the owner, then it will
// not be able to make updates to the L2 gas price.
func (g *GasPriceOracle) ensure() error {
owner, err := g.contract.Owner(&bind.CallOpts{
Context: g.ctx,
})
if err != nil {
return err
}
address := crypto.PubkeyToAddress(g.config.privateKey.PublicKey)
if address != owner {
log.Error("Signing key does not match contract owner", "signer", address.Hex(), "owner", owner.Hex())
return errInvalidSigningKey
}
return nil
}
// Loop is the main logic of the gas-oracle
func (g *GasPriceOracle) Loop() {
timer := time.NewTicker(time.Duration(g.config.epochLengthSeconds) * time.Second)
defer timer.Stop()
for {
select {
case <-timer.C:
log.Trace("polling", "time", time.Now())
if err := g.Update(); err != nil {
log.Error("cannot update gas price", "message", err)
}
case <-g.ctx.Done():
g.Stop()
}
}
}
func (g *GasPriceOracle) BaseFeeLoop() {
timer := time.NewTicker(15 * time.Second)
defer timer.Stop()
updateBaseFee, err := wrapUpdateBaseFee(g.l1Backend, g.l2Backend, g.config)
if err != nil {
panic(err)
}
for {
select {
case <-timer.C:
if err := updateBaseFee(); err != nil {
log.Error("cannot update l1 base fee", "messgae", err)
}
case <-g.ctx.Done():
g.Stop()
}
}
}
// Update will update the gas price
func (g *GasPriceOracle) Update() error {
l2GasPrice, err := g.contract.GasPrice(&bind.CallOpts{
Context: g.ctx,
})
if err != nil {
return fmt.Errorf("cannot get gas price: %w", err)
}
if err := g.gasPriceUpdater.UpdateGasPrice(); err != nil {
return fmt.Errorf("cannot update gas price: %w", err)
}
newGasPrice, err := g.contract.GasPrice(&bind.CallOpts{
Context: g.ctx,
})
if err != nil {
return fmt.Errorf("cannot get gas price: %w", err)
}
local := g.gasPriceUpdater.GetGasPrice()
log.Info("Update", "original", l2GasPrice, "current", newGasPrice, "local", local)
return nil
}
// NewGasPriceOracle creates a new GasPriceOracle based on a Config
func NewGasPriceOracle(cfg *Config) (*GasPriceOracle, error) {
// Create the L2 client
l2Client, err := ethclient.Dial(cfg.layerTwoHttpUrl)
if err != nil {
return nil, err
}
l1Client, err := ethclient.Dial(cfg.ethereumHttpUrl)
if err != nil {
return nil, err
}
// Ensure that we can actually connect to both backends
if err := ensureConnection(l2Client); err != nil {
log.Error("Unable to connect to layer two", "addr", cfg.layerTwoHttpUrl)
}
if err := ensureConnection(l1Client); err != nil {
log.Error("Unable to connect to layer one", "addr", cfg.ethereumHttpUrl)
}
address := cfg.gasPriceOracleAddress
contract, err := bindings.NewGasPriceOracle(address, l2Client)
if err != nil {
return nil, err
}
// Fetch the current gas price to use as the current price
currentPrice, err := contract.GasPrice(&bind.CallOpts{
Context: context.Background(),
})
if err != nil {
return nil, err
}
// Create a gas pricer for the gas price updater
log.Info("Creating GasPricer", "currentPrice", currentPrice,
"floorPrice", cfg.floorPrice, "targetGasPerSecond", cfg.targetGasPerSecond,
"maxPercentChangePerEpoch", cfg.maxPercentChangePerEpoch)
gasPricer, err := gasprices.NewGasPricer(
currentPrice.Uint64(),
cfg.floorPrice,
func() float64 {
return float64(cfg.targetGasPerSecond)
},
cfg.maxPercentChangePerEpoch,
)
if err != nil {
return nil, err
}
l2ChainID, err := l2Client.ChainID(context.Background())
if err != nil {
return nil, err
}
l1ChainID, err := l1Client.ChainID(context.Background())
if err != nil {
return nil, err
}
if cfg.l2ChainID != nil {
if cfg.l2ChainID.Cmp(l2ChainID) != 0 {
return nil, fmt.Errorf("%w: L2: configured with %d and got %d",
errWrongChainID, cfg.l2ChainID, l2ChainID)
}
} else {
cfg.l2ChainID = l2ChainID
}
if cfg.l1ChainID != nil {
if cfg.l1ChainID.Cmp(l1ChainID) != 0 {
return nil, fmt.Errorf("%w: L1: configured with %d and got %d",
errWrongChainID, cfg.l1ChainID, l1ChainID)
}
} else {
cfg.l1ChainID = l1ChainID
}
if cfg.privateKey == nil {
return nil, errNoPrivateKey
}
tip, err := l2Client.HeaderByNumber(context.Background(), nil)
if err != nil {
return nil, err
}
// Start at the tip
epochStartBlockNumber := tip.Number.Uint64()
// getLatestBlockNumberFn is used by the GasPriceUpdater
// to get the latest block number
getLatestBlockNumberFn := wrapGetLatestBlockNumberFn(l2Client)
// updateL2GasPriceFn is used by the GasPriceUpdater to
// update the gas price
updateL2GasPriceFn, err := wrapUpdateL2GasPriceFn(l2Client, cfg)
if err != nil {
return nil, err
}
// getGasUsedByBlockFn is used by the GasPriceUpdater
// to fetch the amount of gas that a block has used
getGasUsedByBlockFn := wrapGetGasUsedByBlock(l2Client)
log.Info("Creating GasPriceUpdater", "epochStartBlockNumber", epochStartBlockNumber,
"averageBlockGasLimitPerEpoch", cfg.averageBlockGasLimitPerEpoch,
"epochLengthSeconds", cfg.epochLengthSeconds)
gasPriceUpdater, err := gasprices.NewGasPriceUpdater(
gasPricer,
epochStartBlockNumber,
cfg.averageBlockGasLimitPerEpoch,
cfg.epochLengthSeconds,
getLatestBlockNumberFn,
getGasUsedByBlockFn,
updateL2GasPriceFn,
)
if err != nil {
return nil, err
}
gpo := GasPriceOracle{
l2ChainID: l2ChainID,
l1ChainID: l1ChainID,
ctx: context.Background(),
stop: make(chan struct{}),
contract: contract,
gasPriceUpdater: gasPriceUpdater,
config: cfg,
l2Backend: l2Client,
l1Backend: l1Client,
}
if err := gpo.ensure(); err != nil {
return nil, err
}
return &gpo, nil
}
// Ensure that we can actually connect
func ensureConnection(client *ethclient.Client) error {
t := time.NewTicker(5 * time.Second)
defer t.Stop()
for ; true; <-t.C {
_, err := client.ChainID(context.Background())
if err == nil {
break
} else {
return err
}
}
return nil
}
| 1 | 21,269 | Do you mean to have gas_oracle changes in this PR? | ethereum-optimism-optimism | go |
@@ -1,6 +1,5 @@
-<%= render Blacklight::System::ModalComponent.new do |component| %>
- <% component.title { t('blacklight.email.form.title') } %>
-
- <%= render partial: '/shared/flash_msg' %>
- <span data-blacklight-modal="close"></span>
-<% end %>
+<turbo-stream action="append" target="main-flashes">
+ <template>
+ <%= render '/shared/flash_msg' %>
+ </template>
+</turbo-stream> | 1 | <%= render Blacklight::System::ModalComponent.new do |component| %>
<% component.title { t('blacklight.email.form.title') } %>
<%= render partial: '/shared/flash_msg' %>
<span data-blacklight-modal="close"></span>
<% end %>
| 1 | 8,954 | Is this effectively requiring browsers support javascript? | projectblacklight-blacklight | rb |
@@ -61,10 +61,11 @@ public class FingerprintAuthDialogFragment extends DialogFragment {
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
+ Boolean isDarkTheme = SalesforceSDKManager.getInstance().isDarkTheme(getActivity());
// Do not create a new Fragment when the Activity is re-created such as orientation changes.
setRetainInstance(true);
- setStyle(DialogFragment.STYLE_NORMAL, android.R.style.Theme_Material_Light_Dialog);
+ setStyle(DialogFragment.STYLE_NORMAL, (isDarkTheme ? R.style.SalesforceSDK_Dialog_Dark : R.style.SalesforceSDK_Dialog));
}
@Override | 1 | /*
* Copyright (c) 2016-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.ui;
import android.Manifest.permission;
import android.app.Dialog;
import android.app.DialogFragment;
import android.content.Context;
import android.content.pm.PackageManager;
import android.graphics.Color;
import android.hardware.fingerprint.FingerprintManager;
import android.hardware.fingerprint.FingerprintManager.AuthenticationCallback;
import android.hardware.fingerprint.FingerprintManager.AuthenticationResult;
import android.hardware.fingerprint.FingerprintManager.CryptoObject;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.view.Window;
import android.widget.Button;
import android.widget.TextView;
import com.salesforce.androidsdk.R;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import javax.crypto.Cipher;
/**
* A dialog which uses Fingerprint APIs to authenticate the user, and falls back to password
* authentication if fingerprint is not available.
*/
public class FingerprintAuthDialogFragment extends DialogFragment {
private TextView mStatusText;
private PasscodeActivity mContext;
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
// Do not create a new Fragment when the Activity is re-created such as orientation changes.
setRetainInstance(true);
setStyle(DialogFragment.STYLE_NORMAL, android.R.style.Theme_Material_Light_Dialog);
}
@Override
public void onResume() {
super.onResume();
FingerprintManager fingerprintManager = (FingerprintManager) mContext.getSystemService(Context.FINGERPRINT_SERVICE);
if (mContext.checkSelfPermission(permission.USE_FINGERPRINT) != PackageManager.PERMISSION_GRANTED) {
// If we got so far, we already got the permission in the PasscodeActivity. This is an OS mandated check.
return;
}
fingerprintManager.authenticate(new CryptoObject((Cipher) null), null, 0, new AuthenticationCallback() {
@Override
public void onAuthenticationError(int errorCode, CharSequence errString) {
super.onAuthenticationError(errorCode, errString);
}
@Override
public void onAuthenticationSucceeded(AuthenticationResult result) {
super.onAuthenticationSucceeded(result);
if (mStatusText != null) {
mStatusText.setText(R.string.sf__fingerprint_success);
mStatusText.setTextColor(Color.GREEN);
}
if (FingerprintAuthDialogFragment.this.getFragmentManager() != null) {
FingerprintAuthDialogFragment.this.dismiss();
}
mContext.unlockViaFingerprintScan();
}
@Override
public void onAuthenticationFailed() {
super.onAuthenticationFailed();
if (mStatusText != null) {
mStatusText.setText(R.string.sf__fingerprint_failed);
mStatusText.setTextColor(Color.RED);
}
}
@Override
public void onAuthenticationHelp(int helpCode, CharSequence helpString) {
super.onAuthenticationHelp(helpCode, helpString);
if (mStatusText != null) {
mStatusText.setText(helpString.toString());
mStatusText.setTextColor(Color.RED);
}
}
}, null);
}
@Override
public Dialog onCreateDialog(Bundle savedInstanceState) {
final Dialog dialog = super.onCreateDialog(savedInstanceState);
dialog.getWindow().requestFeature(Window.FEATURE_NO_TITLE);
return dialog;
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
final View v = inflater.inflate(R.layout.sf__fingerprint_dialog, container, false);
final Button cancelButton = v.findViewById(R.id.sf__use_password_button);
cancelButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
dismiss();
mContext.biometricDeclined();
}
});
mStatusText = v.findViewById(R.id.sf__fingerprint_status);
TextView textView = v.findViewById(R.id.sf__fingerprint_description);
textView.setText(getString(R.string.sf__fingerprint_description, SalesforceSDKManager.getInstance().provideAppName()));
getDialog().setCanceledOnTouchOutside(false);
return v;
}
public void setContext(PasscodeActivity ctx) {
mContext = ctx;
}
}
| 1 | 17,594 | Lowercase `boolean` - use the primitive type. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -174,7 +174,12 @@ namespace Microsoft.DotNet.Build.Tasks.Feed
}
else
{
- Log.LogError($"Item '{item}' already exists in {relativeBlobPath}.");
+ bool blobExists = await feed.CheckIfBlobExists(relativeBlobPath);
+
+ if (blobExists)
+ {
+ Log.LogError($"Item '{item}' already exists in {relativeBlobPath}.");
+ }
}
}
catch (Exception exc) | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using Microsoft.Build.Framework;
using Microsoft.DotNet.Build.CloudTestTasks;
using Microsoft.WindowsAzure.Storage;
using Newtonsoft.Json.Linq;
using Sleet;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.RegularExpressions;
using System.Threading;
using System.Threading.Tasks;
using MSBuild = Microsoft.Build.Utilities;
using CloudTestTasks = Microsoft.DotNet.Build.CloudTestTasks;
namespace Microsoft.DotNet.Build.Tasks.Feed
{
sealed class BlobFeedAction
{
private MSBuild.TaskLoggingHelper Log;
private static readonly CancellationTokenSource TokenSource = new CancellationTokenSource();
private static readonly CancellationToken CancellationToken = TokenSource.Token;
private const string feedRegex = @"(?<feedurl>https:\/\/(?<accountname>[^\.-]+)(?<domain>[^\/]*)\/((?<token>[a-zA-Z0-9+\/]*?\/\d{4}-\d{2}-\d{2})\/)?(?<containername>[^\/]+)\/(?<relativepath>.*\/)?)index\.json";
private string feedUrl;
private SleetSource source;
private int retries;
private int delay;
public BlobFeed feed;
public BlobFeedAction(string expectedFeedUrl, string accountKey, MSBuild.TaskLoggingHelper Log, int retryAttempts, int retryDelay)
{
this.Log = Log;
Match m = Regex.Match(expectedFeedUrl, feedRegex);
if (m.Success)
{
string accountName = m.Groups["accountname"].Value;
string containerName = m.Groups["containername"].Value;
string relativePath = m.Groups["relativepath"].Value;
feed = new BlobFeed(accountName, accountKey, containerName, relativePath, Log);
feedUrl = m.Groups["feedurl"].Value;
retries = retryAttempts;
delay = retryDelay;
source = new SleetSource
{
Name = feed.ContainerName,
Type = "azure",
Path = feedUrl,
Container = feed.ContainerName,
FeedSubPath = feed.RelativePath,
ConnectionString = $"DefaultEndpointsProtocol=https;AccountName={feed.AccountName};AccountKey={feed.AccountKey};EndpointSuffix=core.windows.net"
};
}
else
{
throw new Exception("Unable to parse expected feed. Please check ExpectedFeedUrl.");
}
}
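        // Example of a feed URL accepted by feedRegex above (illustrative values only):
        //   https://dotnetfeed.blob.core.windows.net/dotnet-core/index.json
        //     accountname   -> dotnetfeed
        //     containername -> dotnet-core
        //     feedurl       -> https://dotnetfeed.blob.core.windows.net/dotnet-core/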
public async Task<bool> PushToFeed(IEnumerable<string> items, bool allowOverwrite = false)
{
if (IsSanityChecked(items))
{
if (CancellationToken.IsCancellationRequested)
{
Log.LogError("Task PushToFeed cancelled");
CancellationToken.ThrowIfCancellationRequested();
}
await PushItemsToFeedAsync(items, allowOverwrite);
}
return !Log.HasLoggedErrors;
}
public async Task<bool> PushItemsToFeedAsync(IEnumerable<string> items, bool allowOverwrite)
{
Log.LogMessage(MessageImportance.Low, $"START pushing items to feed");
Random rnd = new Random();
try
{
// In case the first Push attempt fails with an InvalidOperationException we Init the feed and retry the Push command once.
// We also retry in case Sleet is not able to get a lock on the feed since it does not retry in this case.
for (int i = 0; i < retries; i++)
{
bool requiresInit = false;
try
{
bool result = await PushAsync(items.ToList(), allowOverwrite);
return result;
}
catch (InvalidOperationException ex) when (ex.Message.Contains("init"))
{
Log.LogWarning($"Sub-feed {source.FeedSubPath} has not been initialized. Initializing now...");
requiresInit = true;
}
catch (InvalidOperationException ex) when (ex.Message.Contains("Unable to obtain a lock on the feed."))
{
Log.LogWarning($"Sleet was not able to get a lock on the feed. Sleeping {delay} seconds and retrying.");
                        // Pushing packages might take more than just 60 seconds, so on each iteration we multiply the defined delay by a random factor.
// Using the defaults this could range from 30 seconds to 12.5 minutes.
await Task.Delay(TimeSpan.FromSeconds(rnd.Next(1, 5) * delay));
}
// If the feed has not been Init'ed this will be caught in the first iteration
if (requiresInit && i == 0)
{
                        // We are piggybacking on this retry so we don't add another one, but in case an Init is required we do i-- so that
                        // we still retry the full number of times defined, not one less.
i--;
bool result = await InitAsync();
if (result)
{
Log.LogMessage($"Initializing sub-feed {source.FeedSubPath} succeeded!");
}
else
{
Log.LogError($"Initializing sub-feed {source.FeedSubPath} failed!");
return false;
}
}
}
Log.LogError($"Pushing packages to sub-feed {source.FeedSubPath} failed!");
return false;
}
catch (Exception e)
{
Log.LogErrorFromException(e);
}
return !Log.HasLoggedErrors;
}
public async Task UploadAssets(ITaskItem item, SemaphoreSlim clientThrottle, bool allowOverwrite = false)
{
string relativeBlobPath = item.GetMetadata("RelativeBlobPath");
if (string.IsNullOrEmpty(relativeBlobPath))
{
string fileName = Path.GetFileName(item.ItemSpec);
string recursiveDir = item.GetMetadata("RecursiveDir");
relativeBlobPath = $"{feed.RelativePath}{recursiveDir}{fileName}";
}
relativeBlobPath = relativeBlobPath.Replace("\\", "/");
Log.LogMessage($"Uploading {relativeBlobPath}");
await clientThrottle.WaitAsync();
try
{
if (allowOverwrite)
{
Log.LogMessage($"Uploading {item} to {relativeBlobPath}.");
UploadClient uploadClient = new UploadClient(Log);
await uploadClient.UploadBlockBlobAsync(
CancellationToken,
feed.AccountName,
feed.AccountKey,
feed.ContainerName,
item.ItemSpec,
relativeBlobPath);
}
else
{
Log.LogError($"Item '{item}' already exists in {relativeBlobPath}.");
}
}
catch (Exception exc)
{
Log.LogError($"Unable to upload to {relativeBlobPath} due to {exc}.");
throw;
}
finally
{
clientThrottle.Release();
}
}
public async Task CreateContainerAsync(IBuildEngine buildEngine)
{
CreateAzureContainer createContainer = new CreateAzureContainer
{
AccountKey = feed.AccountKey,
AccountName = feed.AccountName,
ContainerName = feed.ContainerName,
FailIfExists = false,
BuildEngine = buildEngine
};
await createContainer.ExecuteAsync();
}
private bool IsSanityChecked(IEnumerable<string> items)
{
Log.LogMessage(MessageImportance.Low, $"START checking sanitized items for feed");
foreach (var item in items)
{
			if (Path.GetExtension(item) != ".nupkg")
{
Log.LogError($"{item} is not a nupkg");
return false;
}
}
List<string> duplicates = items.GroupBy(x => x)
.Where(group => group.Count() > 1)
.Select(group => group.Key).ToList();
if (duplicates.Count > 0)
{
Log.LogError($"Duplicates found: {duplicates}");
return false;
}
Log.LogMessage(MessageImportance.Low, $"DONE checking for sanitized items for feed");
return true;
}
private LocalSettings GetSettings()
{
SleetSettings sleetSettings = new SleetSettings()
{
Sources = new List<SleetSource>
{
source
}
};
LocalSettings settings = new LocalSettings
{
Json = JObject.FromObject(sleetSettings)
};
return settings;
}
private AzureFileSystem GetAzureFileSystem()
{
CloudStorageAccount storageAccount = CloudStorageAccount.Parse(source.ConnectionString);
AzureFileSystem fileSystem = new AzureFileSystem(new LocalCache(), new Uri(source.Path), new Uri(source.Path), storageAccount, source.Name, source.FeedSubPath);
return fileSystem;
}
private async Task<bool> PushAsync(IEnumerable<string> items, bool allowOverwrite)
{
LocalSettings settings = GetSettings();
AzureFileSystem fileSystem = GetAzureFileSystem();
bool result = await PushCommand.RunAsync(settings, fileSystem, items.ToList(), allowOverwrite, !allowOverwrite, new SleetLogger(Log));
return result;
}
private async Task<bool> InitAsync()
{
LocalSettings settings = GetSettings();
AzureFileSystem fileSystem = GetAzureFileSystem();
bool result = await InitCommand.RunAsync(settings, fileSystem, true, true, new SleetLogger(Log), CancellationToken);
return result;
}
}
}
| 1 | 14,040 | This isn't the correct logic. if allowOverride == true and !blobExists then upload. Just don't do the exist check if allowOverride is set to true. | dotnet-buildtools | .cs |
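A minimal sketch of the flow the reviewer describes for UploadAssets: probe for an existing blob only when overwriting is disallowed, and upload unconditionally otherwise. BlobExistsAsync is a hypothetical stand-in for whatever existence check the task really exposes; feed, Log, CancellationToken and UploadClient are the members already used above.

	private async Task UploadIfAllowedAsync(ITaskItem item, string relativeBlobPath, bool allowOverwrite)
	{
		// Only probe for an existing blob when we are NOT allowed to overwrite it.
		if (!allowOverwrite && await BlobExistsAsync(relativeBlobPath))
		{
			Log.LogError($"Item '{item}' already exists at {relativeBlobPath}.");
			return;
		}
		// Either overwriting is allowed or the blob is not there yet: upload unconditionally.
		UploadClient uploadClient = new UploadClient(Log);
		await uploadClient.UploadBlockBlobAsync(
			CancellationToken,
			feed.AccountName,
			feed.AccountKey,
			feed.ContainerName,
			item.ItemSpec,
			relativeBlobPath);
	}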
@@ -480,12 +480,15 @@ static void read_surface_net_wm_state(struct wlr_xwm *xwm,
xsurface->fullscreen = 0;
xcb_atom_t *atom = xcb_get_property_value(reply);
for (uint32_t i = 0; i < reply->value_len; i++) {
- if (atom[i] == xwm->atoms[_NET_WM_STATE_FULLSCREEN])
+ if (atom[i] == xwm->atoms[_NET_WM_STATE_FULLSCREEN]) {
xsurface->fullscreen = true;
- if (atom[i] == xwm->atoms[_NET_WM_STATE_MAXIMIZED_VERT])
+ }
+ if (atom[i] == xwm->atoms[_NET_WM_STATE_MAXIMIZED_VERT]) {
xsurface->maximized_vert = true;
- if (atom[i] == xwm->atoms[_NET_WM_STATE_MAXIMIZED_HORZ])
+ }
+ if (atom[i] == xwm->atoms[_NET_WM_STATE_MAXIMIZED_HORZ]) {
xsurface->maximized_horz = true;
+ }
}
}
| 1 | #ifndef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 200809L
#endif
#include <stdlib.h>
#include <unistd.h>
#include <xcb/composite.h>
#include <xcb/xfixes.h>
#include <xcb/xcb_image.h>
#include <xcb/render.h>
#include <wlr/config.h>
#include "wlr/util/log.h"
#include "wlr/util/edges.h"
#include "wlr/types/wlr_surface.h"
#include "wlr/xwayland.h"
#include "wlr/xcursor.h"
#include "wlr/xwm.h"
#ifdef WLR_HAS_XCB_ICCCM
#include <xcb/xcb_icccm.h>
#endif
const char *atom_map[ATOM_LAST] = {
"WL_SURFACE_ID",
"WM_DELETE_WINDOW",
"WM_PROTOCOLS",
"WM_HINTS",
"WM_NORMAL_HINTS",
"WM_SIZE_HINTS",
"_MOTIF_WM_HINTS",
"UTF8_STRING",
"WM_S0",
"_NET_SUPPORTED",
"_NET_WM_S0",
"_NET_WM_PID",
"_NET_WM_NAME",
"_NET_WM_STATE",
"_NET_WM_WINDOW_TYPE",
"WM_TAKE_FOCUS",
"WINDOW",
"_NET_ACTIVE_WINDOW",
"_NET_WM_MOVERESIZE",
"_NET_WM_NAME",
"_NET_SUPPORTING_WM_CHECK",
"_NET_WM_STATE_FULLSCREEN",
"_NET_WM_STATE_MAXIMIZED_VERT",
"_NET_WM_STATE_MAXIMIZED_HORZ",
"WM_STATE",
"CLIPBOARD",
"PRIMARY",
"_WL_SELECTION",
"TARGETS",
"CLIPBOARD_MANAGER",
"INCR",
"TEXT",
"TIMESTAMP",
};
/* General helpers */
// TODO: replace this with hash table?
static struct wlr_xwayland_surface *lookup_surface(struct wlr_xwm *xwm,
xcb_window_t window_id) {
struct wlr_xwayland_surface *surface;
wl_list_for_each(surface, &xwm->surfaces, link) {
if (surface->window_id == window_id) {
return surface;
}
}
return NULL;
}
static struct wlr_xwayland_surface *wlr_xwayland_surface_create(
struct wlr_xwm *xwm, xcb_window_t window_id, int16_t x, int16_t y,
uint16_t width, uint16_t height, bool override_redirect) {
struct wlr_xwayland_surface *surface =
calloc(1, sizeof(struct wlr_xwayland_surface));
if (!surface) {
wlr_log(L_ERROR, "Could not allocate wlr xwayland surface");
return NULL;
}
xcb_get_geometry_cookie_t geometry_cookie =
xcb_get_geometry(xwm->xcb_conn, window_id);
uint32_t values[1];
values[0] =
XCB_EVENT_MASK_FOCUS_CHANGE |
XCB_EVENT_MASK_PROPERTY_CHANGE;
xcb_change_window_attributes(xwm->xcb_conn, window_id,
XCB_CW_EVENT_MASK, &values);
surface->xwm = xwm;
surface->window_id = window_id;
surface->x = x;
surface->y = y;
surface->width = width;
surface->height = height;
surface->override_redirect = override_redirect;
wl_list_insert(&xwm->surfaces, &surface->link);
wl_list_init(&surface->children);
wl_list_init(&surface->parent_link);
wl_signal_init(&surface->events.destroy);
wl_signal_init(&surface->events.request_configure);
wl_signal_init(&surface->events.request_move);
wl_signal_init(&surface->events.request_resize);
wl_signal_init(&surface->events.request_maximize);
wl_signal_init(&surface->events.request_fullscreen);
wl_signal_init(&surface->events.map_notify);
wl_signal_init(&surface->events.unmap_notify);
wl_signal_init(&surface->events.set_class);
wl_signal_init(&surface->events.set_title);
wl_signal_init(&surface->events.set_parent);
wl_signal_init(&surface->events.set_pid);
wl_signal_init(&surface->events.set_window_type);
xcb_get_geometry_reply_t *geometry_reply =
xcb_get_geometry_reply(xwm->xcb_conn, geometry_cookie, NULL);
if (geometry_reply != NULL) {
surface->has_alpha = geometry_reply->depth == 32;
}
free(geometry_reply);
return surface;
}
static void xwm_set_net_active_window(struct wlr_xwm *xwm,
xcb_window_t window) {
xcb_change_property(xwm->xcb_conn, XCB_PROP_MODE_REPLACE,
xwm->screen->root, xwm->atoms[_NET_ACTIVE_WINDOW],
xwm->atoms[WINDOW], 32, 1, &window);
}
static void xwm_send_focus_window(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface) {
if (!xsurface) {
xcb_set_input_focus_checked(xwm->xcb_conn,
XCB_INPUT_FOCUS_POINTER_ROOT,
XCB_NONE, XCB_CURRENT_TIME);
return;
} else if (xsurface->override_redirect) {
return;
}
xcb_client_message_event_t client_message;
client_message.response_type = XCB_CLIENT_MESSAGE;
client_message.format = 32;
client_message.window = xsurface->window_id;
client_message.type = xwm->atoms[WM_PROTOCOLS];
client_message.data.data32[0] = xwm->atoms[WM_TAKE_FOCUS];
client_message.data.data32[1] = XCB_TIME_CURRENT_TIME;
xcb_send_event(xwm->xcb_conn, 0, xsurface->window_id,
XCB_EVENT_MASK_SUBSTRUCTURE_REDIRECT, (char*)&client_message);
xcb_set_input_focus(xwm->xcb_conn, XCB_INPUT_FOCUS_POINTER_ROOT,
xsurface->window_id, XCB_CURRENT_TIME);
uint32_t values[1];
values[0] = XCB_STACK_MODE_ABOVE;
xcb_configure_window(xwm->xcb_conn, xsurface->window_id,
XCB_CONFIG_WINDOW_STACK_MODE, values);
}
void xwm_surface_activate(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface) {
if (xwm->focus_surface == xsurface ||
(xsurface && xsurface->override_redirect)) {
return;
}
if (xsurface) {
xwm_set_net_active_window(xwm, xsurface->window_id);
} else {
xwm_set_net_active_window(xwm, XCB_WINDOW_NONE);
}
xwm_send_focus_window(xwm, xsurface);
xwm->focus_surface = xsurface;
xcb_flush(xwm->xcb_conn);
}
static void xsurface_set_net_wm_state(struct wlr_xwayland_surface *xsurface) {
struct wlr_xwm *xwm = xsurface->xwm;
uint32_t property[3];
int i;
i = 0;
if (xsurface->fullscreen) {
property[i++] = xwm->atoms[_NET_WM_STATE_FULLSCREEN];
}
if (xsurface->maximized_vert) {
property[i++] = xwm->atoms[_NET_WM_STATE_MAXIMIZED_VERT];
}
if (xsurface->maximized_horz) {
property[i++] = xwm->atoms[_NET_WM_STATE_MAXIMIZED_HORZ];
}
xcb_change_property(xwm->xcb_conn,
XCB_PROP_MODE_REPLACE,
xsurface->window_id,
xwm->atoms[NET_WM_STATE],
XCB_ATOM_ATOM,
32, // format
i, property);
}
static void wlr_xwayland_surface_destroy(
struct wlr_xwayland_surface *xsurface) {
wl_signal_emit(&xsurface->events.destroy, xsurface);
if (xsurface == xsurface->xwm->focus_surface) {
xwm_surface_activate(xsurface->xwm, NULL);
}
wl_list_remove(&xsurface->link);
wl_list_remove(&xsurface->parent_link);
if (xsurface->surface_id) {
wl_list_remove(&xsurface->unpaired_link);
}
if (xsurface->surface) {
wl_list_remove(&xsurface->surface_destroy.link);
wlr_surface_set_role_committed(xsurface->surface, NULL, NULL);
}
free(xsurface->title);
free(xsurface->class);
free(xsurface->instance);
free(xsurface->window_type);
free(xsurface->protocols);
free(xsurface->hints);
free(xsurface->size_hints);
free(xsurface);
}
static void read_surface_class(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *surface, xcb_get_property_reply_t *reply) {
if (reply->type != XCB_ATOM_STRING &&
reply->type != xwm->atoms[UTF8_STRING]) {
return;
}
size_t len = xcb_get_property_value_length(reply);
char *class = xcb_get_property_value(reply);
// Unpack two sequentially stored strings: instance, class
size_t instance_len = strnlen(class, len);
free(surface->instance);
if (len > 0 && instance_len < len) {
surface->instance = strndup(class, instance_len);
class += instance_len + 1;
} else {
surface->instance = NULL;
}
free(surface->class);
if (len > 0) {
surface->class = strndup(class, len);
} else {
surface->class = NULL;
}
wlr_log(L_DEBUG, "XCB_ATOM_WM_CLASS: %s %s", surface->instance,
surface->class);
wl_signal_emit(&surface->events.set_class, surface);
}
static void read_surface_title(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface,
xcb_get_property_reply_t *reply) {
if (reply->type != XCB_ATOM_STRING &&
reply->type != xwm->atoms[UTF8_STRING]) {
return;
}
// TODO: if reply->type == XCB_ATOM_STRING, uses latin1 encoding
// if reply->type == xwm->atoms[UTF8_STRING], uses utf8 encoding
size_t len = xcb_get_property_value_length(reply);
char *title = xcb_get_property_value(reply);
free(xsurface->title);
if (len > 0) {
xsurface->title = strndup(title, len);
} else {
xsurface->title = NULL;
}
wlr_log(L_DEBUG, "XCB_ATOM_WM_NAME: %s", xsurface->title);
wl_signal_emit(&xsurface->events.set_title, xsurface);
}
static void read_surface_parent(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface,
xcb_get_property_reply_t *reply) {
if (reply->type != XCB_ATOM_WINDOW) {
return;
}
xcb_window_t *xid = xcb_get_property_value(reply);
if (xid != NULL) {
xsurface->parent = lookup_surface(xwm, *xid);
} else {
xsurface->parent = NULL;
}
wl_list_remove(&xsurface->parent_link);
if (xsurface->parent != NULL) {
wl_list_insert(&xsurface->parent->children, &xsurface->parent_link);
} else {
wl_list_init(&xsurface->parent_link);
}
wlr_log(L_DEBUG, "XCB_ATOM_WM_TRANSIENT_FOR: %p", xid);
wl_signal_emit(&xsurface->events.set_parent, xsurface);
}
static void read_surface_pid(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface,
xcb_get_property_reply_t *reply) {
if (reply->type != XCB_ATOM_CARDINAL) {
return;
}
pid_t *pid = xcb_get_property_value(reply);
xsurface->pid = *pid;
wlr_log(L_DEBUG, "NET_WM_PID %d", xsurface->pid);
wl_signal_emit(&xsurface->events.set_pid, xsurface);
}
static void read_surface_window_type(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface,
xcb_get_property_reply_t *reply) {
if (reply->type != XCB_ATOM_ATOM) {
return;
}
xcb_atom_t *atoms = xcb_get_property_value(reply);
size_t atoms_len = reply->value_len;
size_t atoms_size = sizeof(xcb_atom_t) * atoms_len;
free(xsurface->window_type);
xsurface->window_type = malloc(atoms_size);
if (xsurface->window_type == NULL) {
return;
}
memcpy(xsurface->window_type, atoms, atoms_size);
xsurface->window_type_len = atoms_len;
wlr_log(L_DEBUG, "NET_WM_WINDOW_TYPE (%zu)", atoms_len);
wl_signal_emit(&xsurface->events.set_window_type, xsurface);
}
static void read_surface_protocols(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface,
xcb_get_property_reply_t *reply) {
if (reply->type != XCB_ATOM_ATOM) {
return;
}
xcb_atom_t *atoms = xcb_get_property_value(reply);
size_t atoms_len = reply->value_len;
size_t atoms_size = sizeof(xcb_atom_t) * atoms_len;
free(xsurface->protocols);
xsurface->protocols = malloc(atoms_size);
if (xsurface->protocols == NULL) {
return;
}
memcpy(xsurface->protocols, atoms, atoms_size);
xsurface->protocols_len = atoms_len;
wlr_log(L_DEBUG, "WM_PROTOCOLS (%zu)", atoms_len);
}
#ifdef WLR_HAS_XCB_ICCCM
static void read_surface_hints(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface,
xcb_get_property_reply_t *reply) {
// According to the docs, reply->type == xwm->atoms[WM_HINTS]
// In practice, reply->type == XCB_ATOM_ATOM
if (reply->value_len == 0) {
return;
}
xcb_icccm_wm_hints_t hints;
xcb_icccm_get_wm_hints_from_reply(&hints, reply);
free(xsurface->hints);
xsurface->hints = calloc(1, sizeof(struct wlr_xwayland_surface_hints));
if (xsurface->hints == NULL) {
return;
}
memcpy(xsurface->hints, &hints, sizeof(struct wlr_xwayland_surface_hints));
xsurface->hints_urgency = xcb_icccm_wm_hints_get_urgency(&hints);
wlr_log(L_DEBUG, "WM_HINTS (%d)", reply->value_len);
}
#else
static void read_surface_hints(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface,
xcb_get_property_reply_t *reply) {
// Do nothing
}
#endif
#ifdef WLR_HAS_XCB_ICCCM
static void read_surface_normal_hints(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface,
xcb_get_property_reply_t *reply) {
if (reply->type != xwm->atoms[WM_SIZE_HINTS] || reply->value_len == 0) {
return;
}
xcb_size_hints_t size_hints;
xcb_icccm_get_wm_size_hints_from_reply(&size_hints, reply);
free(xsurface->size_hints);
xsurface->size_hints =
calloc(1, sizeof(struct wlr_xwayland_surface_size_hints));
if (xsurface->size_hints == NULL) {
return;
}
memcpy(xsurface->size_hints, &size_hints,
sizeof(struct wlr_xwayland_surface_size_hints));
wlr_log(L_DEBUG, "WM_NORMAL_HINTS (%d)", reply->value_len);
}
#else
static void read_surface_normal_hints(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface,
xcb_get_property_reply_t *reply) {
// Do nothing
}
#endif
#define MWM_HINTS_FLAGS_FIELD 0
#define MWM_HINTS_DECORATIONS_FIELD 2
#define MWM_HINTS_DECORATIONS (1 << 1)
#define MWM_DECOR_ALL (1 << 0)
#define MWM_DECOR_BORDER (1 << 1)
#define MWM_DECOR_TITLE (1 << 3)
static void read_surface_motif_hints(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface,
xcb_get_property_reply_t *reply) {
if (reply->value_len < 5) {
return;
}
uint32_t *motif_hints = xcb_get_property_value(reply);
if (motif_hints[MWM_HINTS_FLAGS_FIELD] & MWM_HINTS_DECORATIONS) {
xsurface->decorations = WLR_XWAYLAND_SURFACE_DECORATIONS_ALL;
uint32_t decorations = motif_hints[MWM_HINTS_DECORATIONS_FIELD];
if ((decorations & MWM_DECOR_ALL) == 0) {
if ((decorations & MWM_DECOR_BORDER) == 0) {
xsurface->decorations |=
WLR_XWAYLAND_SURFACE_DECORATIONS_NO_BORDER;
}
if ((decorations & MWM_DECOR_TITLE) == 0) {
xsurface->decorations |=
WLR_XWAYLAND_SURFACE_DECORATIONS_NO_TITLE;
}
}
}
wlr_log(L_DEBUG, "MOTIF_WM_HINTS (%d)", reply->value_len);
}
static void read_surface_net_wm_state(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface,
xcb_get_property_reply_t *reply) {
xsurface->fullscreen = 0;
xcb_atom_t *atom = xcb_get_property_value(reply);
for (uint32_t i = 0; i < reply->value_len; i++) {
if (atom[i] == xwm->atoms[_NET_WM_STATE_FULLSCREEN])
xsurface->fullscreen = true;
if (atom[i] == xwm->atoms[_NET_WM_STATE_MAXIMIZED_VERT])
xsurface->maximized_vert = true;
if (atom[i] == xwm->atoms[_NET_WM_STATE_MAXIMIZED_HORZ])
xsurface->maximized_horz = true;
}
}
static void read_surface_property(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface, xcb_atom_t property) {
xcb_get_property_cookie_t cookie = xcb_get_property(xwm->xcb_conn, 0,
xsurface->window_id, property, XCB_ATOM_ANY, 0, 2048);
xcb_get_property_reply_t *reply = xcb_get_property_reply(xwm->xcb_conn,
cookie, NULL);
if (reply == NULL) {
return;
}
if (property == XCB_ATOM_WM_CLASS) {
read_surface_class(xwm, xsurface, reply);
} else if (property == XCB_ATOM_WM_NAME ||
property == xwm->atoms[NET_WM_NAME]) {
read_surface_title(xwm, xsurface, reply);
} else if (property == XCB_ATOM_WM_TRANSIENT_FOR) {
read_surface_parent(xwm, xsurface, reply);
} else if (property == xwm->atoms[NET_WM_PID]) {
read_surface_pid(xwm, xsurface, reply);
} else if (property == xwm->atoms[NET_WM_WINDOW_TYPE]) {
read_surface_window_type(xwm, xsurface, reply);
} else if (property == xwm->atoms[WM_PROTOCOLS]) {
read_surface_protocols(xwm, xsurface, reply);
} else if (property == xwm->atoms[NET_WM_STATE]) {
read_surface_net_wm_state(xwm, xsurface, reply);
} else if (property == xwm->atoms[WM_HINTS]) {
read_surface_hints(xwm, xsurface, reply);
} else if (property == xwm->atoms[WM_NORMAL_HINTS]) {
read_surface_normal_hints(xwm, xsurface, reply);
} else if (property == xwm->atoms[MOTIF_WM_HINTS]) {
read_surface_motif_hints(xwm, xsurface, reply);
} else {
wlr_log(L_DEBUG, "unhandled x11 property %u", property);
}
free(reply);
}
static void handle_surface_commit(struct wlr_surface *wlr_surface,
void *role_data) {
struct wlr_xwayland_surface *xsurface = role_data;
if (!xsurface->added &&
wlr_surface_has_buffer(xsurface->surface) &&
xsurface->mapped) {
wl_signal_emit(&xsurface->xwm->xwayland->events.new_surface, xsurface);
xsurface->added = true;
}
}
static void handle_surface_destroy(struct wl_listener *listener, void *data) {
struct wlr_xwayland_surface *xsurface =
wl_container_of(listener, xsurface, surface_destroy);
xsurface->surface = NULL;
// TODO destroy xwayland surface?
}
static void xwm_map_shell_surface(struct wlr_xwm *xwm,
struct wlr_xwayland_surface *xsurface,
struct wlr_surface *surface) {
xsurface->surface = surface;
// read all surface properties
const xcb_atom_t props[] = {
XCB_ATOM_WM_CLASS,
XCB_ATOM_WM_NAME,
XCB_ATOM_WM_TRANSIENT_FOR,
xwm->atoms[WM_PROTOCOLS],
xwm->atoms[WM_HINTS],
xwm->atoms[WM_NORMAL_HINTS],
xwm->atoms[MOTIF_WM_HINTS],
xwm->atoms[NET_WM_STATE],
xwm->atoms[NET_WM_WINDOW_TYPE],
xwm->atoms[NET_WM_NAME],
xwm->atoms[NET_WM_PID],
};
for (size_t i = 0; i < sizeof(props)/sizeof(xcb_atom_t); i++) {
read_surface_property(xwm, xsurface, props[i]);
}
wlr_surface_set_role_committed(xsurface->surface, handle_surface_commit,
xsurface);
xsurface->surface_destroy.notify = handle_surface_destroy;
wl_signal_add(&surface->events.destroy, &xsurface->surface_destroy);
xsurface->mapped = true;
wl_signal_emit(&xsurface->events.map_notify, xsurface);
}
static void xwm_handle_create_notify(struct wlr_xwm *xwm,
xcb_create_notify_event_t *ev) {
wlr_log(L_DEBUG, "XCB_CREATE_NOTIFY (%u)", ev->window);
wlr_xwayland_surface_create(xwm, ev->window, ev->x, ev->y,
ev->width, ev->height, ev->override_redirect);
}
static void xwm_handle_destroy_notify(struct wlr_xwm *xwm,
xcb_destroy_notify_event_t *ev) {
wlr_log(L_DEBUG, "XCB_DESTROY_NOTIFY (%u)", ev->window);
struct wlr_xwayland_surface *xsurface = lookup_surface(xwm, ev->window);
if (xsurface == NULL) {
return;
}
wlr_xwayland_surface_destroy(xsurface);
}
static void xwm_handle_configure_request(struct wlr_xwm *xwm,
xcb_configure_request_event_t *ev) {
wlr_log(L_DEBUG, "XCB_CONFIGURE_REQUEST (%u) [%ux%u+%d,%d]", ev->window,
ev->width, ev->height, ev->x, ev->y);
struct wlr_xwayland_surface *xsurface = lookup_surface(xwm, ev->window);
if (xsurface == NULL) {
return;
}
// TODO: handle ev->{parent,sibling}?
if (xsurface->surface == NULL) {
// Surface has not been mapped yet
wlr_xwayland_surface_configure(xsurface, ev->x, ev->y,
ev->width, ev->height);
} else {
struct wlr_xwayland_surface_configure_event *wlr_event =
calloc(1, sizeof(struct wlr_xwayland_surface_configure_event));
if (wlr_event == NULL) {
return;
}
wlr_event->surface = xsurface;
wlr_event->x = ev->x;
wlr_event->y = ev->y;
wlr_event->width = ev->width;
wlr_event->height = ev->height;
wl_signal_emit(&xsurface->events.request_configure, wlr_event);
free(wlr_event);
}
}
static void xwm_handle_configure_notify(struct wlr_xwm *xwm,
xcb_configure_notify_event_t *ev) {
struct wlr_xwayland_surface *xsurface =
lookup_surface(xwm, ev->window);
if (!xsurface) {
return;
}
xsurface->x = ev->x;
xsurface->y = ev->y;
xsurface->width = ev->width;
xsurface->height = ev->height;
}
#define ICCCM_WITHDRAWN_STATE 0
#define ICCCM_NORMAL_STATE 1
#define ICCCM_ICONIC_STATE 3
static void xsurface_set_wm_state(struct wlr_xwayland_surface *xsurface,
int32_t state) {
struct wlr_xwm *xwm = xsurface->xwm;
uint32_t property[2];
property[0] = state;
property[1] = XCB_WINDOW_NONE;
xcb_change_property(xwm->xcb_conn,
XCB_PROP_MODE_REPLACE,
xsurface->window_id,
xwm->atoms[WM_STATE],
xwm->atoms[WM_STATE],
32, // format
2, property);
}
static void xwm_handle_map_request(struct wlr_xwm *xwm,
xcb_map_request_event_t *ev) {
wlr_log(L_DEBUG, "XCB_MAP_REQUEST (%u)", ev->window);
struct wlr_xwayland_surface *xsurface = lookup_surface(xwm, ev->window);
if (!xsurface) {
return;
}
xsurface_set_wm_state(xsurface, ICCCM_NORMAL_STATE);
xsurface_set_net_wm_state(xsurface);
xcb_map_window(xwm->xcb_conn, ev->window);
}
static void xwm_handle_map_notify(struct wlr_xwm *xwm,
xcb_map_notify_event_t *ev) {
wlr_log(L_DEBUG, "XCB_MAP_NOTIFY (%u)", ev->window);
}
static void xwm_handle_unmap_notify(struct wlr_xwm *xwm,
xcb_unmap_notify_event_t *ev) {
wlr_log(L_DEBUG, "XCB_UNMAP_NOTIFY (%u)", ev->window);
struct wlr_xwayland_surface *xsurface = lookup_surface(xwm, ev->window);
if (xsurface == NULL) {
return;
}
if (xsurface->surface_id) {
// Make sure we're not on the unpaired surface list or we
// could be assigned a surface during surface creation that
// was mapped before this unmap request.
wl_list_remove(&xsurface->unpaired_link);
xsurface->surface_id = 0;
}
if (xsurface->surface) {
wlr_surface_set_role_committed(xsurface->surface, NULL, NULL);
wl_list_remove(&xsurface->surface_destroy.link);
}
xsurface->surface = NULL;
if (xsurface->mapped) {
xsurface->mapped = false;
wl_signal_emit(&xsurface->events.unmap_notify, xsurface);
}
xsurface_set_wm_state(xsurface, ICCCM_WITHDRAWN_STATE);
}
static void xwm_handle_property_notify(struct wlr_xwm *xwm,
xcb_property_notify_event_t *ev) {
wlr_log(L_DEBUG, "XCB_PROPERTY_NOTIFY (%u)", ev->window);
struct wlr_xwayland_surface *xsurface = lookup_surface(xwm, ev->window);
if (xsurface == NULL) {
return;
}
read_surface_property(xwm, xsurface, ev->atom);
}
static void xwm_handle_surface_id_message(struct wlr_xwm *xwm,
xcb_client_message_event_t *ev) {
struct wlr_xwayland_surface *xsurface = lookup_surface(xwm, ev->window);
if (xsurface == NULL) {
wlr_log(L_DEBUG,
"client message WL_SURFACE_ID but no new window %u ?",
ev->window);
return;
}
/* Check if we got notified after wayland surface create event */
uint32_t id = ev->data.data32[0];
struct wl_resource *resource =
wl_client_get_object(xwm->xwayland->client, id);
if (resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
xsurface->surface_id = 0;
xwm_map_shell_surface(xwm, xsurface, surface);
} else {
xsurface->surface_id = id;
wl_list_insert(&xwm->unpaired_surfaces, &xsurface->unpaired_link);
}
}
#define _NET_WM_MOVERESIZE_SIZE_TOPLEFT 0
#define _NET_WM_MOVERESIZE_SIZE_TOP 1
#define _NET_WM_MOVERESIZE_SIZE_TOPRIGHT 2
#define _NET_WM_MOVERESIZE_SIZE_RIGHT 3
#define _NET_WM_MOVERESIZE_SIZE_BOTTOMRIGHT 4
#define _NET_WM_MOVERESIZE_SIZE_BOTTOM 5
#define _NET_WM_MOVERESIZE_SIZE_BOTTOMLEFT 6
#define _NET_WM_MOVERESIZE_SIZE_LEFT 7
#define _NET_WM_MOVERESIZE_MOVE 8 // movement only
#define _NET_WM_MOVERESIZE_SIZE_KEYBOARD 9 // size via keyboard
#define _NET_WM_MOVERESIZE_MOVE_KEYBOARD 10 // move via keyboard
#define _NET_WM_MOVERESIZE_CANCEL 11 // cancel operation
static enum wlr_edges net_wm_edges_to_wlr(uint32_t net_wm_edges) {
enum wlr_edges edges = WLR_EDGE_NONE;
switch(net_wm_edges) {
case _NET_WM_MOVERESIZE_SIZE_TOPLEFT:
edges = WLR_EDGE_TOP | WLR_EDGE_LEFT;
break;
case _NET_WM_MOVERESIZE_SIZE_TOP:
edges = WLR_EDGE_TOP;
break;
case _NET_WM_MOVERESIZE_SIZE_TOPRIGHT:
edges = WLR_EDGE_TOP | WLR_EDGE_RIGHT;
break;
case _NET_WM_MOVERESIZE_SIZE_RIGHT:
edges = WLR_EDGE_RIGHT;
break;
case _NET_WM_MOVERESIZE_SIZE_BOTTOMRIGHT:
edges = WLR_EDGE_BOTTOM | WLR_EDGE_RIGHT;
break;
case _NET_WM_MOVERESIZE_SIZE_BOTTOM:
edges = WLR_EDGE_BOTTOM;
break;
case _NET_WM_MOVERESIZE_SIZE_BOTTOMLEFT:
edges = WLR_EDGE_BOTTOM | WLR_EDGE_LEFT;
break;
case _NET_WM_MOVERESIZE_SIZE_LEFT:
edges = WLR_EDGE_LEFT;
break;
default:
break;
}
return edges;
}
static void xwm_handle_net_wm_moveresize_message(struct wlr_xwm *xwm,
xcb_client_message_event_t *ev) {
struct wlr_xwayland_surface *xsurface = lookup_surface(xwm, ev->window);
if (!xsurface) {
return;
}
// TODO: we should probably add input or seat info to this but we would just
// be guessing
struct wlr_xwayland_resize_event resize_event;
struct wlr_xwayland_move_event move_event;
int detail = ev->data.data32[2];
switch (detail) {
case _NET_WM_MOVERESIZE_MOVE:
move_event.surface = xsurface;
wl_signal_emit(&xsurface->events.request_move, &move_event);
break;
case _NET_WM_MOVERESIZE_SIZE_TOPLEFT:
case _NET_WM_MOVERESIZE_SIZE_TOP:
case _NET_WM_MOVERESIZE_SIZE_TOPRIGHT:
case _NET_WM_MOVERESIZE_SIZE_RIGHT:
case _NET_WM_MOVERESIZE_SIZE_BOTTOMRIGHT:
case _NET_WM_MOVERESIZE_SIZE_BOTTOM:
case _NET_WM_MOVERESIZE_SIZE_BOTTOMLEFT:
case _NET_WM_MOVERESIZE_SIZE_LEFT:
resize_event.surface = xsurface;
resize_event.edges = net_wm_edges_to_wlr(detail);
wl_signal_emit(&xsurface->events.request_resize, &resize_event);
break;
case _NET_WM_MOVERESIZE_CANCEL:
// handled by the compositor
break;
}
}
#define _NET_WM_STATE_REMOVE 0
#define _NET_WM_STATE_ADD 1
#define _NET_WM_STATE_TOGGLE 2
static bool update_state(int action, bool *state) {
int new_state, changed;
switch (action) {
case _NET_WM_STATE_REMOVE:
new_state = false;
break;
case _NET_WM_STATE_ADD:
new_state = true;
break;
case _NET_WM_STATE_TOGGLE:
new_state = !*state;
break;
default:
return false;
}
changed = (*state != new_state);
*state = new_state;
return changed;
}
static inline bool xsurface_is_maximized(
struct wlr_xwayland_surface *xsurface) {
return xsurface->maximized_horz && xsurface->maximized_vert;
}
static void xwm_handle_net_wm_state_message(struct wlr_xwm *xwm,
xcb_client_message_event_t *client_message) {
struct wlr_xwayland_surface *xsurface =
lookup_surface(xwm, client_message->window);
if (!xsurface) {
return;
}
if (client_message->format != 32) {
return;
}
bool fullscreen = xsurface->fullscreen;
bool maximized = xsurface_is_maximized(xsurface);
uint32_t action = client_message->data.data32[0];
for (size_t i = 0; i < 2; ++i) {
uint32_t property = client_message->data.data32[1 + i];
if (property == xwm->atoms[_NET_WM_STATE_FULLSCREEN] &&
update_state(action, &xsurface->fullscreen)) {
xsurface_set_net_wm_state(xsurface);
} else if (property == xwm->atoms[_NET_WM_STATE_MAXIMIZED_VERT] &&
update_state(action, &xsurface->maximized_vert)) {
xsurface_set_net_wm_state(xsurface);
} else if (property == xwm->atoms[_NET_WM_STATE_MAXIMIZED_HORZ] &&
update_state(action, &xsurface->maximized_horz)) {
xsurface_set_net_wm_state(xsurface);
}
}
// client_message->data.data32[3] is the source indication
// all other values are set to 0
if (fullscreen != xsurface->fullscreen) {
if (xsurface->fullscreen) {
xsurface->saved_width = xsurface->width;
xsurface->saved_height = xsurface->height;
}
wl_signal_emit(&xsurface->events.request_fullscreen, xsurface);
}
if (maximized != xsurface_is_maximized(xsurface)) {
if (xsurface_is_maximized(xsurface)) {
xsurface->saved_width = xsurface->width;
xsurface->saved_height = xsurface->height;
}
wl_signal_emit(&xsurface->events.request_maximize, xsurface);
}
}
static void xwm_handle_client_message(struct wlr_xwm *xwm,
xcb_client_message_event_t *ev) {
wlr_log(L_DEBUG, "XCB_CLIENT_MESSAGE (%u)", ev->window);
if (ev->type == xwm->atoms[WL_SURFACE_ID]) {
xwm_handle_surface_id_message(xwm, ev);
} else if (ev->type == xwm->atoms[NET_WM_STATE]) {
xwm_handle_net_wm_state_message(xwm, ev);
} else if (ev->type == xwm->atoms[_NET_WM_MOVERESIZE]) {
xwm_handle_net_wm_moveresize_message(xwm, ev);
} else {
wlr_log(L_DEBUG, "unhandled x11 client message %u", ev->type);
}
}
static void xwm_handle_focus_in(struct wlr_xwm *xwm,
xcb_focus_in_event_t *ev) {
// Do not interfere with grabs
if (ev->mode == XCB_NOTIFY_MODE_GRAB ||
ev->mode == XCB_NOTIFY_MODE_UNGRAB) {
return;
}
// Do not let X clients change the focus behind the compositor's
// back. Reset the focus to the old one if it changed.
if (!xwm->focus_surface || ev->event != xwm->focus_surface->window_id) {
xwm_send_focus_window(xwm, xwm->focus_surface);
}
}
/* This is in xcb/xcb_event.h, but pulling xcb-util just for a constant
* others redefine anyway is meh
*/
#define XCB_EVENT_RESPONSE_TYPE_MASK (0x7f)
static int x11_event_handler(int fd, uint32_t mask, void *data) {
int count = 0;
xcb_generic_event_t *event;
struct wlr_xwm *xwm = data;
while ((event = xcb_poll_for_event(xwm->xcb_conn))) {
count++;
if (xwm->xwayland->user_event_handler &&
xwm->xwayland->user_event_handler(xwm, event)) {
break;
}
if (xwm_handle_selection_event(xwm, event)) {
free(event);
continue;
}
switch (event->response_type & XCB_EVENT_RESPONSE_TYPE_MASK) {
case XCB_CREATE_NOTIFY:
xwm_handle_create_notify(xwm, (xcb_create_notify_event_t *)event);
break;
case XCB_DESTROY_NOTIFY:
xwm_handle_destroy_notify(xwm, (xcb_destroy_notify_event_t *)event);
break;
case XCB_CONFIGURE_REQUEST:
xwm_handle_configure_request(xwm,
(xcb_configure_request_event_t *)event);
break;
case XCB_CONFIGURE_NOTIFY:
xwm_handle_configure_notify(xwm,
(xcb_configure_notify_event_t *)event);
break;
case XCB_MAP_REQUEST:
xwm_handle_map_request(xwm, (xcb_map_request_event_t *)event);
break;
case XCB_MAP_NOTIFY:
xwm_handle_map_notify(xwm, (xcb_map_notify_event_t *)event);
break;
case XCB_UNMAP_NOTIFY:
xwm_handle_unmap_notify(xwm, (xcb_unmap_notify_event_t *)event);
break;
case XCB_PROPERTY_NOTIFY:
xwm_handle_property_notify(xwm,
(xcb_property_notify_event_t *)event);
break;
case XCB_CLIENT_MESSAGE:
xwm_handle_client_message(xwm, (xcb_client_message_event_t *)event);
break;
case XCB_FOCUS_IN:
xwm_handle_focus_in(xwm, (xcb_focus_in_event_t *)event);
break;
default:
wlr_log(L_DEBUG, "X11 event: %d",
event->response_type & XCB_EVENT_RESPONSE_TYPE_MASK);
break;
}
free(event);
}
if (count) {
xcb_flush(xwm->xcb_conn);
}
return count;
}
static void handle_compositor_surface_create(struct wl_listener *listener,
void *data) {
struct wlr_surface *surface = data;
struct wlr_xwm *xwm =
wl_container_of(listener, xwm, compositor_surface_create);
if (wl_resource_get_client(surface->resource) != xwm->xwayland->client) {
return;
}
wlr_log(L_DEBUG, "New xwayland surface: %p", surface);
uint32_t surface_id = wl_resource_get_id(surface->resource);
struct wlr_xwayland_surface *xsurface;
wl_list_for_each(xsurface, &xwm->unpaired_surfaces, unpaired_link) {
if (xsurface->surface_id == surface_id) {
xwm_map_shell_surface(xwm, xsurface, surface);
xsurface->surface_id = 0;
wl_list_remove(&xsurface->unpaired_link);
xcb_flush(xwm->xcb_conn);
return;
}
}
}
void wlr_xwayland_surface_activate(struct wlr_xwayland_surface *xsurface,
bool activated) {
struct wlr_xwayland_surface *focused = xsurface->xwm->focus_surface;
if (activated) {
xwm_surface_activate(xsurface->xwm, xsurface);
} else if (focused == xsurface) {
xwm_surface_activate(xsurface->xwm, NULL);
}
}
void wlr_xwayland_surface_configure(struct wlr_xwayland_surface *xsurface,
int16_t x, int16_t y, uint16_t width, uint16_t height) {
xsurface->x = x;
xsurface->y = y;
xsurface->width = width;
xsurface->height = height;
struct wlr_xwm *xwm = xsurface->xwm;
uint32_t mask = XCB_CONFIG_WINDOW_X | XCB_CONFIG_WINDOW_Y |
XCB_CONFIG_WINDOW_WIDTH | XCB_CONFIG_WINDOW_HEIGHT |
XCB_CONFIG_WINDOW_BORDER_WIDTH;
uint32_t values[] = {x, y, width, height, 0};
xcb_configure_window(xwm->xcb_conn, xsurface->window_id, mask, values);
xcb_flush(xwm->xcb_conn);
}
void wlr_xwayland_surface_close(struct wlr_xwayland_surface *xsurface) {
struct wlr_xwm *xwm = xsurface->xwm;
bool supports_delete = false;
for (size_t i = 0; i < xsurface->protocols_len; i++) {
if (xsurface->protocols[i] == xwm->atoms[WM_DELETE_WINDOW]) {
supports_delete = true;
break;
}
}
if (supports_delete) {
xcb_client_message_event_t ev = {0};
ev.response_type = XCB_CLIENT_MESSAGE;
ev.window = xsurface->window_id;
ev.format = 32;
ev.sequence = 0;
ev.type = xwm->atoms[WM_PROTOCOLS];
ev.data.data32[0] = xwm->atoms[WM_DELETE_WINDOW];
ev.data.data32[1] = XCB_CURRENT_TIME;
xcb_send_event(xwm->xcb_conn, 0,
xsurface->window_id,
XCB_EVENT_MASK_NO_EVENT,
(char *)&ev);
} else {
xcb_kill_client(xwm->xcb_conn, xsurface->window_id);
}
xcb_flush(xwm->xcb_conn);
}
void xwm_destroy(struct wlr_xwm *xwm) {
if (!xwm) {
return;
}
xwm_selection_finish(xwm);
if (xwm->cursor) {
xcb_free_cursor(xwm->xcb_conn, xwm->cursor);
}
if (xwm->colormap) {
xcb_free_colormap(xwm->xcb_conn, xwm->colormap);
}
if (xwm->window) {
xcb_destroy_window(xwm->xcb_conn, xwm->window);
}
if (xwm->event_source) {
wl_event_source_remove(xwm->event_source);
}
struct wlr_xwayland_surface *xsurface, *tmp;
wl_list_for_each_safe(xsurface, tmp, &xwm->surfaces, link) {
wlr_xwayland_surface_destroy(xsurface);
}
wl_list_for_each_safe(xsurface, tmp, &xwm->unpaired_surfaces, link) {
wlr_xwayland_surface_destroy(xsurface);
}
wl_list_remove(&xwm->compositor_surface_create.link);
xcb_disconnect(xwm->xcb_conn);
free(xwm);
}
static void xwm_get_resources(struct wlr_xwm *xwm) {
xcb_prefetch_extension_data(xwm->xcb_conn, &xcb_xfixes_id);
xcb_prefetch_extension_data(xwm->xcb_conn, &xcb_composite_id);
size_t i;
xcb_intern_atom_cookie_t cookies[ATOM_LAST];
for (i = 0; i < ATOM_LAST; i++) {
cookies[i] =
xcb_intern_atom(xwm->xcb_conn, 0, strlen(atom_map[i]), atom_map[i]);
}
for (i = 0; i < ATOM_LAST; i++) {
xcb_generic_error_t *error;
xcb_intern_atom_reply_t *reply =
xcb_intern_atom_reply(xwm->xcb_conn, cookies[i], &error);
if (reply && !error) {
xwm->atoms[i] = reply->atom;
}
free(reply);
if (error) {
wlr_log(L_ERROR, "could not resolve atom %s, x11 error code %d",
atom_map[i], error->error_code);
free(error);
return;
}
}
xwm->xfixes = xcb_get_extension_data(xwm->xcb_conn, &xcb_xfixes_id);
if (!xwm->xfixes || !xwm->xfixes->present) {
wlr_log(L_DEBUG, "xfixes not available");
}
xcb_xfixes_query_version_cookie_t xfixes_cookie;
xcb_xfixes_query_version_reply_t *xfixes_reply;
xfixes_cookie =
xcb_xfixes_query_version(xwm->xcb_conn, XCB_XFIXES_MAJOR_VERSION,
XCB_XFIXES_MINOR_VERSION);
xfixes_reply =
xcb_xfixes_query_version_reply(xwm->xcb_conn, xfixes_cookie, NULL);
wlr_log(L_DEBUG, "xfixes version: %d.%d",
xfixes_reply->major_version, xfixes_reply->minor_version);
free(xfixes_reply);
}
static void xwm_create_wm_window(struct wlr_xwm *xwm) {
static const char name[] = "wlroots wm";
xwm->window = xcb_generate_id(xwm->xcb_conn);
xcb_create_window(xwm->xcb_conn,
XCB_COPY_FROM_PARENT,
xwm->window,
xwm->screen->root,
0, 0,
10, 10,
0,
XCB_WINDOW_CLASS_INPUT_OUTPUT,
xwm->screen->root_visual,
0, NULL);
xcb_change_property(xwm->xcb_conn,
XCB_PROP_MODE_REPLACE,
xwm->window,
xwm->atoms[_NET_WM_NAME],
xwm->atoms[UTF8_STRING],
8, // format
strlen(name), name);
xcb_change_property(xwm->xcb_conn,
XCB_PROP_MODE_REPLACE,
xwm->screen->root,
xwm->atoms[_NET_SUPPORTING_WM_CHECK],
XCB_ATOM_WINDOW,
32, // format
1, &xwm->window);
xcb_change_property(xwm->xcb_conn,
XCB_PROP_MODE_REPLACE,
xwm->window,
xwm->atoms[_NET_SUPPORTING_WM_CHECK],
XCB_ATOM_WINDOW,
32, // format
1, &xwm->window);
xcb_set_selection_owner(xwm->xcb_conn,
xwm->window,
xwm->atoms[WM_S0],
XCB_CURRENT_TIME);
xcb_set_selection_owner(xwm->xcb_conn,
xwm->window,
xwm->atoms[NET_WM_S0],
XCB_CURRENT_TIME);
}
// TODO use me to support 32 bit color somehow
static void xwm_get_visual_and_colormap(struct wlr_xwm *xwm) {
xcb_depth_iterator_t d_iter;
xcb_visualtype_iterator_t vt_iter;
xcb_visualtype_t *visualtype;
d_iter = xcb_screen_allowed_depths_iterator(xwm->screen);
visualtype = NULL;
while (d_iter.rem > 0) {
if (d_iter.data->depth == 32) {
vt_iter = xcb_depth_visuals_iterator(d_iter.data);
visualtype = vt_iter.data;
break;
}
xcb_depth_next(&d_iter);
}
if (visualtype == NULL) {
wlr_log(L_DEBUG, "No 32 bit visualtype\n");
return;
}
xwm->visual_id = visualtype->visual_id;
xwm->colormap = xcb_generate_id(xwm->xcb_conn);
xcb_create_colormap(xwm->xcb_conn,
XCB_COLORMAP_ALLOC_NONE,
xwm->colormap,
xwm->screen->root,
xwm->visual_id);
}
static void xwm_get_render_format(struct wlr_xwm *xwm) {
xcb_render_query_pict_formats_cookie_t cookie =
xcb_render_query_pict_formats(xwm->xcb_conn);
xcb_render_query_pict_formats_reply_t *reply =
xcb_render_query_pict_formats_reply(xwm->xcb_conn, cookie, NULL);
if (!reply) {
wlr_log(L_ERROR, "Did not get any reply from xcb_render_query_pict_formats");
return;
}
xcb_render_pictforminfo_iterator_t iter =
xcb_render_query_pict_formats_formats_iterator(reply);
xcb_render_pictforminfo_t *format = NULL;
while (iter.rem > 0) {
if (iter.data->depth == 32) {
format = iter.data;
break;
}
xcb_render_pictforminfo_next(&iter);
}
if (format == NULL) {
wlr_log(L_DEBUG, "No 32 bit render format");
free(reply);
return;
}
xwm->render_format_id = format->id;
free(reply);
}
void xwm_set_cursor(struct wlr_xwm *xwm, const uint8_t *pixels, uint32_t stride,
uint32_t width, uint32_t height, int32_t hotspot_x, int32_t hotspot_y) {
if (!xwm->render_format_id) {
wlr_log(L_ERROR, "Cannot set xwm cursor: no render format available");
return;
}
if (xwm->cursor) {
xcb_free_cursor(xwm->xcb_conn, xwm->cursor);
}
stride *= 4;
int depth = 32;
xcb_pixmap_t pix = xcb_generate_id(xwm->xcb_conn);
xcb_create_pixmap(xwm->xcb_conn, depth, pix, xwm->screen->root, width,
height);
xcb_render_picture_t pic = xcb_generate_id(xwm->xcb_conn);
xcb_render_create_picture(xwm->xcb_conn, pic, pix, xwm->render_format_id,
0, 0);
xcb_gcontext_t gc = xcb_generate_id(xwm->xcb_conn);
xcb_create_gc(xwm->xcb_conn, gc, pix, 0, NULL);
xcb_put_image(xwm->xcb_conn, XCB_IMAGE_FORMAT_Z_PIXMAP, pix, gc,
width, height, 0, 0, 0, depth, stride * height * sizeof(uint8_t),
pixels);
xcb_free_gc(xwm->xcb_conn, gc);
xwm->cursor = xcb_generate_id(xwm->xcb_conn);
xcb_render_create_cursor(xwm->xcb_conn, xwm->cursor, pic, hotspot_x,
hotspot_y);
xcb_free_pixmap(xwm->xcb_conn, pix);
uint32_t values[] = {xwm->cursor};
xcb_change_window_attributes(xwm->xcb_conn, xwm->screen->root,
XCB_CW_CURSOR, values);
xcb_flush(xwm->xcb_conn);
}
struct wlr_xwm *xwm_create(struct wlr_xwayland *wlr_xwayland) {
struct wlr_xwm *xwm = calloc(1, sizeof(struct wlr_xwm));
if (xwm == NULL) {
return NULL;
}
xwm->xwayland = wlr_xwayland;
wl_list_init(&xwm->surfaces);
wl_list_init(&xwm->unpaired_surfaces);
xwm->xcb_conn = xcb_connect_to_fd(wlr_xwayland->wm_fd[0], NULL);
int rc = xcb_connection_has_error(xwm->xcb_conn);
if (rc) {
wlr_log(L_ERROR, "xcb connect failed: %d", rc);
close(wlr_xwayland->wm_fd[0]);
free(xwm);
return NULL;
}
xcb_screen_iterator_t screen_iterator =
xcb_setup_roots_iterator(xcb_get_setup(xwm->xcb_conn));
xwm->screen = screen_iterator.data;
struct wl_event_loop *event_loop = wl_display_get_event_loop(
wlr_xwayland->wl_display);
xwm->event_source =
wl_event_loop_add_fd(event_loop,
wlr_xwayland->wm_fd[0],
WL_EVENT_READABLE,
x11_event_handler,
xwm);
wl_event_source_check(xwm->event_source);
xwm_get_resources(xwm);
xwm_get_visual_and_colormap(xwm);
xwm_get_render_format(xwm);
uint32_t values[] = {
XCB_EVENT_MASK_SUBSTRUCTURE_NOTIFY |
XCB_EVENT_MASK_SUBSTRUCTURE_REDIRECT |
XCB_EVENT_MASK_PROPERTY_CHANGE,
};
xcb_change_window_attributes(xwm->xcb_conn,
xwm->screen->root,
XCB_CW_EVENT_MASK,
values);
xcb_composite_redirect_subwindows(xwm->xcb_conn,
xwm->screen->root,
XCB_COMPOSITE_REDIRECT_MANUAL);
xcb_atom_t supported[] = {
xwm->atoms[NET_WM_STATE],
xwm->atoms[_NET_ACTIVE_WINDOW],
xwm->atoms[_NET_WM_MOVERESIZE],
xwm->atoms[_NET_WM_STATE_FULLSCREEN],
xwm->atoms[_NET_WM_STATE_MAXIMIZED_VERT],
xwm->atoms[_NET_WM_STATE_MAXIMIZED_HORZ],
};
xcb_change_property(xwm->xcb_conn,
XCB_PROP_MODE_REPLACE,
xwm->screen->root,
xwm->atoms[NET_SUPPORTED],
XCB_ATOM_ATOM,
32,
sizeof(supported)/sizeof(*supported),
supported);
xcb_flush(xwm->xcb_conn);
xwm_set_net_active_window(xwm, XCB_WINDOW_NONE);
xwm_selection_init(xwm);
xwm->compositor_surface_create.notify = handle_compositor_surface_create;
wl_signal_add(&wlr_xwayland->compositor->events.create_surface,
&xwm->compositor_surface_create);
xwm_create_wm_window(xwm);
xcb_flush(xwm->xcb_conn);
return xwm;
}
void wlr_xwayland_surface_set_maximized(struct wlr_xwayland_surface *surface,
bool maximized) {
surface->maximized_horz = maximized;
surface->maximized_vert = maximized;
xsurface_set_net_wm_state(surface);
xcb_flush(surface->xwm->xcb_conn);
}
void wlr_xwayland_surface_set_fullscreen(struct wlr_xwayland_surface *surface,
bool fullscreen) {
surface->fullscreen = fullscreen;
xsurface_set_net_wm_state(surface);
xcb_flush(surface->xwm->xcb_conn);
}
| 1 | 9,677 | Can you replace these by `else if` please? | swaywm-wlroots | c |
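Since a given atom can match at most one of the three states, the change the reviewer asks for is a plain else-if chain; the loop in read_surface_net_wm_state above would then read (behavior unchanged):

	for (uint32_t i = 0; i < reply->value_len; i++) {
		if (atom[i] == xwm->atoms[_NET_WM_STATE_FULLSCREEN]) {
			xsurface->fullscreen = true;
		} else if (atom[i] == xwm->atoms[_NET_WM_STATE_MAXIMIZED_VERT]) {
			xsurface->maximized_vert = true;
		} else if (atom[i] == xwm->atoms[_NET_WM_STATE_MAXIMIZED_HORZ]) {
			xsurface->maximized_horz = true;
		}
	}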
@@ -0,0 +1,12 @@
+/**
+ * Changes the currently selected date range.
+ *
+ * Currently only identifiable by the option values.
+ *
+ * @param {string} fromRange The currently selected date range.
+ * @param {string} toRange The new date range to select.
+ */
+export async function switchDateRange( fromRange, toRange ) {
+ await expect( page ).toClick( '.mdc-select__selected-text', { text: new RegExp( fromRange, 'i' ) } );
+ await expect( page ).toClick( '.mdc-menu-surface--open .mdc-list-item', { text: new RegExp( toRange, 'i' ) } );
+} | 1 | 1 | 25,040 | Would be nice if the field had a unique class name that could be used to target it, instead of `fromRange` But doesn't seem to be a common thing in the code base. | google-site-kit-wp | js |
|
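If the date range select exposed a dedicated class name, the helper could target the field directly instead of matching on the currently selected option text. The .googlesitekit-date-range-select class below is purely hypothetical and only illustrates the reviewer's suggestion:

export async function switchDateRange( toRange ) {
	// Hypothetical dedicated class on the date range selector; not an existing selector in the plugin.
	await expect( page ).toClick( '.googlesitekit-date-range-select .mdc-select__selected-text' );
	await expect( page ).toClick( '.mdc-menu-surface--open .mdc-list-item', { text: new RegExp( toRange, 'i' ) } );
}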
@@ -30,7 +30,10 @@ var tableGrid = tableUtils.toGrid(node);
// Look for all the bad headers
var out = headers.reduce(function (res, header) {
- if (header.id && reffedHeaders.indexOf(header.id) !== -1) {
+ if (
+ header.getAttribute('id') &&
+ reffedHeaders.includes(header.getAttribute('id'))
+ ) {
return (!res ? res : true);
}
| 1 | var tableUtils = axe.commons.table;
var cells = tableUtils.getAllCells(node);
var checkResult = this;
// Get a list of all headers reffed to in this rule
var reffedHeaders = [];
cells.forEach(function (cell) {
var headers = cell.getAttribute('headers');
if (headers) {
reffedHeaders = reffedHeaders.concat(headers.split(/\s+/));
}
var ariaLabel = cell.getAttribute('aria-labelledby');
if (ariaLabel) {
reffedHeaders = reffedHeaders.concat(ariaLabel.split(/\s+/));
}
});
// Get all the headers
var headers = cells.filter(function (cell) {
if (axe.commons.text.sanitize(cell.textContent) === '') {
return false;
}
return (cell.nodeName.toUpperCase() === 'TH' ||
['rowheader', 'columnheader'].indexOf(cell.getAttribute('role')) !== -1);
});
var tableGrid = tableUtils.toGrid(node);
// Look for all the bad headers
var out = headers.reduce(function (res, header) {
if (header.id && reffedHeaders.indexOf(header.id) !== -1) {
return (!res ? res : true);
}
var hasCell = false;
var pos = tableUtils.getCellPosition(header, tableGrid);
// Look for any data cells or row headers that this might refer to
if (tableUtils.isColumnHeader(header)) {
hasCell = tableUtils.traverse('down', pos, tableGrid)
.reduce((out, cell) => {
return (out || (
axe.commons.dom.hasContent(cell) &&
!tableUtils.isColumnHeader(cell))
);
}, false);
}
// Look for any data cells or column headers that this might refer to
if (!hasCell && tableUtils.isRowHeader(header)) {
hasCell = tableUtils.traverse('right', pos, tableGrid)
.reduce((out, cell) => {
return out || (
axe.commons.dom.hasContent(cell) &&
!tableUtils.isRowHeader(cell)
);
}, false);
}
// report the node as having failed
if (!hasCell) {
checkResult.relatedNodes(header);
}
return res && hasCell;
}, true);
return out ? true : undefined;
| 1 | 11,198 | Does this code fit on one line under 80 characters? It would be more consistent with our existing style. | dequelabs-axe-core | js |
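One way to get the check back under 80 columns (a sketch, not necessarily the style the project settled on) is to read the attribute once and test the cached value:

var headerId = header.getAttribute('id');
if (headerId && reffedHeaders.includes(headerId)) {
	return (!res ? res : true);
}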
@@ -61,8 +61,13 @@ func ParseArtifactListFromMultipleYamls(multipleYamls MultiYamlFetcher) (artifac
// installed
func RegisteredArtifactsFor070() (list ArtifactList) {
+ //Note: CRDs have to be installed first. Keep this at top of the list.
+ list.Items = append(list.Items, OpenEBSCRDArtifactsFor070().Items...)
+
list.Items = append(list.Items, JivaVolumeArtifactsFor070().Items...)
//Contains the read/list/delete CAST for supporting older volumes
+ //The CAST defined here are provided as fallback options to corresponding
+ //0.7.0 CAST
list.Items = append(list.Items, JivaVolumeArtifactsFor060().Items...)
list.Items = append(list.Items, JivaPoolArtifactsFor070().Items...)
| 1 | /*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"strings"
)
// MultiYamlFetcher abstracts aggregating and returning multiple yaml documents
// as a string
type MultiYamlFetcher func() string
// ArtifactListPredicate abstracts evaluating a condition against the provided
// artifact list
type ArtifactListPredicate func() bool
// ParseArtifactListFromMultipleYamlConditional adds the list of YAML documents that should be
// installed by the installer, guarded by a predicate.
// It evaluates the given ArtifactListPredicate: if the predicate returns true, the YAML documents
// are added to the installation list; otherwise they are skipped.
func ParseArtifactListFromMultipleYamlConditional(multipleYamls MultiYamlFetcher, p ArtifactListPredicate) (artifacts []*Artifact) {
if p() {
return ParseArtifactListFromMultipleYamls(multipleYamls)
}
return
}
// ParseArtifactListFromMultipleYamls generates a list of Artifacts from the
// yaml documents.
//
// NOTE:
// Each YAML document is assumed to be separated via "---"
func ParseArtifactListFromMultipleYamls(multipleYamls MultiYamlFetcher) (artifacts []*Artifact) {
docs := strings.Split(multipleYamls(), "---")
for _, doc := range docs {
doc = strings.TrimSpace(doc)
if len(doc) == 0 {
continue
}
artifacts = append(artifacts, &Artifact{Doc: doc})
}
return
}
// RegisteredArtifactsFor070 returns the list of 0.7.0 Artifacts that will get
// installed
func RegisteredArtifactsFor070() (list ArtifactList) {
list.Items = append(list.Items, JivaVolumeArtifactsFor070().Items...)
//Contains the read/list/delete CAST for supporting older volumes
list.Items = append(list.Items, JivaVolumeArtifactsFor060().Items...)
list.Items = append(list.Items, JivaPoolArtifactsFor070().Items...)
list.Items = append(list.Items, CstorPoolArtifactsFor070().Items...)
list.Items = append(list.Items, CstorVolumeArtifactsFor070().Items...)
list.Items = append(list.Items, CstorSparsePoolSpc070().Items...)
return
}
| 1 | 9,536 | Not a requirement here. But i see a change in naming convention. We can rename the function to `CstorSparsePoolArtifactsFor070`. | openebs-maya | go |
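With the rename the reviewer suggests, the sparse-pool helper follows the same *ArtifactsFor070 convention as the other entries, and the registration line becomes:

	// CstorSparsePoolSpc070 renamed to CstorSparsePoolArtifactsFor070; the body stays the same.
	list.Items = append(list.Items, CstorSparsePoolArtifactsFor070().Items...)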
@@ -0,0 +1,9 @@
+// +build pico
+
+package main
+
+import "machine"
+
+var (
+ interruptPin = machine.GP10
+) | 1 | 1 | 12,926 | Why do you configure the UART like this? It's already configured by default. | tinygo-org-tinygo | go |
|
@@ -39,7 +39,8 @@
#include "opae/access.h"
#include "opae/utils.h"
#include "opae/manage.h"
-#include "opae/manage.h"
+#include "opae/enum.h"
+#include "opae/properties.h"
#include "bitstream_int.h"
#include "common_int.h"
#include "intel-fpga.h" | 1 | // Copyright(c) 2017, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif // HAVE_CONFIG_H
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/types.h>
#include "safe_string/safe_string.h"
#include "opae/access.h"
#include "opae/utils.h"
#include "opae/manage.h"
#include "opae/manage.h"
#include "bitstream_int.h"
#include "common_int.h"
#include "intel-fpga.h"
#include "usrclk/user_clk_pgm_uclock.h"
// sysfs attributes
#define PORT_SYSFS_ERRORS "errors/errors"
#define PORT_SYSFS_ERR_CLEAR "errors/clear"
#define PWRMGMT_THRESHOLD1 "power_mgmt/threshold1"
#define PWRMGMT_THRESHOLD2 "power_mgmt/threshold2"
// Max power values
#define FPGA_BBS_IDLE_POWER 30 // watts
#define FPGA_MAX_POWER 90 // watts
#define FPGA_GBS_MAX_POWER 60 // watts
#define FPGA_THRESHOLD2(x) ((x*10)/100) // threshold1 + 10%
#pragma pack(push, 1)
// GBS Header
struct bitstream_header {
uint32_t magic;
uint64_t ifid_l;
uint64_t ifid_h;
};
#pragma pack(pop)
// Reconfigure Error CSR
struct reconf_error {
union {
uint64_t csr;
struct {
uint64_t reconf_operation_error:1; /* PR operation error detected */
uint64_t reconf_CRC_error:1; /* PR CRC error detected*/
uint64_t reconf_incompatible_bitstream_error:1; /* PR incompatible bitstream error detected */
uint64_t reconf_IP_protocol_error:1; /* PR IP protocol error detected */
uint64_t reconf_FIFO_overflow_error:1; /* PR FIFO overflow error detected */
uint64_t reconf_timeout_error:1; /* PR timeout error detected */
uint64_t reconf_secure_load_error:1; /* PR secure load error detected */
uint64_t rsvd:57; /* Reserved */
};
};
};
static fpga_result validate_bitstream(fpga_handle handle,
const uint8_t *bitstream, size_t bitstream_len,
int *header_len)
{
struct bitstream_header bts_hdr = {0};
if (bitstream == NULL) {
FPGA_MSG("Bitstream is NULL");
return FPGA_INVALID_PARAM;
}
if (bitstream_len <= 0 ||
bitstream_len <= sizeof(struct bitstream_header)) {
FPGA_MSG("Invalid bitstream size");
return FPGA_INVALID_PARAM;
}
if (check_bitstream_guid(bitstream) == FPGA_OK) {
*header_len = get_bitstream_header_len(bitstream);
if (*header_len < 0) {
FPGA_MSG("Invalid bitstream header length");
return FPGA_EXCEPTION;
}
if (validate_bitstream_metadata(handle, bitstream) != FPGA_OK) {
FPGA_MSG("Invalid JSON data");
return FPGA_EXCEPTION;
}
return FPGA_OK;
} else {
errno_t e;
// TODO: This is needed for legacy bitstreams since they
// do not have new metadata with GUID. Remove once
// all bitstreams conform to new metadata format.
*header_len = sizeof(struct bitstream_header);
e = memcpy_s(&bts_hdr, sizeof(struct bitstream_header),
bitstream, sizeof(struct bitstream_header));
if (EOK != e) {
FPGA_ERR("memcpy_s failed");
return FPGA_EXCEPTION;
}
return check_interface_id(handle, bts_hdr.magic, bts_hdr.ifid_l,
bts_hdr.ifid_h);
}
}
// clears port errors
static fpga_result clear_port_errors(fpga_handle handle)
{
char syfs_path[SYSFS_PATH_MAX] = {0};
char syfs_errpath[SYSFS_PATH_MAX] = {0};
fpga_result result = FPGA_OK;
uint64_t error = 0 ;
result = get_port_sysfs(handle, syfs_path);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get port syfs path");
return result;
}
snprintf_s_ss(syfs_errpath, sizeof(syfs_errpath), "%s/%s", syfs_path, PORT_SYSFS_ERRORS);
// Read port error.
result = sysfs_read_u64(syfs_errpath, &error);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get port errors");
return result;
}
snprintf_s_ss(syfs_errpath, sizeof(syfs_errpath), "%s/%s", syfs_path, PORT_SYSFS_ERR_CLEAR);
// Clear port error.
result = sysfs_write_u64(syfs_errpath, error);
if (result != FPGA_OK) {
FPGA_ERR("Failed to clear port errors");
return result;
}
return result;
}
// set afu user clock
fpga_result set_afu_userclock(fpga_handle handle,
uint64_t usrlclock_high,
uint64_t usrlclock_low)
{
char syfs_path[SYSFS_PATH_MAX] = {0};
fpga_result result = FPGA_OK;
uint64_t userclk_high = 0;
uint64_t userclk_low = 0;
// Read port sysfs path
result = get_port_sysfs(handle, syfs_path);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get port syfs path");
return result;
}
// set user clock
result = set_userclock(syfs_path, usrlclock_high, usrlclock_low);
if (result != FPGA_OK) {
FPGA_ERR("Failed to set user clock");
return result;
}
// read user clock
result = get_userclock(syfs_path, &userclk_high, &userclk_low);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get user clock");
return result;
}
return result;
}
// Sets FPGA threshold power values
fpga_result set_fpga_pwr_threshold(fpga_handle handle,
uint64_t gbs_power)
{
char sysfs_path[SYSFS_PATH_MAX] = {0};
fpga_result result = FPGA_OK;
uint64_t fpga_power = 0;
struct _fpga_token *_token = NULL;
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
if (_handle == NULL) {
FPGA_ERR("Invalid handle");
return FPGA_INVALID_PARAM;
}
_token = (struct _fpga_token *)_handle->token;
if (_token == NULL) {
FPGA_ERR("Invalid token within handle");
return FPGA_INVALID_PARAM;
}
// Set max power if not specified by gbs
if (gbs_power == 0) {
gbs_power = FPGA_GBS_MAX_POWER;
}
// verify gbs power limits
if (gbs_power > FPGA_GBS_MAX_POWER) {
FPGA_ERR("Invalid GBS power value");
result = FPGA_NOT_SUPPORTED;
return result;
}
// FPGA threshold1 = BBS Idle power + GBS power
fpga_power = gbs_power + FPGA_BBS_IDLE_POWER;
if (fpga_power > FPGA_MAX_POWER) {
FPGA_ERR("Total power requirements exceed FPGA maximum");
result = FPGA_NOT_SUPPORTED;
return result;
}
// set fpga threshold 1
snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s", _token->sysfspath, PWRMGMT_THRESHOLD1);
FPGA_DBG(" FPGA Threshold1 :%ld watts\n", fpga_power);
result = sysfs_write_u64(sysfs_path, fpga_power);
if (result != FPGA_OK) {
FPGA_ERR("Failed to write power threshold 1");
return result;
}
return result;
}
fpga_result __FPGA_API__ fpgaReconfigureSlot(fpga_handle fpga,
uint32_t slot,
const uint8_t *bitstream,
size_t bitstream_len,
int flags)
{
struct _fpga_handle *_handle = (struct _fpga_handle *)fpga;
fpga_result result = FPGA_OK;
struct fpga_fme_port_pr port_pr = {0};
struct reconf_error error = { {0} };
struct gbs_metadata metadata;
int bitstream_header_len = 0;
uint64_t deviceid = 0;
int err = 0;
UNUSED_PARAM(flags);
result = handle_check_and_lock(_handle);
if (result)
return result;
if (_handle->fddev < 0) {
FPGA_ERR("Invalid handle file descriptor");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
if (validate_bitstream(fpga, bitstream, bitstream_len,
&bitstream_header_len) != FPGA_OK) {
FPGA_MSG("Invalid bitstream");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
// Clear port errors
result = clear_port_errors(fpga);
if (result != FPGA_OK) {
FPGA_ERR("Failed to clear port errors.");
}
if (get_bitstream_json_len(bitstream) > 0) {
// Read GBS json metadata
memset(&metadata, 0, sizeof(metadata));
result = read_gbs_metadata(bitstream, &metadata);
if (result != FPGA_OK) {
FPGA_ERR("Failed to read metadata");
goto out_unlock;
}
FPGA_DBG(" Version :%f\n", metadata.version);
FPGA_DBG(" Magic Num :%ld\n",
metadata.afu_image.magic_num);
FPGA_DBG(" Interface Id :%s\n",
metadata.afu_image.interface_uuid);
FPGA_DBG(" Clock_frequency_high :%d\n",
metadata.afu_image.clock_frequency_high);
FPGA_DBG(" Clock_frequency_low :%d\n",
metadata.afu_image.clock_frequency_low);
FPGA_DBG(" Power :%d\n",
metadata.afu_image.power);
FPGA_DBG(" Name :%s\n",
metadata.afu_image.afu_clusters.name);
FPGA_DBG(" Total_contexts :%d\n",
metadata.afu_image.afu_clusters.total_contexts);
FPGA_DBG(" AFU_uuid :%s\n",
metadata.afu_image.afu_clusters.afu_uuid);
// Set AFU user clock
if (metadata.afu_image.clock_frequency_high > 0 || metadata.afu_image.clock_frequency_low > 0) {
result = set_afu_userclock(fpga, metadata.afu_image.clock_frequency_high, metadata.afu_image.clock_frequency_low);
if (result != FPGA_OK) {
FPGA_ERR("Failed to set user clock");
goto out_unlock;
}
}
// get fpga device id.
result = get_fpga_deviceid(fpga, &deviceid);
if (result != FPGA_OK) {
FPGA_ERR("Failed to read device id.");
goto out_unlock;
}
// Set power threshold for integrated fpga.
if (deviceid == FPGA_INTEGRATED_DEVICEID) {
result = set_fpga_pwr_threshold(fpga, metadata.afu_image.power);
if (result != FPGA_OK) {
FPGA_ERR("Failed to set threshold.");
goto out_unlock;
}
} // device id
}
port_pr.flags = 0;
port_pr.argsz = sizeof(struct fpga_fme_port_pr);
port_pr.buffer_address = (__u64)bitstream + bitstream_header_len;
port_pr.buffer_size = (__u32) bitstream_len - bitstream_header_len;
port_pr.port_id = slot;
result = ioctl(_handle->fddev, FPGA_FME_PORT_PR, &port_pr);
if (result != 0) {
FPGA_ERR("Failed to reconfigure bitstream: %s",
strerror(errno));
if ((errno == EINVAL) || (errno == EFAULT)) {
result = FPGA_INVALID_PARAM;
} else {
result = FPGA_EXCEPTION;
}
}
// PR error
error.csr = port_pr.status;
if (error.reconf_operation_error == 0x1) {
FPGA_ERR("PR operation error detected");
result = FPGA_RECONF_ERROR;
}
if (error.reconf_CRC_error == 0x1) {
FPGA_ERR("PR CRC error detected");
result = FPGA_RECONF_ERROR;
}
if (error.reconf_incompatible_bitstream_error == 0x1) {
FPGA_ERR("PR incompatible bitstream error detected");
result = FPGA_RECONF_ERROR;
}
if (error.reconf_IP_protocol_error == 0x1) {
FPGA_ERR("PR IP protocol error detected");
result = FPGA_RECONF_ERROR;
}
if (error.reconf_FIFO_overflow_error == 0x1) {
FPGA_ERR("PR FIFO overflow error detected");
result = FPGA_RECONF_ERROR;
}
if (error.reconf_timeout_error == 0x1) {
FPGA_ERR("PR timeout error detected");
result = FPGA_RECONF_ERROR;
}
if (error.reconf_secure_load_error == 0x1) {
FPGA_ERR("PR secure load error detected");
result = FPGA_RECONF_ERROR;
}
out_unlock:
err = pthread_mutex_unlock(&_handle->lock);
if (err)
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
return result;
}
| 1 | 15,535 | Wow. Good catch. "This `#include` was brought to you by the department of redundancy department." ;) | OPAE-opae-sdk | c |
@@ -20,10 +20,11 @@ import (
"context"
"time"
+ "github.com/go-logr/logr"
"golang.org/x/crypto/acme"
- "k8s.io/klog"
"github.com/jetstack/cert-manager/pkg/acme/client"
+ logf "github.com/jetstack/cert-manager/pkg/logs"
)
const ( | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package middleware
import (
"context"
"time"
"golang.org/x/crypto/acme"
"k8s.io/klog"
"github.com/jetstack/cert-manager/pkg/acme/client"
)
const (
timeout = time.Second * 10
)
func NewLogger(baseCl client.Interface) client.Interface {
return &Logger{baseCl: baseCl}
}
// Logger is a glog based logging middleware for an ACME client
type Logger struct {
baseCl client.Interface
}
var _ client.Interface = &Logger{}
func (l *Logger) AuthorizeOrder(ctx context.Context, id []acme.AuthzID, opt ...acme.OrderOption) (*acme.Order, error) {
klog.Infof("Calling CreateOrder")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.AuthorizeOrder(ctx, id, opt...)
}
func (l *Logger) GetOrder(ctx context.Context, url string) (*acme.Order, error) {
klog.Infof("Calling GetOrder")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.GetOrder(ctx, url)
}
func (l *Logger) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) {
klog.Infof("Calling GetCertificate")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.FetchCert(ctx, url, bundle)
}
func (l *Logger) WaitOrder(ctx context.Context, url string) (*acme.Order, error) {
klog.Infof("Calling WaitOrder")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.WaitOrder(ctx, url)
}
func (l *Logger) CreateOrderCert(ctx context.Context, finalizeURL string, csr []byte, bundle bool) (der [][]byte, certURL string, err error) {
klog.Infof("Calling FinalizeOrder")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.CreateOrderCert(ctx, finalizeURL, csr, bundle)
}
func (l *Logger) Accept(ctx context.Context, chal *acme.Challenge) (*acme.Challenge, error) {
klog.Infof("Calling AcceptChallenge")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.Accept(ctx, chal)
}
func (l *Logger) GetChallenge(ctx context.Context, url string) (*acme.Challenge, error) {
klog.Infof("Calling GetChallenge")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.GetChallenge(ctx, url)
}
func (l *Logger) GetAuthorization(ctx context.Context, url string) (*acme.Authorization, error) {
klog.Infof("Calling GetAuthorization")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.GetAuthorization(ctx, url)
}
func (l *Logger) WaitAuthorization(ctx context.Context, url string) (*acme.Authorization, error) {
klog.Infof("Calling WaitAuthorization")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.WaitAuthorization(ctx, url)
}
func (l *Logger) Register(ctx context.Context, a *acme.Account, prompt func(tosURL string) bool) (*acme.Account, error) {
klog.Infof("Calling CreateAccount")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.Register(ctx, a, prompt)
}
func (l *Logger) GetReg(ctx context.Context, url string) (*acme.Account, error) {
klog.Infof("Calling GetAccount")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.GetReg(ctx, url)
}
func (l *Logger) HTTP01ChallengeResponse(token string) (string, error) {
klog.Infof("Calling HTTP01ChallengeResponse")
return l.baseCl.HTTP01ChallengeResponse(token)
}
func (l *Logger) DNS01ChallengeRecord(token string) (string, error) {
klog.Infof("Calling DNS01ChallengeRecord")
return l.baseCl.DNS01ChallengeRecord(token)
}
func (l *Logger) Discover(ctx context.Context) (acme.Directory, error) {
klog.Infof("Calling Discover")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.Discover(ctx)
}
func (l *Logger) UpdateReg(ctx context.Context, a *acme.Account) (*acme.Account, error) {
klog.Infof("Calling UpdateAccount")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.UpdateReg(ctx, a)
}
| 1 | 22,862 | Well it's actually calling `AuthorizeOrder` and this seems like a DebugLevel message to me, maybe it should be dropped entirely. | jetstack-cert-manager | go |
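Illustrative sketch only (not the cert-manager implementation): one way to act on the review above is to log through logr at debug verbosity and to name the delegated method correctly. All types below (`orderClient`, `fakeOrder`, `noopClient`) are stand-ins invented for this example, and treating `V(1)` as debug level is an assumption about the project's logging conventions.

```go
package main

import (
	"context"
	"time"

	"github.com/go-logr/logr"
)

// fakeOrder stands in for *acme.Order; the real middleware wraps
// golang.org/x/crypto/acme types.
type fakeOrder struct{ URI string }

// orderClient is a stand-in for the subset of the ACME client interface used here.
type orderClient interface {
	AuthorizeOrder(ctx context.Context, identifiers []string) (*fakeOrder, error)
}

// Logger wraps an orderClient and emits verbosity-gated log lines.
type Logger struct {
	baseCl  orderClient
	log     logr.Logger
	timeout time.Duration
}

func (l *Logger) AuthorizeOrder(ctx context.Context, ids []string) (*fakeOrder, error) {
	// V(1) is commonly treated as debug-level output, so this line disappears
	// unless verbose logging is enabled; it also names the real method.
	l.log.V(1).Info("calling ACME endpoint", "method", "AuthorizeOrder")
	ctx, cancel := context.WithTimeout(ctx, l.timeout)
	defer cancel()
	return l.baseCl.AuthorizeOrder(ctx, ids)
}

// noopClient is a trivial backend so the sketch runs on its own.
type noopClient struct{}

func (noopClient) AuthorizeOrder(ctx context.Context, ids []string) (*fakeOrder, error) {
	return &fakeOrder{URI: "https://example.invalid/order/1"}, nil
}

func main() {
	l := &Logger{baseCl: noopClient{}, log: logr.Discard(), timeout: 10 * time.Second}
	_, _ = l.AuthorizeOrder(context.Background(), []string{"example.com"})
}
```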
@@ -917,6 +917,7 @@ void *cuda_resize_scratch_space(std::int64_t bytes, bool force_shrink) {
void cuda_prefetch_pointer(const Cuda &space, const void *ptr, size_t bytes,
bool to_device) {
+ if ((ptr == nullptr) || (bytes == 0)) return;
cudaPointerAttributes attr;
cudaPointerGetAttributes(&attr, ptr);
// I measured this and it turns out prefetching towards the host slows | 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Macros.hpp>
#ifdef KOKKOS_ENABLE_CUDA
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <algorithm>
#include <atomic>
#include <Kokkos_Core.hpp>
#include <Kokkos_Cuda.hpp>
#include <Kokkos_CudaSpace.hpp>
//#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_MemorySpace.hpp>
#if defined(KOKKOS_ENABLE_PROFILING)
#include <impl/Kokkos_Profiling_Interface.hpp>
#endif
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------*/
namespace Kokkos {
namespace Impl {
namespace {
static std::atomic<int> num_uvm_allocations(0);
cudaStream_t get_deep_copy_stream() {
static cudaStream_t s = 0;
if (s == 0) {
cudaStreamCreate(&s);
}
return s;
}
} // namespace
DeepCopy<CudaSpace, CudaSpace, Cuda>::DeepCopy(void *dst, const void *src,
size_t n) {
CUDA_SAFE_CALL(cudaMemcpy(dst, src, n, cudaMemcpyDefault));
}
DeepCopy<HostSpace, CudaSpace, Cuda>::DeepCopy(void *dst, const void *src,
size_t n) {
CUDA_SAFE_CALL(cudaMemcpy(dst, src, n, cudaMemcpyDefault));
}
DeepCopy<CudaSpace, HostSpace, Cuda>::DeepCopy(void *dst, const void *src,
size_t n) {
CUDA_SAFE_CALL(cudaMemcpy(dst, src, n, cudaMemcpyDefault));
}
DeepCopy<CudaSpace, CudaSpace, Cuda>::DeepCopy(const Cuda &instance, void *dst,
const void *src, size_t n) {
CUDA_SAFE_CALL(
cudaMemcpyAsync(dst, src, n, cudaMemcpyDefault, instance.cuda_stream()));
}
DeepCopy<HostSpace, CudaSpace, Cuda>::DeepCopy(const Cuda &instance, void *dst,
const void *src, size_t n) {
CUDA_SAFE_CALL(
cudaMemcpyAsync(dst, src, n, cudaMemcpyDefault, instance.cuda_stream()));
}
DeepCopy<CudaSpace, HostSpace, Cuda>::DeepCopy(const Cuda &instance, void *dst,
const void *src, size_t n) {
CUDA_SAFE_CALL(
cudaMemcpyAsync(dst, src, n, cudaMemcpyDefault, instance.cuda_stream()));
}
void DeepCopyAsyncCuda(void *dst, const void *src, size_t n) {
cudaStream_t s = get_deep_copy_stream();
CUDA_SAFE_CALL(cudaMemcpyAsync(dst, src, n, cudaMemcpyDefault, s));
cudaStreamSynchronize(s);
}
} // namespace Impl
} // namespace Kokkos
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------*/
namespace Kokkos {
void CudaSpace::access_error() {
const std::string msg(
"Kokkos::CudaSpace::access_error attempt to execute Cuda function from "
"non-Cuda space");
Kokkos::Impl::throw_runtime_exception(msg);
}
void CudaSpace::access_error(const void *const) {
const std::string msg(
"Kokkos::CudaSpace::access_error attempt to execute Cuda function from "
"non-Cuda space");
Kokkos::Impl::throw_runtime_exception(msg);
}
/*--------------------------------------------------------------------------*/
bool CudaUVMSpace::available() {
#if defined(CUDA_VERSION) && !defined(__APPLE__)
enum { UVM_available = true };
#else
enum { UVM_available = false };
#endif
return UVM_available;
}
/*--------------------------------------------------------------------------*/
int CudaUVMSpace::number_of_allocations() {
return Kokkos::Impl::num_uvm_allocations.load();
}
#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
// The purpose of the following variable is to allow a state-based choice
// for pinning UVM allocations to the CPU. For now this is considered
// an experimental debugging capability - with the potential to work around
// some CUDA issues.
bool CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v = false;
bool CudaUVMSpace::cuda_pin_uvm_to_host() {
return CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v;
}
void CudaUVMSpace::cuda_set_pin_uvm_to_host(bool val) {
CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v = val;
}
#endif
} // namespace Kokkos
#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
bool kokkos_impl_cuda_pin_uvm_to_host() {
return Kokkos::CudaUVMSpace::cuda_pin_uvm_to_host();
}
void kokkos_impl_cuda_set_pin_uvm_to_host(bool val) {
Kokkos::CudaUVMSpace::cuda_set_pin_uvm_to_host(val);
}
#endif
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------*/
namespace Kokkos {
CudaSpace::CudaSpace() : m_device(Kokkos::Cuda().cuda_device()) {}
CudaUVMSpace::CudaUVMSpace() : m_device(Kokkos::Cuda().cuda_device()) {}
CudaHostPinnedSpace::CudaHostPinnedSpace() {}
//==============================================================================
// <editor-fold desc="allocate()"> {{{1
void *CudaSpace::allocate(const size_t arg_alloc_size) const {
void *ptr = nullptr;
auto error_code = cudaMalloc(&ptr, arg_alloc_size);
if (error_code != cudaSuccess) { // TODO tag as unlikely branch
cudaGetLastError(); // This is the only way to clear the last error, which
// we should do here since we're turning it into an
// exception here
throw Experimental::CudaRawMemoryAllocationFailure(
arg_alloc_size, error_code,
Experimental::RawMemoryAllocationFailure::AllocationMechanism::
CudaMalloc);
}
return ptr;
}
void *CudaUVMSpace::allocate(const size_t arg_alloc_size) const {
void *ptr = nullptr;
Cuda::impl_static_fence();
if (arg_alloc_size > 0) {
Kokkos::Impl::num_uvm_allocations++;
auto error_code =
cudaMallocManaged(&ptr, arg_alloc_size, cudaMemAttachGlobal);
#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
if (Kokkos::CudaUVMSpace::cuda_pin_uvm_to_host())
cudaMemAdvise(ptr, arg_alloc_size, cudaMemAdviseSetPreferredLocation,
cudaCpuDeviceId);
#endif
if (error_code != cudaSuccess) { // TODO tag as unlikely branch
cudaGetLastError(); // This is the only way to clear the last error,
// which we should do here since we're turning it
// into an exception here
throw Experimental::CudaRawMemoryAllocationFailure(
arg_alloc_size, error_code,
Experimental::RawMemoryAllocationFailure::AllocationMechanism::
CudaMallocManaged);
}
}
Cuda::impl_static_fence();
return ptr;
}
void *CudaHostPinnedSpace::allocate(const size_t arg_alloc_size) const {
void *ptr = nullptr;
auto error_code = cudaHostAlloc(&ptr, arg_alloc_size, cudaHostAllocDefault);
if (error_code != cudaSuccess) { // TODO tag as unlikely branch
cudaGetLastError(); // This is the only way to clear the last error, which
// we should do here since we're turning it into an
// exception here
throw Experimental::CudaRawMemoryAllocationFailure(
arg_alloc_size, error_code,
Experimental::RawMemoryAllocationFailure::AllocationMechanism::
CudaHostAlloc);
}
return ptr;
}
// </editor-fold> end allocate() }}}1
//==============================================================================
void CudaSpace::deallocate(void *const arg_alloc_ptr,
const size_t /* arg_alloc_size */) const {
try {
CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
} catch (...) {
}
}
void CudaUVMSpace::deallocate(void *const arg_alloc_ptr,
const size_t /* arg_alloc_size */) const {
Cuda::impl_static_fence();
try {
if (arg_alloc_ptr != nullptr) {
Kokkos::Impl::num_uvm_allocations--;
CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
}
} catch (...) {
}
Cuda::impl_static_fence();
}
void CudaHostPinnedSpace::deallocate(void *const arg_alloc_ptr,
const size_t /* arg_alloc_size */) const {
try {
CUDA_SAFE_CALL(cudaFreeHost(arg_alloc_ptr));
} catch (...) {
}
}
} // namespace Kokkos
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
namespace Kokkos {
namespace Impl {
#ifdef KOKKOS_DEBUG
SharedAllocationRecord<void, void>
SharedAllocationRecord<Kokkos::CudaSpace, void>::s_root_record;
SharedAllocationRecord<void, void>
SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::s_root_record;
SharedAllocationRecord<void, void>
SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::s_root_record;
#endif
::cudaTextureObject_t
SharedAllocationRecord<Kokkos::CudaSpace, void>::attach_texture_object(
const unsigned sizeof_alias, void *const alloc_ptr,
size_t const alloc_size) {
enum { TEXTURE_BOUND_1D = 1u << 27 };
if ((alloc_ptr == 0) || (sizeof_alias * TEXTURE_BOUND_1D <= alloc_size)) {
std::ostringstream msg;
msg << "Kokkos::CudaSpace ERROR: Cannot attach texture object to"
<< " alloc_ptr(" << alloc_ptr << ")"
<< " alloc_size(" << alloc_size << ")"
<< " max_size(" << (sizeof_alias * TEXTURE_BOUND_1D) << ")";
std::cerr << msg.str() << std::endl;
std::cerr.flush();
Kokkos::Impl::throw_runtime_exception(msg.str());
}
::cudaTextureObject_t tex_obj;
struct cudaResourceDesc resDesc;
struct cudaTextureDesc texDesc;
memset(&resDesc, 0, sizeof(resDesc));
memset(&texDesc, 0, sizeof(texDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.desc =
(sizeof_alias == 4
? cudaCreateChannelDesc<int>()
: (sizeof_alias == 8
? cudaCreateChannelDesc< ::int2>()
:
/* sizeof_alias == 16 */ cudaCreateChannelDesc< ::int4>()));
resDesc.res.linear.sizeInBytes = alloc_size;
resDesc.res.linear.devPtr = alloc_ptr;
CUDA_SAFE_CALL(
cudaCreateTextureObject(&tex_obj, &resDesc, &texDesc, nullptr));
return tex_obj;
}
//==============================================================================
// <editor-fold desc="SharedAllocationRecord::get_label()"> {{{1
std::string SharedAllocationRecord<Kokkos::CudaSpace, void>::get_label() const {
SharedAllocationHeader header;
Kokkos::Impl::DeepCopy<Kokkos::HostSpace, Kokkos::CudaSpace>(
&header, RecordBase::head(), sizeof(SharedAllocationHeader));
return std::string(header.m_label);
}
std::string SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::get_label()
const {
return std::string(RecordBase::head()->m_label);
}
std::string
SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::get_label() const {
return std::string(RecordBase::head()->m_label);
}
// </editor-fold> end SharedAllocationRecord::get_label() }}}1
//==============================================================================
//==============================================================================
// <editor-fold desc="SharedAllocationRecord allocate()"> {{{1
SharedAllocationRecord<Kokkos::CudaSpace, void>
*SharedAllocationRecord<Kokkos::CudaSpace, void>::allocate(
const Kokkos::CudaSpace &arg_space, const std::string &arg_label,
const size_t arg_alloc_size) {
return new SharedAllocationRecord(arg_space, arg_label, arg_alloc_size);
}
SharedAllocationRecord<Kokkos::CudaUVMSpace, void>
*SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::allocate(
const Kokkos::CudaUVMSpace &arg_space, const std::string &arg_label,
const size_t arg_alloc_size) {
return new SharedAllocationRecord(arg_space, arg_label, arg_alloc_size);
}
SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>
*SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::allocate(
const Kokkos::CudaHostPinnedSpace &arg_space,
const std::string &arg_label, const size_t arg_alloc_size) {
return new SharedAllocationRecord(arg_space, arg_label, arg_alloc_size);
}
// </editor-fold> end SharedAllocationRecord allocate() }}}1
//==============================================================================
//==============================================================================
// <editor-fold desc="SharedAllocationRecord deallocate"> {{{1
void SharedAllocationRecord<Kokkos::CudaSpace, void>::deallocate(
SharedAllocationRecord<void, void> *arg_rec) {
delete static_cast<SharedAllocationRecord *>(arg_rec);
}
void SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::deallocate(
SharedAllocationRecord<void, void> *arg_rec) {
delete static_cast<SharedAllocationRecord *>(arg_rec);
}
void SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::deallocate(
SharedAllocationRecord<void, void> *arg_rec) {
delete static_cast<SharedAllocationRecord *>(arg_rec);
}
// </editor-fold> end SharedAllocationRecord deallocate }}}1
//==============================================================================
//==============================================================================
// <editor-fold desc="SharedAllocationRecord destructors"> {{{1
SharedAllocationRecord<Kokkos::CudaSpace, void>::~SharedAllocationRecord() {
#if defined(KOKKOS_ENABLE_PROFILING)
if (Kokkos::Profiling::profileLibraryLoaded()) {
SharedAllocationHeader header;
Kokkos::Impl::DeepCopy<CudaSpace, HostSpace>(
&header, RecordBase::m_alloc_ptr, sizeof(SharedAllocationHeader));
Kokkos::Profiling::deallocateData(
Kokkos::Profiling::SpaceHandle(Kokkos::CudaSpace::name()),
header.m_label, data(), size());
}
#endif
m_space.deallocate(SharedAllocationRecord<void, void>::m_alloc_ptr,
SharedAllocationRecord<void, void>::m_alloc_size);
}
SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::~SharedAllocationRecord() {
#if defined(KOKKOS_ENABLE_PROFILING)
if (Kokkos::Profiling::profileLibraryLoaded()) {
Cuda::impl_static_fence(); // Make sure I can access the label ...
Kokkos::Profiling::deallocateData(
Kokkos::Profiling::SpaceHandle(Kokkos::CudaUVMSpace::name()),
RecordBase::m_alloc_ptr->m_label, data(), size());
}
#endif
m_space.deallocate(SharedAllocationRecord<void, void>::m_alloc_ptr,
SharedAllocationRecord<void, void>::m_alloc_size);
}
SharedAllocationRecord<Kokkos::CudaHostPinnedSpace,
void>::~SharedAllocationRecord() {
#if defined(KOKKOS_ENABLE_PROFILING)
if (Kokkos::Profiling::profileLibraryLoaded()) {
Kokkos::Profiling::deallocateData(
Kokkos::Profiling::SpaceHandle(Kokkos::CudaHostPinnedSpace::name()),
RecordBase::m_alloc_ptr->m_label, data(), size());
}
#endif
m_space.deallocate(SharedAllocationRecord<void, void>::m_alloc_ptr,
SharedAllocationRecord<void, void>::m_alloc_size);
}
// </editor-fold> end SharedAllocationRecord destructors }}}1
//==============================================================================
//==============================================================================
// <editor-fold desc="SharedAllocationRecord constructors"> {{{1
SharedAllocationRecord<Kokkos::CudaSpace, void>::SharedAllocationRecord(
const Kokkos::CudaSpace &arg_space, const std::string &arg_label,
const size_t arg_alloc_size,
const SharedAllocationRecord<void, void>::function_type arg_dealloc)
// Pass through allocated [ SharedAllocationHeader , user_memory ]
// Pass through deallocation function
: SharedAllocationRecord<void, void>(
#ifdef KOKKOS_DEBUG
&SharedAllocationRecord<Kokkos::CudaSpace, void>::s_root_record,
#endif
Impl::checked_allocation_with_header(arg_space, arg_label,
arg_alloc_size),
sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc),
m_tex_obj(0),
m_space(arg_space) {
#if defined(KOKKOS_ENABLE_PROFILING)
if (Kokkos::Profiling::profileLibraryLoaded()) {
Kokkos::Profiling::allocateData(
Kokkos::Profiling::SpaceHandle(arg_space.name()), arg_label, data(),
arg_alloc_size);
}
#endif
SharedAllocationHeader header;
// Fill in the Header information
header.m_record = static_cast<SharedAllocationRecord<void, void> *>(this);
strncpy(header.m_label, arg_label.c_str(),
SharedAllocationHeader::maximum_label_length);
// Set last element zero, in case c_str is too long
header.m_label[SharedAllocationHeader::maximum_label_length - 1] = (char)0;
// Copy to device memory
Kokkos::Impl::DeepCopy<CudaSpace, HostSpace>(RecordBase::m_alloc_ptr, &header,
sizeof(SharedAllocationHeader));
}
SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::SharedAllocationRecord(
const Kokkos::CudaUVMSpace &arg_space, const std::string &arg_label,
const size_t arg_alloc_size,
const SharedAllocationRecord<void, void>::function_type arg_dealloc)
// Pass through allocated [ SharedAllocationHeader , user_memory ]
// Pass through deallocation function
: SharedAllocationRecord<void, void>(
#ifdef KOKKOS_DEBUG
&SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::s_root_record,
#endif
Impl::checked_allocation_with_header(arg_space, arg_label,
arg_alloc_size),
sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc),
m_tex_obj(0),
m_space(arg_space) {
#if defined(KOKKOS_ENABLE_PROFILING)
if (Kokkos::Profiling::profileLibraryLoaded()) {
Kokkos::Profiling::allocateData(
Kokkos::Profiling::SpaceHandle(arg_space.name()), arg_label, data(),
arg_alloc_size);
}
#endif
// Fill in the Header information, directly accessible via UVM
RecordBase::m_alloc_ptr->m_record = this;
strncpy(RecordBase::m_alloc_ptr->m_label, arg_label.c_str(),
SharedAllocationHeader::maximum_label_length);
// Set last element zero, in case c_str is too long
RecordBase::m_alloc_ptr
->m_label[SharedAllocationHeader::maximum_label_length - 1] = (char)0;
}
SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::
SharedAllocationRecord(
const Kokkos::CudaHostPinnedSpace &arg_space,
const std::string &arg_label, const size_t arg_alloc_size,
const SharedAllocationRecord<void, void>::function_type arg_dealloc)
// Pass through allocated [ SharedAllocationHeader , user_memory ]
// Pass through deallocation function
: SharedAllocationRecord<void, void>(
#ifdef KOKKOS_DEBUG
&SharedAllocationRecord<Kokkos::CudaHostPinnedSpace,
void>::s_root_record,
#endif
Impl::checked_allocation_with_header(arg_space, arg_label,
arg_alloc_size),
sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc),
m_space(arg_space) {
#if defined(KOKKOS_ENABLE_PROFILING)
if (Kokkos::Profiling::profileLibraryLoaded()) {
Kokkos::Profiling::allocateData(
Kokkos::Profiling::SpaceHandle(arg_space.name()), arg_label, data(),
arg_alloc_size);
}
#endif
// Fill in the Header information, directly accessible on the host
RecordBase::m_alloc_ptr->m_record = this;
strncpy(RecordBase::m_alloc_ptr->m_label, arg_label.c_str(),
SharedAllocationHeader::maximum_label_length);
// Set last element zero, in case c_str is too long
RecordBase::m_alloc_ptr
->m_label[SharedAllocationHeader::maximum_label_length - 1] = (char)0;
}
// </editor-fold> end SharedAllocationRecord constructors }}}1
//==============================================================================
//==============================================================================
// <editor-fold desc="SharedAllocationRecored::(re|de|)allocate_tracked"> {{{1
void *SharedAllocationRecord<Kokkos::CudaSpace, void>::allocate_tracked(
const Kokkos::CudaSpace &arg_space, const std::string &arg_alloc_label,
const size_t arg_alloc_size) {
if (!arg_alloc_size) return (void *)0;
SharedAllocationRecord *const r =
allocate(arg_space, arg_alloc_label, arg_alloc_size);
RecordBase::increment(r);
return r->data();
}
void SharedAllocationRecord<Kokkos::CudaSpace, void>::deallocate_tracked(
void *const arg_alloc_ptr) {
if (arg_alloc_ptr != 0) {
SharedAllocationRecord *const r = get_record(arg_alloc_ptr);
RecordBase::decrement(r);
}
}
void *SharedAllocationRecord<Kokkos::CudaSpace, void>::reallocate_tracked(
void *const arg_alloc_ptr, const size_t arg_alloc_size) {
SharedAllocationRecord *const r_old = get_record(arg_alloc_ptr);
SharedAllocationRecord *const r_new =
allocate(r_old->m_space, r_old->get_label(), arg_alloc_size);
Kokkos::Impl::DeepCopy<CudaSpace, CudaSpace>(
r_new->data(), r_old->data(), std::min(r_old->size(), r_new->size()));
RecordBase::increment(r_new);
RecordBase::decrement(r_old);
return r_new->data();
}
void *SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::allocate_tracked(
const Kokkos::CudaUVMSpace &arg_space, const std::string &arg_alloc_label,
const size_t arg_alloc_size) {
if (!arg_alloc_size) return (void *)0;
SharedAllocationRecord *const r =
allocate(arg_space, arg_alloc_label, arg_alloc_size);
RecordBase::increment(r);
return r->data();
}
void SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::deallocate_tracked(
void *const arg_alloc_ptr) {
if (arg_alloc_ptr != 0) {
SharedAllocationRecord *const r = get_record(arg_alloc_ptr);
RecordBase::decrement(r);
}
}
void *SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::reallocate_tracked(
void *const arg_alloc_ptr, const size_t arg_alloc_size) {
SharedAllocationRecord *const r_old = get_record(arg_alloc_ptr);
SharedAllocationRecord *const r_new =
allocate(r_old->m_space, r_old->get_label(), arg_alloc_size);
Kokkos::Impl::DeepCopy<CudaUVMSpace, CudaUVMSpace>(
r_new->data(), r_old->data(), std::min(r_old->size(), r_new->size()));
RecordBase::increment(r_new);
RecordBase::decrement(r_old);
return r_new->data();
}
void *
SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::allocate_tracked(
const Kokkos::CudaHostPinnedSpace &arg_space,
const std::string &arg_alloc_label, const size_t arg_alloc_size) {
if (!arg_alloc_size) return (void *)0;
SharedAllocationRecord *const r =
allocate(arg_space, arg_alloc_label, arg_alloc_size);
RecordBase::increment(r);
return r->data();
}
void SharedAllocationRecord<Kokkos::CudaHostPinnedSpace,
void>::deallocate_tracked(void *const
arg_alloc_ptr) {
if (arg_alloc_ptr != 0) {
SharedAllocationRecord *const r = get_record(arg_alloc_ptr);
RecordBase::decrement(r);
}
}
void *
SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::reallocate_tracked(
void *const arg_alloc_ptr, const size_t arg_alloc_size) {
SharedAllocationRecord *const r_old = get_record(arg_alloc_ptr);
SharedAllocationRecord *const r_new =
allocate(r_old->m_space, r_old->get_label(), arg_alloc_size);
Kokkos::Impl::DeepCopy<CudaHostPinnedSpace, CudaHostPinnedSpace>(
r_new->data(), r_old->data(), std::min(r_old->size(), r_new->size()));
RecordBase::increment(r_new);
RecordBase::decrement(r_old);
return r_new->data();
}
// </editor-fold> end SharedAllocationRecored::(re|de|)allocate_tracked }}}1
//==============================================================================
//==============================================================================
// <editor-fold desc="SharedAllocationRecord::get_record()"> {{{1
SharedAllocationRecord<Kokkos::CudaSpace, void> *
SharedAllocationRecord<Kokkos::CudaSpace, void>::get_record(void *alloc_ptr) {
using RecordCuda = SharedAllocationRecord<Kokkos::CudaSpace, void>;
using Header = SharedAllocationHeader;
// Copy the header from the allocation
Header head;
Header const *const head_cuda =
alloc_ptr ? Header::get_header(alloc_ptr) : (Header *)0;
if (alloc_ptr) {
Kokkos::Impl::DeepCopy<HostSpace, CudaSpace>(
&head, head_cuda, sizeof(SharedAllocationHeader));
}
RecordCuda *const record =
alloc_ptr ? static_cast<RecordCuda *>(head.m_record) : (RecordCuda *)0;
if (!alloc_ptr || record->m_alloc_ptr != head_cuda) {
Kokkos::Impl::throw_runtime_exception(
std::string("Kokkos::Impl::SharedAllocationRecord< Kokkos::CudaSpace , "
"void >::get_record ERROR"));
}
return record;
}
SharedAllocationRecord<Kokkos::CudaUVMSpace, void> *SharedAllocationRecord<
Kokkos::CudaUVMSpace, void>::get_record(void *alloc_ptr) {
using Header = SharedAllocationHeader;
using RecordCuda = SharedAllocationRecord<Kokkos::CudaUVMSpace, void>;
Header *const h =
alloc_ptr ? reinterpret_cast<Header *>(alloc_ptr) - 1 : (Header *)0;
if (!alloc_ptr || h->m_record->m_alloc_ptr != h) {
Kokkos::Impl::throw_runtime_exception(
std::string("Kokkos::Impl::SharedAllocationRecord< "
"Kokkos::CudaUVMSpace , void >::get_record ERROR"));
}
return static_cast<RecordCuda *>(h->m_record);
}
SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>
*SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::get_record(
void *alloc_ptr) {
using Header = SharedAllocationHeader;
using RecordCuda = SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>;
Header *const h =
alloc_ptr ? reinterpret_cast<Header *>(alloc_ptr) - 1 : (Header *)0;
if (!alloc_ptr || h->m_record->m_alloc_ptr != h) {
Kokkos::Impl::throw_runtime_exception(
std::string("Kokkos::Impl::SharedAllocationRecord< "
"Kokkos::CudaHostPinnedSpace , void >::get_record ERROR"));
}
return static_cast<RecordCuda *>(h->m_record);
}
// </editor-fold> end SharedAllocationRecord::get_record() }}}1
//==============================================================================
//==============================================================================
// <editor-fold desc="SharedAllocationRecord::print_records()"> {{{1
// Iterate records to print orphaned memory ...
void SharedAllocationRecord<Kokkos::CudaSpace, void>::print_records(
std::ostream &s, const Kokkos::CudaSpace &, bool detail) {
(void)s;
(void)detail;
#ifdef KOKKOS_DEBUG
SharedAllocationRecord<void, void> *r = &s_root_record;
char buffer[256];
SharedAllocationHeader head;
if (detail) {
do {
if (r->m_alloc_ptr) {
Kokkos::Impl::DeepCopy<HostSpace, CudaSpace>(
&head, r->m_alloc_ptr, sizeof(SharedAllocationHeader));
} else {
head.m_label[0] = 0;
}
// Formatting dependent on sizeof(uintptr_t)
const char *format_string;
if (sizeof(uintptr_t) == sizeof(unsigned long)) {
format_string =
"Cuda addr( 0x%.12lx ) list( 0x%.12lx 0x%.12lx ) extent[ 0x%.12lx "
"+ %.8ld ] count(%d) dealloc(0x%.12lx) %s\n";
} else if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
format_string =
"Cuda addr( 0x%.12llx ) list( 0x%.12llx 0x%.12llx ) extent[ "
"0x%.12llx + %.8ld ] count(%d) dealloc(0x%.12llx) %s\n";
}
snprintf(buffer, 256, format_string, reinterpret_cast<uintptr_t>(r),
reinterpret_cast<uintptr_t>(r->m_prev),
reinterpret_cast<uintptr_t>(r->m_next),
reinterpret_cast<uintptr_t>(r->m_alloc_ptr), r->m_alloc_size,
r->m_count, reinterpret_cast<uintptr_t>(r->m_dealloc),
head.m_label);
s << buffer;
r = r->m_next;
} while (r != &s_root_record);
} else {
do {
if (r->m_alloc_ptr) {
Kokkos::Impl::DeepCopy<HostSpace, CudaSpace>(
&head, r->m_alloc_ptr, sizeof(SharedAllocationHeader));
// Formatting dependent on sizeof(uintptr_t)
const char *format_string;
if (sizeof(uintptr_t) == sizeof(unsigned long)) {
format_string = "Cuda [ 0x%.12lx + %ld ] %s\n";
} else if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
format_string = "Cuda [ 0x%.12llx + %ld ] %s\n";
}
snprintf(buffer, 256, format_string,
reinterpret_cast<uintptr_t>(r->data()), r->size(),
head.m_label);
} else {
snprintf(buffer, 256, "Cuda [ 0 + 0 ]\n");
}
s << buffer;
r = r->m_next;
} while (r != &s_root_record);
}
#else
Kokkos::Impl::throw_runtime_exception(
"SharedAllocationHeader<CudaSpace>::print_records only works with "
"KOKKOS_DEBUG enabled");
#endif
}
void SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::print_records(
std::ostream &s, const Kokkos::CudaUVMSpace &, bool detail) {
(void)s;
(void)detail;
#ifdef KOKKOS_DEBUG
SharedAllocationRecord<void, void>::print_host_accessible_records(
s, "CudaUVM", &s_root_record, detail);
#else
Kokkos::Impl::throw_runtime_exception(
"SharedAllocationHeader<CudaSpace>::print_records only works with "
"KOKKOS_DEBUG enabled");
#endif
}
void SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::print_records(
std::ostream &s, const Kokkos::CudaHostPinnedSpace &, bool detail) {
(void)s;
(void)detail;
#ifdef KOKKOS_DEBUG
SharedAllocationRecord<void, void>::print_host_accessible_records(
s, "CudaHostPinned", &s_root_record, detail);
#else
Kokkos::Impl::throw_runtime_exception(
"SharedAllocationHeader<CudaSpace>::print_records only works with "
"KOKKOS_DEBUG enabled");
#endif
}
// </editor-fold> end SharedAllocationRecord::print_records() }}}1
//==============================================================================
void *cuda_resize_scratch_space(std::int64_t bytes, bool force_shrink) {
static void *ptr = nullptr;
static std::int64_t current_size = 0;
if (current_size == 0) {
current_size = bytes;
ptr = Kokkos::kokkos_malloc<Kokkos::CudaSpace>("CudaSpace::ScratchMemory",
current_size);
}
if (bytes > current_size) {
current_size = bytes;
Kokkos::kokkos_free<Kokkos::CudaSpace>(ptr);
ptr = Kokkos::kokkos_malloc<Kokkos::CudaSpace>("CudaSpace::ScratchMemory",
current_size);
}
if ((bytes < current_size) && (force_shrink)) {
current_size = bytes;
Kokkos::kokkos_free<Kokkos::CudaSpace>(ptr);
ptr = Kokkos::kokkos_malloc<Kokkos::CudaSpace>("CudaSpace::ScratchMemory",
current_size);
}
return ptr;
}
void cuda_prefetch_pointer(const Cuda &space, const void *ptr, size_t bytes,
bool to_device) {
cudaPointerAttributes attr;
cudaPointerGetAttributes(&attr, ptr);
// I measured this and it turns out prefetching towards the host slows
// DualView syncs down. Probably because the latency is not too bad in the
// first place for the pull down. If we want to change that, provide
// cudaCpuDeviceId as the device if to_device is false
#if CUDA_VERSION < 10000
if (to_device && attr.isManaged &&
#else
if (to_device && (attr.type == cudaMemoryTypeManaged) &&
#endif
space.cuda_device_prop().concurrentManagedAccess) {
cudaMemPrefetchAsync(ptr, bytes, space.cuda_device(), space.cuda_stream());
}
}
} // namespace Impl
} // namespace Kokkos
#else
void KOKKOS_CORE_SRC_CUDA_CUDASPACE_PREVENT_LINK_ERROR() {}
#endif // KOKKOS_ENABLE_CUDA
| 1 | 22,726 | Should this be using the `CUDA_SAFE_CALL` macro here? | kokkos-kokkos | cpp |
@@ -298,8 +298,8 @@ namespace Xunit.ConsoleClient
{
lock (consoleLock)
{
- Console.ForegroundColor = ConsoleColor.Red;
- Console.WriteLine("ERROR: {0} has no tests to run", Path.GetFileNameWithoutExtension(assembly.AssemblyFilename));
+ Console.ForegroundColor = ConsoleColor.Yellow;
+ Console.WriteLine("Warning: {0} has no tests to run", Path.GetFileNameWithoutExtension(assembly.AssemblyFilename));
Console.ForegroundColor = ConsoleColor.Gray;
}
} | 1 | using System;
using System.Collections.Concurrent;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Reflection;
using System.Threading.Tasks;
using System.Xml.Linq;
namespace Xunit.ConsoleClient
{
public class Program
{
volatile static bool cancel;
static bool failed;
static readonly ConcurrentDictionary<string, ExecutionSummary> completionMessages = new ConcurrentDictionary<string, ExecutionSummary>();
[STAThread]
public static int Main(string[] args)
{
try
{
Console.ForegroundColor = ConsoleColor.White;
#if !NETCORE
var netVersion = Environment.Version;
#else
var netVersion = "Core";
#endif
Console.WriteLine("xUnit.net console test runner ({0}-bit .NET {1})", IntPtr.Size * 8, netVersion);
Console.WriteLine("Copyright (C) 2014 Outercurve Foundation.");
Console.WriteLine();
Console.ForegroundColor = ConsoleColor.Gray;
if (args.Length == 0 || args[0] == "-?")
{
PrintUsage();
return 1;
}
#if !NETCORE
AppDomain.CurrentDomain.UnhandledException += OnUnhandledException;
Console.CancelKeyPress += (sender, e) =>
{
if (!cancel)
{
Console.WriteLine("Canceling... (Press Ctrl+C again to terminate)");
cancel = true;
e.Cancel = true;
}
};
#else
cancel = false;
#endif
var defaultDirectory = Directory.GetCurrentDirectory();
if (!defaultDirectory.EndsWith(new String(new[] { Path.DirectorySeparatorChar })))
defaultDirectory += Path.DirectorySeparatorChar;
var commandLine = CommandLine.Parse(args);
var failCount = RunProject(defaultDirectory, commandLine.Project, commandLine.TeamCity, commandLine.AppVeyor,
commandLine.ParallelizeAssemblies, commandLine.ParallelizeTestCollections,
commandLine.MaxParallelThreads);
if (commandLine.Wait)
{
Console.WriteLine();
Console.Write("Press enter key to continue...");
Console.ReadLine();
Console.WriteLine();
}
return failCount;
}
catch (ArgumentException ex)
{
Console.WriteLine("error: {0}", ex.Message);
return 1;
}
catch (BadImageFormatException ex)
{
Console.WriteLine("{0}", ex.Message);
return 1;
}
finally
{
Console.ResetColor();
}
}
#if !NETCORE
static void OnUnhandledException(object sender, UnhandledExceptionEventArgs e)
{
var ex = e.ExceptionObject as Exception;
if (ex != null)
Console.WriteLine(ex.ToString());
else
Console.WriteLine("Error of unknown type thrown in application domain");
Environment.Exit(1);
}
#endif
static void PrintUsage()
{
#if !NETCORE
var executableName = Path.GetFileNameWithoutExtension(Assembly.GetExecutingAssembly().GetLocalCodeBase());
#else
var executableName = "xunit.console.netcore";
#endif
Console.WriteLine("usage: {0} <assemblyFile> [configFile] [assemblyFile [configFile]...] [options]", executableName);
Console.WriteLine();
Console.WriteLine("Note: Configuration files must end in .config");
Console.WriteLine();
Console.WriteLine("Valid options:");
Console.WriteLine(" -parallel option : set parallelization based on option");
Console.WriteLine(" : none - turn off all parallelization");
Console.WriteLine(" : collections - only parallelize collections");
Console.WriteLine(" : assemblies - only parallelize assemblies");
Console.WriteLine(" : all - parallelize assemblies & collections");
Console.WriteLine(" -maxthreads count : maximum thread count for collection parallelization");
Console.WriteLine(" : 0 - run with unbounded thread count");
Console.WriteLine(" : >0 - limit task thread pool size to 'count'");
Console.WriteLine(" -noshadow : do not shadow copy assemblies");
#if !NETCORE
Console.WriteLine(" -teamcity : forces TeamCity mode (normally auto-detected)");
Console.WriteLine(" -appveyor : forces AppVeyor CI mode (normally auto-detected)");
#endif
Console.WriteLine(" -wait : wait for input after completion");
Console.WriteLine(" -trait \"name=value\" : only run tests with matching name/value traits");
Console.WriteLine(" : if specified more than once, acts as an OR operation");
Console.WriteLine(" -notrait \"name=value\" : do not run tests with matching name/value traits");
Console.WriteLine(" : if specified more than once, acts as an AND operation");
Console.WriteLine(" -method \"name\" : run a given test method (should be fully specified;");
Console.WriteLine(" : i.e., 'MyNamespace.MyClass.MyTestMethod')");
Console.WriteLine(" : if specified more than once, acts as an OR operation");
Console.WriteLine(" -class \"name\" : run all methods in a given test class (should be fully");
Console.WriteLine(" : specified; i.e., 'MyNamespace.MyClass')");
Console.WriteLine(" : if specified more than once, acts as an OR operation");
TransformFactory.AvailableTransforms.ForEach(
transform => Console.WriteLine(" {0} : {1}",
String.Format("-{0} <filename>", transform.CommandLine).PadRight(22).Substring(0, 22),
transform.Description)
);
}
static int RunProject(string defaultDirectory, XunitProject project, bool teamcity, bool appVeyor, bool? parallelizeAssemblies, bool? parallelizeTestCollections, int? maxThreadCount)
{
XElement assembliesElement = null;
var xmlTransformers = TransformFactory.GetXmlTransformers(project);
var needsXml = xmlTransformers.Count > 0;
var consoleLock = new object();
if (!parallelizeAssemblies.HasValue)
parallelizeAssemblies = project.All(assembly => assembly.Configuration.ParallelizeAssembly);
if (needsXml)
assembliesElement = new XElement("assemblies");
var originalWorkingFolder = Directory.GetCurrentDirectory();
using (AssemblyHelper.SubscribeResolve())
{
var clockTime = Stopwatch.StartNew();
if (parallelizeAssemblies.GetValueOrDefault())
{
var tasks = project.Assemblies.Select(assembly => Task.Run(() => ExecuteAssembly(consoleLock, defaultDirectory, assembly, needsXml, teamcity, appVeyor, parallelizeTestCollections, maxThreadCount, project.Filters)));
var results = Task.WhenAll(tasks).GetAwaiter().GetResult();
foreach (var assemblyElement in results.Where(result => result != null))
assembliesElement.Add(assemblyElement);
}
else
{
foreach (var assembly in project.Assemblies)
{
var assemblyElement = ExecuteAssembly(consoleLock, defaultDirectory, assembly, needsXml, teamcity, appVeyor, parallelizeTestCollections, maxThreadCount, project.Filters);
if (assemblyElement != null)
assembliesElement.Add(assemblyElement);
}
}
clockTime.Stop();
if (completionMessages.Count > 0)
{
Console.ForegroundColor = ConsoleColor.White;
Console.WriteLine();
Console.WriteLine("=== TEST EXECUTION SUMMARY ===");
Console.ForegroundColor = ConsoleColor.Gray;
var totalTestsRun = completionMessages.Values.Sum(summary => summary.Total);
var totalTestsFailed = completionMessages.Values.Sum(summary => summary.Failed);
var totalTestsSkipped = completionMessages.Values.Sum(summary => summary.Skipped);
var totalTime = completionMessages.Values.Sum(summary => summary.Time).ToString("0.000s");
var totalErrors = completionMessages.Values.Sum(summary => summary.Errors);
var longestAssemblyName = completionMessages.Keys.Max(key => key.Length);
var longestTotal = totalTestsRun.ToString().Length;
var longestFailed = totalTestsFailed.ToString().Length;
var longestSkipped = totalTestsSkipped.ToString().Length;
var longestTime = totalTime.Length;
var longestErrors = totalErrors.ToString().Length;
foreach (var message in completionMessages.OrderBy(m => m.Key))
Console.WriteLine(" {0} Total: {1}, Errors: {2}, Failed: {3}, Skipped: {4}, Time: {5}",
message.Key.PadRight(longestAssemblyName),
message.Value.Total.ToString().PadLeft(longestTotal),
message.Value.Errors.ToString().PadLeft(longestErrors),
message.Value.Failed.ToString().PadLeft(longestFailed),
message.Value.Skipped.ToString().PadLeft(longestSkipped),
message.Value.Time.ToString("0.000s").PadLeft(longestTime));
if (completionMessages.Count > 1)
Console.WriteLine(" {0} {1} {2} {3} {4} {5}" + Environment.NewLine +
" {6} {7} {8} {9} {10} {11} ({12})",
" ".PadRight(longestAssemblyName),
"-".PadRight(longestTotal, '-'),
"-".PadRight(longestErrors, '-'),
"-".PadRight(longestFailed, '-'),
"-".PadRight(longestSkipped, '-'),
"-".PadRight(longestTime, '-'),
"GRAND TOTAL:".PadLeft(longestAssemblyName),
totalTestsRun,
totalErrors,
totalTestsFailed,
totalTestsSkipped,
totalTime,
clockTime.Elapsed.TotalSeconds.ToString("0.000s"));
}
}
Directory.SetCurrentDirectory(originalWorkingFolder);
xmlTransformers.ForEach(transformer => transformer(assembliesElement));
return failed ? 1 : completionMessages.Values.Sum(summary => summary.Failed);
}
static XmlTestExecutionVisitor CreateVisitor(object consoleLock, string defaultDirectory, XElement assemblyElement, bool teamCity, bool appVeyor)
{
#if !NETCORE
if (teamCity)
return new TeamCityVisitor(assemblyElement, () => cancel);
else if (appVeyor)
return new AppVeyorVisitor(consoleLock, defaultDirectory, assemblyElement, () => cancel, completionMessages);
#endif
return new StandardOutputVisitor(consoleLock, defaultDirectory, assemblyElement, () => cancel, completionMessages);
}
static XElement ExecuteAssembly(object consoleLock, string defaultDirectory, XunitProjectAssembly assembly, bool needsXml, bool teamCity, bool appVeyor, bool? parallelizeTestCollections, int? maxThreadCount, XunitFilters filters)
{
if (cancel)
return null;
var assemblyElement = needsXml ? new XElement("assembly") : null;
try
{
if (!ValidateFileExists(consoleLock, assembly.AssemblyFilename) || !ValidateFileExists(consoleLock, assembly.ConfigFilename))
return null;
var discoveryOptions = TestFrameworkOptions.ForDiscovery(assembly.Configuration);
var executionOptions = TestFrameworkOptions.ForExecution(assembly.Configuration);
if (maxThreadCount.HasValue)
executionOptions.SetMaxParallelThreads(maxThreadCount.GetValueOrDefault());
if (parallelizeTestCollections.HasValue)
executionOptions.SetDisableParallelization(!parallelizeTestCollections.GetValueOrDefault());
lock (consoleLock)
{
if (assembly.Configuration.DiagnosticMessages)
Console.WriteLine("Discovering: {0} (method display = {1}, parallel test collections = {2}, max threads = {3})",
Path.GetFileNameWithoutExtension(assembly.AssemblyFilename),
discoveryOptions.GetMethodDisplay(),
!executionOptions.GetDisableParallelization(),
executionOptions.GetMaxParallelThreads());
else
Console.WriteLine("Discovering: {0}", Path.GetFileNameWithoutExtension(assembly.AssemblyFilename));
}
using (var controller = new XunitFrontController(assembly.AssemblyFilename, assembly.ConfigFilename, assembly.ShadowCopy))
using (var discoveryVisitor = new TestDiscoveryVisitor())
{
controller.Find(includeSourceInformation: false, messageSink: discoveryVisitor, discoveryOptions: discoveryOptions);
discoveryVisitor.Finished.WaitOne();
lock (consoleLock)
Console.WriteLine("Discovered: {0}", Path.GetFileNameWithoutExtension(assembly.AssemblyFilename));
var resultsVisitor = CreateVisitor(consoleLock, defaultDirectory, assemblyElement, teamCity, appVeyor);
var filteredTestCases = discoveryVisitor.TestCases.Where(filters.Filter).ToList();
if (filteredTestCases.Count == 0)
{
lock (consoleLock)
{
Console.ForegroundColor = ConsoleColor.Red;
Console.WriteLine("ERROR: {0} has no tests to run", Path.GetFileNameWithoutExtension(assembly.AssemblyFilename));
Console.ForegroundColor = ConsoleColor.Gray;
}
}
else
{
controller.RunTests(filteredTestCases, resultsVisitor, executionOptions);
resultsVisitor.Finished.WaitOne();
}
}
}
catch (Exception ex)
{
Console.WriteLine("{0}: {1}", ex.GetType().FullName, ex.Message);
failed = true;
}
return assemblyElement;
}
static bool ValidateFileExists(object consoleLock, string fileName)
{
if (String.IsNullOrWhiteSpace(fileName) || File.Exists(fileName))
return true;
lock (consoleLock)
{
Console.ForegroundColor = ConsoleColor.Red;
Console.WriteLine("File not found: {0}", fileName);
Console.ForegroundColor = ConsoleColor.Gray;
}
return false;
}
}
}
| 1 | 6,044 | What cases do you see as being valid for not having any tests to run? If a test project doesn't have any tests to run we should avoid running it at the project level. | dotnet-buildtools | .cs |
@@ -1139,6 +1139,10 @@ func (mset *stream) processMirrorMsgs() {
// Grab stream quit channel.
mset.mu.Lock()
+ if mset.mirror == nil {
+ mset.mu.Unlock()
+ return
+ }
msgs, mch, qch := mset.mirror.msgs, mset.mirror.msgs.mch, mset.qch
// Set the last seen as now so that we don't fail at the first check.
mset.mirror.last = time.Now() | 1 | // Copyright 2019-2021 The NATS Authors
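Illustrative sketch only, not the nats-server code: the hunk above re-checks `mset.mirror` under the lock before dereferencing it, presumably because the mirror can be torn down between spawning the goroutine and the goroutine starting. A minimal stand-alone version of that guard pattern (all types below are invented for the example) could look like:

```go
package main

import (
	"fmt"
	"sync"
)

type mirrorState struct {
	msgs chan string
}

type streamLike struct {
	mu     sync.Mutex
	mirror *mirrorState // may be reset to nil concurrently
	qch    chan struct{}
}

func (s *streamLike) processMirrorMsgs() {
	s.mu.Lock()
	// Bail out early if the mirror was removed before this goroutine ran;
	// dereferencing s.mirror.msgs below would otherwise panic.
	if s.mirror == nil {
		s.mu.Unlock()
		return
	}
	msgs, qch := s.mirror.msgs, s.qch
	s.mu.Unlock()

	for {
		select {
		case <-qch:
			return
		case m := <-msgs:
			fmt.Println("mirrored:", m)
		}
	}
}

func main() {
	s := &streamLike{
		mirror: &mirrorState{msgs: make(chan string, 1)},
		qch:    make(chan struct{}),
	}
	// Simulate the mirror being torn down before the processing loop starts.
	s.mirror = nil
	s.processMirrorMsgs() // returns immediately instead of panicking
	fmt.Println("no panic")
}
```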
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"archive/tar"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/klauspost/compress/s2"
"github.com/nats-io/nuid"
)
// StreamConfig will determine the name, subjects and retention policy
// for a given stream. If subjects is empty the name will be used.
type StreamConfig struct {
Name string `json:"name"`
Subjects []string `json:"subjects,omitempty"`
Retention RetentionPolicy `json:"retention"`
MaxConsumers int `json:"max_consumers"`
MaxMsgs int64 `json:"max_msgs"`
MaxBytes int64 `json:"max_bytes"`
Discard DiscardPolicy `json:"discard"`
MaxAge time.Duration `json:"max_age"`
MaxMsgSize int32 `json:"max_msg_size,omitempty"`
Storage StorageType `json:"storage"`
Replicas int `json:"num_replicas"`
NoAck bool `json:"no_ack,omitempty"`
Template string `json:"template_owner,omitempty"`
Duplicates time.Duration `json:"duplicate_window,omitempty"`
Placement *Placement `json:"placement,omitempty"`
Mirror *StreamSource `json:"mirror,omitempty"`
Sources []*StreamSource `json:"sources,omitempty"`
}
const JSApiPubAckResponseType = "io.nats.jetstream.api.v1.pub_ack_response"
// JSPubAckResponse is a formal response to a publish operation.
type JSPubAckResponse struct {
Error *ApiError `json:"error,omitempty"`
*PubAck
}
// PubAck is the detail you get back from a publish to a stream that was successful.
// e.g. +OK {"stream": "Orders", "seq": 22}
type PubAck struct {
Stream string `json:"stream"`
Sequence uint64 `json:"seq"`
Duplicate bool `json:"duplicate,omitempty"`
}
// StreamInfo shows config and current state for this stream.
type StreamInfo struct {
Config StreamConfig `json:"config"`
Created time.Time `json:"created"`
State StreamState `json:"state"`
Cluster *ClusterInfo `json:"cluster,omitempty"`
Mirror *StreamSourceInfo `json:"mirror,omitempty"`
Sources []*StreamSourceInfo `json:"sources,omitempty"`
}
// ClusterInfo shows information about the underlying set of servers
// that make up the stream or consumer.
type ClusterInfo struct {
Name string `json:"name,omitempty"`
Leader string `json:"leader,omitempty"`
Replicas []*PeerInfo `json:"replicas,omitempty"`
}
// PeerInfo shows information about all the peers in the cluster that
// are supporting the stream or consumer.
type PeerInfo struct {
Name string `json:"name"`
Current bool `json:"current"`
Offline bool `json:"offline,omitempty"`
Active time.Duration `json:"active"`
Lag uint64 `json:"lag,omitempty"`
}
// StreamSourceInfo shows information about an upstream stream source.
type StreamSourceInfo struct {
Name string `json:"name"`
Lag uint64 `json:"lag"`
Active time.Duration `json:"active"`
Error *ApiError `json:"error,omitempty"`
}
// StreamSource dictates how streams can source from other streams.
type StreamSource struct {
Name string `json:"name"`
OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
OptStartTime *time.Time `json:"opt_start_time,omitempty"`
FilterSubject string `json:"filter_subject,omitempty"`
External *ExternalStream `json:"external,omitempty"`
}
// ExternalStream allows you to qualify access to a stream source in another account.
type ExternalStream struct {
ApiPrefix string `json:"api"`
DeliverPrefix string `json:"deliver"`
}
// Stream is a jetstream stream of messages. When we receive a message internally destined
// for a Stream we will direct link from the client to this structure.
type stream struct {
mu sync.RWMutex
js *jetStream
jsa *jsAccount
acc *Account
srv *Server
client *client
sysc *client
sid int
pubAck []byte
outq *jsOutQ
msgs *inbound
store StreamStore
rmch chan uint64
lseq uint64
lmsgId string
consumers map[string]*consumer
numFilter int
cfg StreamConfig
created time.Time
stype StorageType
ddmap map[string]*ddentry
ddarr []*ddentry
ddindex int
ddtmr *time.Timer
qch chan struct{}
active bool
// Mirror
mirror *sourceInfo
// Sources
sources map[string]*sourceInfo
// For flowcontrol processing for source and mirror internal consumers.
fcr map[uint64]string
// TODO(dlc) - Hide everything below behind two pointers.
// Clustered mode.
sa *streamAssignment
node RaftNode
catchup bool
syncSub *subscription
infoSub *subscription
clMu sync.Mutex
clseq uint64
clfs uint64
lqsent time.Time
catchups map[string]uint64
}
type sourceInfo struct {
name string
cname string
sub *subscription
msgs *inbound
sseq uint64
dseq uint64
clseq uint64
lag uint64
err *ApiError
last time.Time
lreq time.Time
grr bool
}
// Headers for published messages.
const (
JSMsgId = "Nats-Msg-Id"
JSExpectedStream = "Nats-Expected-Stream"
JSExpectedLastSeq = "Nats-Expected-Last-Sequence"
JSExpectedLastMsgId = "Nats-Expected-Last-Msg-Id"
JSStreamSource = "Nats-Stream-Source"
JSLastConsumerSeq = "Nats-Last-Consumer"
JSLastStreamSeq = "Nats-Last-Stream"
)
// Dedupe entry
type ddentry struct {
id string
seq uint64
ts int64
}
// Replicas Range
const (
StreamDefaultReplicas = 1
StreamMaxReplicas = 5
)
// AddStream adds a stream for the given account.
func (a *Account) addStream(config *StreamConfig) (*stream, error) {
return a.addStreamWithAssignment(config, nil, nil)
}
// AddStreamWithStore adds a stream for the given account with custom store config options.
func (a *Account) addStreamWithStore(config *StreamConfig, fsConfig *FileStoreConfig) (*stream, error) {
return a.addStreamWithAssignment(config, fsConfig, nil)
}
func (a *Account) addStreamWithAssignment(config *StreamConfig, fsConfig *FileStoreConfig, sa *streamAssignment) (*stream, error) {
s, jsa, err := a.checkForJetStream()
if err != nil {
return nil, err
}
// If we do not have the stream currently assigned to us in cluster mode we will proceed but warn.
// This can happen on startup with restored state where on meta replay we still do not have
// the assignment. Running in single server mode this always returns true.
if !jsa.streamAssigned(config.Name) {
s.Debugf("Stream '%s > %s' does not seem to be assigned to this server", a.Name, config.Name)
}
// Sensible defaults.
cfg, err := checkStreamCfg(config)
if err != nil {
return nil, err
}
jsa.mu.Lock()
js := jsa.js
if mset, ok := jsa.streams[cfg.Name]; ok {
jsa.mu.Unlock()
// Check to see if configs are same.
ocfg := mset.config()
if reflect.DeepEqual(ocfg, cfg) {
if sa != nil {
mset.setStreamAssignment(sa)
}
return mset, nil
} else {
return nil, ErrJetStreamStreamAlreadyUsed
}
}
// Check for limits.
if err := jsa.checkLimits(&cfg); err != nil {
jsa.mu.Unlock()
return nil, err
}
// Check for template ownership if present.
if cfg.Template != _EMPTY_ && jsa.account != nil {
if !jsa.checkTemplateOwnership(cfg.Template, cfg.Name) {
jsa.mu.Unlock()
return nil, fmt.Errorf("stream not owned by template")
}
}
// Check for mirror designation.
if cfg.Mirror != nil {
// Can't have subjects.
if len(cfg.Subjects) > 0 {
jsa.mu.Unlock()
return nil, fmt.Errorf("stream mirrors can not also contain subjects")
}
if len(cfg.Sources) > 0 {
jsa.mu.Unlock()
return nil, fmt.Errorf("stream mirrors can not also contain other sources")
}
if cfg.Mirror.FilterSubject != _EMPTY_ {
jsa.mu.Unlock()
return nil, fmt.Errorf("stream mirrors can not contain filtered subjects")
}
if cfg.Mirror.OptStartSeq > 0 && cfg.Mirror.OptStartTime != nil {
jsa.mu.Unlock()
return nil, fmt.Errorf("stream mirrors can not have both start seq and start time configured")
}
} else if len(cfg.Subjects) == 0 && len(cfg.Sources) == 0 {
jsa.mu.Unlock()
return nil, fmt.Errorf("stream needs at least one configured subject or mirror")
}
// Check for overlapping subjects. These are not allowed for now.
if jsa.subjectsOverlap(cfg.Subjects) {
jsa.mu.Unlock()
return nil, fmt.Errorf("subjects overlap with an existing stream")
}
// Setup the internal clients.
c := s.createInternalJetStreamClient()
ic := s.createInternalJetStreamClient()
mset := &stream{
acc: a,
jsa: jsa,
cfg: cfg,
js: js,
srv: s,
client: c,
sysc: ic,
stype: cfg.Storage,
consumers: make(map[string]*consumer),
msgs: &inbound{mch: make(chan struct{}, 1)},
rmch: make(chan uint64, 8192),
qch: make(chan struct{}),
}
jsa.streams[cfg.Name] = mset
storeDir := path.Join(jsa.storeDir, streamsDir, cfg.Name)
jsa.mu.Unlock()
// Bind to the user account.
c.registerWithAccount(a)
// Bind to the system account.
ic.registerWithAccount(s.SystemAccount())
// Create the appropriate storage
fsCfg := fsConfig
if fsCfg == nil {
fsCfg = &FileStoreConfig{}
// If we are file based and not explicitly configured
// we may be able to auto-tune based on max msgs or bytes.
if cfg.Storage == FileStorage {
mset.autoTuneFileStorageBlockSize(fsCfg)
}
}
fsCfg.StoreDir = storeDir
fsCfg.AsyncFlush = false
fsCfg.SyncInterval = 2 * time.Minute
if err := mset.setupStore(fsCfg); err != nil {
mset.stop(true, false)
return nil, err
}
// Create our pubAck template here. Better than json marshal each time on success.
b, _ := json.Marshal(&JSPubAckResponse{PubAck: &PubAck{Stream: cfg.Name, Sequence: math.MaxUint64}})
end := bytes.Index(b, []byte(strconv.FormatUint(math.MaxUint64, 10)))
// We need to force cap here to make sure this is a copy when sending a response.
mset.pubAck = b[:end:end]
// Rebuild dedupe as needed.
mset.rebuildDedupe()
// Set our stream assignment if in clustered mode.
if sa != nil {
mset.setStreamAssignment(sa)
}
// Setup our internal send go routine.
mset.setupSendCapabilities()
// Call directly to set leader if not in clustered mode.
// This can be called before we actually set up clustering though, so check both.
if !s.JetStreamIsClustered() && s.standAloneMode() {
if err := mset.setLeader(true); err != nil {
mset.stop(true, false)
return nil, err
}
}
// This is always true in single server mode.
mset.mu.RLock()
isLeader := mset.isLeader()
mset.mu.RUnlock()
if isLeader {
// Send advisory.
var suppress bool
if !s.standAloneMode() && sa == nil {
suppress = true
} else if sa != nil {
suppress = sa.responded
}
if !suppress {
mset.sendCreateAdvisory()
}
}
return mset, nil
}
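// streamAssignment returns the stream assignment for this stream when running in clustered mode.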
func (mset *stream) streamAssignment() *streamAssignment {
mset.mu.RLock()
defer mset.mu.RUnlock()
return mset.sa
}
func (mset *stream) setStreamAssignment(sa *streamAssignment) {
mset.mu.Lock()
defer mset.mu.Unlock()
mset.sa = sa
if sa == nil {
return
}
// Set our node.
mset.node = sa.Group.node
// Setup our info sub here as well for all stream members. This is now by design.
if mset.infoSub == nil {
isubj := fmt.Sprintf(clusterStreamInfoT, mset.jsa.acc(), mset.cfg.Name)
// Note below the way we subscribe here is so that we can send requests to ourselves.
mset.infoSub, _ = mset.srv.systemSubscribe(isubj, _EMPTY_, false, mset.sysc, mset.handleClusterStreamInfoRequest)
}
}
// Lock should be held.
func (mset *stream) isLeader() bool {
if mset.node != nil {
return mset.node.Leader()
}
return true
}
// TODO(dlc) - Check to see if we can accept being the leader or if we should step down.
func (mset *stream) setLeader(isLeader bool) error {
mset.mu.Lock()
// If we are here we have a change in leader status.
if isLeader {
// Make sure we are listening for sync requests.
// TODO(dlc) - Original design was that all in sync members of the group would do DQ.
mset.startClusterSubs()
// Setup subscriptions
if err := mset.subscribeToStream(); err != nil {
mset.mu.Unlock()
return err
}
} else {
// Stop responding to sync requests.
mset.stopClusterSubs()
// Unsubscribe from direct stream.
mset.unsubscribeToStream()
// Clear catchup state
mset.clearAllCatchupPeers()
}
mset.mu.Unlock()
return nil
}
// Lock should be held.
func (mset *stream) startClusterSubs() {
if mset.isClustered() && mset.syncSub == nil {
mset.syncSub, _ = mset.srv.systemSubscribe(mset.sa.Sync, _EMPTY_, false, mset.sysc, mset.handleClusterSyncRequest)
}
}
// Lock should be held.
func (mset *stream) stopClusterSubs() {
if mset.syncSub != nil {
mset.srv.sysUnsubscribe(mset.syncSub)
mset.syncSub = nil
}
}
// account gets the account for this stream.
func (mset *stream) account() *Account {
mset.mu.RLock()
jsa := mset.jsa
mset.mu.RUnlock()
if jsa == nil {
return nil
}
return jsa.acc()
}
// Helper to determine the max msg size for this stream if file based.
func (mset *stream) maxMsgSize() uint64 {
maxMsgSize := mset.cfg.MaxMsgSize
if maxMsgSize <= 0 {
// Pull from the account.
if mset.jsa != nil {
if acc := mset.jsa.acc(); acc != nil {
acc.mu.RLock()
maxMsgSize = acc.mpay
acc.mu.RUnlock()
}
}
// If all else fails use default.
if maxMsgSize <= 0 {
maxMsgSize = MAX_PAYLOAD_SIZE
}
}
// Now determine an estimation for the subjects etc.
maxSubject := -1
for _, subj := range mset.cfg.Subjects {
if subjectIsLiteral(subj) {
if len(subj) > maxSubject {
maxSubject = len(subj)
}
}
}
if maxSubject < 0 {
const defaultMaxSubject = 256
maxSubject = defaultMaxSubject
}
// filestore will add in estimates for record headers, etc.
return fileStoreMsgSizeEstimate(maxSubject, int(maxMsgSize))
}
// If we are file based and the file storage config was not explicitly set
// we can autotune block sizes to better match. Our target will be to store 125%
// of the theoretical limit. We will round up to nearest 100 bytes as well.
func (mset *stream) autoTuneFileStorageBlockSize(fsCfg *FileStoreConfig) {
var totalEstSize uint64
// MaxBytes will take precedence for now.
if mset.cfg.MaxBytes > 0 {
totalEstSize = uint64(mset.cfg.MaxBytes)
} else if mset.cfg.MaxMsgs > 0 {
// Determine max message size to estimate.
totalEstSize = mset.maxMsgSize() * uint64(mset.cfg.MaxMsgs)
} else {
// If nothing set will let underlying filestore determine blkSize.
return
}
blkSize := (totalEstSize / 4) + 1 // (25% overhead)
// Round up to nearest 100
if m := blkSize % 100; m != 0 {
blkSize += 100 - m
}
if blkSize < FileStoreMinBlkSize {
blkSize = FileStoreMinBlkSize
}
if blkSize > FileStoreMaxBlkSize {
blkSize = FileStoreMaxBlkSize
}
fsCfg.BlockSize = uint64(blkSize)
}
// rebuildDedupe will rebuild any dedupe structures needed after recovery of a stream.
// TODO(dlc) - Might be good to know if this should be checked at all for streams with no
// headers and msgId in them. Would need signaling from the storage layer.
func (mset *stream) rebuildDedupe() {
state := mset.store.State()
mset.lseq = state.LastSeq
// We have some messages. Lookup starting sequence by duplicate time window.
sseq := mset.store.GetSeqFromTime(time.Now().Add(-mset.cfg.Duplicates))
if sseq == 0 {
return
}
for seq := sseq; seq <= state.LastSeq; seq++ {
_, hdr, _, ts, err := mset.store.LoadMsg(seq)
var msgId string
if err == nil && len(hdr) > 0 {
if msgId = getMsgId(hdr); msgId != _EMPTY_ {
mset.storeMsgId(&ddentry{msgId, seq, ts})
}
}
if seq == state.LastSeq {
mset.lmsgId = msgId
}
}
}
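// lastSeq returns the last stream sequence we have seen.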
func (mset *stream) lastSeq() uint64 {
mset.mu.RLock()
lseq := mset.lseq
mset.mu.RUnlock()
return lseq
}
func (mset *stream) setLastSeq(lseq uint64) {
mset.mu.Lock()
mset.lseq = lseq
mset.mu.Unlock()
}
func (mset *stream) sendCreateAdvisory() {
mset.mu.Lock()
name := mset.cfg.Name
template := mset.cfg.Template
outq := mset.outq
mset.mu.Unlock()
if outq == nil {
return
}
// finally send an event that this stream was created
m := JSStreamActionAdvisory{
TypedEvent: TypedEvent{
Type: JSStreamActionAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: name,
Action: CreateEvent,
Template: template,
}
j, err := json.Marshal(m)
if err != nil {
return
}
subj := JSAdvisoryStreamCreatedPre + "." + name
outq.send(&jsPubMsg{subj, subj, _EMPTY_, nil, j, nil, 0, nil})
}
func (mset *stream) sendDeleteAdvisoryLocked() {
if mset.outq == nil {
return
}
m := JSStreamActionAdvisory{
TypedEvent: TypedEvent{
Type: JSStreamActionAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: mset.cfg.Name,
Action: DeleteEvent,
Template: mset.cfg.Template,
}
j, err := json.Marshal(m)
if err == nil {
subj := JSAdvisoryStreamDeletedPre + "." + mset.cfg.Name
mset.outq.send(&jsPubMsg{subj, subj, _EMPTY_, nil, j, nil, 0, nil})
}
}
func (mset *stream) sendUpdateAdvisoryLocked() {
if mset.outq == nil {
return
}
m := JSStreamActionAdvisory{
TypedEvent: TypedEvent{
Type: JSStreamActionAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: mset.cfg.Name,
Action: ModifyEvent,
}
j, err := json.Marshal(m)
if err == nil {
subj := JSAdvisoryStreamUpdatedPre + "." + mset.cfg.Name
mset.outq.send(&jsPubMsg{subj, subj, _EMPTY_, nil, j, nil, 0, nil})
}
}
// createdTime returns the stream's creation time.
func (mset *stream) createdTime() time.Time {
mset.mu.RLock()
created := mset.created
mset.mu.RUnlock()
return created
}
// Internal to allow creation time to be restored.
func (mset *stream) setCreatedTime(created time.Time) {
mset.mu.Lock()
mset.created = created
mset.mu.Unlock()
}
// Check to see if these subjects overlap with existing subjects.
// Lock should be held.
func (jsa *jsAccount) subjectsOverlap(subjects []string) bool {
for _, mset := range jsa.streams {
for _, subj := range mset.cfg.Subjects {
for _, tsubj := range subjects {
if SubjectsCollide(tsubj, subj) {
return true
}
}
}
}
return false
}
// Default duplicates window.
const StreamDefaultDuplicatesWindow = 2 * time.Minute
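// checkStreamCfg validates a stream configuration and applies sensible defaults
// for storage, replicas, limits and the duplicates window.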
func checkStreamCfg(config *StreamConfig) (StreamConfig, error) {
if config == nil {
return StreamConfig{}, fmt.Errorf("stream configuration invalid")
}
if !isValidName(config.Name) {
return StreamConfig{}, fmt.Errorf("stream name is required and can not contain '.', '*', '>'")
}
if len(config.Name) > JSMaxNameLen {
return StreamConfig{}, fmt.Errorf("stream name is too long, maximum allowed is %d", JSMaxNameLen)
}
cfg := *config
// Make file the default.
if cfg.Storage == 0 {
cfg.Storage = FileStorage
}
if cfg.Replicas == 0 {
cfg.Replicas = 1
}
if cfg.Replicas > StreamMaxReplicas {
return cfg, fmt.Errorf("maximum replicas is %d", StreamMaxReplicas)
}
if cfg.MaxMsgs == 0 {
cfg.MaxMsgs = -1
}
if cfg.MaxBytes == 0 {
cfg.MaxBytes = -1
}
if cfg.MaxMsgSize == 0 {
cfg.MaxMsgSize = -1
}
if cfg.MaxConsumers == 0 {
cfg.MaxConsumers = -1
}
if cfg.Duplicates == 0 {
if cfg.MaxAge != 0 && cfg.MaxAge < StreamDefaultDuplicatesWindow {
cfg.Duplicates = cfg.MaxAge
} else {
cfg.Duplicates = StreamDefaultDuplicatesWindow
}
} else if cfg.Duplicates < 0 {
return StreamConfig{}, fmt.Errorf("duplicates window can not be negative")
}
// Check that the duplicates window is not larger than max age if set.
if cfg.MaxAge != 0 && cfg.Duplicates > cfg.MaxAge {
return StreamConfig{}, fmt.Errorf("duplicates window can not be larger than max age")
}
if len(cfg.Subjects) == 0 {
if cfg.Mirror == nil && len(cfg.Sources) == 0 {
cfg.Subjects = append(cfg.Subjects, cfg.Name)
}
} else {
// We can allow overlaps, but don't allow direct duplicates.
dset := make(map[string]struct{}, len(cfg.Subjects))
for _, subj := range cfg.Subjects {
if _, ok := dset[subj]; ok {
return StreamConfig{}, fmt.Errorf("duplicate subjects detected")
}
// Also check to make sure we do not overlap with our $JS API subjects.
if subjectIsSubsetMatch(subj, "$JS.API.>") {
return StreamConfig{}, fmt.Errorf("subjects overlap with jetstream api")
}
dset[subj] = struct{}{}
}
}
return cfg, nil
}
// Config returns the stream's configuration.
func (mset *stream) config() StreamConfig {
mset.mu.RLock()
defer mset.mu.RUnlock()
return mset.cfg
}
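// fileStoreConfig returns the underlying file store configuration,
// or an error if this stream is not file based.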
func (mset *stream) fileStoreConfig() (FileStoreConfig, error) {
mset.mu.Lock()
defer mset.mu.Unlock()
fs, ok := mset.store.(*fileStore)
if !ok {
return FileStoreConfig{}, ErrStoreWrongType
}
return fs.fileStoreConfig(), nil
}
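// configUpdateCheck validates a proposed configuration update against the existing one,
// rejecting changes that are not allowed (name, MaxConsumers, storage, retention, templates)
// and re-checking account limits.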
func (jsa *jsAccount) configUpdateCheck(old, new *StreamConfig) (*StreamConfig, error) {
cfg, err := checkStreamCfg(new)
if err != nil {
return nil, err
}
// Name must match.
if cfg.Name != old.Name {
return nil, fmt.Errorf("stream configuration name must match original")
}
// Can't change MaxConsumers for now.
if cfg.MaxConsumers != old.MaxConsumers {
return nil, fmt.Errorf("stream configuration update can not change MaxConsumers")
}
// Can't change storage types.
if cfg.Storage != old.Storage {
return nil, fmt.Errorf("stream configuration update can not change storage type")
}
// Can't change retention.
if cfg.Retention != old.Retention {
return nil, fmt.Errorf("stream configuration update can not change retention policy")
}
// Can not have a template owner for now.
if old.Template != _EMPTY_ {
return nil, fmt.Errorf("stream configuration update not allowed on template owned stream")
}
if cfg.Template != _EMPTY_ {
return nil, fmt.Errorf("stream configuration update can not be owned by a template")
}
// Check limits.
if err := jsa.checkLimits(&cfg); err != nil {
return nil, err
}
return &cfg, nil
}
// Update will allow certain configuration properties of an existing stream to be updated.
func (mset *stream) update(config *StreamConfig) error {
ocfg := mset.config()
cfg, err := mset.jsa.configUpdateCheck(&ocfg, config)
if err != nil {
return err
}
mset.mu.Lock()
if mset.isLeader() {
// Now check for subject interest differences.
current := make(map[string]struct{}, len(ocfg.Subjects))
for _, s := range ocfg.Subjects {
current[s] = struct{}{}
}
// Update config with new values. The store update will enforce any stricter limits.
// Now walk the new subjects. All of these need to be added, but we will check
// the originals first, since if a subject is already there we can skip it.
for _, s := range cfg.Subjects {
if _, ok := current[s]; !ok {
if _, err := mset.subscribeInternal(s, mset.processInboundJetStreamMsg); err != nil {
mset.mu.Unlock()
return err
}
}
delete(current, s)
}
// What is left in current needs to be deleted.
for s := range current {
if err := mset.unsubscribeInternal(s); err != nil {
mset.mu.Unlock()
return err
}
}
// Check for a change to the duplicates window.
if cfg.Duplicates != ocfg.Duplicates && mset.ddtmr != nil {
// Let it fire right away, it will adjust properly on purge.
mset.ddtmr.Reset(time.Microsecond)
}
// Check for Sources.
if len(cfg.Sources) > 0 || len(ocfg.Sources) > 0 {
current := make(map[string]struct{})
for _, s := range ocfg.Sources {
current[s.Name] = struct{}{}
}
for _, s := range cfg.Sources {
if _, ok := current[s.Name]; !ok {
if mset.sources == nil {
mset.sources = make(map[string]*sourceInfo)
}
mset.cfg.Sources = append(mset.cfg.Sources, s)
si := &sourceInfo{name: s.Name, msgs: &inbound{mch: make(chan struct{}, 1)}}
mset.sources[s.Name] = si
mset.setStartingSequenceForSource(s.Name)
mset.setSourceConsumer(s.Name, si.sseq+1)
}
delete(current, s.Name)
}
// What is left in current needs to be deleted.
for sname := range current {
mset.cancelSourceConsumer(sname)
delete(mset.sources, sname)
}
}
}
// Now update config and store's version of our config.
mset.cfg = *cfg
var suppress bool
if mset.isClustered() && mset.sa != nil {
suppress = mset.sa.responded
}
if mset.isLeader() && !suppress {
mset.sendUpdateAdvisoryLocked()
}
mset.mu.Unlock()
mset.store.UpdateConfig(cfg)
return nil
}
// Purge will remove all messages from the stream and underlying store.
func (mset *stream) purge() (uint64, error) {
mset.mu.Lock()
if mset.client == nil {
mset.mu.Unlock()
return 0, errors.New("stream closed")
}
// Purge dedupe.
mset.ddmap = nil
var _obs [4]*consumer
obs := _obs[:0]
for _, o := range mset.consumers {
obs = append(obs, o)
}
mset.mu.Unlock()
purged, err := mset.store.Purge()
if err != nil {
return purged, err
}
var state StreamState
mset.store.FastState(&state)
for _, o := range obs {
o.purge(state.FirstSeq)
}
return purged, nil
}
// RemoveMsg will remove a message from a stream.
// FIXME(dlc) - Should pick one and be consistent.
func (mset *stream) removeMsg(seq uint64) (bool, error) {
return mset.deleteMsg(seq)
}
// DeleteMsg will remove a message from a stream.
func (mset *stream) deleteMsg(seq uint64) (bool, error) {
mset.mu.RLock()
if mset.client == nil {
mset.mu.RUnlock()
return false, fmt.Errorf("invalid stream")
}
mset.mu.RUnlock()
return mset.store.RemoveMsg(seq)
}
// EraseMsg will securely remove a message and rewrite the data with random data.
func (mset *stream) eraseMsg(seq uint64) (bool, error) {
mset.mu.RLock()
if mset.client == nil {
mset.mu.RUnlock()
return false, fmt.Errorf("invalid stream")
}
mset.mu.RUnlock()
return mset.store.EraseMsg(seq)
}
// Are we a mirror?
func (mset *stream) isMirror() bool {
mset.mu.RLock()
defer mset.mu.RUnlock()
return mset.cfg.Mirror != nil
}
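// hasSources reports whether this stream is sourcing messages from other streams.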
func (mset *stream) hasSources() bool {
mset.mu.RLock()
defer mset.mu.RUnlock()
return len(mset.sources) > 0
}
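// sourcesInfo returns the source info for all of our configured sources.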
func (mset *stream) sourcesInfo() (sis []*StreamSourceInfo) {
mset.mu.RLock()
defer mset.mu.RUnlock()
for _, si := range mset.sources {
sis = append(sis, mset.sourceInfo(si))
}
return sis
}
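// allSubjects returns all subjects this stream covers, including subjects gathered
// recursively from any mirror or sources. The boolean reports whether an external
// (cross-account) source was encountered whose subjects we can not resolve locally.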
func (mset *stream) allSubjects() ([]string, bool) {
subjects, cfg, acc := mset.subjects(), mset.config(), mset.account()
var hasExt bool
var seen map[string]bool
if cfg.Mirror != nil {
var subjs []string
seen = make(map[string]bool)
subjs, hasExt = acc.streamSourceSubjects(cfg.Mirror, seen)
if len(subjs) > 0 {
subjects = append(subjects, subjs...)
}
} else if len(cfg.Sources) > 0 {
var subjs []string
seen = make(map[string]bool)
for _, si := range cfg.Sources {
subjs, hasExt = acc.streamSourceSubjects(si, seen)
if len(subjs) > 0 {
subjects = append(subjects, subjs...)
}
}
}
return subjects, hasExt
}
// Return the subjects for a stream source.
func (a *Account) streamSourceSubjects(ss *StreamSource, seen map[string]bool) (subjects []string, hasExt bool) {
if ss != nil && ss.External != nil {
return nil, true
}
s, js, _ := a.getJetStreamFromAccount()
if !s.JetStreamIsClustered() {
return a.streamSourceSubjectsNotClustered(ss.Name, seen)
} else {
return js.streamSourceSubjectsClustered(a.Name, ss.Name, seen)
}
}
func (js *jetStream) streamSourceSubjectsClustered(accountName, streamName string, seen map[string]bool) (subjects []string, hasExt bool) {
if seen[streamName] {
return nil, false
}
// We are clustered here so need to work through stream assignments.
sa := js.streamAssignment(accountName, streamName)
if sa == nil {
return nil, false
}
seen[streamName] = true
js.mu.RLock()
cfg := sa.Config
if len(cfg.Subjects) > 0 {
subjects = append(subjects, cfg.Subjects...)
}
// Check if we need to keep going.
var sources []*StreamSource
if cfg.Mirror != nil {
sources = append(sources, cfg.Mirror)
} else if len(cfg.Sources) > 0 {
sources = append(sources, cfg.Sources...)
}
js.mu.RUnlock()
if len(sources) > 0 {
var subjs []string
if acc, err := js.srv.lookupAccount(accountName); err == nil {
for _, ss := range sources {
subjs, hasExt = acc.streamSourceSubjects(ss, seen)
if len(subjs) > 0 {
subjects = append(subjects, subjs...)
}
if hasExt {
break
}
}
}
}
return subjects, hasExt
}
func (a *Account) streamSourceSubjectsNotClustered(streamName string, seen map[string]bool) (subjects []string, hasExt bool) {
if seen[streamName] {
return nil, false
}
mset, err := a.lookupStream(streamName)
if err != nil {
return nil, false
}
seen[streamName] = true
cfg := mset.config()
if len(cfg.Subjects) > 0 {
subjects = append(subjects, cfg.Subjects...)
}
var subjs []string
if cfg.Mirror != nil {
subjs, hasExt = a.streamSourceSubjects(cfg.Mirror, seen)
if len(subjs) > 0 {
subjects = append(subjects, subjs...)
}
} else if len(cfg.Sources) > 0 {
for _, si := range cfg.Sources {
subjs, hasExt = a.streamSourceSubjects(si, seen)
if len(subjs) > 0 {
subjects = append(subjects, subjs...)
}
if hasExt {
break
}
}
}
return subjects, hasExt
}
// Lock should be held
func (mset *stream) sourceInfo(si *sourceInfo) *StreamSourceInfo {
if si == nil {
return nil
}
return &StreamSourceInfo{Name: si.name, Lag: si.lag, Active: time.Since(si.last), Error: si.err}
}
// Return our source info for our mirror.
func (mset *stream) mirrorInfo() *StreamSourceInfo {
mset.mu.RLock()
defer mset.mu.RUnlock()
return mset.sourceInfo(mset.mirror)
}
const sourceHealthCheckInterval = 2 * time.Second
// Will run as a Go routine to process mirror consumer messages.
func (mset *stream) processMirrorMsgs() {
s := mset.srv
defer s.grWG.Done()
defer func() {
mset.mu.Lock()
if mset.mirror != nil {
mset.mirror.grr = false
}
mset.mu.Unlock()
}()
// Grab stream quit channel.
mset.mu.Lock()
msgs, mch, qch := mset.mirror.msgs, mset.mirror.msgs.mch, mset.qch
// Set the last seen as now so that we don't fail at the first check.
mset.mirror.last = time.Now()
mset.mu.Unlock()
t := time.NewTicker(sourceHealthCheckInterval)
defer t.Stop()
for {
select {
case <-s.quitCh:
return
case <-qch:
return
case <-mch:
for im := mset.pending(msgs); im != nil; im = im.next {
if !mset.processInboundMirrorMsg(im) {
break
}
}
case <-t.C:
mset.mu.RLock()
stalled := mset.mirror != nil && time.Since(mset.mirror.last) > 3*sourceHealthCheckInterval
mset.mu.RUnlock()
if stalled {
mset.retryMirrorConsumer()
}
}
}
}
// Checks that the message is from our current direct consumer. We can not just compare subs
// since they will not match on cross account imports.
func (si *sourceInfo) isCurrentSub(reply string) bool {
return si.cname != _EMPTY_ && strings.HasPrefix(reply, jsAckPre) && si.cname == tokenAt(reply, 4)
}
// processInboundMirrorMsg handles processing messages bound for a stream.
func (mset *stream) processInboundMirrorMsg(m *inMsg) bool {
mset.mu.Lock()
if mset.mirror == nil {
mset.mu.Unlock()
return false
}
if !mset.isLeader() {
mset.mu.Unlock()
mset.cancelMirrorConsumer()
return false
}
isControl := m.isControlMsg()
// Ignore from old subscriptions.
// The reason we can not just compare subs is that on cross account imports they will not match.
if !mset.mirror.isCurrentSub(m.rply) && !isControl {
mset.mu.Unlock()
return false
}
mset.mirror.last = time.Now()
node := mset.node
// Check for heartbeats and flow control messages.
if isControl {
var needsRetry bool
// Flow controls have reply subjects.
if m.rply != _EMPTY_ {
mset.handleFlowControl(mset.mirror, m)
} else {
// For idle heartbeats make sure we did not miss anything.
if ldseq := parseInt64(getHeader(JSLastConsumerSeq, m.hdr)); ldseq > 0 && uint64(ldseq) != mset.mirror.dseq {
needsRetry = true
}
}
mset.mu.Unlock()
if needsRetry {
mset.retryMirrorConsumer()
}
return !needsRetry
}
sseq, dseq, dc, ts, pending := replyInfo(m.rply)
if dc > 1 {
mset.mu.Unlock()
return false
}
// Mirror info tracking.
olag, osseq, odseq, oclseq := mset.mirror.lag, mset.mirror.sseq, mset.mirror.dseq, mset.mirror.clseq
if sseq == mset.mirror.sseq+1 {
mset.mirror.dseq = dseq
mset.mirror.sseq++
} else if sseq <= mset.mirror.sseq {
// Ignore older messages.
mset.mu.Unlock()
return true
} else if mset.mirror.cname == _EMPTY_ {
mset.mirror.cname = tokenAt(m.rply, 4)
mset.mirror.dseq, mset.mirror.sseq = dseq, sseq
} else {
// If the deliver sequence matches then the upstream stream has expired or deleted messages.
if dseq == mset.mirror.dseq+1 {
mset.skipMsgs(mset.mirror.sseq+1, sseq-1)
mset.mirror.dseq++
mset.mirror.sseq = sseq
} else {
mset.mu.Unlock()
mset.retryMirrorConsumer()
return false
}
}
if pending == 0 {
mset.mirror.lag = 0
} else {
mset.mirror.lag = pending - 1
}
mset.mirror.clseq = sseq - 1
js, stype := mset.js, mset.cfg.Storage
mset.mu.Unlock()
s := mset.srv
var err error
if node != nil {
if js.limitsExceeded(stype) {
s.resourcesExeededError()
err = ErrJetStreamResourcesExceeded
} else {
err = node.Propose(encodeStreamMsg(m.subj, _EMPTY_, m.hdr, m.msg, sseq-1, ts))
}
} else {
err = mset.processJetStreamMsg(m.subj, _EMPTY_, m.hdr, m.msg, sseq-1, ts)
}
if err != nil {
if err == errLastSeqMismatch {
// We may have missed messages, restart.
if sseq <= mset.lastSeq() {
mset.mu.Lock()
mset.mirror.lag = olag
mset.mirror.sseq = osseq
mset.mirror.dseq = odseq
mset.mirror.clseq = oclseq
mset.mu.Unlock()
return false
} else {
mset.mu.Lock()
mset.mirror.dseq = odseq
mset.mirror.sseq = osseq
mset.mu.Unlock()
mset.retryMirrorConsumer()
}
} else {
s.Warnf("Got error processing JetStream mirror msg: %v", err)
}
if strings.Contains(err.Error(), "no space left") {
s.Errorf("JetStream out of space, will be DISABLED")
s.DisableJetStream()
}
}
return err == nil
}
func (mset *stream) setMirrorErr(err *ApiError) {
mset.mu.Lock()
if mset.mirror != nil {
mset.mirror.err = err
}
mset.mu.Unlock()
}
func (mset *stream) cancelMirrorConsumer() {
mset.mu.Lock()
defer mset.mu.Unlock()
if mset.mirror == nil {
return
}
if mset.mirror.sub != nil {
mset.unsubscribe(mset.mirror.sub)
mset.mirror.sub = nil
}
mset.removeInternalConsumer(mset.mirror)
}
func (mset *stream) retryMirrorConsumer() error {
mset.mu.Lock()
defer mset.mu.Unlock()
mset.srv.Debugf("Retrying mirror consumer for '%s > %s'", mset.acc.Name, mset.cfg.Name)
return mset.setupMirrorConsumer()
}
// Lock should be held.
func (mset *stream) skipMsgs(start, end uint64) {
node, store := mset.node, mset.store
var entries []*Entry
for seq := start; seq <= end; seq++ {
if node != nil {
entries = append(entries, &Entry{EntryNormal, encodeStreamMsg(_EMPTY_, _EMPTY_, nil, nil, seq-1, 0)})
// So a single message does not get too big.
if len(entries) > 10_000 {
node.ProposeDirect(entries)
entries = entries[:0]
}
} else {
mset.lseq = store.SkipMsg()
}
}
// Send all at once.
if node != nil && len(entries) > 0 {
node.ProposeDirect(entries)
}
}
// Setup our mirror consumer.
// Lock should be held.
func (mset *stream) setupMirrorConsumer() error {
if mset.outq == nil {
return errors.New("outq required")
}
isReset := mset.mirror != nil
// Reset
if isReset {
if mset.mirror.sub != nil {
mset.unsubscribe(mset.mirror.sub)
mset.mirror.sub = nil
mset.mirror.dseq = 0
mset.mirror.sseq = mset.lseq
}
// Make sure to delete any prior consumers if we know about them.
mset.removeInternalConsumer(mset.mirror)
// If we are no longer the leader stop trying.
if !mset.isLeader() {
return nil
}
}
// Determine subjects etc.
var deliverSubject string
ext := mset.cfg.Mirror.External
if ext != nil {
deliverSubject = strings.ReplaceAll(ext.DeliverPrefix+syncSubject(".M"), "..", ".")
} else {
deliverSubject = syncSubject("$JS.M")
}
if !isReset {
mset.mirror = &sourceInfo{name: mset.cfg.Mirror.Name, msgs: &inbound{mch: make(chan struct{}, 1)}}
}
if !mset.mirror.grr {
mset.mirror.grr = true
mset.srv.startGoRoutine(func() { mset.processMirrorMsgs() })
}
// We want to throttle here in terms of how fast we request new consumers.
if time.Since(mset.mirror.lreq) < 2*time.Second {
return nil
}
mset.mirror.lreq = time.Now()
// Now send off request to create/update our consumer. This will be all API based even in single server mode.
// We calculate durable names a priori so we do not need to save them off.
var state StreamState
mset.store.FastState(&state)
req := &CreateConsumerRequest{
Stream: mset.cfg.Mirror.Name,
Config: ConsumerConfig{
DeliverSubject: deliverSubject,
DeliverPolicy: DeliverByStartSequence,
OptStartSeq: state.LastSeq + 1,
AckPolicy: AckNone,
AckWait: 48 * time.Hour,
MaxDeliver: 1,
Heartbeat: sourceHealthCheckInterval,
FlowControl: true,
Direct: true,
},
}
// Only use start optionals on first time.
if state.Msgs == 0 && state.FirstSeq == 0 {
req.Config.OptStartSeq = 0
if mset.cfg.Mirror.OptStartSeq > 0 {
req.Config.OptStartSeq = mset.cfg.Mirror.OptStartSeq
} else if mset.cfg.Mirror.OptStartTime != nil {
req.Config.OptStartTime = mset.cfg.Mirror.OptStartTime
req.Config.DeliverPolicy = DeliverByStartTime
}
}
if req.Config.OptStartSeq == 0 && req.Config.OptStartTime == nil {
// If starting out and lastSeq is 0.
req.Config.DeliverPolicy = DeliverAll
}
respCh := make(chan *JSApiConsumerCreateResponse, 1)
reply := infoReplySubject()
crSub, _ := mset.subscribeInternal(reply, func(sub *subscription, c *client, subject, reply string, rmsg []byte) {
mset.unsubscribe(sub)
_, msg := c.msgParts(rmsg)
var ccr JSApiConsumerCreateResponse
if err := json.Unmarshal(msg, &ccr); err != nil {
c.Warnf("JetStream bad mirror consumer create response: %q", msg)
mset.cancelMirrorConsumer()
mset.setMirrorErr(jsInvalidJSONErr)
return
}
respCh <- &ccr
})
b, _ := json.Marshal(req)
subject := fmt.Sprintf(JSApiConsumerCreateT, mset.cfg.Mirror.Name)
if ext != nil {
subject = strings.Replace(subject, JSApiPrefix, ext.ApiPrefix, 1)
subject = strings.ReplaceAll(subject, "..", ".")
}
mset.outq.send(&jsPubMsg{subject, _EMPTY_, reply, nil, b, nil, 0, nil})
go func() {
select {
case ccr := <-respCh:
if ccr.Error != nil || ccr.ConsumerInfo == nil {
mset.cancelMirrorConsumer()
} else {
mset.mu.Lock()
// Mirror config has been removed.
if mset.mirror == nil {
mset.mu.Unlock()
mset.cancelMirrorConsumer()
return
}
// When an upstream stream expires messages, or in general has messages we want
// that are no longer available, we need to adjust here.
var state StreamState
mset.store.FastState(&state)
// Check if we need to skip messages.
if state.LastSeq != ccr.ConsumerInfo.Delivered.Stream {
mset.skipMsgs(state.LastSeq+1, ccr.ConsumerInfo.Delivered.Stream)
}
// Capture consumer name.
mset.mirror.cname = ccr.ConsumerInfo.Name
msgs := mset.mirror.msgs
// Process inbound mirror messages from the wire.
sub, err := mset.subscribeInternal(deliverSubject, func(sub *subscription, c *client, subject, reply string, rmsg []byte) {
hdr, msg := c.msgParts(append(rmsg[:0:0], rmsg...)) // Need to copy.
mset.queueInbound(msgs, subject, reply, hdr, msg)
})
if err != nil {
mset.mirror.err = jsError(err)
mset.mirror.sub = nil
mset.mirror.cname = _EMPTY_
} else {
mset.mirror.err = nil
mset.mirror.sub = sub
mset.mirror.last = time.Now()
mset.mirror.dseq = 0
mset.mirror.sseq = ccr.ConsumerInfo.Delivered.Stream
}
mset.mu.Unlock()
}
mset.setMirrorErr(ccr.Error)
case <-time.After(10 * time.Second):
mset.unsubscribe(crSub)
return
}
}()
return nil
}
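// streamSource returns the StreamSource configuration for the named upstream stream, if present.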
func (mset *stream) streamSource(sname string) *StreamSource {
for _, ssi := range mset.cfg.Sources {
if ssi.Name == sname {
return ssi
}
}
return nil
}
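// retrySourceConsumer recalculates the starting sequence for the named source
// and re-creates its internal consumer from that point.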
func (mset *stream) retrySourceConsumer(sname string) {
mset.mu.Lock()
defer mset.mu.Unlock()
si := mset.sources[sname]
if si == nil {
return
}
mset.setStartingSequenceForSource(sname)
seq := si.sseq + 1
mset.retrySourceConsumerAtSeq(sname, seq)
}
// Lock should be held.
func (mset *stream) retrySourceConsumerAtSeq(sname string, seq uint64) {
if mset.client == nil {
return
}
s := mset.srv
s.Debugf("Retrying source consumer for '%s > %s'", mset.acc.Name, mset.cfg.Name)
// No longer configured.
if si := mset.sources[sname]; si == nil {
return
}
mset.setSourceConsumer(sname, seq)
}
// Lock should be held.
func (mset *stream) cancelSourceConsumer(sname string) {
if si := mset.sources[sname]; si != nil && si.sub != nil {
mset.unsubscribe(si.sub)
si.sub = nil
si.sseq, si.dseq = 0, 0
mset.removeInternalConsumer(si)
}
}
// Lock should be held.
func (mset *stream) setSourceConsumer(sname string, seq uint64) {
si := mset.sources[sname]
if si == nil {
return
}
if si.sub != nil {
mset.unsubscribe(si.sub)
si.sub = nil
}
// Need to delete the old one.
mset.removeInternalConsumer(si)
si.sseq, si.dseq = seq, 0
si.last = time.Now()
ssi := mset.streamSource(sname)
// Determine subjects etc.
var deliverSubject string
ext := ssi.External
if ext != nil {
deliverSubject = strings.ReplaceAll(ext.DeliverPrefix+syncSubject(".S"), "..", ".")
} else {
deliverSubject = syncSubject("$JS.S")
}
if !si.grr {
si.grr = true
mset.srv.startGoRoutine(func() { mset.processSourceMsgs(si) })
}
// We want to throttle here in terms of how fast we request new consumers.
if time.Since(si.lreq) < 2*time.Second {
return
}
si.lreq = time.Now()
req := &CreateConsumerRequest{
Stream: sname,
Config: ConsumerConfig{
DeliverSubject: deliverSubject,
AckPolicy: AckNone,
AckWait: 48 * time.Hour,
MaxDeliver: 1,
Heartbeat: sourceHealthCheckInterval,
FlowControl: true,
Direct: true,
},
}
// If starting, check any configs.
if seq <= 1 {
if ssi.OptStartSeq > 0 {
req.Config.OptStartSeq = ssi.OptStartSeq
req.Config.DeliverPolicy = DeliverByStartSequence
} else if ssi.OptStartTime != nil {
req.Config.OptStartTime = ssi.OptStartTime
req.Config.DeliverPolicy = DeliverByStartTime
}
} else {
req.Config.OptStartSeq = seq
req.Config.DeliverPolicy = DeliverByStartSequence
}
// Filters
if ssi.FilterSubject != _EMPTY_ {
req.Config.FilterSubject = ssi.FilterSubject
}
respCh := make(chan *JSApiConsumerCreateResponse, 1)
reply := infoReplySubject()
crSub, _ := mset.subscribeInternal(reply, func(sub *subscription, c *client, subject, reply string, rmsg []byte) {
mset.unsubscribe(sub)
_, msg := c.msgParts(rmsg)
var ccr JSApiConsumerCreateResponse
if err := json.Unmarshal(msg, &ccr); err != nil {
c.Warnf("JetStream bad source consumer create response: %q", msg)
return
}
respCh <- &ccr
})
b, _ := json.Marshal(req)
subject := fmt.Sprintf(JSApiConsumerCreateT, sname)
if ext != nil {
subject = strings.Replace(subject, JSApiPrefix, ext.ApiPrefix, 1)
subject = strings.ReplaceAll(subject, "..", ".")
}
mset.outq.send(&jsPubMsg{subject, _EMPTY_, reply, nil, b, nil, 0, nil})
go func() {
select {
case ccr := <-respCh:
mset.mu.Lock()
if si := mset.sources[sname]; si != nil {
si.err = nil
if ccr.Error != nil || ccr.ConsumerInfo == nil {
mset.srv.Warnf("JetStream error response for create source consumer: %+v", ccr.Error)
si.err = ccr.Error
// We will retry every 10 seconds or so
mset.cancelSourceConsumer(sname)
} else {
if si.sseq != ccr.ConsumerInfo.Delivered.Stream {
si.sseq = ccr.ConsumerInfo.Delivered.Stream + 1
}
// Capture consumer name.
si.cname = ccr.ConsumerInfo.Name
// Now create sub to receive messages.
sub, err := mset.subscribeInternal(deliverSubject, func(sub *subscription, c *client, subject, reply string, rmsg []byte) {
hdr, msg := c.msgParts(append(rmsg[:0:0], rmsg...)) // Need to copy.
mset.queueInbound(si.msgs, subject, reply, hdr, msg)
})
if err != nil {
si.err = jsError(err)
si.sub = nil
} else {
si.err = nil
si.sub = sub
si.last = time.Now()
}
}
}
mset.mu.Unlock()
case <-time.After(10 * time.Second):
mset.unsubscribe(crSub)
return
}
}()
}
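// Will run as a Go routine to process source consumer messages,
// retrying the source consumer if it appears stalled.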
func (mset *stream) processSourceMsgs(si *sourceInfo) {
s := mset.srv
defer s.grWG.Done()
defer func() {
mset.mu.Lock()
si.grr = false
mset.mu.Unlock()
}()
if si == nil {
return
}
// Grab stream quit channel.
mset.mu.Lock()
msgs, mch, qch := si.msgs, si.msgs.mch, mset.qch
// Set the last seen as now so that we don't fail at the first check.
si.last = time.Now()
mset.mu.Unlock()
t := time.NewTicker(sourceHealthCheckInterval)
defer t.Stop()
for {
select {
case <-s.quitCh:
return
case <-qch:
return
case <-mch:
for im := mset.pending(msgs); im != nil; im = im.next {
if !mset.processInboundSourceMsg(si, im) {
break
}
}
case <-t.C:
mset.mu.RLock()
stalled := time.Since(si.last) > 3*sourceHealthCheckInterval
sname := si.name
mset.mu.RUnlock()
if stalled {
mset.retrySourceConsumer(sname)
}
}
}
}
// isControlMsg determines if this is a control message.
func (m *inMsg) isControlMsg() bool {
return len(m.msg) == 0 && len(m.hdr) > 0 && bytes.HasPrefix(m.hdr, []byte("NATS/1.0 100 "))
}
// handleFlowControl will properly handle flow control messages for both R1 and R>1.
// Lock should be held.
func (mset *stream) handleFlowControl(si *sourceInfo, m *inMsg) {
// If we are clustered we want to delay signaling back to the upstream consumer.
if node := mset.node; node != nil && si.clseq > 0 {
if mset.fcr == nil {
mset.fcr = make(map[uint64]string)
}
mset.fcr[si.clseq] = m.rply
} else {
mset.outq.send(&jsPubMsg{m.rply, _EMPTY_, _EMPTY_, nil, nil, nil, 0, nil})
}
}
// processInboundSourceMsg handles processing other stream messages bound for this stream.
func (mset *stream) processInboundSourceMsg(si *sourceInfo, m *inMsg) bool {
mset.mu.Lock()
if !mset.isLeader() {
mset.mu.Unlock()
mset.cancelSourceConsumer(si.name)
return false
}
isControl := m.isControlMsg()
// Ignore from old subscriptions.
if !si.isCurrentSub(m.rply) && !isControl {
mset.mu.Unlock()
return false
}
si.last = time.Now()
node := mset.node
// Check for heartbeats and flow control messages.
if isControl {
var needsRetry bool
// Flow controls have reply subjects.
if m.rply != _EMPTY_ {
mset.handleFlowControl(si, m)
} else {
// For idle heartbeats make sure we did not miss anything.
if ldseq := parseInt64(getHeader(JSLastConsumerSeq, m.hdr)); ldseq > 0 && uint64(ldseq) != si.dseq {
needsRetry = true
mset.retrySourceConsumerAtSeq(si.name, si.sseq+1)
}
}
mset.mu.Unlock()
return !needsRetry
}
sseq, dseq, dc, _, pending := replyInfo(m.rply)
if dc > 1 {
mset.mu.Unlock()
return false
}
// Tracking is done here.
if dseq == si.dseq+1 {
si.dseq++
si.sseq = sseq
} else if dseq > si.dseq {
if si.cname == _EMPTY_ {
si.cname = tokenAt(m.rply, 4)
si.dseq, si.sseq = dseq, sseq
} else {
mset.retrySourceConsumerAtSeq(si.name, si.sseq+1)
mset.mu.Unlock()
return false
}
} else {
mset.mu.Unlock()
return false
}
if pending == 0 {
si.lag = 0
} else {
si.lag = pending - 1
}
mset.mu.Unlock()
hdr, msg := m.hdr, m.msg
// If we are daisy chained here make sure to remove the original one.
if len(hdr) > 0 {
hdr = removeHeaderIfPresent(hdr, JSStreamSource)
}
// Hold onto the origin reply which has all the metadata.
hdr = genHeader(hdr, JSStreamSource, m.rply)
var err error
var clseq uint64
// If we are clustered we need to propose this message to the underlying raft group.
if node != nil {
clseq, err = mset.processClusteredInboundMsg(m.subj, _EMPTY_, hdr, msg)
if err == nil {
mset.mu.Lock()
si.clseq = clseq
mset.mu.Unlock()
}
} else {
err = mset.processJetStreamMsg(m.subj, _EMPTY_, hdr, msg, 0, 0)
}
if err != nil {
s := mset.srv
if err == errLastSeqMismatch {
mset.cancelSourceConsumer(si.name)
mset.retrySourceConsumer(si.name)
} else {
s.Warnf("JetStream got an error processing inbound source msg: %v", err)
}
if strings.Contains(err.Error(), "no space left") {
s.Errorf("JetStream out of space, will be DISABLED")
s.DisableJetStream()
}
}
return true
}
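// streamAndSeq extracts the stream name and stream sequence from a JetStream ack
// reply subject of the form $JS.ACK.<stream>.<consumer>.<delivery count>.<stream seq>.<consumer seq>.<timestamp>.<pending>.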
func streamAndSeq(subject string) (string, uint64) {
tsa := [expectedNumReplyTokens]string{}
start, tokens := 0, tsa[:0]
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
if len(tokens) != expectedNumReplyTokens || tokens[0] != "$JS" || tokens[1] != "ACK" {
return _EMPTY_, 0
}
return tokens[2], uint64(parseAckReplyNum(tokens[5]))
}
// Lock should be held.
func (mset *stream) setStartingSequenceForSource(sname string) {
si := mset.sources[sname]
if si == nil {
return
}
var state StreamState
mset.store.FastState(&state)
// Do not reset sseq here so we can remember when purge/expiration happens.
if state.Msgs == 0 {
si.dseq = 0
return
}
for seq := state.LastSeq; seq >= state.FirstSeq; seq-- {
_, hdr, _, _, err := mset.store.LoadMsg(seq)
if err != nil || len(hdr) == 0 {
continue
}
reply := getHeader(JSStreamSource, hdr)
if len(reply) == 0 {
continue
}
name, sseq := streamAndSeq(string(reply))
if name == sname {
si.sseq = sseq
si.dseq = 0
return
}
}
}
// This will do a reverse scan on startup or leader election
// searching for the starting sequence number.
// This can be slow in degenerative cases.
// Lock should be held.
func (mset *stream) startingSequenceForSources() {
mset.sources = make(map[string]*sourceInfo)
if len(mset.cfg.Sources) == 0 {
return
}
for _, ssi := range mset.cfg.Sources {
si := &sourceInfo{name: ssi.Name, msgs: &inbound{mch: make(chan struct{}, 1)}}
mset.sources[ssi.Name] = si
}
var state StreamState
mset.store.FastState(&state)
if state.Msgs == 0 {
return
}
// For short circuiting return.
expected := len(mset.cfg.Sources)
seqs := make(map[string]uint64)
// Stamp our si seq records on the way out.
defer func() {
for sname, seq := range seqs {
// Ignore if not set.
if seq == 0 {
continue
}
if si := mset.sources[sname]; si != nil {
si.sseq = seq
si.dseq = 0
}
}
}()
for seq := state.LastSeq; seq >= state.FirstSeq; seq-- {
_, hdr, _, _, err := mset.store.LoadMsg(seq)
if err != nil || len(hdr) == 0 {
continue
}
reply := getHeader(JSStreamSource, hdr)
if len(reply) == 0 {
continue
}
name, sseq := streamAndSeq(string(reply))
// Only update active in case we have older ones in here that got configured out.
if si := mset.sources[name]; si != nil {
if _, ok := seqs[name]; !ok {
seqs[name] = sseq
if len(seqs) == expected {
return
}
}
}
}
}
// Setup our source consumers.
// Lock should be held.
func (mset *stream) setupSourceConsumers() error {
if mset.outq == nil {
return errors.New("outq required")
}
// Reset if needed.
for _, si := range mset.sources {
if si.sub != nil {
mset.unsubscribe(si.sub)
mset.removeInternalConsumer(si)
}
}
mset.startingSequenceForSources()
// Setup our consumers at the proper starting position.
for _, ssi := range mset.cfg.Sources {
if si := mset.sources[ssi.Name]; si != nil {
mset.setSourceConsumer(ssi.Name, si.sseq+1)
}
}
return nil
}
// Will create internal subscriptions for the stream.
// Lock should be held.
func (mset *stream) subscribeToStream() error {
if mset.active {
return nil
}
for _, subject := range mset.cfg.Subjects {
if _, err := mset.subscribeInternal(subject, mset.processInboundJetStreamMsg); err != nil {
return err
}
}
// Check if we need to setup mirroring.
if mset.cfg.Mirror != nil {
if err := mset.setupMirrorConsumer(); err != nil {
return err
}
} else if len(mset.cfg.Sources) > 0 {
if err := mset.setupSourceConsumers(); err != nil {
return err
}
}
mset.active = true
return nil
}
// Stop our source consumers.
// Lock should be held.
func (mset *stream) stopSourceConsumers() {
for _, si := range mset.sources {
if si.sub != nil {
mset.unsubscribe(si.sub)
}
// Need to delete the old one.
mset.removeInternalConsumer(si)
}
}
// Lock should be held.
func (mset *stream) removeInternalConsumer(si *sourceInfo) {
if si == nil || si.cname == _EMPTY_ {
return
}
si.cname = _EMPTY_
}
// Will unsubscribe from the stream.
// Lock should be held.
func (mset *stream) unsubscribeToStream() error {
for _, subject := range mset.cfg.Subjects {
mset.unsubscribeInternal(subject)
}
if mset.mirror != nil {
if mset.mirror.sub != nil {
mset.unsubscribe(mset.mirror.sub)
}
mset.removeInternalConsumer(mset.mirror)
mset.mirror = nil
}
if len(mset.cfg.Sources) > 0 {
mset.stopSourceConsumers()
}
mset.active = false
return nil
}
// Lock should be held.
func (mset *stream) subscribeInternal(subject string, cb msgHandler) (*subscription, error) {
c := mset.client
if c == nil {
return nil, fmt.Errorf("invalid stream")
}
if !c.srv.eventsEnabled() {
return nil, ErrNoSysAccount
}
if cb == nil {
return nil, fmt.Errorf("undefined message handler")
}
mset.sid++
// Now create the subscription
return c.processSub([]byte(subject), nil, []byte(strconv.Itoa(mset.sid)), cb, false)
}
// Helper for unlocked stream.
func (mset *stream) subscribeInternalUnlocked(subject string, cb msgHandler) (*subscription, error) {
mset.mu.Lock()
defer mset.mu.Unlock()
return mset.subscribeInternal(subject, cb)
}
// This will unsubscribe us from the exact subject given.
// We do not currently track the subs so do not have the sid.
// This should be called only on an update.
// Lock should be held.
func (mset *stream) unsubscribeInternal(subject string) error {
c := mset.client
if c == nil {
return fmt.Errorf("invalid stream")
}
var sid []byte
c.mu.Lock()
for _, sub := range c.subs {
if subject == string(sub.subject) {
sid = sub.sid
break
}
}
c.mu.Unlock()
if sid != nil {
return c.processUnsub(sid)
}
return nil
}
// Lock should be held.
func (mset *stream) unsubscribe(sub *subscription) {
if sub == nil || mset.client == nil {
return
}
mset.client.processUnsub(sub.sid)
}
func (mset *stream) unsubscribeUnlocked(sub *subscription) {
mset.mu.Lock()
mset.unsubscribe(sub)
mset.mu.Unlock()
}
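// setupStore creates the backing message store (memory or file based) for this stream,
// stamps the creation time, and registers for storage update callbacks.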
func (mset *stream) setupStore(fsCfg *FileStoreConfig) error {
mset.mu.Lock()
mset.created = time.Now().UTC()
switch mset.cfg.Storage {
case MemoryStorage:
ms, err := newMemStore(&mset.cfg)
if err != nil {
mset.mu.Unlock()
return err
}
mset.store = ms
case FileStorage:
fs, err := newFileStoreWithCreated(*fsCfg, mset.cfg, mset.created)
if err != nil {
mset.mu.Unlock()
return err
}
mset.store = fs
}
mset.mu.Unlock()
mset.store.RegisterStorageUpdates(mset.storeUpdates)
return nil
}
// Clears out any filtered index from filestores.
func (mset *stream) clearFilterIndex() {
mset.mu.Lock()
defer mset.mu.Unlock()
if fs, ok := mset.store.(*fileStore); ok {
fs.clearFilterIndex()
}
}
// Called for any updates to the underlying stream. We pass through the bytes to the
// jetstream account. We do local processing for stream pending for consumers, but only
// for removals.
// Lock should not be held.
func (mset *stream) storeUpdates(md, bd int64, seq uint64, subj string) {
// If we have a single negative update then we will process our consumers for stream pending.
// Purge and Store handled separately inside individual calls.
if md == -1 && seq > 0 {
mset.mu.RLock()
for _, o := range mset.consumers {
o.decStreamPending(seq, subj)
}
mset.mu.RUnlock()
}
if mset.jsa != nil {
mset.jsa.updateUsage(mset.stype, bd)
}
}
// NumMsgIds returns the number of message ids being tracked for duplicate suppression.
func (mset *stream) numMsgIds() int {
mset.mu.RLock()
defer mset.mu.RUnlock()
return len(mset.ddmap)
}
// checkMsgId will process and check for duplicates.
// Lock should be held.
func (mset *stream) checkMsgId(id string) *ddentry {
if id == "" || mset.ddmap == nil {
return nil
}
return mset.ddmap[id]
}
// Will purge the entries that are past the window.
// Should be called from a timer.
func (mset *stream) purgeMsgIds() {
mset.mu.Lock()
defer mset.mu.Unlock()
now := time.Now().UnixNano()
tmrNext := mset.cfg.Duplicates
window := int64(tmrNext)
for i, dde := range mset.ddarr[mset.ddindex:] {
if now-dde.ts >= window {
delete(mset.ddmap, dde.id)
} else {
mset.ddindex += i
// Garbage collect here if the remaining entries are less than 1/3 of total capacity.
if cap(mset.ddarr) > 3*(len(mset.ddarr)-mset.ddindex) {
mset.ddarr = append([]*ddentry(nil), mset.ddarr[mset.ddindex:]...)
mset.ddindex = 0
}
tmrNext = time.Duration(window - (now - dde.ts))
break
}
}
if len(mset.ddmap) > 0 {
// Make sure to not fire too quick
const minFire = 50 * time.Millisecond
if tmrNext < minFire {
tmrNext = minFire
}
mset.ddtmr.Reset(tmrNext)
} else {
mset.ddtmr.Stop()
mset.ddtmr = nil
}
}
// storeMsgId will store the message id for duplicate detection.
func (mset *stream) storeMsgId(dde *ddentry) {
mset.mu.Lock()
if mset.ddmap == nil {
mset.ddmap = make(map[string]*ddentry)
}
if mset.ddtmr == nil {
mset.ddtmr = time.AfterFunc(mset.cfg.Duplicates, mset.purgeMsgIds)
}
mset.ddmap[dde.id] = dde
mset.ddarr = append(mset.ddarr, dde)
mset.mu.Unlock()
}
// Fast lookup of msgId.
func getMsgId(hdr []byte) string {
return string(getHeader(JSMsgId, hdr))
}
// Fast lookup of expected last msgId.
func getExpectedLastMsgId(hdr []byte) string {
return string(getHeader(JSExpectedLastMsgId, hdr))
}
// Fast lookup of expected stream.
func getExpectedStream(hdr []byte) string {
return string(getHeader(JSExpectedStream, hdr))
}
// Fast lookup of expected stream.
func getExpectedLastSeq(hdr []byte) uint64 {
bseq := getHeader(JSExpectedLastSeq, hdr)
if len(bseq) == 0 {
return 0
}
return uint64(parseInt64(bseq))
}
// Lock should be held.
func (mset *stream) isClustered() bool {
return mset.node != nil
}
// Used if we have to queue things internally to avoid the route/gw path.
type inMsg struct {
subj string
rply string
hdr []byte
msg []byte
next *inMsg
}
// Linked list for inbound messages.
type inbound struct {
head *inMsg
tail *inMsg
mch chan struct{}
}
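// pending detaches and returns the head of the inbound message list,
// leaving the list empty so the caller can walk it without the lock.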
func (mset *stream) pending(msgs *inbound) *inMsg {
mset.mu.Lock()
head := msgs.head
msgs.head, msgs.tail = nil, nil
mset.mu.Unlock()
return head
}
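// queueInbound appends a message to the given inbound list and signals
// the processing Go routine if the list was previously empty.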
func (mset *stream) queueInbound(ib *inbound, subj, rply string, hdr, msg []byte) {
m := &inMsg{subj, rply, hdr, msg, nil}
mset.mu.Lock()
var notify bool
if ib.head == nil {
ib.head = m
notify = true
} else {
ib.tail.next = m
}
ib.tail = m
mch := ib.mch
mset.mu.Unlock()
if notify {
select {
case mch <- struct{}{}:
default:
}
}
}
func (mset *stream) queueInboundMsg(subj, rply string, hdr, msg []byte) {
// Copy these.
if len(hdr) > 0 {
hdr = append(hdr[:0:0], hdr...)
}
if len(msg) > 0 {
msg = append(msg[:0:0], msg...)
}
mset.queueInbound(mset.msgs, subj, rply, hdr, msg)
}
// processInboundJetStreamMsg handles processing messages bound for a stream.
func (mset *stream) processInboundJetStreamMsg(_ *subscription, c *client, subject, reply string, rmsg []byte) {
mset.mu.RLock()
isLeader, isClustered := mset.isLeader(), mset.node != nil
mset.mu.RUnlock()
// If we are not the leader just ignore.
if !isLeader {
return
}
hdr, msg := c.msgParts(rmsg)
// If we are not receiving directly from a client we should move this to another Go routine.
if c.kind != CLIENT {
mset.queueInboundMsg(subject, reply, hdr, msg)
return
}
// If we are clustered we need to propose this message to the underlying raft group.
if isClustered {
mset.processClusteredInboundMsg(subject, reply, hdr, msg)
} else {
mset.processJetStreamMsg(subject, reply, hdr, msg, 0, 0)
}
}
var errLastSeqMismatch = errors.New("last sequence mismatch")
// processJetStreamMsg is where we try to actually process the stream msg.
func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, lseq uint64, ts int64) error {
mset.mu.Lock()
store := mset.store
c, s := mset.client, mset.srv
if c == nil {
mset.mu.Unlock()
return nil
}
var accName string
if mset.acc != nil {
accName = mset.acc.Name
}
doAck, pubAck := !mset.cfg.NoAck, mset.pubAck
js, jsa := mset.js, mset.jsa
name, stype := mset.cfg.Name, mset.cfg.Storage
maxMsgSize := int(mset.cfg.MaxMsgSize)
numConsumers := len(mset.consumers)
interestRetention := mset.cfg.Retention == InterestPolicy
// Snapshot if we are the leader and if we can respond.
isLeader := mset.isLeader()
canRespond := doAck && len(reply) > 0 && isLeader
var resp = &JSPubAckResponse{}
// For clustering the lower layers will pass our expected lseq. If it is present check for that here.
if lseq > 0 && lseq != (mset.lseq+mset.clfs) {
isMisMatch := true
// If this is our first message for this mirror, see if we have to adjust our starting sequence.
if mset.cfg.Mirror != nil {
var state StreamState
mset.store.FastState(&state)
if state.FirstSeq == 0 {
mset.store.Compact(lseq + 1)
mset.lseq = lseq
isMisMatch = false
}
}
// Really is a mismatch.
if isMisMatch {
outq := mset.outq
mset.mu.Unlock()
if canRespond && outq != nil {
resp.PubAck = &PubAck{Stream: name}
resp.Error = &ApiError{Code: 503, Description: "expected stream sequence does not match"}
b, _ := json.Marshal(resp)
outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil})
}
return errLastSeqMismatch
}
}
// If we have received this message across an account we may have request information attached.
// For now remove. TODO(dlc) - Should this be opt-in or opt-out?
if len(hdr) > 0 {
hdr = removeHeaderIfPresent(hdr, ClientInfoHdr)
}
// Process additional msg headers if still present.
var msgId string
if len(hdr) > 0 {
msgId = getMsgId(hdr)
outq := mset.outq
if dde := mset.checkMsgId(msgId); dde != nil {
mset.clfs++
mset.mu.Unlock()
if canRespond {
response := append(pubAck, strconv.FormatUint(dde.seq, 10)...)
response = append(response, ",\"duplicate\": true}"...)
outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0, nil})
}
return errors.New("msgid is duplicate")
}
// Expected stream.
if sname := getExpectedStream(hdr); sname != _EMPTY_ && sname != name {
mset.clfs++
mset.mu.Unlock()
if canRespond {
resp.PubAck = &PubAck{Stream: name}
resp.Error = &ApiError{Code: 400, Description: "expected stream does not match"}
b, _ := json.Marshal(resp)
outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil})
}
return errors.New("expected stream does not match")
}
// Expected last sequence.
if seq := getExpectedLastSeq(hdr); seq > 0 && seq != mset.lseq {
mlseq := mset.lseq
mset.clfs++
mset.mu.Unlock()
if canRespond {
resp.PubAck = &PubAck{Stream: name}
resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("wrong last sequence: %d", mlseq)}
b, _ := json.Marshal(resp)
outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil})
}
return fmt.Errorf("last sequence mismatch: %d vs %d", seq, mlseq)
}
// Expected last msgId.
if lmsgId := getExpectedLastMsgId(hdr); lmsgId != _EMPTY_ && lmsgId != mset.lmsgId {
last := mset.lmsgId
mset.clfs++
mset.mu.Unlock()
if canRespond {
resp.PubAck = &PubAck{Stream: name}
resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("wrong last msg ID: %s", last)}
b, _ := json.Marshal(resp)
outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil})
}
return fmt.Errorf("last msgid mismatch: %q vs %q", lmsgId, last)
}
}
// Response Ack.
var (
response []byte
seq uint64
err error
)
// Check to see if we are over the max msg size.
if maxMsgSize >= 0 && (len(hdr)+len(msg)) > maxMsgSize {
mset.clfs++
mset.mu.Unlock()
if canRespond {
resp.PubAck = &PubAck{Stream: name}
resp.Error = &ApiError{Code: 400, Description: "message size exceeds maximum allowed"}
b, _ := json.Marshal(resp)
mset.outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil})
}
return ErrMaxPayload
}
// Check to see if we have exceeded our limits.
if js.limitsExceeded(stype) {
s.resourcesExeededError()
mset.clfs++
mset.mu.Unlock()
if canRespond {
resp.PubAck = &PubAck{Stream: name}
resp.Error = jsInsufficientErr
b, _ := json.Marshal(resp)
mset.outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil})
}
// Stepdown regardless.
if node := mset.raftNode(); node != nil {
node.StepDown()
}
return ErrJetStreamResourcesExceeded
}
var noInterest bool
// If we are interest based retention and have no consumers then we can skip.
if interestRetention {
if numConsumers == 0 {
noInterest = true
} else if mset.numFilter > 0 {
// Assume none.
noInterest = true
for _, o := range mset.consumers {
if o.cfg.FilterSubject != _EMPTY_ && subjectIsSubsetMatch(subject, o.cfg.FilterSubject) {
noInterest = false
break
}
}
}
}
// Grab timestamp if not already set.
if ts == 0 && lseq > 0 {
ts = time.Now().UnixNano()
}
// Skip msg here.
if noInterest {
mset.lseq = store.SkipMsg()
mset.lmsgId = msgId
mset.mu.Unlock()
if canRespond {
response = append(pubAck, strconv.FormatUint(mset.lseq, 10)...)
response = append(response, '}')
mset.outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0, nil})
}
// If we have a msgId make sure to save.
if msgId != _EMPTY_ {
mset.storeMsgId(&ddentry{msgId, seq, ts})
}
return nil
}
// If here we will attempt to store the message.
// Assume this will succeed.
olmsgId := mset.lmsgId
mset.lmsgId = msgId
mset.lseq++
// We hold the lock to this point to make sure nothing gets between us since we check for pre-conditions.
	// We currently cannot hold the lock while calling the store because we have inline storage update calls that may need it.
	// Note that upstream code that sets seq/ts should be serialized as much as possible.
mset.mu.Unlock()
// Store actual msg.
if lseq == 0 && ts == 0 {
seq, ts, err = store.StoreMsg(subject, hdr, msg)
} else {
seq = lseq + 1
err = store.StoreRawMsg(subject, hdr, msg, seq, ts)
}
if err != nil {
// If we did not succeed put those values back.
mset.mu.Lock()
var state StreamState
mset.store.FastState(&state)
mset.lseq = state.LastSeq
mset.lmsgId = olmsgId
mset.mu.Unlock()
if err != ErrStoreClosed {
s.Errorf("JetStream failed to store a msg on stream '%s > %s' - %v", accName, name, err)
}
if canRespond {
resp.PubAck = &PubAck{Stream: name}
resp.Error = &ApiError{Code: 503, Description: err.Error()}
response, _ = json.Marshal(resp)
}
} else if jsa.limitsExceeded(stype) {
s.Warnf("JetStream resource limits exceeded for account: %q", accName)
if canRespond {
resp.PubAck = &PubAck{Stream: name}
resp.Error = &ApiError{Code: 400, Description: "resource limits exceeded for account"}
response, _ = json.Marshal(resp)
}
// If we did not succeed put those values back.
mset.mu.Lock()
var state StreamState
mset.store.FastState(&state)
mset.lseq = state.LastSeq
mset.lmsgId = olmsgId
mset.mu.Unlock()
store.RemoveMsg(seq)
seq = 0
} else {
// If we have a msgId make sure to save.
if msgId != _EMPTY_ {
mset.storeMsgId(&ddentry{msgId, seq, ts})
}
if canRespond {
response = append(pubAck, strconv.FormatUint(seq, 10)...)
response = append(response, '}')
}
}
// Send response here.
if canRespond {
mset.outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0, nil})
}
if err == nil && seq > 0 && numConsumers > 0 {
mset.mu.Lock()
for _, o := range mset.consumers {
o.mu.Lock()
if o.isLeader() {
if o.isFilteredMatch(subject) {
o.sgap++
}
o.signalNewMessages()
}
o.mu.Unlock()
}
mset.mu.Unlock()
}
return err
}
// Internal message for use by jetstream subsystem.
type jsPubMsg struct {
subj string
dsubj string
reply string
hdr []byte
msg []byte
o *consumer
seq uint64
next *jsPubMsg
}
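// size returns the number of bytes this internal message accounts for (subject, reply, header and payload).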
func (pm *jsPubMsg) size() int {
if pm == nil {
return 0
}
return len(pm.subj) + len(pm.reply) + len(pm.hdr) + len(pm.msg)
}
// Forms a linked list for sending internal system messages.
type jsOutQ struct {
mu sync.Mutex
mch chan struct{}
head *jsPubMsg
tail *jsPubMsg
}
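// pending atomically detaches the current list from the queue and returns its head; callers walk it via the next pointers.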
func (q *jsOutQ) pending() *jsPubMsg {
if q == nil {
return nil
}
q.mu.Lock()
head := q.head
q.head, q.tail = nil, nil
q.mu.Unlock()
return head
}
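// send appends msg to the queue and signals the internal loop if the queue was previously empty.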
func (q *jsOutQ) send(msg *jsPubMsg) {
if q == nil || msg == nil {
return
}
q.mu.Lock()
var notify bool
if q.head == nil {
q.head = msg
notify = true
} else {
q.tail.next = msg
}
q.tail = msg
q.mu.Unlock()
if notify {
select {
case q.mch <- struct{}{}:
default:
}
}
}
// StoredMsg is for raw access to messages in a stream.
type StoredMsg struct {
Subject string `json:"subject"`
Sequence uint64 `json:"seq"`
Header []byte `json:"hdrs,omitempty"`
Data []byte `json:"data,omitempty"`
Time time.Time `json:"time"`
}
// This is similar to the system semantics, but we did not want to overload the single system sendq,
// or require the system account when doing a simple setup with jetstream.
func (mset *stream) setupSendCapabilities() {
mset.mu.Lock()
defer mset.mu.Unlock()
if mset.outq != nil {
return
}
mset.outq = &jsOutQ{mch: make(chan struct{}, 1)}
go mset.internalLoop()
}
// Name returns the stream name.
func (mset *stream) name() string {
if mset == nil {
return _EMPTY_
}
mset.mu.RLock()
defer mset.mu.RUnlock()
return mset.cfg.Name
}
// Returns a copy of the interest subjects for this stream.
func (mset *stream) subjects() []string {
mset.mu.RLock()
defer mset.mu.RUnlock()
if len(mset.cfg.Subjects) == 0 {
return nil
}
return append(mset.cfg.Subjects[:0:0], mset.cfg.Subjects...)
}
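// internalLoop runs on its own goroutine and drains the stream's outbound queue, inbound messages
// and removal requests until the quit channel or server shutdown channel closes.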
func (mset *stream) internalLoop() {
mset.mu.RLock()
s := mset.srv
c := s.createInternalJetStreamClient()
c.registerWithAccount(mset.acc)
defer c.closeConnection(ClientClosed)
outq, qch, mch, rmch := mset.outq, mset.qch, mset.msgs.mch, mset.rmch
isClustered := mset.cfg.Replicas > 1
mset.mu.RUnlock()
for {
select {
case <-outq.mch:
for pm := outq.pending(); pm != nil; pm = pm.next {
c.pa.subject = []byte(pm.subj)
c.pa.deliver = []byte(pm.dsubj)
c.pa.size = len(pm.msg) + len(pm.hdr)
c.pa.szb = []byte(strconv.Itoa(c.pa.size))
c.pa.reply = []byte(pm.reply)
var msg []byte
if len(pm.hdr) > 0 {
c.pa.hdr = len(pm.hdr)
c.pa.hdb = []byte(strconv.Itoa(c.pa.hdr))
msg = append(pm.hdr, pm.msg...)
msg = append(msg, _CRLF_...)
} else {
c.pa.hdr = -1
c.pa.hdb = nil
msg = append(pm.msg, _CRLF_...)
}
didDeliver, _ := c.processInboundClientMsg(msg)
c.pa.szb = nil
// Check to see if this is a delivery for a consumer and
// we failed to deliver the message. If so alert the consumer.
if pm.o != nil && pm.seq > 0 && !didDeliver {
pm.o.didNotDeliver(pm.seq)
}
}
c.flushClients(10 * time.Millisecond)
case <-mch:
for im := mset.pending(mset.msgs); im != nil; im = im.next {
// If we are clustered we need to propose this message to the underlying raft group.
if isClustered {
mset.processClusteredInboundMsg(im.subj, im.rply, im.hdr, im.msg)
} else {
mset.processJetStreamMsg(im.subj, im.rply, im.hdr, im.msg, 0, 0)
}
}
case seq := <-rmch:
mset.store.RemoveMsg(seq)
case <-qch:
return
case <-s.quitCh:
return
}
}
}
// Internal function to delete a stream.
func (mset *stream) delete() error {
return mset.stop(true, true)
}
// Internal function to stop or delete the stream.
func (mset *stream) stop(deleteFlag, advisory bool) error {
mset.mu.RLock()
jsa := mset.jsa
mset.mu.RUnlock()
if jsa == nil {
return ErrJetStreamNotEnabledForAccount
}
// Remove from our account map.
jsa.mu.Lock()
delete(jsa.streams, mset.cfg.Name)
jsa.mu.Unlock()
// Clean up consumers.
mset.mu.Lock()
var obs []*consumer
for _, o := range mset.consumers {
obs = append(obs, o)
}
// Check if we are a mirror.
if mset.mirror != nil && mset.mirror.sub != nil {
mset.unsubscribe(mset.mirror.sub)
mset.mirror.sub = nil
mset.removeInternalConsumer(mset.mirror)
}
// Now check for sources.
if len(mset.sources) > 0 {
for _, si := range mset.sources {
mset.cancelSourceConsumer(si.name)
}
}
mset.mu.Unlock()
for _, o := range obs {
// Second flag says do not broadcast to signal.
// TODO(dlc) - If we have an err here we don't want to stop
// but should we log?
o.stopWithFlags(deleteFlag, false, advisory)
}
mset.mu.Lock()
// Stop responding to sync requests.
mset.stopClusterSubs()
// Unsubscribe from direct stream.
mset.unsubscribeToStream()
// Our info sub if we spun it up.
if mset.infoSub != nil {
mset.srv.sysUnsubscribe(mset.infoSub)
mset.infoSub = nil
}
// Quit channel.
if mset.qch != nil {
close(mset.qch)
mset.qch = nil
}
// Cluster cleanup
if n := mset.node; n != nil {
if deleteFlag {
n.Delete()
} else {
n.Stop()
}
}
// Send stream delete advisory after the consumers.
if deleteFlag && advisory {
mset.sendDeleteAdvisoryLocked()
}
c := mset.client
mset.client = nil
if c == nil {
mset.mu.Unlock()
return nil
}
// Cleanup duplicate timer if running.
if mset.ddtmr != nil {
mset.ddtmr.Stop()
mset.ddtmr = nil
mset.ddarr = nil
mset.ddmap = nil
}
sysc := mset.sysc
mset.sysc = nil
// Clustered cleanup.
mset.mu.Unlock()
c.closeConnection(ClientClosed)
if sysc != nil {
sysc.closeConnection(ClientClosed)
}
if mset.store == nil {
return nil
}
if deleteFlag {
if err := mset.store.Delete(); err != nil {
return err
}
} else if err := mset.store.Stop(); err != nil {
return err
}
return nil
}
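// getMsg loads the message at the given sequence from the store and wraps it in a StoredMsg.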
func (mset *stream) getMsg(seq uint64) (*StoredMsg, error) {
subj, hdr, msg, ts, err := mset.store.LoadMsg(seq)
if err != nil {
return nil, err
}
sm := &StoredMsg{
Subject: subj,
Sequence: seq,
Header: hdr,
Data: msg,
Time: time.Unix(0, ts).UTC(),
}
return sm, nil
}
// Consumers will return all the current consumers for this stream.
func (mset *stream) getConsumers() []*consumer {
mset.mu.Lock()
defer mset.mu.Unlock()
var obs []*consumer
for _, o := range mset.consumers {
obs = append(obs, o)
}
return obs
}
// NumConsumers reports on number of active consumers for this stream.
func (mset *stream) numConsumers() int {
mset.mu.Lock()
defer mset.mu.Unlock()
return len(mset.consumers)
}
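// setConsumer registers the consumer with this stream and tracks filtered consumers. Lock should be held.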
func (mset *stream) setConsumer(o *consumer) {
mset.consumers[o.name] = o
if o.cfg.FilterSubject != _EMPTY_ {
mset.numFilter++
}
}
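// removeConsumer deregisters the consumer from this stream. Lock should be held.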
func (mset *stream) removeConsumer(o *consumer) {
if o.cfg.FilterSubject != _EMPTY_ {
mset.numFilter--
}
delete(mset.consumers, o.name)
}
// lookupConsumer will retrieve a consumer by name.
func (mset *stream) lookupConsumer(name string) *consumer {
mset.mu.Lock()
defer mset.mu.Unlock()
return mset.consumers[name]
}
// State will return the current state for this stream.
func (mset *stream) state() StreamState {
return mset.stateWithDetail(false)
}
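// stateWithDetail returns the current state from the store, stripping the deleted-sequence list unless details are requested.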
func (mset *stream) stateWithDetail(details bool) StreamState {
mset.mu.RLock()
c, store := mset.client, mset.store
mset.mu.RUnlock()
if c == nil || store == nil {
return StreamState{}
}
// Currently rely on store.
state := store.State()
if !details {
state.Deleted = nil
}
return state
}
// Determines if the new proposed partition is unique amongst all consumers.
// Lock should be held.
func (mset *stream) partitionUnique(partition string) bool {
for _, o := range mset.consumers {
if o.cfg.FilterSubject == _EMPTY_ {
return false
}
if subjectIsSubsetMatch(partition, o.cfg.FilterSubject) {
return false
}
}
return true
}
// Lock should be held.
func (mset *stream) checkInterest(seq uint64, obs *consumer) bool {
for _, o := range mset.consumers {
if o != obs && o.needAck(seq) {
return true
}
}
return false
}
// ackMsg is called into from a consumer when we have a WorkQueue or Interest retention policy.
func (mset *stream) ackMsg(obs *consumer, seq uint64) {
switch mset.cfg.Retention {
case LimitsPolicy:
return
case WorkQueuePolicy:
mset.store.RemoveMsg(seq)
case InterestPolicy:
mset.mu.Lock()
hasInterest := mset.checkInterest(seq, obs)
mset.mu.Unlock()
if !hasInterest {
mset.store.RemoveMsg(seq)
}
}
}
// Snapshot creates a snapshot for the stream and possibly consumers.
func (mset *stream) snapshot(deadline time.Duration, checkMsgs, includeConsumers bool) (*SnapshotResult, error) {
mset.mu.RLock()
if mset.client == nil || mset.store == nil {
mset.mu.RUnlock()
return nil, fmt.Errorf("invalid stream")
}
store := mset.store
mset.mu.RUnlock()
return store.Snapshot(deadline, checkMsgs, includeConsumers)
}
const snapsDir = "__snapshots__"
// RestoreStream will restore a stream from a snapshot.
func (a *Account) RestoreStream(ncfg *StreamConfig, r io.Reader) (*stream, error) {
if ncfg == nil {
return nil, errors.New("nil config on stream restore")
}
cfg, err := checkStreamCfg(ncfg)
if err != nil {
return nil, err
}
_, jsa, err := a.checkForJetStream()
if err != nil {
return nil, err
}
sd := path.Join(jsa.storeDir, snapsDir)
if _, err := os.Stat(sd); os.IsNotExist(err) {
if err := os.MkdirAll(sd, 0755); err != nil {
return nil, fmt.Errorf("could not create snapshots directory - %v", err)
}
}
sdir, err := ioutil.TempDir(sd, "snap-")
if err != nil {
return nil, err
}
if _, err := os.Stat(sdir); os.IsNotExist(err) {
if err := os.MkdirAll(sdir, 0755); err != nil {
return nil, fmt.Errorf("could not create snapshots directory - %v", err)
}
}
defer os.RemoveAll(sdir)
tr := tar.NewReader(s2.NewReader(r))
for {
hdr, err := tr.Next()
if err == io.EOF {
break // End of snapshot
}
if err != nil {
return nil, err
}
fpath := path.Join(sdir, filepath.Clean(hdr.Name))
pdir := filepath.Dir(fpath)
os.MkdirAll(pdir, 0750)
fd, err := os.OpenFile(fpath, os.O_CREATE|os.O_RDWR, 0600)
if err != nil {
return nil, err
}
_, err = io.Copy(fd, tr)
fd.Close()
if err != nil {
return nil, err
}
}
// Check metadata.
// The cfg passed in will be the new identity for the stream.
var fcfg FileStreamInfo
b, err := ioutil.ReadFile(path.Join(sdir, JetStreamMetaFile))
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &fcfg); err != nil {
return nil, err
}
// See if this stream already exists.
if _, err := a.lookupStream(cfg.Name); err == nil {
return nil, ErrJetStreamStreamAlreadyUsed
}
// Move into the correct place here.
ndir := path.Join(jsa.storeDir, streamsDir, cfg.Name)
	// Remove the old one if for some reason it is here.
if _, err := os.Stat(ndir); !os.IsNotExist(err) {
os.RemoveAll(ndir)
}
// Move into new location.
if err := os.Rename(sdir, ndir); err != nil {
return nil, err
}
if cfg.Template != _EMPTY_ {
if err := jsa.addStreamNameToTemplate(cfg.Template, cfg.Name); err != nil {
return nil, err
}
}
mset, err := a.addStream(&cfg)
if err != nil {
return nil, err
}
if !fcfg.Created.IsZero() {
mset.setCreatedTime(fcfg.Created)
}
// Now do consumers.
odir := path.Join(ndir, consumerDir)
ofis, _ := ioutil.ReadDir(odir)
for _, ofi := range ofis {
metafile := path.Join(odir, ofi.Name(), JetStreamMetaFile)
metasum := path.Join(odir, ofi.Name(), JetStreamMetaFileSum)
if _, err := os.Stat(metafile); os.IsNotExist(err) {
mset.stop(true, false)
return nil, fmt.Errorf("error restoring consumer [%q]: %v", ofi.Name(), err)
}
buf, err := ioutil.ReadFile(metafile)
if err != nil {
mset.stop(true, false)
return nil, fmt.Errorf("error restoring consumer [%q]: %v", ofi.Name(), err)
}
if _, err := os.Stat(metasum); os.IsNotExist(err) {
mset.stop(true, false)
return nil, fmt.Errorf("error restoring consumer [%q]: %v", ofi.Name(), err)
}
var cfg FileConsumerInfo
if err := json.Unmarshal(buf, &cfg); err != nil {
mset.stop(true, false)
return nil, fmt.Errorf("error restoring consumer [%q]: %v", ofi.Name(), err)
}
isEphemeral := !isDurableConsumer(&cfg.ConsumerConfig)
if isEphemeral {
			// This is an ephemeral consumer and this could fail on restart until
// the consumer can reconnect. We will create it as a durable and switch it.
cfg.ConsumerConfig.Durable = ofi.Name()
}
obs, err := mset.addConsumer(&cfg.ConsumerConfig)
if err != nil {
mset.stop(true, false)
return nil, fmt.Errorf("error restoring consumer [%q]: %v", ofi.Name(), err)
}
if isEphemeral {
obs.switchToEphemeral()
}
if !cfg.Created.IsZero() {
obs.setCreatedTime(cfg.Created)
}
obs.mu.Lock()
err = obs.readStoredState()
obs.mu.Unlock()
if err != nil {
mset.stop(true, false)
return nil, fmt.Errorf("error restoring consumer [%q]: %v", ofi.Name(), err)
}
}
return mset, nil
}
| 1 | 12,995 | and is it possible that mset.mirror.msgs is nil? because if so, then you would also get a panic because of that. | nats-io-nats-server | go |
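The review comment above is about a nil dereference: if either mset.mirror or mset.mirror.msgs is nil when the patched code touches them, the server would panic. The snippet below is a minimal, hypothetical sketch of the guard being asked for; the types, fields and the signalling code are stand-ins, not the real nats-server definitions, since the patched lines are not shown in this row.

package main

// Stand-in types that only mimic the shape of the fields mentioned in the
// review (mset.mirror and mset.mirror.msgs); they are not the real ones.
type inboundQueue struct{ mch chan struct{} }

type sourceInfo struct{ msgs *inboundQueue }

type stream struct{ mirror *sourceInfo }

// signalMirror shows the guard the reviewer suggests: check both pointers
// before touching the queue so a stream without a mirror (or with an
// uninitialized queue) cannot cause a panic.
func (mset *stream) signalMirror() {
	if mset.mirror == nil || mset.mirror.msgs == nil {
		return
	}
	select {
	case mset.mirror.msgs.mch <- struct{}{}:
	default:
	}
}

func main() {
	var mset stream // mirror is nil here; signalMirror must simply return
	mset.signalMirror()
}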
@@ -258,6 +258,7 @@ static void surface_move_state(struct wlr_surface *surface,
}
if ((next->invalid & WLR_SURFACE_INVALID_OPAQUE_REGION)) {
// TODO: process buffer
+ pixman_region32_copy(&state->opaque, &next->opaque);
pixman_region32_clear(&next->opaque);
}
 	if ((next->invalid & WLR_SURFACE_INVALID_INPUT_REGION)) {
| 1 |
#include <assert.h>
#include <stdlib.h>
#include <wayland-server.h>
#include <wlr/render/interface.h>
#include <wlr/types/wlr_buffer.h>
#include <wlr/types/wlr_compositor.h>
#include <wlr/types/wlr_matrix.h>
#include <wlr/types/wlr_region.h>
#include <wlr/types/wlr_surface.h>
#include <wlr/util/log.h>
#include <wlr/util/region.h>
#include "util/signal.h"
#define CALLBACK_VERSION 1
#define SURFACE_VERSION 4
#define SUBSURFACE_VERSION 1
static int min(int fst, int snd) {
if (fst < snd) {
return fst;
} else {
return snd;
}
}
static int max(int fst, int snd) {
if (fst > snd) {
return fst;
} else {
return snd;
}
}
static void surface_state_reset_buffer(struct wlr_surface_state *state) {
if (state->buffer) {
wl_list_remove(&state->buffer_destroy_listener.link);
state->buffer = NULL;
}
}
static void surface_handle_buffer_destroy(struct wl_listener *listener,
void *data) {
struct wlr_surface_state *state =
wl_container_of(listener, state, buffer_destroy_listener);
surface_state_reset_buffer(state);
}
static void surface_state_set_buffer(struct wlr_surface_state *state,
struct wl_resource *buffer) {
surface_state_reset_buffer(state);
state->buffer = buffer;
if (buffer) {
wl_resource_add_destroy_listener(buffer,
&state->buffer_destroy_listener);
state->buffer_destroy_listener.notify = surface_handle_buffer_destroy;
}
}
static void surface_destroy(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static void surface_attach(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *buffer, int32_t sx, int32_t sy) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_BUFFER;
surface->pending->sx = sx;
surface->pending->sy = sy;
surface_state_set_buffer(surface->pending, buffer);
}
static void surface_damage(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width, int32_t height) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending->invalid |= WLR_SURFACE_INVALID_SURFACE_DAMAGE;
pixman_region32_union_rect(&surface->pending->surface_damage,
&surface->pending->surface_damage,
x, y, width, height);
}
static void callback_handle_resource_destroy(struct wl_resource *resource) {
wl_list_remove(wl_resource_get_link(resource));
}
static void surface_frame(struct wl_client *client,
struct wl_resource *resource, uint32_t callback) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
struct wl_resource *callback_resource = wl_resource_create(client,
&wl_callback_interface, CALLBACK_VERSION, callback);
if (callback_resource == NULL) {
wl_resource_post_no_memory(resource);
return;
}
wl_resource_set_implementation(callback_resource, NULL, NULL,
callback_handle_resource_destroy);
wl_list_insert(surface->pending->frame_callback_list.prev,
wl_resource_get_link(callback_resource));
surface->pending->invalid |= WLR_SURFACE_INVALID_FRAME_CALLBACK_LIST;
}
static void surface_set_opaque_region(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *region_resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
if ((surface->pending->invalid & WLR_SURFACE_INVALID_OPAQUE_REGION)) {
pixman_region32_clear(&surface->pending->opaque);
}
surface->pending->invalid |= WLR_SURFACE_INVALID_OPAQUE_REGION;
if (region_resource) {
pixman_region32_t *region = wlr_region_from_resource(region_resource);
pixman_region32_copy(&surface->pending->opaque, region);
} else {
pixman_region32_clear(&surface->pending->opaque);
}
}
static void surface_set_input_region(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *region_resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_INPUT_REGION;
if (region_resource) {
pixman_region32_t *region = wlr_region_from_resource(region_resource);
pixman_region32_copy(&surface->pending->input, region);
} else {
pixman_region32_fini(&surface->pending->input);
pixman_region32_init_rect(&surface->pending->input,
INT32_MIN, INT32_MIN, UINT32_MAX, UINT32_MAX);
}
}
static bool surface_update_size(struct wlr_surface *surface,
struct wlr_surface_state *state) {
if (!state->buffer) {
pixman_region32_union_rect(&state->surface_damage,
&state->surface_damage, 0, 0, state->width, state->height);
state->height = 0;
state->width = 0;
return true;
}
int scale = state->scale;
enum wl_output_transform transform = state->transform;
wlr_buffer_get_resource_size(state->buffer, surface->renderer,
&state->buffer_width, &state->buffer_height);
int width = state->buffer_width / scale;
int height = state->buffer_height / scale;
if (transform == WL_OUTPUT_TRANSFORM_90 ||
transform == WL_OUTPUT_TRANSFORM_270 ||
transform == WL_OUTPUT_TRANSFORM_FLIPPED_90 ||
transform == WL_OUTPUT_TRANSFORM_FLIPPED_270) {
int tmp = width;
width = height;
height = tmp;
}
bool update_damage = false;
if (width != state->width || height != state->height) {
// Damage the whole surface on resize
// This isn't in the spec, but Weston does it and QT expects it
pixman_region32_union_rect(&state->surface_damage,
&state->surface_damage, 0, 0, state->width, state->height);
pixman_region32_union_rect(&state->surface_damage,
&state->surface_damage, 0, 0, width, height);
update_damage = true;
}
state->width = width;
state->height = height;
return update_damage;
}
/**
* Append pending state to current state and clear pending state.
*/
static void surface_move_state(struct wlr_surface *surface,
struct wlr_surface_state *next, struct wlr_surface_state *state) {
bool update_damage = false;
bool update_size = false;
int oldw = state->width;
int oldh = state->height;
if ((next->invalid & WLR_SURFACE_INVALID_SCALE)) {
state->scale = next->scale;
update_size = true;
}
if ((next->invalid & WLR_SURFACE_INVALID_TRANSFORM)) {
state->transform = next->transform;
update_size = true;
}
if ((next->invalid & WLR_SURFACE_INVALID_BUFFER)) {
surface_state_set_buffer(state, next->buffer);
surface_state_reset_buffer(next);
state->sx = next->sx;
state->sy = next->sy;
update_size = true;
}
if (update_size) {
update_damage = surface_update_size(surface, state);
}
if ((next->invalid & WLR_SURFACE_INVALID_SURFACE_DAMAGE)) {
pixman_region32_intersect_rect(&next->surface_damage,
&next->surface_damage, 0, 0, state->width, state->height);
pixman_region32_union(&state->surface_damage, &state->surface_damage,
&next->surface_damage);
pixman_region32_clear(&next->surface_damage);
update_damage = true;
}
if ((next->invalid & WLR_SURFACE_INVALID_BUFFER_DAMAGE)) {
pixman_region32_intersect_rect(&next->buffer_damage,
&next->buffer_damage, 0, 0, state->buffer_width,
state->buffer_height);
pixman_region32_union(&state->buffer_damage, &state->buffer_damage,
&next->buffer_damage);
pixman_region32_clear(&next->buffer_damage);
update_damage = true;
}
if (update_damage) {
pixman_region32_t buffer_damage, surface_damage;
pixman_region32_init(&buffer_damage);
pixman_region32_init(&surface_damage);
// Surface to buffer damage
pixman_region32_copy(&buffer_damage, &state->surface_damage);
wlr_region_transform(&buffer_damage, &buffer_damage,
wlr_output_transform_invert(state->transform),
state->width, state->height);
wlr_region_scale(&buffer_damage, &buffer_damage, state->scale);
// Buffer to surface damage
pixman_region32_copy(&surface_damage, &state->buffer_damage);
wlr_region_transform(&surface_damage, &surface_damage, state->transform,
state->buffer_width, state->buffer_height);
wlr_region_scale(&surface_damage, &surface_damage, 1.0f/state->scale);
pixman_region32_union(&state->buffer_damage, &state->buffer_damage,
&buffer_damage);
pixman_region32_union(&state->surface_damage, &state->surface_damage,
&surface_damage);
pixman_region32_fini(&buffer_damage);
pixman_region32_fini(&surface_damage);
}
if ((next->invalid & WLR_SURFACE_INVALID_OPAQUE_REGION)) {
// TODO: process buffer
pixman_region32_clear(&next->opaque);
}
if ((next->invalid & WLR_SURFACE_INVALID_INPUT_REGION)) {
// TODO: process buffer
pixman_region32_copy(&state->input, &next->input);
}
if ((next->invalid & WLR_SURFACE_INVALID_SUBSURFACE_POSITION)) {
// Subsurface has moved
int dx = state->subsurface_position.x - next->subsurface_position.x;
int dy = state->subsurface_position.y - next->subsurface_position.y;
state->subsurface_position.x = next->subsurface_position.x;
state->subsurface_position.y = next->subsurface_position.y;
next->subsurface_position.x = 0;
next->subsurface_position.y = 0;
if (dx != 0 || dy != 0) {
pixman_region32_union_rect(&state->surface_damage,
&state->surface_damage, dx, dy, oldw, oldh);
pixman_region32_union_rect(&state->surface_damage,
&state->surface_damage, 0, 0, state->width, state->height);
}
}
if ((next->invalid & WLR_SURFACE_INVALID_FRAME_CALLBACK_LIST)) {
wl_list_insert_list(&state->frame_callback_list,
&next->frame_callback_list);
wl_list_init(&next->frame_callback_list);
}
state->invalid |= next->invalid;
next->invalid = 0;
}
static void surface_damage_subsurfaces(struct wlr_subsurface *subsurface) {
// XXX: This is probably the wrong way to do it, because this damage should
// come from the client, but weston doesn't do it correctly either and it
// seems to work ok. See the comment on weston_surface_damage for more info
// about a better approach.
struct wlr_surface *surface = subsurface->surface;
pixman_region32_union_rect(&surface->current->surface_damage,
&surface->current->surface_damage,
0, 0, surface->current->width,
surface->current->height);
subsurface->reordered = false;
struct wlr_subsurface *child;
wl_list_for_each(child, &subsurface->surface->subsurfaces, parent_link) {
surface_damage_subsurfaces(child);
}
}
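// Uploads the newly committed buffer, re-using the existing wlr_buffer via the
// accumulated buffer damage when the previous one was released, otherwise
// creating a fresh wlr_buffer from the attached resource.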
static void surface_apply_damage(struct wlr_surface *surface) {
struct wl_resource *resource = surface->current->buffer;
if (resource == NULL) {
// NULL commit
wlr_buffer_unref(surface->buffer);
surface->buffer = NULL;
return;
}
if (surface->buffer != NULL && surface->buffer->released) {
pixman_region32_t damage;
pixman_region32_init(&damage);
pixman_region32_copy(&damage, &surface->current->buffer_damage);
pixman_region32_intersect_rect(&damage, &damage, 0, 0,
surface->current->buffer_width, surface->current->buffer_height);
struct wlr_buffer *updated_buffer =
wlr_buffer_apply_damage(surface->buffer, resource, &damage);
pixman_region32_fini(&damage);
if (updated_buffer != NULL) {
surface->buffer = updated_buffer;
return;
}
}
wlr_buffer_unref(surface->buffer);
surface->buffer = NULL;
struct wlr_buffer *buffer = wlr_buffer_create(surface->renderer, resource);
if (buffer == NULL) {
wlr_log(L_ERROR, "Failed to upload buffer");
return;
}
surface->buffer = buffer;
}
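// Moves the pending state into the current state, uploads the buffer if it
// changed, applies the pending subsurface ordering and emits the commit event.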
static void surface_commit_pending(struct wlr_surface *surface) {
bool invalid_buffer = surface->pending->invalid & WLR_SURFACE_INVALID_BUFFER;
surface_move_state(surface, surface->pending, surface->current);
if (invalid_buffer) {
surface_apply_damage(surface);
}
// commit subsurface order
struct wlr_subsurface *subsurface;
wl_list_for_each_reverse(subsurface, &surface->subsurface_pending_list,
parent_pending_link) {
wl_list_remove(&subsurface->parent_link);
wl_list_insert(&surface->subsurfaces, &subsurface->parent_link);
if (subsurface->reordered) {
// TODO: damage all the subsurfaces
surface_damage_subsurfaces(subsurface);
}
}
if (surface->role_committed) {
surface->role_committed(surface, surface->role_data);
}
// TODO: add the invalid bitfield to this callback
wlr_signal_emit_safe(&surface->events.commit, surface);
pixman_region32_clear(&surface->current->surface_damage);
pixman_region32_clear(&surface->current->buffer_damage);
}
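// A subsurface is effectively synchronized if it, or any subsurface in its
// parent chain, is in synchronized mode.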
static bool subsurface_is_synchronized(struct wlr_subsurface *subsurface) {
while (1) {
if (subsurface->synchronized) {
return true;
}
if (!subsurface->parent) {
return false;
}
if (!wlr_surface_is_subsurface(subsurface->parent)) {
break;
}
subsurface = wlr_subsurface_from_wlr_surface(subsurface->parent);
}
return false;
}
/**
* Recursive function to commit the effectively synchronized children.
*/
static void subsurface_parent_commit(struct wlr_subsurface *subsurface,
bool synchronized) {
struct wlr_surface *surface = subsurface->surface;
if (synchronized || subsurface->synchronized) {
if (subsurface->has_cache) {
surface_move_state(surface, subsurface->cached, surface->pending);
surface_commit_pending(surface);
subsurface->has_cache = false;
subsurface->cached->invalid = 0;
}
struct wlr_subsurface *tmp;
wl_list_for_each(tmp, &surface->subsurfaces, parent_link) {
subsurface_parent_commit(tmp, true);
}
}
}
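// Commits the subsurface: cache the pending state if effectively synchronized,
// otherwise apply it (together with any cached state) and recurse into children.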
static void subsurface_commit(struct wlr_subsurface *subsurface) {
struct wlr_surface *surface = subsurface->surface;
if (subsurface_is_synchronized(subsurface)) {
surface_move_state(surface, surface->pending, subsurface->cached);
subsurface->has_cache = true;
} else {
if (subsurface->has_cache) {
surface_move_state(surface, subsurface->cached, surface->pending);
surface_commit_pending(surface);
subsurface->has_cache = false;
} else {
surface_commit_pending(surface);
}
struct wlr_subsurface *tmp;
wl_list_for_each(tmp, &surface->subsurfaces, parent_link) {
subsurface_parent_commit(tmp, false);
}
}
}
static void surface_commit(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
if (wlr_surface_is_subsurface(surface)) {
struct wlr_subsurface *subsurface =
wlr_subsurface_from_wlr_surface(surface);
subsurface_commit(subsurface);
return;
}
surface_commit_pending(surface);
struct wlr_subsurface *tmp;
wl_list_for_each(tmp, &surface->subsurfaces, parent_link) {
subsurface_parent_commit(tmp, false);
}
}
static void surface_set_buffer_transform(struct wl_client *client,
struct wl_resource *resource, int transform) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_TRANSFORM;
surface->pending->transform = transform;
}
static void surface_set_buffer_scale(struct wl_client *client,
struct wl_resource *resource,
int32_t scale) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
surface->pending->invalid |= WLR_SURFACE_INVALID_SCALE;
surface->pending->scale = scale;
}
static void surface_damage_buffer(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width,
int32_t height) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending->invalid |= WLR_SURFACE_INVALID_BUFFER_DAMAGE;
pixman_region32_union_rect(&surface->pending->buffer_damage,
&surface->pending->buffer_damage,
x, y, width, height);
}
static const struct wl_surface_interface surface_interface = {
.destroy = surface_destroy,
.attach = surface_attach,
.damage = surface_damage,
.frame = surface_frame,
.set_opaque_region = surface_set_opaque_region,
.set_input_region = surface_set_input_region,
.commit = surface_commit,
.set_buffer_transform = surface_set_buffer_transform,
.set_buffer_scale = surface_set_buffer_scale,
.damage_buffer = surface_damage_buffer
};
struct wlr_surface *wlr_surface_from_resource(struct wl_resource *resource) {
assert(wl_resource_instance_of(resource, &wl_surface_interface,
&surface_interface));
return wl_resource_get_user_data(resource);
}
static struct wlr_surface_state *surface_state_create(void) {
struct wlr_surface_state *state =
calloc(1, sizeof(struct wlr_surface_state));
if (state == NULL) {
return NULL;
}
state->scale = 1;
state->transform = WL_OUTPUT_TRANSFORM_NORMAL;
wl_list_init(&state->frame_callback_list);
pixman_region32_init(&state->surface_damage);
pixman_region32_init(&state->buffer_damage);
pixman_region32_init(&state->opaque);
pixman_region32_init_rect(&state->input,
INT32_MIN, INT32_MIN, UINT32_MAX, UINT32_MAX);
return state;
}
static void surface_state_destroy(struct wlr_surface_state *state) {
surface_state_reset_buffer(state);
struct wl_resource *resource, *tmp;
wl_resource_for_each_safe(resource, tmp, &state->frame_callback_list) {
wl_resource_destroy(resource);
}
pixman_region32_fini(&state->surface_damage);
pixman_region32_fini(&state->buffer_damage);
pixman_region32_fini(&state->opaque);
pixman_region32_fini(&state->input);
free(state);
}
static void subsurface_destroy(struct wlr_subsurface *subsurface) {
if (subsurface == NULL) {
return;
}
wlr_signal_emit_safe(&subsurface->events.destroy, subsurface);
wl_list_remove(&subsurface->surface_destroy.link);
surface_state_destroy(subsurface->cached);
if (subsurface->parent) {
wl_list_remove(&subsurface->parent_link);
wl_list_remove(&subsurface->parent_pending_link);
wl_list_remove(&subsurface->parent_destroy.link);
}
wl_resource_set_user_data(subsurface->resource, NULL);
if (subsurface->surface) {
subsurface->surface->role_data = NULL;
}
free(subsurface);
}
static void surface_handle_resource_destroy(struct wl_resource *resource) {
struct wlr_surface *surface = wlr_surface_from_resource(resource);
wlr_signal_emit_safe(&surface->events.destroy, surface);
wl_list_remove(wl_resource_get_link(surface->resource));
wl_list_remove(&surface->renderer_destroy.link);
surface_state_destroy(surface->pending);
surface_state_destroy(surface->current);
wlr_buffer_unref(surface->buffer);
free(surface);
}
static void surface_handle_renderer_destroy(struct wl_listener *listener,
void *data) {
struct wlr_surface *surface =
wl_container_of(listener, surface, renderer_destroy);
wl_resource_destroy(surface->resource);
}
struct wlr_surface *wlr_surface_create(struct wl_client *client,
uint32_t version, uint32_t id, struct wlr_renderer *renderer,
struct wl_list *resource_list) {
assert(version <= SURFACE_VERSION);
struct wlr_surface *surface = calloc(1, sizeof(struct wlr_surface));
if (!surface) {
wl_client_post_no_memory(client);
return NULL;
}
surface->resource = wl_resource_create(client, &wl_surface_interface,
version, id);
if (surface->resource == NULL) {
free(surface);
wl_client_post_no_memory(client);
return NULL;
}
wl_resource_set_implementation(surface->resource, &surface_interface,
surface, surface_handle_resource_destroy);
wlr_log(L_DEBUG, "New wlr_surface %p (res %p)", surface, surface->resource);
surface->renderer = renderer;
surface->current = surface_state_create();
surface->pending = surface_state_create();
wl_signal_init(&surface->events.commit);
wl_signal_init(&surface->events.destroy);
wl_signal_init(&surface->events.new_subsurface);
wl_list_init(&surface->subsurfaces);
wl_list_init(&surface->subsurface_pending_list);
wl_signal_add(&renderer->events.destroy, &surface->renderer_destroy);
surface->renderer_destroy.notify = surface_handle_renderer_destroy;
struct wl_list *resource_link = wl_resource_get_link(surface->resource);
if (resource_list != NULL) {
wl_list_insert(resource_list, resource_link);
} else {
wl_list_init(resource_link);
}
return surface;
}
struct wlr_texture *wlr_surface_get_texture(struct wlr_surface *surface) {
if (surface->buffer == NULL) {
return NULL;
}
return surface->buffer->texture;
}
bool wlr_surface_has_buffer(struct wlr_surface *surface) {
return wlr_surface_get_texture(surface) != NULL;
}
int wlr_surface_set_role(struct wlr_surface *surface, const char *role,
struct wl_resource *error_resource, uint32_t error_code) {
assert(role);
if (surface->role == NULL ||
surface->role == role ||
strcmp(surface->role, role) == 0) {
surface->role = role;
return 0;
}
wl_resource_post_error(error_resource, error_code,
"Cannot assign role %s to wl_surface@%d, already has role %s\n",
role,
wl_resource_get_id(surface->resource),
surface->role);
return -1;
}
static const struct wl_subsurface_interface subsurface_implementation;
static struct wlr_subsurface *subsurface_from_resource(
struct wl_resource *resource) {
assert(wl_resource_instance_of(resource, &wl_subsurface_interface,
&subsurface_implementation));
return wl_resource_get_user_data(resource);
}
static void subsurface_resource_destroy(struct wl_resource *resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
wl_list_remove(wl_resource_get_link(resource));
subsurface_destroy(subsurface);
}
static void subsurface_handle_destroy(struct wl_client *client,
struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static void subsurface_handle_set_position(struct wl_client *client,
struct wl_resource *resource, int32_t x, int32_t y) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
struct wlr_surface *surface = subsurface->surface;
surface->pending->invalid |= WLR_SURFACE_INVALID_SUBSURFACE_POSITION;
surface->pending->subsurface_position.x = x;
surface->pending->subsurface_position.y = y;
}
static struct wlr_subsurface *subsurface_find_sibling(
struct wlr_subsurface *subsurface, struct wlr_surface *surface) {
struct wlr_surface *parent = subsurface->parent;
struct wlr_subsurface *sibling;
wl_list_for_each(sibling, &parent->subsurfaces, parent_link) {
if (sibling->surface == surface && sibling != subsurface) {
return sibling;
}
}
return NULL;
}
static void subsurface_handle_place_above(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *sibling_resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
struct wlr_surface *sibling_surface =
wlr_surface_from_resource(sibling_resource);
struct wlr_subsurface *sibling =
subsurface_find_sibling(subsurface, sibling_surface);
if (!sibling) {
wl_resource_post_error(subsurface->resource,
WL_SUBSURFACE_ERROR_BAD_SURFACE,
"%s: wl_surface@%d is not a parent or sibling",
"place_above", wl_resource_get_id(sibling_surface->resource));
return;
}
wl_list_remove(&subsurface->parent_pending_link);
wl_list_insert(&sibling->parent_pending_link,
&subsurface->parent_pending_link);
subsurface->reordered = true;
}
static void subsurface_handle_place_below(struct wl_client *client,
struct wl_resource *resource, struct wl_resource *sibling_resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
struct wlr_surface *sibling_surface =
wlr_surface_from_resource(sibling_resource);
struct wlr_subsurface *sibling =
subsurface_find_sibling(subsurface, sibling_surface);
if (!sibling) {
wl_resource_post_error(subsurface->resource,
WL_SUBSURFACE_ERROR_BAD_SURFACE,
"%s: wl_surface@%d is not a parent or sibling",
"place_below", wl_resource_get_id(sibling_surface->resource));
return;
}
wl_list_remove(&subsurface->parent_pending_link);
wl_list_insert(sibling->parent_pending_link.prev,
&subsurface->parent_pending_link);
subsurface->reordered = true;
}
static void subsurface_handle_set_sync(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
subsurface->synchronized = true;
}
static void subsurface_handle_set_desync(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_subsurface *subsurface = subsurface_from_resource(resource);
if (subsurface == NULL) {
return;
}
if (subsurface->synchronized) {
subsurface->synchronized = false;
if (!subsurface_is_synchronized(subsurface)) {
// TODO: do a synchronized commit to flush the cache
subsurface_parent_commit(subsurface, true);
}
}
}
static const struct wl_subsurface_interface subsurface_implementation = {
.destroy = subsurface_handle_destroy,
.set_position = subsurface_handle_set_position,
.place_above = subsurface_handle_place_above,
.place_below = subsurface_handle_place_below,
.set_sync = subsurface_handle_set_sync,
.set_desync = subsurface_handle_set_desync,
};
static void subsurface_handle_parent_destroy(struct wl_listener *listener,
void *data) {
struct wlr_subsurface *subsurface =
wl_container_of(listener, subsurface, parent_destroy);
wl_list_remove(&subsurface->parent_link);
wl_list_remove(&subsurface->parent_pending_link);
wl_list_remove(&subsurface->parent_destroy.link);
subsurface->parent = NULL;
}
static void subsurface_handle_surface_destroy(struct wl_listener *listener,
void *data) {
struct wlr_subsurface *subsurface =
wl_container_of(listener, subsurface, surface_destroy);
subsurface_destroy(subsurface);
}
struct wlr_subsurface *wlr_subsurface_create(struct wlr_surface *surface,
struct wlr_surface *parent, uint32_t version, uint32_t id,
struct wl_list *resource_list) {
assert(version <= SUBSURFACE_VERSION);
struct wl_client *client = wl_resource_get_client(surface->resource);
struct wlr_subsurface *subsurface =
calloc(1, sizeof(struct wlr_subsurface));
if (!subsurface) {
wl_client_post_no_memory(client);
return NULL;
}
subsurface->cached = surface_state_create();
if (subsurface->cached == NULL) {
free(subsurface);
wl_client_post_no_memory(client);
return NULL;
}
subsurface->synchronized = true;
subsurface->surface = surface;
subsurface->resource =
wl_resource_create(client, &wl_subsurface_interface, version, id);
if (subsurface->resource == NULL) {
surface_state_destroy(subsurface->cached);
free(subsurface);
wl_client_post_no_memory(client);
return NULL;
}
wl_resource_set_implementation(subsurface->resource,
&subsurface_implementation, subsurface,
subsurface_resource_destroy);
wl_signal_init(&subsurface->events.destroy);
wl_signal_add(&surface->events.destroy, &subsurface->surface_destroy);
subsurface->surface_destroy.notify = subsurface_handle_surface_destroy;
// link parent
subsurface->parent = parent;
wl_signal_add(&parent->events.destroy, &subsurface->parent_destroy);
subsurface->parent_destroy.notify = subsurface_handle_parent_destroy;
wl_list_insert(parent->subsurfaces.prev, &subsurface->parent_link);
wl_list_insert(parent->subsurface_pending_list.prev,
&subsurface->parent_pending_link);
surface->role_data = subsurface;
struct wl_list *resource_link = wl_resource_get_link(subsurface->resource);
if (resource_list != NULL) {
wl_list_insert(resource_list, resource_link);
} else {
wl_list_init(resource_link);
}
wlr_signal_emit_safe(&parent->events.new_subsurface, subsurface);
return subsurface;
}
struct wlr_surface *wlr_surface_get_root_surface(struct wlr_surface *surface) {
while (wlr_surface_is_subsurface(surface)) {
struct wlr_subsurface *subsurface =
wlr_subsurface_from_wlr_surface(surface);
surface = subsurface->parent;
}
return surface;
}
bool wlr_surface_point_accepts_input(struct wlr_surface *surface,
double sx, double sy) {
return sx >= 0 && sx <= surface->current->width &&
sy >= 0 && sy <= surface->current->height &&
pixman_region32_contains_point(&surface->current->input, sx, sy, NULL);
}
struct wlr_surface *wlr_surface_surface_at(struct wlr_surface *surface,
double sx, double sy, double *sub_x, double *sub_y) {
struct wlr_subsurface *subsurface;
wl_list_for_each_reverse(subsurface, &surface->subsurfaces, parent_link) {
double _sub_x = subsurface->surface->current->subsurface_position.x;
double _sub_y = subsurface->surface->current->subsurface_position.y;
struct wlr_surface *sub = wlr_surface_surface_at(subsurface->surface,
sx - _sub_x, sy - _sub_y, sub_x, sub_y);
if (sub != NULL) {
return sub;
}
}
if (wlr_surface_point_accepts_input(surface, sx, sy)) {
*sub_x = sx;
*sub_y = sy;
return surface;
}
return NULL;
}
void wlr_surface_send_enter(struct wlr_surface *surface,
struct wlr_output *output) {
struct wl_client *client = wl_resource_get_client(surface->resource);
struct wl_resource *resource;
wl_resource_for_each(resource, &output->wl_resources) {
if (client == wl_resource_get_client(resource)) {
wl_surface_send_enter(surface->resource, resource);
}
}
}
void wlr_surface_send_leave(struct wlr_surface *surface,
struct wlr_output *output) {
struct wl_client *client = wl_resource_get_client(surface->resource);
struct wl_resource *resource;
wl_resource_for_each(resource, &output->wl_resources) {
if (client == wl_resource_get_client(resource)) {
wl_surface_send_leave(surface->resource, resource);
}
}
}
static inline int64_t timespec_to_msec(const struct timespec *a) {
return (int64_t)a->tv_sec * 1000 + a->tv_nsec / 1000000;
}
void wlr_surface_send_frame_done(struct wlr_surface *surface,
const struct timespec *when) {
struct wl_resource *resource, *tmp;
wl_resource_for_each_safe(resource, tmp,
&surface->current->frame_callback_list) {
wl_callback_send_done(resource, timespec_to_msec(when));
wl_resource_destroy(resource);
}
}
void wlr_surface_set_role_committed(struct wlr_surface *surface,
void (*role_committed)(struct wlr_surface *surface, void *role_data),
void *role_data) {
surface->role_committed = role_committed;
surface->role_data = role_data;
}
static void surface_for_each_surface(struct wlr_surface *surface, int x, int y,
wlr_surface_iterator_func_t iterator, void *user_data) {
iterator(surface, x, y, user_data);
struct wlr_subsurface *subsurface;
wl_list_for_each(subsurface, &surface->subsurfaces, parent_link) {
struct wlr_surface_state *state = subsurface->surface->current;
int sx = state->subsurface_position.x;
int sy = state->subsurface_position.y;
surface_for_each_surface(subsurface->surface, x + sx, y + sy,
iterator, user_data);
}
}
void wlr_surface_for_each_surface(struct wlr_surface *surface,
wlr_surface_iterator_func_t iterator, void *user_data) {
surface_for_each_surface(surface, 0, 0, iterator, user_data);
}
struct bound_acc {
int32_t min_x, min_y;
int32_t max_x, max_y;
};
static void handle_bounding_box_surface(struct wlr_surface *surface,
int x, int y, void *data) {
struct bound_acc *acc = data;
acc->min_x = min(x, acc->min_x);
acc->min_y = min(y, acc->min_y);
acc->max_x = max(x + surface->current->width, acc->max_x);
acc->max_y = max(y + surface->current->height, acc->max_y);
}
void wlr_surface_get_extends(struct wlr_surface *surface, struct wlr_box *box) {
struct bound_acc acc = {
.min_x = 0,
.min_y = 0,
.max_x = surface->current->width,
.max_y = surface->current->height,
};
wlr_surface_for_each_surface(surface, handle_bounding_box_surface, &acc);
box->x = acc.min_x;
box->y = acc.min_y;
box->width = acc.max_x - acc.min_x;
box->height = acc.max_y - acc.min_y;
}
| 1 | 12,001 | `next->opaque` should not be cleared. | swaywm-wlroots | c |
@@ -525,6 +525,7 @@ func (s *Server) processClientOrLeafAuthentication(c *client, opts *Options) boo
c.Debugf("Account JWT not signed by trusted operator")
return false
}
+	// this only executes IF there's an issuer account on the juc - otherwise the account is already the issuer
if juc.IssuerAccount != "" && !acc.hasIssuer(juc.Issuer) {
c.Debugf("User JWT issuer is not known")
 		return false
| 1 |
// Copyright 2012-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"crypto/tls"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/base64"
"fmt"
"net"
"net/url"
"regexp"
"strings"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nats-server/v2/internal/ldap"
"github.com/nats-io/nkeys"
"golang.org/x/crypto/bcrypt"
)
// Authentication is an interface for implementing authentication
type Authentication interface {
// Check if a client is authorized to connect
Check(c ClientAuthentication) bool
}
// ClientAuthentication is an interface for client authentication
type ClientAuthentication interface {
// Get options associated with a client
GetOpts() *clientOpts
// If TLS is enabled, TLS ConnectionState, nil otherwise
GetTLSConnectionState() *tls.ConnectionState
// Optionally map a user after auth.
RegisterUser(*User)
// RemoteAddress expose the connection information of the client
RemoteAddress() net.Addr
}
// NkeyUser is for multiple nkey based users
type NkeyUser struct {
Nkey string `json:"user"`
Permissions *Permissions `json:"permissions,omitempty"`
Account *Account `json:"account,omitempty"`
SigningKey string `json:"signing_key,omitempty"`
AllowedConnectionTypes map[string]struct{} `json:"connection_types,omitempty"`
}
// User is for multiple accounts/users.
type User struct {
Username string `json:"user"`
Password string `json:"password"`
Permissions *Permissions `json:"permissions,omitempty"`
Account *Account `json:"account,omitempty"`
AllowedConnectionTypes map[string]struct{} `json:"connection_types,omitempty"`
}
// clone performs a deep copy of the User struct, returning a new clone with
// all values copied.
func (u *User) clone() *User {
if u == nil {
return nil
}
clone := &User{}
*clone = *u
clone.Permissions = u.Permissions.clone()
return clone
}
// clone performs a deep copy of the NkeyUser struct, returning a new clone with
// all values copied.
func (n *NkeyUser) clone() *NkeyUser {
if n == nil {
return nil
}
clone := &NkeyUser{}
*clone = *n
clone.Permissions = n.Permissions.clone()
return clone
}
// SubjectPermission is an individual allow and deny struct for publish
// and subscribe authorizations.
type SubjectPermission struct {
Allow []string `json:"allow,omitempty"`
Deny []string `json:"deny,omitempty"`
}
// ResponsePermission can be used to allow responses to any reply subject
// that is received on a valid subscription.
type ResponsePermission struct {
MaxMsgs int `json:"max"`
Expires time.Duration `json:"ttl"`
}
// Permissions are the allowed subjects on a per
// publish or subscribe basis.
type Permissions struct {
Publish *SubjectPermission `json:"publish"`
Subscribe *SubjectPermission `json:"subscribe"`
Response *ResponsePermission `json:"responses,omitempty"`
}
// RoutePermissions are similar to user permissions
// but describe what a server can import/export from and to
// another server.
type RoutePermissions struct {
Import *SubjectPermission `json:"import"`
Export *SubjectPermission `json:"export"`
}
// clone will clone an individual subject permission.
func (p *SubjectPermission) clone() *SubjectPermission {
if p == nil {
return nil
}
clone := &SubjectPermission{}
if p.Allow != nil {
clone.Allow = make([]string, len(p.Allow))
copy(clone.Allow, p.Allow)
}
if p.Deny != nil {
clone.Deny = make([]string, len(p.Deny))
copy(clone.Deny, p.Deny)
}
return clone
}
// clone performs a deep copy of the Permissions struct, returning a new clone
// with all values copied.
func (p *Permissions) clone() *Permissions {
if p == nil {
return nil
}
clone := &Permissions{}
if p.Publish != nil {
clone.Publish = p.Publish.clone()
}
if p.Subscribe != nil {
clone.Subscribe = p.Subscribe.clone()
}
if p.Response != nil {
clone.Response = &ResponsePermission{
MaxMsgs: p.Response.MaxMsgs,
Expires: p.Response.Expires,
}
}
return clone
}
// checkAuthforWarnings will look for insecure settings and log concerns.
// Lock is assumed held.
func (s *Server) checkAuthforWarnings() {
warn := false
if s.opts.Password != "" && !isBcrypt(s.opts.Password) {
warn = true
}
for _, u := range s.users {
// Skip warn if using TLS certs based auth
// unless a password has been left in the config.
if u.Password == "" && s.opts.TLSMap {
continue
}
if !isBcrypt(u.Password) {
warn = true
break
}
}
if warn {
// Warning about using plaintext passwords.
s.Warnf("Plaintext passwords detected, use nkeys or bcrypt")
}
}
// If Users or Nkeys options have definitions without an account defined,
// assign them to the default global account.
// Lock should be held.
func (s *Server) assignGlobalAccountToOrphanUsers(nkeys map[string]*NkeyUser, users map[string]*User) {
for _, u := range users {
if u.Account == nil {
u.Account = s.gacc
}
}
for _, u := range nkeys {
if u.Account == nil {
u.Account = s.gacc
}
}
}
// If the given permissions has a ResponsePermission
// set, ensure that defaults are set (if values are 0)
// and that a Publish permission is set, and Allow
// is disabled if not explicitly set.
func validateResponsePermissions(p *Permissions) {
if p == nil || p.Response == nil {
return
}
if p.Publish == nil {
p.Publish = &SubjectPermission{}
}
if p.Publish.Allow == nil {
// We turn off the blanket allow statement.
p.Publish.Allow = []string{}
}
// If there is a response permission, ensure
// that if value is 0, we set the default value.
if p.Response.MaxMsgs == 0 {
p.Response.MaxMsgs = DEFAULT_ALLOW_RESPONSE_MAX_MSGS
}
if p.Response.Expires == 0 {
p.Response.Expires = DEFAULT_ALLOW_RESPONSE_EXPIRATION
}
}
// configureAuthorization will do any setup needed for authorization.
// Lock is assumed held.
func (s *Server) configureAuthorization() {
opts := s.getOpts()
if opts == nil {
return
}
// Check for multiple users first
// This just checks and sets up the user map if we have multiple users.
if opts.CustomClientAuthentication != nil {
s.info.AuthRequired = true
} else if len(s.trustedKeys) > 0 {
s.info.AuthRequired = true
} else if opts.Nkeys != nil || opts.Users != nil {
s.nkeys, s.users = s.buildNkeysAndUsersFromOptions(opts.Nkeys, opts.Users)
s.info.AuthRequired = true
} else if opts.Username != "" || opts.Authorization != "" {
s.info.AuthRequired = true
} else {
s.users = nil
s.nkeys = nil
s.info.AuthRequired = false
}
// Do similar for websocket config
s.wsConfigAuth(&opts.Websocket)
}
// Takes the given slices of NkeyUser and User options and build
// corresponding maps used by the server. The users are cloned
// so that server does not reference options.
// The global account is assigned to users that don't have an
// existing account.
// Server lock is held on entry.
func (s *Server) buildNkeysAndUsersFromOptions(nko []*NkeyUser, uo []*User) (map[string]*NkeyUser, map[string]*User) {
var nkeys map[string]*NkeyUser
var users map[string]*User
if nko != nil {
nkeys = make(map[string]*NkeyUser, len(nko))
for _, u := range nko {
copy := u.clone()
if u.Account != nil {
if v, ok := s.accounts.Load(u.Account.Name); ok {
copy.Account = v.(*Account)
}
}
if copy.Permissions != nil {
validateResponsePermissions(copy.Permissions)
}
nkeys[u.Nkey] = copy
}
}
if uo != nil {
users = make(map[string]*User, len(uo))
for _, u := range uo {
copy := u.clone()
if u.Account != nil {
if v, ok := s.accounts.Load(u.Account.Name); ok {
copy.Account = v.(*Account)
}
}
if copy.Permissions != nil {
validateResponsePermissions(copy.Permissions)
}
users[u.Username] = copy
}
}
s.assignGlobalAccountToOrphanUsers(nkeys, users)
return nkeys, users
}
// checkAuthentication will check based on client type and
// return boolean indicating if client is authorized.
func (s *Server) checkAuthentication(c *client) bool {
switch c.kind {
case CLIENT:
return s.isClientAuthorized(c)
case ROUTER:
return s.isRouterAuthorized(c)
case GATEWAY:
return s.isGatewayAuthorized(c)
case LEAF:
return s.isLeafNodeAuthorized(c)
default:
return false
}
}
// isClientAuthorized will check the client against the proper authorization method and data.
// This could be nkey, token, or username/password based.
func (s *Server) isClientAuthorized(c *client) bool {
opts := s.getOpts()
// Check custom auth first, then jwts, then nkeys, then
// multiple users with TLS map if enabled, then token,
// then single user/pass.
if opts.CustomClientAuthentication != nil {
return opts.CustomClientAuthentication.Check(c)
}
return s.processClientOrLeafAuthentication(c, opts)
}
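// processClientOrLeafAuthentication performs the JWT, nkey, TLS map, token and
// username/password checks that are shared between client and leafnode connections.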
func (s *Server) processClientOrLeafAuthentication(c *client, opts *Options) bool {
var (
nkey *NkeyUser
juc *jwt.UserClaims
acc *Account
user *User
ok bool
err error
ao bool // auth override
)
s.mu.Lock()
authRequired := s.info.AuthRequired
// c.ws is immutable, but may need lock if we get race reports.
if !authRequired && c.ws != nil {
// If no auth required for regular clients, then check if
// we have an override for websocket clients.
authRequired = s.websocket.authOverride
}
if !authRequired {
// TODO(dlc) - If they send us credentials should we fail?
s.mu.Unlock()
return true
}
var (
username string
password string
token string
noAuthUser string
)
tlsMap := opts.TLSMap
if c.ws != nil {
wo := &opts.Websocket
// Always override TLSMap.
tlsMap = wo.TLSMap
// The rest depends on if there was any auth override in
// the websocket's config.
if s.websocket.authOverride {
noAuthUser = wo.NoAuthUser
username = wo.Username
password = wo.Password
token = wo.Token
ao = true
}
} else if c.kind == LEAF {
tlsMap = opts.LeafNode.TLSMap
}
if !ao {
noAuthUser = opts.NoAuthUser
username = opts.Username
password = opts.Password
token = opts.Authorization
}
// Check if we have trustedKeys defined in the server. If so we require a user jwt.
if s.trustedKeys != nil {
if c.opts.JWT == "" {
s.mu.Unlock()
c.Debugf("Authentication requires a user JWT")
return false
}
// So we have a valid user jwt here.
juc, err = jwt.DecodeUserClaims(c.opts.JWT)
if err != nil {
s.mu.Unlock()
c.Debugf("User JWT not valid: %v", err)
return false
}
vr := jwt.CreateValidationResults()
juc.Validate(vr)
if vr.IsBlocking(true) {
s.mu.Unlock()
c.Debugf("User JWT no longer valid: %+v", vr)
return false
}
}
// Check if we have nkeys or users for client.
hasNkeys := len(s.nkeys) > 0
hasUsers := len(s.users) > 0
if hasNkeys && c.opts.Nkey != "" {
nkey, ok = s.nkeys[c.opts.Nkey]
if !ok || !c.connectionTypeAllowed(nkey.AllowedConnectionTypes) {
s.mu.Unlock()
return false
}
} else if hasUsers {
// Check if we are tls verify and are mapping users from the client_certificate.
if tlsMap {
authorized := checkClientTLSCertSubject(c, func(u string, certRDN *ldap.DN, _ bool) (string, bool) {
// First do literal lookup using the resulting string representation
// of RDNSequence as implemented by the pkix package from Go.
if u != "" {
usr, ok := s.users[u]
if !ok || !c.connectionTypeAllowed(usr.AllowedConnectionTypes) {
return "", ok
}
user = usr
return usr.Username, ok
}
if certRDN == nil {
return "", false
}
// Look through the accounts for an RDN that is equal to the one
// presented by the certificate.
for _, usr := range s.users {
if !c.connectionTypeAllowed(usr.AllowedConnectionTypes) {
continue
}
// TODO: Use this utility to make a full validation pass
// on start in case tlsmap feature is being used.
inputRDN, err := ldap.ParseDN(usr.Username)
if err != nil {
continue
}
if inputRDN.Equal(certRDN) {
user = usr
return usr.Username, true
}
}
return "", false
})
if !authorized {
s.mu.Unlock()
return false
}
if c.opts.Username != "" {
s.Warnf("User %q found in connect proto, but user required from cert", c.opts.Username)
}
// Already checked that the client didn't send a user in connect
// but we set it here to be able to identify it in the logs.
c.opts.Username = user.Username
} else {
if c.kind == CLIENT && c.opts.Username == "" && noAuthUser != "" {
if u, exists := s.users[noAuthUser]; exists {
c.opts.Username = u.Username
c.opts.Password = u.Password
}
}
if c.opts.Username != "" {
user, ok = s.users[c.opts.Username]
if !ok || !c.connectionTypeAllowed(user.AllowedConnectionTypes) {
s.mu.Unlock()
return false
}
}
}
}
s.mu.Unlock()
// If we have a jwt and a userClaim, make sure we have the Account, etc associated.
// We need to look up the account. This will use an account resolver if one is present.
if juc != nil {
allowedConnTypes, err := convertAllowedConnectionTypes(juc.AllowedConnectionTypes)
if err != nil {
// We got an error, which means some connection types were unknown. As long as
// a valid one is returned, we proceed with auth. If not, we have to reject.
// In other words, suppose that JWT allows "WEBSOCKET" in the array. No error
// is returned and allowedConnTypes will contain "WEBSOCKET" only.
// Client will be rejected if not a websocket client, or proceed with rest of
// auth if it is.
// Now suppose JWT allows "WEBSOCKET, MQTT" and say MQTT is not known by this
// server. In this case, allowedConnTypes would contain "WEBSOCKET" and we
// would get `err` indicating that "MQTT" is an unknown connection type.
// If a websocket client connects, it should still be allowed, since after all
// the admin wanted to allow websocket and mqtt connection types.
// However, say that the JWT only allows "MQTT" (and again suppose this server
// does not know about MQTT connection type), then since the allowedConnTypes
// map would be empty (no valid types found), and since empty means allow-all,
// then we should reject because the intent was to allow connections for this
// user only as an MQTT client.
c.Debugf("%v", err)
if len(allowedConnTypes) == 0 {
return false
}
err = nil
}
if !c.connectionTypeAllowed(allowedConnTypes) {
c.Debugf("Connection type not allowed")
return false
}
issuer := juc.Issuer
if juc.IssuerAccount != "" {
issuer = juc.IssuerAccount
}
if acc, err = s.LookupAccount(issuer); acc == nil {
c.Debugf("Account JWT lookup error: %v", err)
return false
}
if !s.isTrustedIssuer(acc.Issuer) {
c.Debugf("Account JWT not signed by trusted operator")
return false
}
if juc.IssuerAccount != "" && !acc.hasIssuer(juc.Issuer) {
c.Debugf("User JWT issuer is not known")
return false
}
if acc.IsExpired() {
c.Debugf("Account JWT has expired")
return false
}
// skip validation of nonce when presented with a bearer token
// FIXME: if BearerToken is only for WSS, need check for server with that port enabled
if !juc.BearerToken {
// Verify the signature against the nonce.
if c.opts.Sig == "" {
c.Debugf("Signature missing")
return false
}
sig, err := base64.RawURLEncoding.DecodeString(c.opts.Sig)
if err != nil {
// Allow fallback to normal base64.
sig, err = base64.StdEncoding.DecodeString(c.opts.Sig)
if err != nil {
c.Debugf("Signature not valid base64")
return false
}
}
pub, err := nkeys.FromPublicKey(juc.Subject)
if err != nil {
c.Debugf("User nkey not valid: %v", err)
return false
}
if err := pub.Verify(c.nonce, sig); err != nil {
c.Debugf("Signature not verified")
return false
}
}
if acc.checkUserRevoked(juc.Subject, juc.IssuedAt) {
c.Debugf("User authentication revoked")
return false
}
if !validateSrc(juc, c.host) {
c.Errorf("Bad src Ip %s", c.host)
return false
}
allowNow, validFor := validateTimes(juc)
if !allowNow {
c.Errorf("Outside connect times")
return false
}
nkey = buildInternalNkeyUser(juc, allowedConnTypes, acc)
if err := c.RegisterNkeyUser(nkey); err != nil {
return false
}
// Hold onto the user's public key.
c.pubKey = juc.Subject
// Generate an event if we have a system account.
s.accountConnectEvent(c)
// Check if we need to set an auth timer if the user jwt expires.
c.setExpiration(juc.Claims(), validFor)
return true
}
if nkey != nil {
if c.opts.Sig == "" {
c.Debugf("Signature missing")
return false
}
sig, err := base64.RawURLEncoding.DecodeString(c.opts.Sig)
if err != nil {
// Allow fallback to normal base64.
sig, err = base64.StdEncoding.DecodeString(c.opts.Sig)
if err != nil {
c.Debugf("Signature not valid base64")
return false
}
}
pub, err := nkeys.FromPublicKey(c.opts.Nkey)
if err != nil {
c.Debugf("User nkey not valid: %v", err)
return false
}
if err := pub.Verify(c.nonce, sig); err != nil {
c.Debugf("Signature not verified")
return false
}
if err := c.RegisterNkeyUser(nkey); err != nil {
return false
}
return true
}
if user != nil {
ok = comparePasswords(user.Password, c.opts.Password)
// If we are authorized, register the user which will properly setup any permissions
// for pub/sub authorizations.
if ok {
c.RegisterUser(user)
// Generate an event if we have a system account and this is not the $G account.
s.accountConnectEvent(c)
}
return ok
}
if c.kind == CLIENT {
if token != "" {
return comparePasswords(token, c.opts.Token)
} else if username != "" {
if username != c.opts.Username {
return false
}
return comparePasswords(password, c.opts.Password)
}
} else if c.kind == LEAF {
// There is no required username/password to connect and
// there was no u/p in the CONNECT or none that matches the
		// known users. Register the leaf connection with the global account
// or the one specified in config (if provided).
return s.registerLeafWithAccount(c, opts.LeafNode.Account)
}
return false
}
func getTLSAuthDCs(rdns *pkix.RDNSequence) string {
dcOID := asn1.ObjectIdentifier{0, 9, 2342, 19200300, 100, 1, 25}
dcs := []string{}
for _, rdn := range *rdns {
if len(rdn) == 0 {
continue
}
for _, atv := range rdn {
value, ok := atv.Value.(string)
if !ok {
continue
}
if atv.Type.Equal(dcOID) {
dcs = append(dcs, "DC="+value)
}
}
}
return strings.Join(dcs, ",")
}
type tlsMapAuthFn func(string, *ldap.DN, bool) (string, bool)
func checkClientTLSCertSubject(c *client, fn tlsMapAuthFn) bool {
tlsState := c.GetTLSConnectionState()
if tlsState == nil {
c.Debugf("User required in cert, no TLS connection state")
return false
}
if len(tlsState.PeerCertificates) == 0 {
c.Debugf("User required in cert, no peer certificates found")
return false
}
cert := tlsState.PeerCertificates[0]
if len(tlsState.PeerCertificates) > 1 {
c.Debugf("Multiple peer certificates found, selecting first")
}
hasSANs := len(cert.DNSNames) > 0
hasEmailAddresses := len(cert.EmailAddresses) > 0
hasSubject := len(cert.Subject.String()) > 0
hasURIs := len(cert.URIs) > 0
if !hasEmailAddresses && !hasSubject && !hasURIs {
c.Debugf("User required in cert, none found")
return false
}
switch {
case hasEmailAddresses:
for _, u := range cert.EmailAddresses {
if match, ok := fn(u, nil, false); ok {
c.Debugf("Using email found in cert for auth [%q]", match)
return true
}
}
fallthrough
case hasSANs:
for _, u := range cert.DNSNames {
if match, ok := fn(u, nil, true); ok {
c.Debugf("Using SAN found in cert for auth [%q]", match)
return true
}
}
fallthrough
case hasURIs:
for _, u := range cert.URIs {
if match, ok := fn(u.String(), nil, false); ok {
c.Debugf("Using URI found in cert for auth [%q]", match)
return true
}
}
}
// Use the string representation of the full RDN Sequence including
// the domain components in case there are any.
rdn := cert.Subject.ToRDNSequence().String()
	// A match that follows the original order from the subject takes precedence.
dn, err := ldap.FromCertSubject(cert.Subject)
if err == nil {
if match, ok := fn("", dn, false); ok {
c.Debugf("Using DistinguishedNameMatch for auth [%q]", match)
return true
}
c.Debugf("DistinguishedNameMatch could not be used for auth [%q]", rdn)
}
var rdns pkix.RDNSequence
if _, err := asn1.Unmarshal(cert.RawSubject, &rdns); err == nil {
		// If domain components were found, then include them, roughly
		// following the order from https://tools.ietf.org/html/rfc2253
//
// NOTE: The original sequence from string representation by ToRDNSequence does not follow
		// the correct ordering, so this addition of domainComponents would likely be deprecated in
		// another release in favor of using the correct order as parsed by the go-ldap library.
//
dcs := getTLSAuthDCs(&rdns)
if len(dcs) > 0 {
u := strings.Join([]string{rdn, dcs}, ",")
if match, ok := fn(u, nil, false); ok {
c.Debugf("Using RDNSequence for auth [%q]", match)
return true
}
c.Debugf("RDNSequence could not be used for auth [%q]", u)
}
}
// If no match, then use the string representation of the RDNSequence
// from the subject without the domainComponents.
if match, ok := fn(rdn, nil, false); ok {
c.Debugf("Using certificate subject for auth [%q]", match)
return true
}
c.Debugf("User in cert [%q], not found", rdn)
return false
}
func dnsAltNameLabels(dnsAltName string) []string {
return strings.Split(strings.ToLower(dnsAltName), ".")
}
// Check DNS name according to https://tools.ietf.org/html/rfc6125#section-6.4.1
func dnsAltNameMatches(dnsAltNameLabels []string, urls []*url.URL) bool {
URLS:
for _, url := range urls {
if url == nil {
continue URLS
}
hostLabels := strings.Split(strings.ToLower(url.Hostname()), ".")
// Following https://tools.ietf.org/html/rfc6125#section-6.4.3, should not => will not, may => will not
		// The wildcard * never matches multiple labels and only matches the left-most label.
if len(hostLabels) != len(dnsAltNameLabels) {
continue URLS
}
i := 0
// only match wildcard on left most label
if dnsAltNameLabels[0] == "*" {
i++
}
for ; i < len(dnsAltNameLabels); i++ {
if dnsAltNameLabels[i] != hostLabels[i] {
continue URLS
}
}
return true
}
return false
}
// isRouterAuthorized checks optional router authorization which can be nil or username/password.
func (s *Server) isRouterAuthorized(c *client) bool {
// Snapshot server options.
opts := s.getOpts()
// Check custom auth first, then TLS map if enabled
// then single user/pass.
if s.opts.CustomRouterAuthentication != nil {
return s.opts.CustomRouterAuthentication.Check(c)
}
if opts.Cluster.TLSMap || opts.Cluster.TLSCheckKnwonURLs {
return checkClientTLSCertSubject(c, func(user string, _ *ldap.DN, isDNSAltName bool) (string, bool) {
if user == "" {
return "", false
}
if opts.Cluster.TLSCheckKnwonURLs && isDNSAltName {
if dnsAltNameMatches(dnsAltNameLabels(user), opts.Routes) {
return "", true
}
}
if opts.Cluster.TLSMap && opts.Cluster.Username == user {
return "", true
}
return "", false
})
}
if opts.Cluster.Username == "" {
return true
}
if opts.Cluster.Username != c.opts.Username {
return false
}
if !comparePasswords(opts.Cluster.Password, c.opts.Password) {
return false
}
return true
}
// isGatewayAuthorized checks optional gateway authorization which can be nil or username/password.
func (s *Server) isGatewayAuthorized(c *client) bool {
// Snapshot server options.
opts := s.getOpts()
// Check whether TLS map is enabled, otherwise use single user/pass.
if opts.Gateway.TLSMap || opts.Gateway.TLSCheckKnownURLs {
return checkClientTLSCertSubject(c, func(user string, _ *ldap.DN, isDNSAltName bool) (string, bool) {
if user == "" {
return "", false
}
if opts.Gateway.TLSCheckKnownURLs && isDNSAltName {
labels := dnsAltNameLabels(user)
for _, gw := range opts.Gateway.Gateways {
if gw != nil && dnsAltNameMatches(labels, gw.URLs) {
return "", true
}
}
}
if opts.Gateway.TLSMap && opts.Gateway.Username == user {
return "", true
}
return "", false
})
}
if opts.Gateway.Username == "" {
return true
}
if opts.Gateway.Username != c.opts.Username {
return false
}
return comparePasswords(opts.Gateway.Password, c.opts.Password)
}
func (s *Server) registerLeafWithAccount(c *client, account string) bool {
var err error
acc := s.globalAccount()
if account != _EMPTY_ {
acc, err = s.lookupAccount(account)
if err != nil {
s.Errorf("authentication of user %q failed, unable to lookup account %q: %v",
c.opts.Username, account, err)
return false
}
}
if err = c.registerWithAccount(acc); err != nil {
return false
}
return true
}
// isLeafNodeAuthorized will check for auth for an inbound leaf node connection.
func (s *Server) isLeafNodeAuthorized(c *client) bool {
opts := s.getOpts()
isAuthorized := func(username, password, account string) bool {
if username != c.opts.Username {
return false
}
if !comparePasswords(password, c.opts.Password) {
return false
}
return s.registerLeafWithAccount(c, account)
}
// If leafnodes config has an authorization{} stanza, this takes precedence.
	// The user in CONNECT must match. We will bind to the account associated
// with that user (from the leafnode's authorization{} config).
if opts.LeafNode.Username != _EMPTY_ {
return isAuthorized(opts.LeafNode.Username, opts.LeafNode.Password, opts.LeafNode.Account)
} else if len(opts.LeafNode.Users) > 0 {
if opts.LeafNode.TLSMap {
var user *User
found := checkClientTLSCertSubject(c, func(u string, _ *ldap.DN, _ bool) (string, bool) {
// This is expected to be a very small array.
for _, usr := range opts.LeafNode.Users {
if u == usr.Username {
user = usr
return u, true
}
}
return "", false
})
if !found {
return false
}
if c.opts.Username != "" {
s.Warnf("User %q found in connect proto, but user required from cert", c.opts.Username)
}
c.opts.Username = user.Username
			// This will authorize since we are using an existing user,
			// but it will also register with the proper account.
return isAuthorized(user.Username, user.Password, user.Account.GetName())
}
// This is expected to be a very small array.
for _, u := range opts.LeafNode.Users {
if u.Username == c.opts.Username {
var accName string
if u.Account != nil {
accName = u.Account.Name
}
return isAuthorized(u.Username, u.Password, accName)
}
}
return false
}
// We are here if we accept leafnode connections without any credentials.
// Still, if the CONNECT has some user info, we will bind to the
// user's account or to the specified default account (if provided)
// or to the global account.
return s.processClientOrLeafAuthentication(c, opts)
}
// Support for bcrypt stored passwords and tokens.
var validBcryptPrefix = regexp.MustCompile(`^\$2[a,b,x,y]{1}\$\d{2}\$.*`)
// isBcrypt checks whether the given password or token is bcrypted.
func isBcrypt(password string) bool {
if strings.HasPrefix(password, "$") {
return validBcryptPrefix.MatchString(password)
}
return false
}
func comparePasswords(serverPassword, clientPassword string) bool {
// Check to see if the server password is a bcrypt hash
if isBcrypt(serverPassword) {
if err := bcrypt.CompareHashAndPassword([]byte(serverPassword), []byte(clientPassword)); err != nil {
return false
}
} else if serverPassword != clientPassword {
return false
}
return true
}
func validateAuth(o *Options) error {
for _, u := range o.Users {
if err := validateAllowedConnectionTypes(u.AllowedConnectionTypes); err != nil {
return err
}
}
for _, u := range o.Nkeys {
if err := validateAllowedConnectionTypes(u.AllowedConnectionTypes); err != nil {
return err
}
}
return validateNoAuthUser(o, o.NoAuthUser)
}
func validateAllowedConnectionTypes(m map[string]struct{}) error {
for ct := range m {
ctuc := strings.ToUpper(ct)
switch ctuc {
case jwt.ConnectionTypeStandard, jwt.ConnectionTypeWebsocket, jwt.ConnectionTypeLeafnode:
default:
return fmt.Errorf("unknown connection type %q", ct)
}
if ctuc != ct {
delete(m, ct)
m[ctuc] = struct{}{}
}
}
return nil
}
func validateNoAuthUser(o *Options, noAuthUser string) error {
if noAuthUser == "" {
return nil
}
if len(o.TrustedOperators) > 0 {
return fmt.Errorf("no_auth_user not compatible with Trusted Operator")
}
if o.Users == nil {
return fmt.Errorf(`no_auth_user: "%s" present, but users are not defined`, noAuthUser)
}
for _, u := range o.Users {
if u.Username == noAuthUser {
return nil
}
}
return fmt.Errorf(
`no_auth_user: "%s" not present as user in authorization block or account configuration`,
noAuthUser)
}
| 1 | 12,005 | already what? I know what, but it seems like something is missing in this sentence | nats-io-nats-server | go |
@@ -1,6 +1 @@
-$("div.card-for-activity").replaceWith('<%=j render partial: 'proposals/details/activity', locals: {events: @events} %>');
-$("div.card-for-activity textarea:first").focus();
-$("#add_a_comment").attr('disabled', true);
-$("div.card-for-activity textarea:first").on('input',function(){
- $("#add_a_comment").attr('disabled', false);
-});
+window.c2.activityCardController.update('<%=j render partial: 'proposals/details/activity', locals: {events: @events} %>'); | 1 | $("div.card-for-activity").replaceWith('<%=j render partial: 'proposals/details/activity', locals: {events: @events} %>');
$("div.card-for-activity textarea:first").focus();
$("#add_a_comment").attr('disabled', true);
$("div.card-for-activity textarea:first").on('input',function(){
$("#add_a_comment").attr('disabled', false);
}); | 1 | 17,511 | Can you put spaces after the `{` and before the `}` | 18F-C2 | rb |
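For illustration only (an assumption about the requested style, not part of the dataset row above): the review comment asks for whitespace inside the hash braces passed to `locals:`; with only that change applied, the line added by the patch would read:

    window.c2.activityCardController.update('<%=j render partial: 'proposals/details/activity', locals: { events: @events } %>');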
@@ -21,11 +21,12 @@ import logging
import logging.handlers
import os
+from google.cloud.forseti import __version__
DEFAULT_LOG_FMT = ('%(asctime)s %(levelname)s '
'%(name)s(%(funcName)s): %(message).1024s')
-SYSLOG_LOG_FMT = ('%(levelname)s [forseti-security] '
+SYSLOG_LOG_FMT = ('%(levelname)s [forseti-security] %(__version__)s '
'%(name)s(%(funcName)s): %(message).1024s')
# %(asctime)s is used as the marker by multiline parser to determine | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic util that wraps logging.
Setup logging for Forseti Security. Logs to console and syslog.
"""
import logging
import logging.handlers
import os
DEFAULT_LOG_FMT = ('%(asctime)s %(levelname)s '
'%(name)s(%(funcName)s): %(message).1024s')
SYSLOG_LOG_FMT = ('%(levelname)s [forseti-security] '
'%(name)s(%(funcName)s): %(message).1024s')
# %(asctime)s is used as the marker by multiline parser to determine
# the first line of a log record that spans multiple lines.
# So if this is moved or changed here, update "format_firstline" in the logging
# parser config.
FORSETI_LOG_FMT = '%(asctime)s ' + SYSLOG_LOG_FMT
LOGGERS = {}
LOGLEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARN,
'error': logging.ERROR,
}
LOGLEVEL = logging.INFO
LOG_TO_CONSOLE = False
def _create_syslog_handler():
"""Create the syslog handler.
Returns:
handler: A configured syslog handler.
"""
syslog_handler = logging.handlers.SysLogHandler()
syslog_handler.setFormatter(logging.Formatter(SYSLOG_LOG_FMT))
return syslog_handler
def get_logger(module_name):
"""Setup the logger.
Args:
        module_name (str): The name of the module to describe the log entry.
Returns:
logger: An instance of the configured logger.
"""
if os.path.exists('/var/log/forseti.log'): # ubuntu on GCE
try:
default_log_handler = logging.FileHandler('/var/log/forseti.log')
default_log_handler.setFormatter(logging.Formatter(FORSETI_LOG_FMT))
# users of CLI on server vm can not write to /var/log/forseti.log
except IOError:
# pylint:disable=redefined-variable-type
default_log_handler = _create_syslog_handler()
# pylint:enable=redefined-variable-type
else:
default_log_handler = _create_syslog_handler()
logger_instance = logging.getLogger(module_name)
logger_instance.addHandler(default_log_handler)
logger_instance.setLevel(LOGLEVEL)
if LOG_TO_CONSOLE:
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter(DEFAULT_LOG_FMT))
logger_instance.addHandler(console_handler)
LOGGERS[module_name] = logger_instance
return logger_instance
def _map_logger(func):
"""Map function to current loggers.
Args:
func (function): Function to call on every logger.
"""
for logger in LOGGERS.itervalues():
func(logger)
def set_logger_level(level):
"""Modify log level of existing loggers as well as the default
for new loggers.
Args:
level (int): The log level to set the loggers to.
"""
# pylint: disable=global-statement
global LOGLEVEL
LOGLEVEL = level
_map_logger(lambda logger: logger.setLevel(level))
def enable_console_log():
"""Enable console logging for all the new loggers and add console
handlers to all the existing loggers."""
# pylint: disable=global-statement
global LOG_TO_CONSOLE
LOG_TO_CONSOLE = True
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter(DEFAULT_LOG_FMT))
_map_logger(lambda logger: logger.addHandler(console_handler))
def set_logger_level_from_config(level_name):
"""Set the logger level from a config value.
Args:
level_name (str): The log level name. The accepted values are
in the LOGLEVELS variable.
"""
set_logger_level(LOGLEVELS.get(level_name, LOGLEVEL))
| 1 | 32,010 | We said we would put `[ ]` around the version. `[forseti-security] [v2.3.0]` | forseti-security-forseti-security | py |
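For illustration only (not part of the dataset row above): the review comment asks for square brackets around the version, e.g. `[forseti-security] [v2.3.0]`. Note that `%(__version__)s` from the patch is not a standard LogRecord attribute, so plain logging would not substitute it unless the record is extended (e.g. via `extra=` or a filter). One possible sketch, an assumption rather than the project's actual fix, is to interpolate the imported `__version__` into the format constant directly:

    # Illustrative sketch: bake the package version, wrapped in brackets, into the syslog format.
    from google.cloud.forseti import __version__

    SYSLOG_LOG_FMT = ('%(levelname)s [forseti-security] [' + __version__ + '] '
                      '%(name)s(%(funcName)s): %(message).1024s')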