max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
1,894 | #! /usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
# Copyright 2014 Altera Corporation. All Rights Reserved.
# Copyright 2014-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .fake_filesystem import __version__
import os

NAME = 'pyfakefs'
MODULES = ['fake_filesystem',
           'fake_filesystem_glob',
           'fake_filesystem_shutil',
           'fake_tempfile',
           'fake_filesystem_unittest']
REQUIRES = ['mox3']
DESCRIPTION = 'Fake file system for testing file operations without touching the real file system.'
URL = "https://github.com/jmcgeheeiv/pyfakefs"

readme = os.path.join(os.path.dirname(__file__), 'README.md')
# Read the long description through a context manager so the file handle is
# closed deterministically (the previous bare ``open(readme).read()`` relied
# on garbage collection to close it, which warns under ResourceWarning).
with open(readme) as readme_file:
    LONG_DESCRIPTION = readme_file.read()

CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Operating System :: POSIX',
    'Operating System :: MacOS',
    'Operating System :: Microsoft :: Windows',
    'Topic :: Software Development :: Libraries',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Software Development :: Testing',
    'Topic :: System :: Filesystems',
]
AUTHOR = 'Google and <NAME>'
AUTHOR_EMAIL = '<EMAIL>'
KEYWORDS = ("testing test file os shutil glob mocking unittest "
            "fakes filesystem unit").split(' ')

params = dict(
    name=NAME,
    version=__version__,
    py_modules=MODULES,
    install_requires=REQUIRES,
    # metadata for upload to PyPI
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    keywords=KEYWORDS,
    url=URL,
    classifiers=CLASSIFIERS,
)

# Prefer setuptools; fall back to distutils when it is unavailable.  The
# extra test params are only meaningful under setuptools, hence the ``else``.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
else:
    params['tests_require'] = ['unittest2']
    params['test_suite'] = 'unittest2.collector'

setup(**params)  # pylint: disable = W0142
| 937 |
1,970 | <filename>hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecordGlobalLocation.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hudi.common.model;
import java.util.Objects;
/**
* Similar with {@link org.apache.hudi.common.model.HoodieRecordLocation} but with partition path.
*/
/**
 * A {@link HoodieRecordLocation} that additionally carries the partition path,
 * making the location globally unique across partitions.
 */
public class HoodieRecordGlobalLocation extends HoodieRecordLocation {
  private static final long serialVersionUID = 1L;

  /** Partition path qualifying the record location globally. */
  private String partitionPath;

  public HoodieRecordGlobalLocation() {
  }

  public HoodieRecordGlobalLocation(String partitionPath, String instantTime, String fileId) {
    super(instantTime, fileId);
    this.partitionPath = partitionPath;
  }

  @Override
  public String toString() {
    // Fixed: previously printed "HoodieGlobalRecordLocation", which did not
    // match the actual class name and made log messages misleading.
    final StringBuilder sb = new StringBuilder("HoodieRecordGlobalLocation {");
    sb.append("partitionPath=").append(partitionPath).append(", ");
    sb.append("instantTime=").append(instantTime).append(", ");
    sb.append("fileId=").append(fileId);
    sb.append('}');
    return sb.toString();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    HoodieRecordGlobalLocation otherLoc = (HoodieRecordGlobalLocation) o;
    return Objects.equals(partitionPath, otherLoc.partitionPath)
        && Objects.equals(instantTime, otherLoc.instantTime)
        && Objects.equals(fileId, otherLoc.fileId);
  }

  @Override
  public int hashCode() {
    return Objects.hash(partitionPath, instantTime, fileId);
  }

  public String getPartitionPath() {
    return partitionPath;
  }

  public void setPartitionPath(String partitionPath) {
    this.partitionPath = partitionPath;
  }

  /**
   * Returns the global record location built from a local location plus its
   * partition path.
   */
  public static HoodieRecordGlobalLocation fromLocal(String partitionPath, HoodieRecordLocation localLoc) {
    return new HoodieRecordGlobalLocation(partitionPath, localLoc.getInstantTime(), localLoc.getFileId());
  }

  /**
   * Returns the record location as a (partition-less) local location, keyed by
   * the given instant time and this location's file id.
   */
  public HoodieRecordLocation toLocal(String instantTime) {
    return new HoodieRecordLocation(instantTime, fileId);
  }

  /**
   * Copies this location, substituting the given partition path.
   */
  public HoodieRecordGlobalLocation copy(String partitionPath) {
    return new HoodieRecordGlobalLocation(partitionPath, instantTime, fileId);
  }
}
| 951 |
956 | /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
* Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_HW_H_
#define _ICP_QAT_HW_H_
/* Identifiers for the Acceleration Engines (AEs) present on the QAT device. */
enum icp_qat_hw_ae_id {
ICP_QAT_HW_AE_0 = 0,
ICP_QAT_HW_AE_1 = 1,
ICP_QAT_HW_AE_2 = 2,
ICP_QAT_HW_AE_3 = 3,
ICP_QAT_HW_AE_4 = 4,
ICP_QAT_HW_AE_5 = 5,
ICP_QAT_HW_AE_6 = 6,
ICP_QAT_HW_AE_7 = 7,
ICP_QAT_HW_AE_8 = 8,
ICP_QAT_HW_AE_9 = 9,
ICP_QAT_HW_AE_10 = 10,
ICP_QAT_HW_AE_11 = 11,
/* Delimiter: one past the highest valid AE id (count of AEs). */
ICP_QAT_HW_AE_DELIMITER = 12
};
/* Identifiers for the QAT accelerator slices. */
enum icp_qat_hw_qat_id {
ICP_QAT_HW_QAT_0 = 0,
ICP_QAT_HW_QAT_1 = 1,
ICP_QAT_HW_QAT_2 = 2,
ICP_QAT_HW_QAT_3 = 3,
ICP_QAT_HW_QAT_4 = 4,
ICP_QAT_HW_QAT_5 = 5,
/* Delimiter: one past the highest valid QAT id. */
ICP_QAT_HW_QAT_DELIMITER = 6
};
/*
 * Hardware authentication (hash/MAC) algorithm selectors.  Values 15, 16 and
 * 18 are reserved by the hardware interface and must not be reassigned.
 */
enum icp_qat_hw_auth_algo {
ICP_QAT_HW_AUTH_ALGO_NULL = 0,
ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
ICP_QAT_HW_AUTH_RESERVED_1 = 15,
ICP_QAT_HW_AUTH_RESERVED_2 = 16,
ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
ICP_QAT_HW_AUTH_RESERVED_3 = 18,
ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
};
/* Authentication slice operating modes (see QAT programmer's guide). */
enum icp_qat_hw_auth_mode {
ICP_QAT_HW_AUTH_MODE0 = 0,
ICP_QAT_HW_AUTH_MODE1 = 1,
ICP_QAT_HW_AUTH_MODE2 = 2,
ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
};
/* First quad-word of the auth setup block written to hardware. */
struct icp_qat_hw_auth_config {
uint32_t config;
uint32_t reserved;
};
/*
 * Bit positions and masks for the fields packed into
 * icp_qat_hw_auth_config.config by ICP_QAT_HW_AUTH_CONFIG_BUILD below.
 */
#define QAT_AUTH_MODE_BITPOS 4
#define QAT_AUTH_MODE_MASK 0xF
#define QAT_AUTH_ALGO_BITPOS 0
#define QAT_AUTH_ALGO_MASK 0xF
#define QAT_AUTH_CMP_BITPOS 8
#define QAT_AUTH_CMP_MASK 0x7F
#define QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS 16
#define QAT_AUTH_SHA3_PADDING_DISABLE_MASK 0x1
#define QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS 17
#define QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK 0x1
#define QAT_AUTH_ALGO_SHA3_BITPOS 22
#define QAT_AUTH_ALGO_SHA3_MASK 0x3
/* Fields of the upper config word (SHA-3 programmable padding). */
#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS 16
#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK 0xF
#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS 24
#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK 0xFF
/* Default SHA-3 padding behaviour: hardware padding, no override. */
#define QAT_AUTH_SHA3_HW_PADDING_ENABLE 0
#define QAT_AUTH_SHA3_HW_PADDING_DISABLE 1
#define QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT 0
#define QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT 0
#define QAT_AUTH_SHA3_PADDING_OVERRIDE_PROGRAMMABLE 1
#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED 0
#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED 0
/*
 * Builds the lower 32-bit word of the auth config from the mode, algorithm
 * selector and comparison length.  Bits 4-6 of |algo| select the SHA-3
 * sub-field; SHA-3 padding fields are set to their hardware defaults.
 *
 * Fix: |algo| is now parenthesized in the shift sub-expression.  The old
 * `(algo >> 4)` mis-evaluated for argument expressions with operators of
 * lower precedence than `>>` (e.g. a conditional expression).
 */
#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
	((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
	(((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
	((((algo) >> 4) & QAT_AUTH_ALGO_SHA3_MASK) \
	<< QAT_AUTH_ALGO_SHA3_BITPOS) | \
	(((QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT) & \
	QAT_AUTH_SHA3_PADDING_DISABLE_MASK) \
	<< QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS) | \
	(((QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT) & \
	QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK) \
	<< QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS) | \
	(((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
/*
 * Builds the upper 32-bit auth config word; both programmable SHA-3 padding
 * fields are reserved (zero) in this interface revision.
 */
#define ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER \
	((((QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED) & \
	QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK) \
	<< QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS) | \
	(((QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED) & \
	QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK) \
	<< QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS))
/* Auth byte counter (second quad-word of the auth setup block). */
struct icp_qat_hw_auth_counter {
uint32_t counter;
uint32_t reserved;
};
#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
#define QAT_AUTH_COUNT_BITPOS 0
/* Packs a byte count into the counter field (mask/shift are identity here). */
#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
(((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
/* Complete auth setup header preceding the hash state in memory. */
struct icp_qat_hw_auth_setup {
struct icp_qat_hw_auth_config auth_config;
struct icp_qat_hw_auth_counter auth_counter;
};
#define QAT_HW_DEFAULT_ALIGNMENT 8
/*
 * Rounds |val| up to the next multiple of |n|; |n| must be a power of two.
 * Fix: parenthesize |n| inside the negation — the old `(~(n - 1))` computed
 * the wrong mask when |n| was a compound expression such as `a + b`.
 */
#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~((n) - 1)))
/* State-1 sizes in bytes (inner hash state) per auth algorithm. */
#define ICP_QAT_HW_NULL_STATE1_SZ 32
#define ICP_QAT_HW_MD5_STATE1_SZ 16
#define ICP_QAT_HW_SHA1_STATE1_SZ 20
#define ICP_QAT_HW_SHA224_STATE1_SZ 32
#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
#define ICP_QAT_HW_SHA256_STATE1_SZ 32
#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
#define ICP_QAT_HW_SHA384_STATE1_SZ 64
#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
#define ICP_QAT_HW_SHA512_STATE1_SZ 64
#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
/* State-2 sizes in bytes (outer hash state / keys); 0 where not used. */
#define ICP_QAT_HW_NULL_STATE2_SZ 32
#define ICP_QAT_HW_MD5_STATE2_SZ 16
#define ICP_QAT_HW_SHA1_STATE2_SZ 20
#define ICP_QAT_HW_SHA224_STATE2_SZ 32
#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
#define ICP_QAT_HW_SHA256_STATE2_SZ 32
#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
#define ICP_QAT_HW_SHA384_STATE2_SZ 64
#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
#define ICP_QAT_HW_SHA512_STATE2_SZ 64
#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
#define ICP_QAT_HW_F9_IK_SZ 16
#define ICP_QAT_HW_F9_FK_SZ 16
/* KASUMI F9 state-2 holds both the integrity and final keys. */
#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
ICP_QAT_HW_F9_FK_SZ)
#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
#define ICP_QAT_HW_GALOIS_H_SZ 16
#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
/* Auth setup block sized for SHA-512 — the largest SHA-2 state. */
struct icp_qat_hw_auth_sha512 {
struct icp_qat_hw_auth_setup inner_setup;
uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
struct icp_qat_hw_auth_setup outer_setup;
uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
};
/* Auth setup block for SHA3-512 (no state-2 payload). */
struct icp_qat_hw_auth_sha3_512 {
struct icp_qat_hw_auth_setup inner_setup;
uint8_t state1[ICP_QAT_HW_SHA3_512_STATE1_SZ];
struct icp_qat_hw_auth_setup outer_setup;
};
/* Generic auth block: the SHA-512 layout accommodates every algorithm. */
struct icp_qat_hw_auth_algo_blk {
struct icp_qat_hw_auth_sha512 sha;
};
#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
/* Hardware cipher algorithm selectors. */
enum icp_qat_hw_cipher_algo {
ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
ICP_QAT_HW_CIPHER_ALGO_DES = 1,
ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
ICP_QAT_HW_CIPHER_ALGO_SM4 = 10,
ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 = 11,
ICP_QAT_HW_CIPHER_DELIMITER = 12
};
/* Cipher operating modes; note value 5 is unused by this interface. */
enum icp_qat_hw_cipher_mode {
ICP_QAT_HW_CIPHER_ECB_MODE = 0,
ICP_QAT_HW_CIPHER_CBC_MODE = 1,
ICP_QAT_HW_CIPHER_CTR_MODE = 2,
ICP_QAT_HW_CIPHER_F8_MODE = 3,
ICP_QAT_HW_CIPHER_AEAD_MODE = 4,
ICP_QAT_HW_CIPHER_XTS_MODE = 6,
ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
};
/* Cipher config word written to hardware (built by CONFIG_BUILD below). */
struct icp_qat_hw_cipher_config {
uint32_t val;
uint32_t reserved;
};
enum icp_qat_hw_cipher_dir {
ICP_QAT_HW_CIPHER_ENCRYPT = 0,
ICP_QAT_HW_CIPHER_DECRYPT = 1,
};
enum icp_qat_hw_auth_op {
ICP_QAT_HW_AUTH_VERIFY = 0,
ICP_QAT_HW_AUTH_GENERATE = 1,
};
/* Whether hardware should convert (expand) the cipher key. */
enum icp_qat_hw_cipher_convert {
ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
};
/* Bit positions/masks of the fields packed into the cipher config word. */
#define QAT_CIPHER_MODE_BITPOS 4
#define QAT_CIPHER_MODE_MASK 0xF
#define QAT_CIPHER_ALGO_BITPOS 0
#define QAT_CIPHER_ALGO_MASK 0xF
#define QAT_CIPHER_CONVERT_BITPOS 9
#define QAT_CIPHER_CONVERT_MASK 0x1
#define QAT_CIPHER_DIR_BITPOS 8
#define QAT_CIPHER_DIR_MASK 0x1
#define QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS 10
#define QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK 0x1F
/* F8 and XTS modes require a double-length key. */
#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
/*
 * Packs mode, algorithm, key-convert flag and direction into the cipher
 * config word.
 *
 * Fix: every macro parameter is now parenthesized.  The old expansion
 * applied `&` directly to the raw argument tokens, which mis-evaluated for
 * arguments containing lower-precedence operators (standard macro hygiene).
 */
#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
	((((mode) & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
	(((algo) & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
	(((convert) & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
	(((dir) & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
/* Fields of the upper config word: 14-bit AAD size split across two spans. */
#define QAT_CIPHER_AEAD_AAD_LOWER_SHIFT 24
#define QAT_CIPHER_AEAD_AAD_UPPER_SHIFT 8
#define QAT_CIPHER_AEAD_AAD_SIZE_LOWER_MASK 0xFF
#define QAT_CIPHER_AEAD_AAD_SIZE_UPPER_MASK 0x3F
#define QAT_CIPHER_AEAD_AAD_SIZE_BITPOS 16
/*
 * Builds the upper cipher config word from the AEAD AAD size.  Uses a GNU
 * statement expression and a local copy (aad_size1) so the argument is
 * evaluated exactly once.
 */
#define ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(aad_size) \
	({ \
	typeof(aad_size) aad_size1 = aad_size; \
	(((((aad_size1) >> QAT_CIPHER_AEAD_AAD_UPPER_SHIFT) & \
	QAT_CIPHER_AEAD_AAD_SIZE_UPPER_MASK) << \
	QAT_CIPHER_AEAD_AAD_SIZE_BITPOS) | \
	(((aad_size1) & QAT_CIPHER_AEAD_AAD_SIZE_LOWER_MASK) << \
	QAT_CIPHER_AEAD_AAD_LOWER_SHIFT)); \
	})
/* Cipher block sizes in bytes. */
#define ICP_QAT_HW_DES_BLK_SZ 8
#define ICP_QAT_HW_3DES_BLK_SZ 8
#define ICP_QAT_HW_NULL_BLK_SZ 8
#define ICP_QAT_HW_AES_BLK_SZ 16
#define ICP_QAT_HW_KASUMI_BLK_SZ 8
#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
/* Cipher key (and IV) sizes in bytes. */
#define ICP_QAT_HW_NULL_KEY_SZ 256
#define ICP_QAT_HW_DES_KEY_SZ 8
#define ICP_QAT_HW_3DES_KEY_SZ 24
#define ICP_QAT_HW_AES_128_KEY_SZ 16
#define ICP_QAT_HW_AES_192_KEY_SZ 24
#define ICP_QAT_HW_AES_256_KEY_SZ 32
#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
#define ICP_QAT_HW_KASUMI_KEY_SZ 16
#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
/*
 * Note: duplicate re-definitions of ICP_QAT_HW_AES_128_XTS_KEY_SZ and
 * ICP_QAT_HW_AES_256_XTS_KEY_SZ (identical to the ones above) were removed
 * here — they were redundant and a lint/maintenance hazard.
 */
#define ICP_QAT_HW_ARC4_KEY_SZ 256
#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
/* ChaCha20-Poly1305 parameters. */
#define ICP_QAT_HW_CHACHAPOLY_KEY_SZ 32
#define ICP_QAT_HW_CHACHAPOLY_IV_SZ 12
#define ICP_QAT_HW_CHACHAPOLY_BLK_SZ 64
#define ICP_QAT_HW_SPC_CTR_SZ 16
#define ICP_QAT_HW_CHACHAPOLY_ICV_SZ 16
#define ICP_QAT_HW_CHACHAPOLY_AAD_MAX_LOG 14
/* Largest key buffer any cipher setup can require (AES-256 F8). */
#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
/* These defines describe position of the bit-fields
 * in the flags byte in B0
 */
#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6
#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3
/*
 * Builds the CCM B0 flags byte per NIST SP 800-38C: Adata presence bit,
 * encoded tag length ((t-2)/2) and encoded nonce-length field (q-1).
 */
#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \
((((Adata) > 0 ? 1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \
| ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \
| ((q) - 1))
/* CCM AAD layout constants: B0 block followed by the 2-byte AAD length. */
#define ICP_QAT_HW_CCM_NQ_CONST 15
#define ICP_QAT_HW_CCM_AAD_B0_LEN 16
#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2
#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \
ICP_QAT_HW_CCM_AAD_LEN_INFO)
#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16
#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4
#define ICP_QAT_HW_CCM_NONCE_OFFSET 1
/* Cipher setup block: config word followed by the (max-size) key buffer. */
struct icp_qat_hw_cipher_algo_blk {
struct icp_qat_hw_cipher_config cipher_config;
uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
} __rte_cache_aligned;
/* ========================================================================= */
/*                            COMPRESSION SLICE                              */
/* ========================================================================= */
enum icp_qat_hw_compression_direction {
ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,
ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,
ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2
};
enum icp_qat_hw_compression_delayed_match {
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2
};
enum icp_qat_hw_compression_algo {
ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
};
/* Search depth used by the compressor (trade-off: ratio vs throughput). */
enum icp_qat_hw_compression_depth {
ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,
ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,
ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,
ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,
ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 4
};
enum icp_qat_hw_compression_file_type {
ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,
ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1,
ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,
ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,
ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,
ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5
};
/* Compression config word written to hardware. */
struct icp_qat_hw_compression_config {
uint32_t val;
uint32_t reserved;
};
/* Bit positions/masks for fields packed by CONFIG_BUILD below. */
#define QAT_COMPRESSION_DIR_BITPOS 4
#define QAT_COMPRESSION_DIR_MASK 0x7
#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16
#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1
#define QAT_COMPRESSION_ALGO_BITPOS 31
#define QAT_COMPRESSION_ALGO_MASK 0x1
#define QAT_COMPRESSION_DEPTH_BITPOS 28
#define QAT_COMPRESSION_DEPTH_MASK 0x7
#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24
#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF
/* Packs direction, delayed-match, algorithm, depth and file type. */
#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( \
dir, delayed, algo, depth, filetype) \
((((dir) & QAT_COMPRESSION_DIR_MASK) << QAT_COMPRESSION_DIR_BITPOS) | \
(((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) \
<< QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
(((algo) & QAT_COMPRESSION_ALGO_MASK) \
<< QAT_COMPRESSION_ALGO_BITPOS) | \
(((depth) & QAT_COMPRESSION_DEPTH_MASK) \
<< QAT_COMPRESSION_DEPTH_BITPOS) | \
(((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) \
<< QAT_COMPRESSION_FILE_TYPE_BITPOS))
#endif
| 8,567 |
2,151 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/capabilities/video_decode_stats_db_impl.h"
#include <memory>
#include <tuple>
#include "base/files/file_path.h"
#include "base/format_macros.h"
#include "base/metrics/histogram_macros.h"
#include "base/sequence_checker.h"
#include "base/strings/stringprintf.h"
#include "base/task_scheduler/post_task.h"
#include "components/leveldb_proto/proto_database_impl.h"
#include "media/capabilities/video_decode_stats.pb.h"
namespace media {
namespace {
// Avoid changing client name. Used in UMA.
// See comments in components/leveldb_proto/leveldb_database.h
const char kDatabaseClientName[] = "VideoDecodeStatsDB";

// Serialize the |entry| to a string to use as a key in the database.
// Format: "<profile>|<size>|<frame_rate>".
std::string SerializeKey(const VideoDecodeStatsDB::VideoDescKey& key) {
  return base::StringPrintf("%d|%s|%d", static_cast<int>(key.codec_profile),
                            key.size.ToString().c_str(), key.frame_rate);
}

// For debug logging.
std::string KeyToString(const VideoDecodeStatsDB::VideoDescKey& key) {
  return "Key {" + SerializeKey(key) + "}";
}

// For debug logging.
std::string EntryToString(const VideoDecodeStatsDB::DecodeStatsEntry& entry) {
  return base::StringPrintf("DecodeStatsEntry {frames decoded:%" PRIu64
                            ", dropped:%" PRIu64
                            ", power efficient decoded:%" PRIu64 "}",
                            entry.frames_decoded, entry.frames_dropped,
                            entry.frames_decoded_power_efficient);
}

// Fix: removed the stray ';' that followed the namespace closing brace
// ("};" -> "}"); it was an empty declaration flagged by -Wextra-semi.
}  // namespace
// Constructs a factory that will create databases rooted at |db_dir|.
VideoDecodeStatsDBImplFactory::VideoDecodeStatsDBImplFactory(
base::FilePath db_dir)
: db_dir_(db_dir) {
DVLOG(2) << __func__ << " db_dir:" << db_dir_;
}
// Trivial destructor; factory owns no resources beyond |db_dir_|.
VideoDecodeStatsDBImplFactory::~VideoDecodeStatsDBImplFactory() = default;
// Creates a new DB instance backed by a leveldb_proto database running on a
// background sequence that may block and survives shutdown.
std::unique_ptr<VideoDecodeStatsDB> VideoDecodeStatsDBImplFactory::CreateDB() {
  // Fix: removed an unused local `db_` that shadowed the member name of
  // VideoDecodeStatsDBImpl and was never assigned or read.
  auto inner_db =
      std::make_unique<leveldb_proto::ProtoDatabaseImpl<DecodeStatsProto>>(
          base::CreateSequencedTaskRunnerWithTraits(
              {base::MayBlock(), base::TaskPriority::BACKGROUND,
               base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN}));
  return std::make_unique<VideoDecodeStatsDBImpl>(std::move(inner_db), db_dir_);
}
// Takes ownership of |db|; |db_dir| is where the on-disk files live.
// Initialize() must be called before any read/write operation.
VideoDecodeStatsDBImpl::VideoDecodeStatsDBImpl(
std::unique_ptr<leveldb_proto::ProtoDatabase<DecodeStatsProto>> db,
const base::FilePath& db_dir)
: db_(std::move(db)), db_dir_(db_dir), weak_ptr_factory_(this) {
DCHECK(db_);
DCHECK(!db_dir_.empty());
}
// Must be destroyed on the same sequence it was used on.
VideoDecodeStatsDBImpl::~VideoDecodeStatsDBImpl() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
}
// Opens the underlying leveldb database; |init_cb| is invoked with the
// outcome.  Must not be called twice, nor while a Destroy is pending.
void VideoDecodeStatsDBImpl::Initialize(
base::OnceCallback<void(bool)> init_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(init_cb);
DCHECK(!IsInitialized());
DCHECK(!db_destroy_pending_);
// "Simple options" will use the default global cache of 8MB. In the worst
// case our whole DB will be less than 35K, so we aren't worried about
// spamming the cache.
// TODO(chcunningham): Keep an eye on the size as the table evolves.
db_->Init(kDatabaseClientName, db_dir_, leveldb_proto::CreateSimpleOptions(),
base::BindOnce(&VideoDecodeStatsDBImpl::OnInit,
weak_ptr_factory_.GetWeakPtr(), std::move(init_cb)));
}
// Completion handler for Initialize(): records UMA, marks the DB as
// initialized, and drops |db_| on failure so IsInitialized() stays false.
void VideoDecodeStatsDBImpl::OnInit(base::OnceCallback<void(bool)> init_cb,
bool success) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOG(2) << __func__ << (success ? " succeeded" : " FAILED!");
UMA_HISTOGRAM_BOOLEAN("Media.VideoDecodeStatsDB.OpSuccess.Initialize",
success);
db_init_ = true;
// Can't use DB when initialization fails.
if (!success)
db_.reset();
std::move(init_cb).Run(success);
}
// True once OnInit ran successfully and the DB has not been destroyed.
bool VideoDecodeStatsDBImpl::IsInitialized() {
// |db_| will be null if Initialization failed.
return db_init_ && db_;
}
// Read-modify-write: fetches the existing stats for |key| and hands both the
// previous proto and the new |entry| to WriteUpdatedEntry for accumulation.
void VideoDecodeStatsDBImpl::AppendDecodeStats(
const VideoDescKey& key,
const DecodeStatsEntry& entry,
AppendDecodeStatsCB append_done_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(IsInitialized());
DVLOG(3) << __func__ << " Reading key " << KeyToString(key)
<< " from DB with intent to update with " << EntryToString(entry);
db_->GetEntry(SerializeKey(key),
base::BindOnce(&VideoDecodeStatsDBImpl::WriteUpdatedEntry,
weak_ptr_factory_.GetWeakPtr(), key, entry,
std::move(append_done_cb)));
}
// Asynchronously reads the stats for |key|; OnGotDecodeStats converts the
// proto (if any) and forwards the result to |get_stats_cb|.
void VideoDecodeStatsDBImpl::GetDecodeStats(const VideoDescKey& key,
GetDecodeStatsCB get_stats_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(IsInitialized());
DVLOG(3) << __func__ << " " << KeyToString(key);
db_->GetEntry(
SerializeKey(key),
base::BindOnce(&VideoDecodeStatsDBImpl::OnGotDecodeStats,
weak_ptr_factory_.GetWeakPtr(), std::move(get_stats_cb)));
}
void VideoDecodeStatsDBImpl::WriteUpdatedEntry(
const VideoDescKey& key,
const DecodeStatsEntry& entry,
AppendDecodeStatsCB append_done_cb,
bool read_success,
std::unique_ptr<DecodeStatsProto> prev_stats_proto) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(IsInitialized());
// Note: outcome of "Write" operation logged in OnEntryUpdated().
UMA_HISTOGRAM_BOOLEAN("Media.VideoDecodeStatsDB.OpSuccess.Read",
read_success);
if (!read_success) {
DVLOG(2) << __func__ << " FAILED DB read for " << KeyToString(key)
<< "; ignoring update!";
std::move(append_done_cb).Run(false);
return;
}
if (!prev_stats_proto) {
prev_stats_proto.reset(new DecodeStatsProto());
prev_stats_proto->set_frames_decoded(0);
prev_stats_proto->set_frames_dropped(0);
prev_stats_proto->set_frames_decoded_power_efficient(0);
}
uint64_t sum_frames_decoded =
prev_stats_proto->frames_decoded() + entry.frames_decoded;
uint64_t sum_frames_dropped =
prev_stats_proto->frames_dropped() + entry.frames_dropped;
uint64_t sum_frames_decoded_power_efficient =
prev_stats_proto->frames_decoded_power_efficient() +
entry.frames_decoded_power_efficient;
prev_stats_proto->set_frames_decoded(sum_frames_decoded);
prev_stats_proto->set_frames_dropped(sum_frames_dropped);
prev_stats_proto->set_frames_decoded_power_efficient(
sum_frames_decoded_power_efficient);
DVLOG(3) << __func__ << " Updating " << KeyToString(key) << " with "
<< EntryToString(entry) << " aggregate:"
<< EntryToString(
DecodeStatsEntry(sum_frames_decoded, sum_frames_dropped,
sum_frames_decoded_power_efficient));
using ProtoDecodeStatsEntry = leveldb_proto::ProtoDatabase<DecodeStatsProto>;
std::unique_ptr<ProtoDecodeStatsEntry::KeyEntryVector> entries =
std::make_unique<ProtoDecodeStatsEntry::KeyEntryVector>();
entries->emplace_back(SerializeKey(key), *prev_stats_proto);
db_->UpdateEntries(std::move(entries),
std::make_unique<leveldb_proto::KeyVector>(),
base::BindOnce(&VideoDecodeStatsDBImpl::OnEntryUpdated,
weak_ptr_factory_.GetWeakPtr(),
std::move(append_done_cb)));
}
// Completion handler for the write issued by WriteUpdatedEntry: records UMA
// and forwards |success| to the caller's callback.
void VideoDecodeStatsDBImpl::OnEntryUpdated(AppendDecodeStatsCB append_done_cb,
bool success) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
UMA_HISTOGRAM_BOOLEAN("Media.VideoDecodeStatsDB.OpSuccess.Write", success);
DVLOG(3) << __func__ << " update " << (success ? "succeeded" : "FAILED!");
std::move(append_done_cb).Run(success);
}
// Completion handler for GetDecodeStats: converts the stored proto (if any)
// into a DecodeStatsEntry; passes a null entry when no record exists.
void VideoDecodeStatsDBImpl::OnGotDecodeStats(
GetDecodeStatsCB get_stats_cb,
bool success,
std::unique_ptr<DecodeStatsProto> stats_proto) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
UMA_HISTOGRAM_BOOLEAN("Media.VideoDecodeStatsDB.OpSuccess.Read", success);
std::unique_ptr<DecodeStatsEntry> entry;
if (stats_proto) {
// A non-null proto implies the read itself succeeded.
DCHECK(success);
entry = std::make_unique<DecodeStatsEntry>(
stats_proto->frames_decoded(), stats_proto->frames_dropped(),
stats_proto->frames_decoded_power_efficient());
}
DVLOG(3) << __func__ << " read " << (success ? "succeeded" : "FAILED!")
<< " entry: " << (entry ? EntryToString(*entry) : "nullptr");
std::move(get_stats_cb).Run(success, std::move(entry));
}
// Wipes the database.  Marks the DB uninitialized for the duration; callers
// must re-Initialize() after |destroy_done_cb| runs.
void VideoDecodeStatsDBImpl::DestroyStats(base::OnceClosure destroy_done_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOG(2) << __func__;
// DB is no longer initialized once destruction kicks off.
db_init_ = false;
db_destroy_pending_ = true;
db_->Destroy(base::BindOnce(&VideoDecodeStatsDBImpl::OnDestroyedStats,
weak_ptr_factory_.GetWeakPtr(),
std::move(destroy_done_cb)));
}
// Completion handler for DestroyStats: records UMA and clears the pending
// flag so the DB may be re-initialized.
void VideoDecodeStatsDBImpl::OnDestroyedStats(base::OnceClosure destroy_done_cb,
bool success) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOG(2) << __func__ << (success ? " succeeded" : " FAILED!");
UMA_HISTOGRAM_BOOLEAN("Media.VideoDecodeStatsDB.OpSuccess.Destroy", success);
// Allow calls to re-Initialize() now that destruction is complete.
DCHECK(!db_init_);
db_destroy_pending_ = false;
// We don't pass success to |destroy_done_cb|. Clearing is best effort and
// there is no additional action for callers to take in case of failure.
// TODO(chcunningham): Monitor UMA and consider more aggressive action like
// deleting the DB directory.
std::move(destroy_done_cb).Run();
}
} // namespace media
| 4,186 |
5,169 | {
"name": "BetaDataSDK",
"version": "1.2.22",
"summary": "The official iOS SDK for Analytics.",
"description": "The official iOS SDK for Analytics. Supports iOS 8+.",
"homepage": "http://code.mocaapp.cn/betadata/sdk-ios.git",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/hoongbin/sdk-ios.git",
"tag": "1.2.22"
},
"platforms": {
"ios": "8.0"
},
"source_files": "BetaDataSDK/Classes/**/*",
"resources": "BetaDataSDK/Assets/*.bundle"
}
| 243 |
3,580 | <gh_stars>1000+
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest import TestCase
import numpy as np
import pandas as pd
from kats.models.nowcasting.feature_extraction import (
LAG,
MOM,
MA,
ROC,
MACD,
BBANDS,
TRIX,
EMA,
TSI,
RSI,
)
class testNowcasting(TestCase):
    """Unit tests for the kats nowcasting feature-extraction transforms.

    Each test feeds a small deterministic series (``range(...)``) through a
    transform and compares the produced column against precomputed values.
    Floating-point comparisons use a summed absolute error with a small
    threshold rather than exact equality.
    """

    def test_LAG(self) -> None:
        """LAG_1 shifts the series by one position."""
        self.assertEqual(
            list(LAG(pd.DataFrame(list(range(5)), columns=["y"]), 1)["LAG_1"])[1:],
            [0, 1, 2, 3],
        )

    def test_MOM(self) -> None:
        """MOM_1 is the one-step difference of the series."""
        self.assertEqual(
            list(MOM(pd.DataFrame(list(range(5)), columns=["y"]), 1)["MOM_1"][1:]),
            [1, 1, 1, 1],
        )

    def test_MA(self) -> None:
        """MA_1 (window of one) reproduces the input series."""
        self.assertEqual(
            list(MA(pd.DataFrame(list(range(5)), columns=["y"]), 1)["MA_1"]),
            [0, 1, 2, 3, 4],
        )

    def test_ROC(self) -> None:
        """ROC_1 of a linear ramp normalised here evaluates to zero."""
        self.assertEqual(
            list(ROC(pd.DataFrame(list(range(5)), columns=["y"]), 1)["ROC_1"])[1:],
            [0, 0, 0, 0],
        )

    def test_MACD(self) -> None:
        """MACD, MACDsign and MACDdiff columns must match precomputed values."""
        error_threshold = 0.0001
        target = np.array(
            [7.770436585431938, 7.913716315475984, 8.048858332839053, 8.176225524209826]
        )
        # Bug fix: take np.abs of the differences before summing, matching the
        # MACDsign/MACDdiff checks below.  Without the absolute value,
        # positive and negative errors could cancel out and the assertion
        # could pass even when individual values were wrong.
        error1 = np.sum(
            np.abs(
                np.array(
                    MACD(pd.DataFrame(list(range(30)), columns=["y"]), 1)[-4:][
                        "MACD_1_21"
                    ]
                )
                - target
            )
        )
        self.assertLessEqual(error1, error_threshold, "MACD_1_21 produces errors!")

        target = [
            7.37176002981048,
            7.496954620458209,
            7.620613089998056,
            7.742177915869659,
        ]
        error2 = np.sum(
            np.abs(
                MACD(pd.DataFrame(list(range(30)), columns=["y"]), 1)[-4:][
                    "MACDsign_1_21"
                ]
                - target
            )
        )
        self.assertLessEqual(error2, error_threshold, "MACDsign_1_21 produces errors!")

        target = [
            0.3986765556214573,
            0.41676169501777505,
            0.4282452428409975,
            0.4340476083401672,
        ]
        error3 = np.sum(
            np.abs(
                MACD(pd.DataFrame(list(range(30)), columns=["y"]), 1)[-4:][
                    "MACDdiff_1_21"
                ]
                - target
            )
        )
        self.assertLessEqual(error3, error_threshold, "MACDdiff_1_21 produces errors!")

    def test_BBANDS(self) -> None:
        """Both Bollinger-band columns must match precomputed values."""
        error_threshold = 0.0001

        # Bollinger Band 1
        target_1 = np.array(
            [
                5.656854249492381,
                1.885618083164127,
                1.131370849898476,
                0.8081220356417687,
            ]
        )
        error_1 = np.sum(
            np.abs(
                np.array(
                    list(
                        BBANDS(pd.DataFrame(list(range(5)), columns=["y"]), 2)[
                            "BollingerBand1_2"
                        ][1:]
                    )
                )
                - target_1
            )
        )
        self.assertLessEqual(
            error_1, error_threshold, "BollingerBand1_2 produces errors!"
        )

        # Bollinger Band 2
        target_2 = np.array(
            [
                0.6767766952966369,
                0.6767766952966369,
                0.6767766952966369,
                0.6767766952966369,
            ]
        )
        error_2 = np.sum(
            np.abs(
                np.array(
                    list(
                        BBANDS(pd.DataFrame(list(range(5)), columns=["y"]), 2)[
                            "BollingerBand2_2"
                        ][1:]
                    )
                )
                - target_2
            )
        )
        self.assertLessEqual(
            error_2, error_threshold, "BollingerBand2_2 produces errors!"
        )

    def test_EMA(self) -> None:
        """EMA_2 must match precomputed exponential-moving-average values."""
        error_threshold = 0.0001
        target = np.array(
            [0.0, 0.7499999999999999, 1.6153846153846152, 2.55, 3.5206611570247937]
        )
        error = np.sum(
            np.abs(
                np.array(
                    list(EMA(pd.DataFrame(list(range(5)), columns=["y"]), 2)["EMA_2"])
                )
                - target
            )
        )
        self.assertLessEqual(error, error_threshold, "EMA_2 produces errors!")

    def test_TRIX(self) -> None:
        """TRIX_2 must match precomputed triple-EMA rate-of-change values."""
        error_threshold = 0.0001
        target = np.array(
            [0.0, 0.421875, 0.42337953352973806, 0.372572902464051, 0.3100591536021331]
        )
        error = np.sum(
            np.abs(
                np.array(
                    list(
                        TRIX(pd.DataFrame(list(range(1, 6)), columns=["y"]), 2)[
                            "TRIX_2"
                        ]
                    )
                )
                - target
            )
        )
        self.assertLessEqual(error, error_threshold, "TRIX_2 produces errors!")

    def test_TSI(self) -> None:
        """TSI of a strictly increasing ramp is 1 once warmed up."""
        error_threshold = 0.0001
        target = np.array([1.0, 1.0, 1.0])
        error = np.sum(
            np.abs(
                np.array(
                    list(
                        TSI(pd.DataFrame(list(range(5, 10)), columns=["y"]), 2, 3)[
                            "TSI_2_3"
                        ]
                    )[2:]
                )
                - target
            )
        )
        self.assertLessEqual(error, error_threshold, "TSI_2_3 produces errors!")

    def test_RSI(self) -> None:
        """RSI of a strictly increasing ramp saturates at 100."""
        error_threshold = 0.0001
        target = np.array([100.0, 100.0, 100.0, 100.0])
        error = np.sum(
            np.abs(
                np.array(
                    list(
                        RSI(pd.DataFrame(list(range(5, 10)), columns=["y"]), 2)["RSI_2"]
                    )[1:]
                )
                - target
            )
        )
        self.assertLessEqual(error, error_threshold, "RSI_2 produces errors!")
# Allow running this test module directly (e.g. `python <file>.py`).
if __name__ == "__main__":
    unittest.main()
| 3,817 |
357 | <reponame>wfu8/lightwave
/*
* Copyright © 2012-2015 VMware, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an “AS IS” BASIS, without
* warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/**
* VMware Identity Service
*
* Native Adapter
*
* @author: <NAME> <<EMAIL>>
*
* @version: 1.0
* @since: 2011-12-7
*
*/
package com.vmware.af.interop;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang.SystemUtils;
import com.sun.jna.platform.win32.Advapi32Util;
import com.sun.jna.platform.win32.WinReg;
/**
 * Base class whose static initializer configures the JNA native library
 * search path ({@code jna.library.path}) before any subclass loads native
 * libraries. Candidate directories are fixed install paths on Linux and
 * registry-derived install paths on Windows; only directories that actually
 * exist are appended to any pre-existing {@code jna.library.path} value.
 * Any other OS causes an {@link IllegalStateException} at class-load time.
 */
public abstract class NativeAdapter
{
    static
    {
        final String propName = "jna.library.path";

        // Fixed Linux install locations; the VC appliance path is listed
        // first so it takes precedence during library lookup.
        final String LINUX_VMDIR_LIB64_PATH = "/opt/vmware/lib64";
        final String LINUX_VMDIR_VC_LIB64_PATH = "/usr/lib/vmware-vmafd/lib64";
        final String LINUX_LIKEWISE_LIB64_PATH = "/opt/likewise/lib64";

        // Windows registry keys holding the actual install locations.
        final String WIN_REG_VMDIR_PATH =
                "SOFTWARE\\VMWare, Inc.\\VMware Directory Services";
        final String WIN_REG_VMAFD_PATH =
                "SOFTWARE\\VMWare, Inc.\\VMware afd Services";
        final String WIN_REG_INSTALL_KEY = "InstallPath";

        // Defaults used only when the registry keys are absent.
        String WIN_VMWARE_CIS_VMDIRD_PATH =
                "C:" + File.separator + "Program Files" + File.separator +
                "VMware" + File.separator + "CIS" + File.separator + "vmdird";
        String WIN_VMWARE_CIS_VMAFD_PATH =
                "C:" + File.separator + "Program Files" + File.separator +
                "VMware" + File.separator + "CIS" + File.separator + "vmafdd";

        List<String> paths = null;

        if (SystemUtils.IS_OS_LINUX)
        {
            paths = Arrays.asList(
                        LINUX_VMDIR_VC_LIB64_PATH,
                        LINUX_VMDIR_LIB64_PATH,
                        LINUX_LIKEWISE_LIB64_PATH);
        }
        else if (SystemUtils.IS_OS_WINDOWS)
        {
            // Prefer the registry-recorded install path when present.
            if (winRegistryValueExists(WIN_REG_VMDIR_PATH, WIN_REG_INSTALL_KEY))
            {
                WIN_VMWARE_CIS_VMDIRD_PATH =
                        Advapi32Util.registryGetStringValue(
                                WinReg.HKEY_LOCAL_MACHINE,
                                WIN_REG_VMDIR_PATH,
                                WIN_REG_INSTALL_KEY);
            }

            if (winRegistryValueExists(WIN_REG_VMAFD_PATH, WIN_REG_INSTALL_KEY))
            {
                WIN_VMWARE_CIS_VMAFD_PATH =
                        Advapi32Util.registryGetStringValue(
                                WinReg.HKEY_LOCAL_MACHINE,
                                WIN_REG_VMAFD_PATH,
                                WIN_REG_INSTALL_KEY);
            }

            paths = Arrays.asList(
                        WIN_VMWARE_CIS_VMDIRD_PATH,
                        WIN_VMWARE_CIS_VMAFD_PATH);
        }
        else
        {
            throw new IllegalStateException("Only Windows and Linux platforms are supported");
        }

        // Check if the paths exist
        //for (String pathString : paths)
        //{
            //Path path = Paths.get(pathString);
            //if (Files.notExists(path))
            //{
                //throw new IllegalStateException("Path \"" + pathString + "\" does not exist");
            //}
        //}

        // Append each existing candidate directory to the current value of
        // jna.library.path, separated by the platform path separator.
        String propValue = System.getProperty(propName);
        StringBuilder jnalibpath = new StringBuilder(propValue == null ? "" : propValue);

        for (String path : paths )
        {
            File libDir = new File(path);

            if (libDir.exists() && libDir.isDirectory())
            {
                if (jnalibpath.length() > 0)
                {
                    jnalibpath.append(File.pathSeparator);
                }
                jnalibpath.append(path);
            }
        }

        propValue = jnalibpath.substring(0);

        // Only touch the system property when we found at least one directory
        // (or the caller already supplied a value).
        if (!propValue.isEmpty())
        {
            System.setProperty(propName, propValue);
        }
    }

    /**
     * Returns true if both the key and value exist in the Windows registry
     * @param key registry key where the value is contained
     * @param value value contained in the registry key
     * @return true if the value exists; false if the registry
     * key or the value do not exist
     */
    private static boolean winRegistryValueExists(String key, String value) {
        return Advapi32Util.registryKeyExists(WinReg.HKEY_LOCAL_MACHINE, key) &&
               Advapi32Util.registryValueExists(WinReg.HKEY_LOCAL_MACHINE, key, value);
    }
}
| 2,466 |
1,338 |
/*
* M_APM - mapmfact.c
*
* Copyright (C) 1999 - 2007 <NAME>
*
* Permission to use, copy, and distribute this software and its
* documentation for any purpose with or without fee is hereby granted,
* provided that the above copyright notice appear in all copies and
* that both that copyright notice and this permission notice appear
* in supporting documentation.
*
* Permission to modify the software is granted. Permission to distribute
* the modified code is granted. Modifications are to be distributed by
* using the file 'license.txt' as a template to modify the file header.
* 'license.txt' is available in the official MAPM distribution.
*
* This software is provided "as is" without express or implied warranty.
*/
/*
* $Id: mapmfact.c,v 1.11 2007/12/03 01:51:50 mike Exp $
*
* This file contains the FACTORIAL function.
*
* $Log: mapmfact.c,v $
* Revision 1.11 2007/12/03 01:51:50 mike
* Update license
*
* Revision 1.10 2003/07/21 21:05:31 mike
* facilitate 'gcov' code coverage tool by making an array smaller
*
* Revision 1.9 2002/11/03 21:27:28 mike
* Updated function parameters to use the modern style
*
* Revision 1.8 2000/06/14 20:36:16 mike
* increase size of DOS array
*
* Revision 1.7 2000/05/29 13:15:59 mike
* minor tweaks, fixed comment
*
* Revision 1.6 2000/05/26 16:39:03 mike
* minor update to comments and code
*
* Revision 1.5 2000/05/25 23:12:53 mike
* change 'nd' calculation
*
* Revision 1.4 2000/05/25 22:17:45 mike
* implement new algorithm for speed. approx 5 - 10X
* faster on my PC when N! is large (> 6000)
*
* Revision 1.3 1999/06/19 21:25:21 mike
* changed local static variables to MAPM stack variables
*
* Revision 1.2 1999/05/23 18:21:12 mike
* minor variable name tweaks
*
* Revision 1.1 1999/05/15 21:06:11 mike
* Initial revision
*/
/*
* Brief explanation of the factorial algorithm.
* ----------------------------------------------
*
* The old algorithm simply multiplied N * (N-1) * (N-2) etc, until
* the number counted down to '2'. So one term of the multiplication
* kept getting bigger while multiplying by the next number in the
* sequence.
*
* The new algorithm takes advantage of the fast multiplication
* algorithm. The "ideal" setup for fast multiplication is when
* both numbers have approx the same number of significant digits
* and the number of digits is very near (but not over) an exact
* power of 2.
*
* So, we will multiply N * (N-1) * (N-2), etc until the number of
* significant digits is approx 256.
*
* Store this temp product into an array.
*
* Then we will multiply the next sequence until the number of
* significant digits is approx 256.
*
* Store this temp product into the next element of the array.
*
* Continue until we've counted down to 2.
*
* We now have an array of numbers with approx the same number
* of digits (except for the last element, depending on where it
* ended.) Now multiply each of the array elements together to
* get the final product.
*
* The array multiplies are done as follows (assume we used 11
* array elements for this example, indicated by [0] - [10] ) :
*
* initial iter-1 iter-2 iter-3 iter-4
*
* [0]
* * -> [0]
* [1]
* * -> [0]
*
* [2]
* * -> [1]
* [3]
* * -> [0]
*
* [4]
* * -> [2]
* [5]
*
* * -> [1]
*
* [6]
* * -> [3] * -> [0]
* [7]
*
*
* [8]
* * -> [4]
* [9]
* * -> [2] -> [1]
*
*
* [10] -> [5]
*
*/
#include "m_apm_lc.h"
/* define size of local array for temp storage */
#define NDIM 32
/****************************************************************************/
/*
 * Compute moutput = minput! using the partial-product scheme described in
 * the header comment of this file: accumulate runs of the count-down
 * product until each run reaches ~256 significant digits, store each run
 * in `array`, then pairwise-multiply the stored partial products (which
 * keeps both multiplicands of similar size, the sweet spot for the fast
 * multiply). Inputs <= 1 yield 1.
 */
void m_apm_factorial(M_APM moutput, M_APM minput)
{
    int   ii, nmul, ndigits, nd, jj, kk, mm, ct;
    M_APM array[NDIM];
    M_APM iprod1, iprod2, tmp1, tmp2;

    /* return 1 for any input <= 1 */
    if (m_apm_compare(minput, MM_One) <= 0)
    {
        m_apm_copy(moutput, MM_One);
        return;
    }

    ct      = 0;            /* index of the last used slot in array[] */
    mm      = NDIM - 2;     /* slot to fold extra products into once array[] fills */
    ndigits = 256;          /* target significant digits per partial product */
    nd      = ndigits - 20; /* threshold with some slack below the target */

    tmp1   = m_apm_init();
    tmp2   = m_apm_init();
    iprod1 = m_apm_init();
    iprod2 = m_apm_init();
    array[0] = m_apm_init();

    /* tmp2 holds the current count-down value, starting at N */
    m_apm_copy(tmp2, minput);

    /* loop until multiply count-down has reached '2' */
    while (TRUE)
    {
        m_apm_copy(iprod1, MM_One);

        /*
         * loop until the number of significant digits in this
         * partial result is slightly less than 256
         */
        while (TRUE)
        {
            /* iprod1 *= tmp2 * (tmp2 - 1), two count-down steps per pass */
            m_apm_multiply(iprod2, iprod1, tmp2);
            m_apm_subtract(tmp1, tmp2, MM_One);
            m_apm_multiply(iprod1, iprod2, tmp1);

            /*
             * I know, I know. There just isn't a *clean* way
             * to break out of 2 nested loops.
             */
            if (m_apm_compare(tmp1, MM_Two) <= 0)
                goto PHASE2;

            m_apm_subtract(tmp2, tmp1, MM_One);

            if (iprod1->m_apm_datalength > nd)
                break;
        }

        if (ct == (NDIM - 1))
        {
            /*
             * if the array has filled up, start multiplying
             * some of the partial products now.
             */
            m_apm_copy(tmp1, array[mm]);
            m_apm_multiply(array[mm], iprod1, tmp1);

            if (mm == 0)
            {
                /* wrapped around: folded products are now ~2x as long */
                mm = NDIM - 2;
                ndigits = ndigits << 1;
                nd = ndigits - 20;
            }
            else
                mm--;
        }
        else
        {
            /*
             * store this partial product in the array
             * and allocate the next array element
             */
            m_apm_copy(array[ct], iprod1);
            array[++ct] = m_apm_init();
        }
    }

PHASE2:

    /* store the final (short) partial product */
    m_apm_copy(array[ct], iprod1);

    /*
     * pairwise-multiply the stored partial products until only array[0]
     * remains (see the iteration diagram at the top of this file)
     */
    kk = ct;

    while (kk != 0)
    {
        ii = 0;
        jj = 0;
        nmul = (kk + 1) >> 1;

        while (TRUE)
        {
            /* must use tmp var when ii,jj point to same element */
            if (ii == 0)
            {
                m_apm_copy(tmp1, array[ii]);
                m_apm_multiply(array[jj], tmp1, array[ii+1]);
            }
            else
                m_apm_multiply(array[jj], array[ii], array[ii+1]);

            if (++jj == nmul)
                break;

            ii += 2;
        }

        /* odd element count: carry the unpaired last element forward */
        if ((kk & 1) == 0)
        {
            jj = kk >> 1;
            m_apm_copy(array[jj], array[kk]);
        }

        kk = kk >> 1;
    }

    m_apm_copy(moutput, array[0]);

    /* release all working storage */
    for (ii=0; ii <= ct; ii++)
    {
        m_apm_free(array[ii]);
    }

    m_apm_free(tmp1);
    m_apm_free(tmp2);
    m_apm_free(iprod1);
    m_apm_free(iprod2);
}
/****************************************************************************/
| 2,939 |
5,079 | <gh_stars>1000+
"""
Example setup file for a project using Celery.
This can be used to distribute your tasks and worker
as a Python package, on PyPI or on your own private package index.
"""
from __future__ import absolute_import, unicode_literals
from setuptools import setup, find_packages
# Packaging metadata for the example Celery task distribution.
setup(
    name='example-tasks',
    url='http://github.com/example/celery-tasks',
    author='<NAME>',
    author_email='<EMAIL>',
    keywords='our celery integration',
    version='1.0',
    description='Tasks for my project',
    # Reuse this module's docstring (top of file) as the long description.
    long_description=__doc__,
    license='BSD',
    # Ship every discovered package except setup helpers and the test tree.
    packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']),
    test_suite='nose.collector',
    zip_safe=False,
    install_requires=[
        'celery>=4.0',
        # 'requests',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Operating System :: OS Independent',
    ],
)
| 466 |
460 | #include "../../../src/xmlpatterns/schema/qxsdwildcard_p.h"
| 27 |
14,668 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_POLICY_MESSAGING_LAYER_UTIL_GET_CLOUD_POLICY_CLIENT_H_
#define CHROME_BROWSER_POLICY_MESSAGING_LAYER_UTIL_GET_CLOUD_POLICY_CLIENT_H_
#include "base/callback.h"
#include "components/policy/core/common/cloud/cloud_policy_client.h"
#include "components/reporting/util/statusor.h"
namespace reporting {

// One-shot callback that receives either a usable CloudPolicyClient* or an
// error Status.
using CloudPolicyClientResultCb =
    base::OnceCallback<void(StatusOr<policy::CloudPolicyClient*>)>;

// Factory callback; being a RepeatingCallback it may be invoked multiple
// times, each time delivering a result to the supplied callback.
using GetCloudPolicyClientCallback =
    base::RepeatingCallback<void(CloudPolicyClientResultCb)>;

// Returns the factory used by the reporting layer to obtain a
// CloudPolicyClient. NOTE(review): implementation is elsewhere — confirm
// threading/sequence expectations against the .cc file.
GetCloudPolicyClientCallback GetCloudPolicyClientCb();

}  // namespace reporting
#endif // CHROME_BROWSER_POLICY_MESSAGING_LAYER_UTIL_GET_CLOUD_POLICY_CLIENT_H_
| 312 |
558 | <gh_stars>100-1000
package io.github.microcks.domain;
import org.bson.Document;
import org.springframework.data.annotation.Id;
/**
* Domain class representing a GenericResource created for simple CRUD mocking.
* @author laurent
*/
public class GenericResource {

   /** MongoDB document identifier. */
   @Id
   private String id;
   /** Identifier of the Service this resource is attached to. */
   private String serviceId;
   /** Resource content stored as a BSON document. */
   private Document payload;

   /** @return the MongoDB document identifier */
   public String getId() {
      return id;
   }

   /** @param id the MongoDB document identifier */
   public void setId(String id) {
      this.id = id;
   }

   /** @return the identifier of the owning Service */
   public String getServiceId() {
      return serviceId;
   }

   /** @param serviceId the identifier of the owning Service */
   public void setServiceId(String serviceId) {
      this.serviceId = serviceId;
   }

   /** @return the resource content as a BSON document */
   public Document getPayload() {
      return payload;
   }

   /** @param payload the resource content as a BSON document */
   public void setPayload(Document payload) {
      this.payload = payload;
   }
}
| 264 |
2,372 | //
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
#ifndef RENDER_TARGET
#define RENDER_TARGET
#include "FrameBufferObject.h"
// -------------------------------------------------------------------------------------------
// Offscreen render target backed by a framebuffer object with one color
// texture and one depth texture attachment.
class RenderTarget
{
public:
    // Creates the target at the given pixel dimensions.
    RenderTarget(int width, int height);
    ~RenderTarget();

    // Recreates the attachments at the new dimensions.
    void resize(int width, int height);

    // Bracket a rendering pass into this target.
    // NOTE(review): presumably beginCapture binds the FBO and endCapture
    // restores the default framebuffer — confirm in the implementation file.
    void beginCapture();
    void endCapture();

    // GL texture handles for sampling the captured color/depth.
    GLuint getColorTexId() { return mColorTexId; }
    GLuint getDepthTexId() { return mDepthTexId; }

private:
    // Releases textures and the FBO.
    void clear();
    GLuint createTexture(GLenum target, int width, int height, GLint internalFormat, GLenum format);

    GLuint mColorTexId;
    GLuint mDepthTexId;
    FrameBufferObject *mFBO;
};
#endif | 616 |
3,897 | <reponame>heuisam/mbed-os<gh_stars>1000+
/****************************************************************************
*
* Copyright 2020 Samsung Electronics All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
****************************************************************************/
/*
* I2C interface Support
* =====================
*/
#ifndef MBED_I2C_DEF_H
#define MBED_I2C_DEF_H
#include <stdint.h> /* standard types definitions */
/* Memory-mapped register block for the I2C controller. The __I/__O/__IO
 * qualifiers are the usual CMSIS-style read-only / write-only / read-write
 * access markers. Register order must match the hardware layout exactly. */
typedef struct beetle_i2c {
    __IO uint32_t CONTROL;       /* RW Control register */
    __I uint32_t STATUS;        /* RO Status register */
    __IO uint32_t ADDRESS;       /* RW I2C address register */
    __IO uint32_t DATA;          /* RW I2C data register */
    __IO uint32_t IRQ_STATUS;    /* RO Interrupt status register ( read only but write to clear bits) */
    __IO uint32_t TRANSFER_SIZE; /* RW Transfer size register */
    __IO uint32_t SLAVE_MONITOR; /* RW Slave monitor pause register */
    __IO uint32_t TIMEOUT;       /* RW Time out register */
    __I uint32_t IRQ_MASK;      /* RO Interrupt mask register */
    __O uint32_t IRQ_ENABLE;    /* WO Interrupt enable register */
    __O uint32_t IRQ_DISABLE;   /* WO Interrupt disable register */
} I2C_TypeDef;
// revise me
#define I2C0_BASE (0x83017000) /* Shield Header I2C Base Address */
#define I2C1_BASE (0x4000E000ul) /* Onboard I2C Base Address */
#define SHIELD_I2C ((I2C_TypeDef *) I2C0_BASE )
#define BOARD_I2C ((I2C_TypeDef *) I2C1_BASE )
/* Control Register Masks */
#define I2C_CTRL_RW 0x0001 /* Transfer direction */
#define I2C_CTRL_MS 0x0002 /* Mode (master / slave) */
#define I2C_CTRL_NEA 0x0004 /* Addressing mode */
#define I2C_CTRL_ACKEN 0x0008 /* ACK enable */
#define I2C_CTRL_HOLD 0x0010 /* Clock hold enable */
#define I2C_SLVMON 0x0020 /* Slave monitor mode */
#define I2C_CTRL_CLR_FIFO 0x0040 /* Force clear of FIFO */
#define I2C_CTRL_DIVISOR_B 0x3F00 /* Stage B clock divider */
#define I2C_CTRL_DIVISOR_A 0xA000 /* Stage A clock divider */
#define I2C_CTRL_DIVISORS 0xFF00 /* Combined A and B fields */
#define I2C_CTRL_DIVISOR_OFFSET 8 /* Offset of the clock divisor in
* the CONTROL register
*/
#define I2C_CTRL_DIVISOR_A_BIT_MASK 0x03
/*
* First part of the clock
* divisor in the CONTROL register
*/
#define I2C_CTRL_DIVISOR_B_BIT_MASK 0x3F
/*
* Second part of the clock
* divisor in the CONTROL register
*/
/* Status Register Masks */
#define I2C_STATUS_RXRW 0x0008 /* Mode of transmission from master */
#define I2C_STATUS_RXDV 0x0020 /* Valid data waiting to be read */
#define I2C_STATUS_TXDV 0x0040 /* Still a data byte to be sent */
#define I2C_STATUS_RXOVF 0x0080 /* Receiver overflow */
#define I2C_STATUS_BA 0x0100 /* Bus active */
/* Address Register Masks */
#define I2C_ADDRESS_7BIT 0x007F
/* Interrupt Status / Enable / Disable Register Masks */
#define I2C_IRQ_COMP 0x0001 /* Transfer complete */
#define I2C_IRQ_DATA 0x0002 /* More data */
#define I2C_IRQ_NACK 0x0004 /* Transfer not acknowledged */
#define I2C_IRQ_TO 0x0008 /* Transfer timed out */
#define I2C_IRQ_SLV_RDY 0x0010 /* Monitored slave ready */
#define I2C_IRQ_RX_OVF 0x0020 /* Receive overflow */
#define I2C_IRQ_TX_OVF 0x0040 /* Transmit overflow */
#define I2C_IRQ_RX_UNF 0x0080 /* Receive underflow */
#define I2C_IRQ_ARB_LOST 0x0200 /* Arbitration lost */
/* Transfer Size Register Masks */
#define I2C_TRANSFER_SIZE 0xFF
/* Error codes */
#define E_SUCCESS 0x0
#define E_INCOMPLETE_DATA 0x1
#define CTL 0x0000
#define FIFO_CTL 0x0004
#define TRAILING_CTL 0x0008
#define INT_EN 0x0020
#define INT_STAT 0x0024
#define FIFO_STAT 0x0030
#define TXDATA 0x0034
#define RXDATA 0x0038
#define I2C_CONF 0x0040
#define I2C_AUTO_CONF 0x0044
#define I2C_TIMEOUT 0x0048
#define I2C_MANUAL_CMD 0x004C
#define I2C_TRANS_STATUS 0x0050
#define I2C_TIMING_HS1 0x0054
#define I2C_TIMING_HS2 0x0058
#define I2C_TIMING_HS3 0x005C
#define I2C_TIMING_FS1 0x0060
#define I2C_TIMING_FS2 0x0064
#define I2C_TIMING_FS3 0x0068
#define I2C_TIMING_SLA 0x006C
#define I2C_ADDR 0x0070
#define I2C_START (1 << 0)
#define I2C_RESTART (1 << 1)
#define I2C_STOP (1 << 2)
#define I2C_SEND_DATA (1 << 3)
#define I2C_READ_DATA (1 << 4)
#define I2C_RX_ACK (1 << 5)
#define I2C_TX_MASK (0xFF << 24)
#define I2C_RX_MASK (0xFF << 16)
#define I2C_SPEED_400KHZ 400000
#define HSI2C_INT_SLAVE_ADDR_MATCH (1 << 15)
#define HSI2C_INT_XFER_DONE_MANUAL (1 << 13)
#define HSI2C_INT_XFER_DONE_NOACK_MANUAL (1 << 12)
#define HSI2C_INT_NO_DEV_AUTO (1 << 10)
#define HSI2C_INT_NO_DEV_ACK_AUTO (1 << 9)
#define HSI2C_INT_TRANS_ABORT_AUTO (1 << 8)
#define HSI2C_INT_TRANSFER_DONE_AUTO (1 << 7)
#define HSI2C_INT_RX_OVERRUN (1 << 5)
#define HSI2C_INT_TX_UNDERRUN (1 << 2)
#define HSI2C_INT_RX_ALMOST_FULL (1 << 1)
#define HSI2C_INT_TX_ALMOST_EMPTY (1 << 0)
#define HSI2C_INT_ALL 0xFFFF
#define I2C_M_READ 0x0001 /**< Read data, from slave to master */
#define I2C_M_TEN 0x0002 /**< Ten bit address */
#define I2C_M_NORESTART 0x0080 /**< Message should not begin with
* (re-)start of transfer */
#define I2C_SLAVE 0x0703 /**< Use this slave address */
#define I2C_SLAVE_FORCE 0x0706 /**< Use this slave address, even if it
* is already in use by a driver! */
#define I2C_TENBIT 0x0704 /**< 0 for 7 bit addrs, != 0 for 10 bit */
#define I2C_RDWR 0x0707 /**< Combined R/W transfer (one STOP only) */
#define I2C_FREQUENCY 0X801
#define I2C_M_IGNORE_NAK 0x1000 /**< if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_NOSTART 0x4000 /**< if I2C_FUNC_NOSTART */
#define I2C_MASTER 0
#define I2C_SLAVE_MODE 1
#define I2C_POLLING 0
#define I2C_INTERRUPT 1
#endif
| 3,376 |
369 | /*****************************************************************************
* Copyright 2014 Microchip Technology Inc. and its subsidiaries.
* You may use this software and any derivatives exclusively with
* Microchip products.
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS".
* NO WARRANTIES, WHETHER EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE,
* INCLUDING ANY IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY,
* AND FITNESS FOR A PARTICULAR PURPOSE, OR ITS INTERACTION WITH MICROCHIP
* PRODUCTS, COMBINATION WITH ANY OTHER PRODUCTS, OR USE IN ANY APPLICATION.
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE.
* TO THE FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL
* CLAIMS IN ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF
* FEES, IF ANY, THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
* MICROCHIP PROVIDES THIS SOFTWARE CONDITIONALLY UPON YOUR ACCEPTANCE
* OF THESE TERMS.
*****************************************************************************/
/** @file mec14xx_tfdp.h
*MEC14xx TRACE FIFO Data Port definitions
*/
/** @defgroup MEC14xx Peripherals TFDP
*/
#ifndef _MEC14XX_TFDP_H
#define _MEC14XX_TFDP_H
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define TFDP_NUM_INSTANCES (1u)
#define TFDP_FRAME_START (0xFD)
//
// Offset +00h TFDP Data Register: 8-bit R/W
//
#define TFDP_DATA_REG_OFS (0ul)
#define TFDP_DATA_REG_MASK (0xFFul)
//
// Offset +04h TFDP Control Register 8-bit R/W
//
#define TFDP_CTRL_REG_OFS (0x04ul)
#define TFDP_CTRL_REG_MASK (0x7Ful)
//
#define TFDP_CTRL_ENABLE_BITPOS (0u)
#define TFDP_CTRL_EDGE_SEL_BITPOS (1u)
#define TFDP_CTRL_DIVSEL_BITPOS (2ul)
#define TFDP_CTRL_IP_DELAY_BITPOS (4ul)
// Enable/disable
#define TFDP_CTRL_ENABLE (1u << (TFDP_CTRL_ENABLE_BITPOS))
// Select clock edge data on which data is shifted out
#define TFDP_CTRL_RISING_EDGE (0u << (TFDP_CTRL_EDGE_SEL_BITPOS))
#define TFDP_CTRL_FALLING_EDGE (1u << (TFDP_CTRL_EDGE_SEL_BITPOS))
// TFDP Clock divisor
#define TFDP_CTRL_CLK_DIV2 (0u << (TFDP_CTRL_DIVSEL_BITPOS))
#define TFDP_CTRL_CLK_DIV4 (1u << (TFDP_CTRL_DIVSEL_BITPOS))
#define TFDP_CTRL_CLK_DIV8 (2u << (TFDP_CTRL_DIVSEL_BITPOS))
#define TFDP_CTRL_CLK_DIV2_RSVD (3u << (TFDP_CTRL_DIVSEL_BITPOS))
// Number of clocks to delay between each byte
// Note: this will affect time TFDP block holds off CPU on next
// write to TFDP data register.
#define TFDP_CTRL_IP_1CLKS (0u << (TFDP_CTRL_IP_DELAY_BITPOS))
#define TFDP_CTRL_IP_2CLKS (1u << (TFDP_CTRL_IP_DELAY_BITPOS))
#define TFDP_CTRL_IP_3CLKS (2u << (TFDP_CTRL_IP_DELAY_BITPOS))
#define TFDP_CTRL_IP_4CLKS (3u << (TFDP_CTRL_IP_DELAY_BITPOS))
#define TFDP_CTRL_IP_5CLKS (4u << (TFDP_CTRL_IP_DELAY_BITPOS))
#define TFDP_CTRL_IP_6CLKS (5u << (TFDP_CTRL_IP_DELAY_BITPOS))
#define TFDP_CTRL_IP_7CLKS (6u << (TFDP_CTRL_IP_DELAY_BITPOS))
#define TFDP_CTRL_IP_8CLKS (7u << (TFDP_CTRL_IP_DELAY_BITPOS))
#ifdef __cplusplus
}
#endif
#endif // #ifndef _MEC14XX_TFDP_H
/* end mec14xx_tfdp.h */
/** @}
*/
| 1,488 |
1,148 | <reponame>MjCode01/DS-Algo-Point
def bucketSort(array):
    """Sort `array` of numbers in ascending order, in place, via bucket sort.

    The original implementation computed bucket indices as ``int(10 * j)``,
    which assumes every value lies in [0, 1) and raises IndexError otherwise
    (including for this file's own sample input 3, 4, 5, 2, 1). Indices are
    now derived by min/max scaling, so any real-valued input works.

    Args:
        array: mutable sequence of numbers (ints or floats).

    Returns:
        The same sequence object, sorted in ascending order.
    """
    n = len(array)
    if n <= 1:
        return array  # empty or single element: already sorted

    lo = min(array)
    hi = max(array)
    if lo == hi:
        return array  # all values equal: already sorted

    # Distribute values into n buckets, scaling each value to [0, n-1].
    buckets = [[] for _ in range(n)]
    scale = (n - 1) / (hi - lo)
    for value in array:
        # min() guards against a float-rounding overshoot at value == hi.
        index = min(n - 1, int((value - lo) * scale))
        buckets[index].append(value)

    # Concatenate the individually sorted buckets back into `array`.
    write_pos = 0
    for bucket in buckets:
        for value in sorted(bucket):
            array[write_pos] = value
            write_pos += 1
    return array
if __name__ == "__main__":
    # Bug fix: `array` was previously undefined here (NameError on run).
    # Read whitespace-separated numbers from stdin, matching the sample I/O
    # documented below.
    import sys

    array = [float(token) for token in sys.stdin.read().split()]
    print("Sorted Array in descending order is")
    print(bucketSort(array))
"""
Problem : Sort the given array using bucket sort.
Sort the elements by first dividing the elements into several groups or buckets. The elements inside each bucket are sorted using any of the suitable sorting algorithms or recursively calling the same algorithm.
Sample I/O:
INPUT:
3
4
5
2
1
Sorted Array in descending order is
[1, 2, 3, 4, 5]
Time complexity: O(n^2) in the worst case (all elements in one bucket), O(n + k) on average
Space complexity: O(n + k)
"""
| 371 |
2,610 | <reponame>Rosell777/dnscat2
/* driver_dns.c
* Created July/2013
* By <NAME>
*
* See LICENSE.md
*/
#include <assert.h>
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include "controller/controller.h"
#include "libs/buffer.h"
#include "libs/dns.h"
#include "libs/log.h"
#include "libs/memory.h"
#include "libs/types.h"
#include "libs/udp.h"
#include "driver_dns.h"
#define MAX_FIELD_LENGTH 62
#define MAX_DNS_LENGTH 255
#define WILDCARD_PREFIX "dnscat"
/* The max length is a little complicated:
* 255 because that's the max DNS length
* Halved, because we encode in hex
* Minus the length of the domain, which is appended
* Minus 1, for the period right before the domain
* Minus the number of periods that could appear within the name
*/
#define MAX_DNSCAT_LENGTH(domain) ((255/2) - (domain ? strlen(domain) : strlen(WILDCARD_PREFIX)) - 1 - ((MAX_DNS_LENGTH / MAX_FIELD_LENGTH) + 1))
#define HEXCHAR(c) ((c) < 10 ? ((c)+'0') : (((c)-10) + 'a'))
/* select-group callback invoked when the DNS socket closes underneath us.
 * A dead DNS socket is unrecoverable for this driver, so log and exit. */
static SELECT_RESPONSE_t dns_data_closed(void *group, int socket, void *param)
{
    LOG_FATAL("DNS socket closed!");
    exit(0);

    return SELECT_OK; /* never reached */
}
/* Strip the configured domain suffix (or, when no domain is configured, the
 * leading "dnscat" wildcard prefix) from a response name, returning the
 * remaining encoded portion as a newly allocated string the caller frees.
 * Returns NULL when the name doesn't contain the domain or is exactly the
 * domain (the server answers with just the domain on errors). */
static uint8_t *remove_domain(char *str, char *domain)
{
    if(domain)
    {
        char *fixed = NULL;

        if(!strstr(str, domain))
        {
            LOG_ERROR("The response didn't contain the domain name: %s", str);
            return NULL;
        }

        /* The server returns an empty domain name for all errors. */
        if(!strcmp(str, domain))
        {
            LOG_INFO("The response was just the domain name: %s", str);
            return NULL;
        }

        /* Truncate just before ".<domain>".
         * NOTE(review): assumes the domain appears only as the trailing
         * suffix; a name embedding the domain earlier would be cut at the
         * wrong spot — confirm against the server's encoding. */
        fixed = safe_strdup(str);
        fixed[strlen(str) - strlen(domain) - 1] = '\0';

        return (uint8_t*)fixed;
    }
    else
    {
        /* No domain configured: skip past the "dnscat" prefix and duplicate
         * the remainder. */
        return (uint8_t*)safe_strdup(str += strlen(WILDCARD_PREFIX));
    }
}
/* Hex-decode a dotted dnscat name: periods are skipped, each remaining pair
 * of hex digits becomes one output byte. On success returns a newly
 * allocated byte array and updates *length to the decoded size; on any
 * malformed input (odd digit count, non-hex character) logs and returns
 * NULL.
 *
 * Fix: the intermediate buffer_t was leaked on every error return path;
 * it is now destroyed before returning NULL. */
static uint8_t *buffer_decode_hex(uint8_t *str, size_t *length)
{
    size_t    i = 0;
    buffer_t *out = buffer_create(BO_BIG_ENDIAN);

    while(i < *length)
    {
        uint8_t c1 = 0;
        uint8_t c2 = 0;

        /* Read the first character, ignoring periods */
        do
        {
            c1 = toupper(str[i++]);
        } while(c1 == '.' && i < *length);

        /* Make sure we aren't at the end of the buffer. */
        if(i >= *length)
        {
            LOG_ERROR("Couldn't hex-decode the name (name was an odd length): %s", str);
            buffer_destroy(out); /* was leaked on this path */
            return NULL;
        }

        /* Make sure we got a hex digit */
        if(!isxdigit(c1))
        {
            LOG_ERROR("Couldn't hex-decode the name (contains non-hex characters): %s", str);
            buffer_destroy(out); /* was leaked on this path */
            return NULL;
        }

        /* Read the second character. */
        do
        {
            c2 = toupper(str[i++]);
        } while(c2 == '.' && i < *length);

        /* Make sure we got a hex digit */
        if(!isxdigit(c2))
        {
            LOG_ERROR("Couldn't hex-decode the name (contains non-hex characters): %s", str);
            buffer_destroy(out); /* was leaked on this path */
            return NULL;
        }

        /* Convert the two hex digits (now known to be valid) to one byte. */
        c1 = ((c1 < 'A') ? (c1 - '0') : (c1 - 'A' + 10));
        c2 = ((c2 < 'A') ? (c2 - '0') : (c2 - 'A' + 10));

        buffer_add_int8(out, (c1 << 4) | c2);
    }

    /* Consumes `out` and hands back its bytes; *length becomes the decoded size. */
    return buffer_create_string_and_destroy(out, length);
}
/* qsort() comparator: orders A-record answers by the first byte of the
 * address payload. NOTE(review): presumably that byte is a sequence marker
 * the server sets so multi-record answers can be reassembled in order —
 * confirm against the dnscat2 protocol. */
static int cmpfunc_a(const void *a, const void *b)
{
    return ((const answer_t*)a)->answer->A.bytes[0] - ((const answer_t*)b)->answer->A.bytes[0];
}
#ifndef WIN32
/* Same ordering comparator for AAAA answers; compiled out on Windows. */
static int cmpfunc_aaaa(const void *a, const void *b)
{
    return ((const answer_t*)a)->answer->AAAA.bytes[0] - ((const answer_t*)b)->answer->AAAA.bytes[0];
}
#endif
/* Pick one of the caller-configured DNS record types at random for the next
 * query. rand() is adequate here: the choice only varies the record type of
 * outgoing queries and is not security-sensitive. */
static dns_type_t get_type(driver_dns_t *driver)
{
    return driver->types[rand() % driver->type_count];
}
/* Pull the next outgoing payload from the controller, hex-encode it into a
 * DNS name (two hex digits per byte, a period inserted before any label
 * would exceed MAX_FIELD_LENGTH), wrap it with either the "dnscat." prefix
 * (no domain configured) or the ".<domain>" suffix, and send it to the
 * configured DNS server as a single query of a randomly chosen type.
 * No-op when the controller has nothing to send. */
static void do_send(driver_dns_t *driver)
{
    size_t        i;
    dns_t        *dns;
    buffer_t     *buffer;
    uint8_t      *encoded_bytes;
    size_t        encoded_length;
    uint8_t      *dns_bytes;
    size_t        dns_length;
    size_t        section_length;

    size_t        length;
    uint8_t      *data = controller_get_outgoing((size_t*)&length, (size_t)MAX_DNSCAT_LENGTH(driver->domain));

    /* If we aren't supposed to send anything (like we're waiting for a timeout),
     * data is NULL. */
    if(!data)
        return;

    assert(driver->s != -1); /* Make sure we have a valid socket. */
    assert(data); /* Make sure they aren't trying to send NULL. */
    assert(length > 0); /* Make sure they aren't trying to send 0 bytes. */
    assert(length <= MAX_DNSCAT_LENGTH(driver->domain));

    buffer = buffer_create(BO_BIG_ENDIAN);

    /* If no domain is set, add the wildcard prefix at the start. */
    if(!driver->domain)
    {
        buffer_add_bytes(buffer, (uint8_t*)WILDCARD_PREFIX, strlen(WILDCARD_PREFIX));
        buffer_add_int8(buffer, '.');
    }

    /* Keep track of the length of the current section (the characters between two periods). */
    section_length = 0;
    for(i = 0; i < length; i++)
    {
        /* Two hex digits per payload byte. */
        buffer_add_int8(buffer, HEXCHAR((data[i] >> 4) & 0x0F));
        buffer_add_int8(buffer, HEXCHAR((data[i] >> 0) & 0x0F));

        /* Add periods when we need them. */
        section_length += 2;
        if(i + 1 != length && section_length + 2 >= MAX_FIELD_LENGTH)
        {
            section_length = 0;
            buffer_add_int8(buffer, '.');
        }
    }

    /* If a domain is set, instead of the wildcard prefix, add the domain to the end. */
    if(driver->domain)
    {
        buffer_add_int8(buffer, '.');
        buffer_add_bytes(buffer, driver->domain, strlen(driver->domain));
    }
    buffer_add_int8(buffer, '\0');

    /* Get the result out. */
    encoded_bytes = buffer_create_string_and_destroy(buffer, &encoded_length);

    /* Double-check we didn't mess up the length. */
    assert(encoded_length <= MAX_DNS_LENGTH);

    /* Build a standard recursive query for the encoded name and ship it. */
    dns = dns_create(_DNS_OPCODE_QUERY, _DNS_FLAG_RD, _DNS_RCODE_SUCCESS);
    dns_add_question(dns, (char*)encoded_bytes, get_type(driver), _DNS_CLASS_IN);
    dns_bytes = dns_to_packet(dns, &dns_length);

    LOG_INFO("Sending DNS query for: %s to %s:%d", encoded_bytes, driver->dns_server, driver->dns_port);
    udp_send(driver->s, driver->dns_server, driver->dns_port, dns_bytes, dns_length);

    safe_free(dns_bytes);
    safe_free(encoded_bytes);
    safe_free(data);

    dns_destroy(dns);
}
/* Periodic select-group timeout: flush any pending outgoing data and give
 * the controller its heartbeat tick. */
static SELECT_RESPONSE_t timeout_callback(void *group, void *param)
{
    do_send((driver_dns_t*)param);
    controller_heartbeat();

    return SELECT_OK;
}
/* select_group recv callback: parse a DNS response and extract the
 * tunneled payload.
 *
 * The peer encodes data differently depending on the record type:
 *  - TXT:      hex-encoded string in the TXT data.
 *  - CNAME/MX: hex-encoded string in the name, with our domain appended
 *              (remove_domain() strips it before decoding).
 *  - A/AAAA:   payload packed into the address bytes of several records;
 *              byte 0 of each address is a sequence number used for
 *              sorting, and the first re-assembled byte is the total
 *              payload length.
 *
 * Decoded data is handed to the controller; if the controller reports it
 * has data to send back, another query is fired off immediately.
 */
static SELECT_RESPONSE_t recv_socket_callback(void *group, int s, uint8_t *data, size_t length, char *addr, uint16_t port, void *param)
{
  /*driver_dns_t *driver_dns = param;*/
  dns_t        *dns    = dns_create_from_packet(data, length);
  driver_dns_t *driver = (driver_dns_t*) param;

  /* BUGFIX: 'length' is a size_t; use %zu like the rest of this function. */
  LOG_INFO("DNS response received (%zu bytes)", length);

  if(dns->rcode != _DNS_RCODE_SUCCESS)
  {
    /* Log the failure reason; the response is simply dropped and we wait
     * for the next timeout to retry. */
    switch(dns->rcode)
    {
      case _DNS_RCODE_FORMAT_ERROR:
        LOG_ERROR("DNS: RCODE_FORMAT_ERROR");
        break;
      case _DNS_RCODE_SERVER_FAILURE:
        LOG_ERROR("DNS: RCODE_SERVER_FAILURE");
        break;
      case _DNS_RCODE_NAME_ERROR:
        LOG_ERROR("DNS: RCODE_NAME_ERROR");
        break;
      case _DNS_RCODE_NOT_IMPLEMENTED:
        LOG_ERROR("DNS: RCODE_NOT_IMPLEMENTED");
        break;
      case _DNS_RCODE_REFUSED:
        LOG_ERROR("DNS: RCODE_REFUSED");
        break;
      default:
        LOG_ERROR("DNS: Unknown error code (0x%04x)", dns->rcode);
        break;
    }
  }
  else if(dns->question_count != 1)
  {
    LOG_ERROR("DNS returned the wrong number of response fields (question_count should be 1, was instead %d).", dns->question_count);
    LOG_ERROR("This is probably due to a DNS error");
  }
  else if(dns->answer_count < 1)
  {
    LOG_ERROR("DNS didn't return an answer");
    LOG_ERROR("This is probably due to a DNS error");
  }
  else
  {
    size_t i;

    uint8_t *answer        = NULL;
    uint8_t *tmp_answer    = NULL;
    size_t   answer_length = 0;
    dns_type_t type = dns->answers[0].type;

    if(type == _DNS_TYPE_TEXT)
    {
      LOG_INFO("Received a TXT response: %s", dns->answers[0].answer->TEXT.text);

      /* Get the answer. */
      tmp_answer    = dns->answers[0].answer->TEXT.text;
      answer_length = dns->answers[0].answer->TEXT.length;

      /* Decode it. */
      answer = buffer_decode_hex(tmp_answer, &answer_length);
    }
    else if(type == _DNS_TYPE_CNAME)
    {
      LOG_INFO("Received a CNAME response: %s", (char*)dns->answers[0].answer->CNAME.name);

      /* Get the answer, stripping our domain suffix first. */
      tmp_answer = remove_domain((char*)dns->answers[0].answer->CNAME.name, driver->domain);
      if(!tmp_answer)
      {
        answer = NULL;
      }
      else
      {
        answer_length = strlen((char*)tmp_answer);

        /* Decode it. */
        answer = buffer_decode_hex(tmp_answer, &answer_length);
        safe_free(tmp_answer);
      }
    }
    else if(type == _DNS_TYPE_MX)
    {
      LOG_INFO("Received a MX response: %s", (char*)dns->answers[0].answer->MX.name);

      /* Get the answer, stripping our domain suffix first. */
      tmp_answer = remove_domain((char*)dns->answers[0].answer->MX.name, driver->domain);
      if(!tmp_answer)
      {
        answer = NULL;
      }
      else
      {
        answer_length = strlen((char*)tmp_answer);
        LOG_INFO("Received a MX response (%zu bytes)", answer_length);

        /* Decode it. */
        answer = buffer_decode_hex(tmp_answer, &answer_length);
        safe_free(tmp_answer);
      }
    }
    else if(type == _DNS_TYPE_A)
    {
      buffer_t *buf = buffer_create(BO_BIG_ENDIAN);

      /* Answers can arrive in any order; byte 0 of each address is a
       * sequence number, so sort on it before re-assembling the stream. */
      qsort(dns->answers, dns->answer_count, sizeof(answer_t), cmpfunc_a);

      for(i = 0; i < dns->answer_count; i++)
        buffer_add_bytes(buf, dns->answers[i].answer->A.bytes + 1, 3);

      /* The first re-assembled byte is the total payload length. */
      answer_length = buffer_read_next_int8(buf);
      LOG_INFO("Received an A response (%zu bytes)", answer_length);

      answer = safe_malloc(answer_length);
      buffer_read_bytes_at(buf, 1, answer, answer_length);

      /* BUGFIX: the temporary re-assembly buffer was leaked on every A
       * response; free it now that the bytes have been copied out. */
      buffer_destroy(buf);
    }
#ifndef WIN32
    else if(type == _DNS_TYPE_AAAA)
    {
      buffer_t *buf = buffer_create(BO_BIG_ENDIAN);

      /* Same scheme as A records, but 15 payload bytes per address. */
      qsort(dns->answers, dns->answer_count, sizeof(answer_t), cmpfunc_aaaa);

      for(i = 0; i < dns->answer_count; i++)
        buffer_add_bytes(buf, dns->answers[i].answer->AAAA.bytes + 1, 15);

      answer_length = buffer_read_next_int8(buf);
      LOG_INFO("Received an AAAA response (%zu bytes)", answer_length);

      answer = safe_malloc(answer_length);
      buffer_read_bytes_at(buf, 1, answer, answer_length);

      /* BUGFIX: free the temporary re-assembly buffer (was leaked). */
      buffer_destroy(buf);
    }
#endif
    else
    {
      LOG_ERROR("Unknown DNS type returned: %d", type);
      answer = NULL;
    }

    if(answer)
    {
      /*LOG_WARNING("Received a %zu-byte DNS response: %s [0x%04x]", answer_length, answer, type);*/

      /* Pass the buffer to the caller */
      if(answer_length > 0)
      {
        /* Pass the data elsewhere. */
        if(controller_data_incoming(answer, answer_length))
          do_send(driver);
      }

      safe_free(answer);
    }
  }

  dns_destroy(dns);

  return SELECT_OK;
}
/* Create and initialize a DNS tunnel driver.
 *
 * Opens a UDP socket bound to 'host', stores the tunnel settings, parses
 * the comma/space-separated list of DNS record types in 'types', and
 * registers the socket plus timeout/closed callbacks with the select_group.
 * Exits the process on fatal configuration errors (bad socket, no valid
 * types).  'domain' may be NULL, in which case queries use the wildcard
 * prefix instead of an appended domain (see do_send()). */
driver_dns_t *driver_dns_create(select_group_t *group, char *domain, char *host, uint16_t port, char *types, char *server)
{
  driver_dns_t *driver = (driver_dns_t*) safe_malloc(sizeof(driver_dns_t));
  char *token = NULL;

  /* Create the actual DNS socket. */
  LOG_INFO("Creating UDP (DNS) socket on %s", host);
  driver->s = udp_create_socket(0, host);
  if(driver->s == -1)
  {
    LOG_FATAL("Couldn't create UDP socket!");
    exit(1);
  }

  /* Set the domain and stuff.  Note: pointers are stored, not copied --
   * callers must keep domain/server alive for the driver's lifetime. */
  driver->group      = group;
  driver->domain     = domain;
  driver->dns_port   = port;
  driver->dns_server = server;

  /* Allow the user to choose 'any' protocol. */
  if(!strcmp(types, "ANY"))
    types = DNS_TYPES;

  /* Make a copy of types, since strtok() changes it. */
  types = safe_strdup(types);

  driver->type_count = 0;
  /* Parse the type list; unrecognized tokens are silently ignored. */
  for(token = strtok(types, ", "); token && driver->type_count < DNS_MAX_TYPES; token = strtok(NULL, ", "))
  {
    if(!strcmp(token, "TXT") || !strcmp(token, "TEXT"))
      driver->types[driver->type_count++] = _DNS_TYPE_TEXT;
    else if(!strcmp(token, "MX"))
      driver->types[driver->type_count++] = _DNS_TYPE_MX;
    else if(!strcmp(token, "CNAME"))
      driver->types[driver->type_count++] = _DNS_TYPE_CNAME;
    else if(!strcmp(token, "A"))
      driver->types[driver->type_count++] = _DNS_TYPE_A;
#ifndef WIN32
    else if(!strcmp(token, "AAAA"))
      driver->types[driver->type_count++] = _DNS_TYPE_AAAA;
#endif
  }

  /* Now that we no longer need types. */
  safe_free(types);

  if(driver->type_count == 0)
  {
    LOG_FATAL("You didn't pass any valid DNS types to use! Allowed types are "DNS_TYPES);
    exit(1);
  }

  /* If it succeeds, add it to the select_group */
  /* NOTE(review): this is a UDP socket but it's registered as
   * SOCKET_TYPE_STREAM -- confirm this matches the select_group API's
   * expectations. */
  select_group_add_socket(group, driver->s, SOCKET_TYPE_STREAM, driver);
  select_set_recv(group, driver->s, recv_socket_callback);
  select_set_timeout(group, timeout_callback, driver);
  select_set_closed(group, driver->s, dns_data_closed);

  return driver;
}
/* Free the driver structure itself.
 * NOTE(review): the socket and select_group registrations are not torn down
 * here -- presumably handled elsewhere (e.g. dns_data_closed); confirm. */
void driver_dns_destroy(driver_dns_t *driver)
{
  safe_free(driver);
}
/* Main loop: kick off the first send immediately, then poll the
 * select_group forever with a 50ms granularity.  Never returns. */
void driver_dns_go(driver_dns_t *driver)
{
  /* Do a fake timeout at the start so we can get going more quickly. */
  timeout_callback(driver->group, driver);

  /* Loop forever and poke the socket. */
  while(TRUE)
    select_group_do_select(driver->group, 50);
}
| 5,506 |
453 | /*
* Copyright 1996 Massachusetts Institute of Technology
*
* Permission to use, copy, modify, and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that both the above copyright notice and this
* permission notice appear in all copies, that both the above
* copyright notice and this permission notice appear in all
* supporting documentation, and that the name of M.I.T. not be used
* in advertising or publicity pertaining to distribution of the
* software without specific, written prior permission. M.I.T. makes
* no representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied
* warranty.
*
* THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
* ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
* SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $ANA: ascii2addr.c,v 1.2 1996/06/13 18:46:02 wollman Exp $
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>
#include <string.h>
#include <net/if_dl.h>
#include <netinet/in.h>
#include <arpa/inet.h>
/*
 * Convert an ASCII representation of a network address into binary form.
 *
 * af:     address family; only AF_INET is supported.
 * ascii:  NUL-terminated string to parse (e.g. "192.0.2.1").
 * result: output buffer; for AF_INET it must hold a struct in_addr.
 *
 * Returns the number of bytes written to 'result' on success
 * (sizeof(struct in_addr) for AF_INET), or -1 on failure with errno set
 * to EINVAL (unparseable address) or EPROTONOSUPPORT (unsupported family).
 *
 * Modernized from the obsolete K&R parameter declaration to an ANSI
 * prototype; behavior is unchanged.
 */
int
ascii2addr(int af, const char *ascii, void *result)
{
	struct in_addr *ina;
	char strbuf[4*sizeof("123")]; /* long enough for V4 only */

	switch(af) {
	case AF_INET:
		ina = result;
		strbuf[0] = '\0';
		/* Copy into a bounded buffer so an over-long input is
		 * truncated (to 15 chars, the longest valid dotted quad)
		 * rather than handed to inet_aton() unchecked. */
		strncat(strbuf, ascii, (sizeof strbuf)-1);
		if (inet_aton(strbuf, ina))
			return sizeof(struct in_addr);
		errno = EINVAL;
		break;

	default:
		errno = EPROTONOSUPPORT;
		break;
	}

	return -1;
}
| 752 |
7,353 | <gh_stars>1000+
/**
* @file PacketBuffer.c
* @author <NAME> <<EMAIL>>
*
* @section LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the author nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <misc/debug.h>
#include <misc/balloc.h>
#include <flow/PacketBuffer.h>
static void input_handler_done (PacketBuffer *buf, int in_len);
static void output_handler_done (PacketBuffer *buf);
/* Called when the input PacketRecvInterface has finished writing a packet
 * of 'in_len' bytes into the chunk buffer's input area.  Commits the
 * packet, re-arms the receive if there's room for another full-MTU packet,
 * and starts the output side if the buffer was previously empty. */
void input_handler_done (PacketBuffer *buf, int in_len)
{
    ASSERT(in_len >= 0)
    ASSERT(in_len <= buf->input_mtu)
    DebugObject_Access(&buf->d_obj);

    // remember if buffer is empty (output_avail < 0 means no packet queued)
    int was_empty = (buf->buf.output_avail < 0);

    // submit packet to buffer
    ChunkBuffer2_SubmitPacket(&buf->buf, in_len);

    // if there is space, schedule receive
    if (buf->buf.input_avail >= buf->input_mtu) {
        PacketRecvInterface_Receiver_Recv(buf->input, buf->buf.input_dest);
    }

    // if buffer was empty, schedule send of the packet we just committed
    if (was_empty) {
        PacketPassInterface_Sender_Send(buf->output, buf->buf.output_dest, buf->buf.output_avail);
    }
}
/* Called when the output PacketPassInterface has finished sending the
 * current packet.  Removes it from the chunk buffer, re-arms the receive
 * if that freed enough room for a full-MTU packet, and sends the next
 * queued packet if one is available. */
void output_handler_done (PacketBuffer *buf)
{
    DebugObject_Access(&buf->d_obj);

    // remember if buffer is full (no room for another full-MTU packet)
    int was_full = (buf->buf.input_avail < buf->input_mtu);

    // remove packet from buffer
    ChunkBuffer2_ConsumePacket(&buf->buf);

    // if buffer was full and there is space, schedule receive
    if (was_full && buf->buf.input_avail >= buf->input_mtu) {
        PacketRecvInterface_Receiver_Recv(buf->input, buf->buf.input_dest);
    }

    // if there is more data, schedule send
    if (buf->buf.output_avail >= 0) {
        PacketPassInterface_Sender_Send(buf->output, buf->buf.output_dest, buf->buf.output_avail);
    }
}
/* Initialize a PacketBuffer that shuttles packets from 'input' to 'output'
 * through a chunk buffer sized for 'num_packets' packets of the input's
 * MTU.  The output MTU must be >= the input MTU.  Returns 1 on success,
 * 0 on allocation failure (nothing needs to be freed in that case). */
int PacketBuffer_Init (PacketBuffer *buf, PacketRecvInterface *input, PacketPassInterface *output, int num_packets, BPendingGroup *pg)
{
    ASSERT(PacketPassInterface_GetMTU(output) >= PacketRecvInterface_GetMTU(input))
    ASSERT(num_packets > 0)

    // init arguments
    buf->input = input;
    buf->output = output;

    // init input
    PacketRecvInterface_Receiver_Init(buf->input, (PacketRecvInterface_handler_done)input_handler_done, buf);

    // set input MTU
    buf->input_mtu = PacketRecvInterface_GetMTU(buf->input);

    // init output
    PacketPassInterface_Sender_Init(buf->output, (PacketPassInterface_handler_done)output_handler_done, buf);

    // allocate buffer (num_blocks < 0 indicates the size computation
    // overflowed or was otherwise invalid)
    int num_blocks = ChunkBuffer2_calc_blocks(buf->input_mtu, num_packets);
    if (num_blocks < 0) {
        goto fail0;
    }
    if (!(buf->buf_data = (struct ChunkBuffer2_block *)BAllocArray(num_blocks, sizeof(buf->buf_data[0])))) {
        goto fail0;
    }

    // init buffer
    ChunkBuffer2_Init(&buf->buf, buf->buf_data, num_blocks, buf->input_mtu);

    // schedule receive of the first packet
    PacketRecvInterface_Receiver_Recv(buf->input, buf->buf.input_dest);

    DebugObject_Init(&buf->d_obj);
    return 1;

fail0:
    return 0;
}
/* Free resources allocated by PacketBuffer_Init.  Does not free the input
 * or output interfaces, which are owned by the caller. */
void PacketBuffer_Free (PacketBuffer *buf)
{
    DebugObject_Free(&buf->d_obj);

    // free buffer
    BFree(buf->buf_data);
}
| 1,669 |
398 | <gh_stars>100-1000
"""NSynth: WaveNet Autoencoder.
"""
"""
NSynth model code and utilities are licensed under APL from the
Google Magenta project
----------------------
https://github.com/tensorflow/magenta/blob/master/magenta/models/nsynth
Copyright 2017 <NAME>. See also NOTICE.md.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tensorflow as tf
from scipy.io import wavfile
import numpy as np
from cadl.utils import download_and_extract_tar
from magenta.models.nsynth import utils
from magenta.models.nsynth import reader
from magenta.models.nsynth.wavenet import masked
import os
def get_model():
    """Download and extract the pretrained NSynth WaveNet checkpoint.

    Fetches the `wavenet-ckpt` tarball from the Magenta download server and
    unpacks it (see `cadl.utils.download_and_extract_tar` for the location).
    """
    download_and_extract_tar(
        'http://download.magenta.tensorflow.org/models/nsynth/wavenet-ckpt.tar')
def causal_linear(x, n_inputs, n_outputs, name, filter_length, rate,
                  batch_size):
    """One fast-generation step of a causal dilated 1-D convolution.

    Instead of re-running the full dilated convolution for every generated
    sample, the two previous activations are kept in FIFO queues (one per
    past tap of the length-3 filter) and combined with the current input via
    plain matmuls against slices of the pretrained convolution kernel.

    Parameters
    ----------
    x : tf.Tensor
        Current input activation; assumed shape [batch_size, n_inputs]
        (the queues are shaped that way) -- TODO confirm at call sites.
    n_inputs : int
        Number of input channels.
    n_outputs : int
        Number of output channels.
    name : str
        Name of the pretrained conv layer whose W/biases variables to load.
    filter_length : int
        Filter length of the pretrained kernel; the queue/slice logic here
        assumes 3 taps.
    rate : int
        Dilation rate; also the capacity of each state queue.
    batch_size : int
        Batch size used to shape the queues.

    Returns
    -------
    (y, (init_1, init_2), (push_1, push_2))
        Output tensor of shape [1, batch_size, n_outputs], queue
        initialisation ops (run once), and queue push ops (run every step).
    """
    # create queue: each FIFO holds 'rate' past activations so that
    # dequeuing yields the activation from 'rate' steps ago.
    q_1 = tf.FIFOQueue(rate, dtypes=tf.float32, shapes=(batch_size, n_inputs))
    q_2 = tf.FIFOQueue(rate, dtypes=tf.float32, shapes=(batch_size, n_inputs))
    init_1 = q_1.enqueue_many(tf.zeros((rate, batch_size, n_inputs)))
    init_2 = q_2.enqueue_many(tf.zeros((rate, batch_size, n_inputs)))
    state_1 = q_1.dequeue()
    push_1 = q_1.enqueue(x)
    state_2 = q_2.dequeue()
    push_2 = q_2.enqueue(state_1)

    # get pretrained weights
    W = tf.get_variable(
        name=name + '/W',
        shape=[1, filter_length, n_inputs, n_outputs],
        dtype=tf.float32)
    b = tf.get_variable(
        name=name + '/biases', shape=[n_outputs], dtype=tf.float32)
    # Slice the kernel into its three taps: oldest state, previous state,
    # and the current input.
    W_q_2 = tf.slice(W, [0, 0, 0, 0], [-1, 1, -1, -1])
    W_q_1 = tf.slice(W, [0, 1, 0, 0], [-1, 1, -1, -1])
    W_x = tf.slice(W, [0, 2, 0, 0], [-1, 1, -1, -1])

    # perform op w/ cached states
    y = tf.expand_dims(
        tf.nn.bias_add(
            tf.matmul(state_2, W_q_2[0][0]) + tf.matmul(state_1, W_q_1[0][0]) +
            tf.matmul(x, W_x[0][0]), b), 0)
    return y, (init_1, init_2), (push_1, push_2)
def linear(x, n_inputs, n_outputs, name):
    """1x1 convolution implemented as a matmul against pretrained weights.

    Parameters
    ----------
    x : tf.Tensor
        Input; assumed shape [1, batch_size, n_inputs] (x[0] is fed to the
        matmul) -- TODO confirm at call sites.
    n_inputs : int
        Number of input channels.
    n_outputs : int
        Number of output channels.
    name : str
        Name of the pretrained layer whose W/biases variables to load.

    Returns
    -------
    tf.Tensor
        Output of shape [1, batch_size, n_outputs].
    """
    W = tf.get_variable(
        name=name + '/W', shape=[1, 1, n_inputs, n_outputs], dtype=tf.float32)
    b = tf.get_variable(
        name=name + '/biases', shape=[n_outputs], dtype=tf.float32)
    return tf.expand_dims(tf.nn.bias_add(tf.matmul(x[0], W[0][0]), b), 0)
class FastGenerationConfig(object):
    """Configuration that builds the incremental (sample-by-sample) decoder.

    Mirrors the WaveNet decoder in `Config.build`, but replaces the dilated
    convolutions with queue-cached matmuls (`causal_linear`/`linear`) so a
    single sample can be generated per session step.
    """

    def __init__(self, batch_size=1):
        """Store the generation batch size."""
        self.batch_size = batch_size

    def build(self, inputs):
        """Build the graph for this configuration.

        Args:
          inputs: A dict of inputs. For training, should contain 'wav'.

        Returns:
          A dict of outputs that includes the 'predictions',
          'init_ops', the 'push_ops', and the 'quantized_input'.
        """
        num_stages = 10
        num_layers = 30
        filter_length = 3
        width = 512
        skip_width = 256
        num_z = 16

        # Encode the source with 8-bit Mu-Law.
        x = inputs['wav']
        batch_size = self.batch_size
        x_quantized = utils.mu_law(x)
        x_scaled = tf.cast(x_quantized, tf.float32) / 128.0
        x_scaled = tf.expand_dims(x_scaled, 2)

        # The per-frame embedding is fed in externally during generation.
        encoding = tf.placeholder(
            name='encoding', shape=[batch_size, num_z], dtype=tf.float32)
        en = tf.expand_dims(encoding, 1)

        init_ops, push_ops = [], []

        ###
        # The WaveNet Decoder.
        ###
        l = x_scaled
        l, inits, pushs = utils.causal_linear(
            x=l,
            n_inputs=1,
            n_outputs=width,
            name='startconv',
            rate=1,
            batch_size=batch_size,
            filter_length=filter_length)

        for init in inits:
            init_ops.append(init)
        for push in pushs:
            push_ops.append(push)

        # Set up skip connections.
        s = utils.linear(l, width, skip_width, name='skip_start')

        # Residual blocks with skip connections.
        for i in range(num_layers):
            # Dilation cycles through 1, 2, 4, ... 512 every num_stages layers.
            dilation = 2**(i % num_stages)

            # dilated masked cnn
            d, inits, pushs = utils.causal_linear(
                x=l,
                n_inputs=width,
                n_outputs=width * 2,
                name='dilatedconv_%d' % (i + 1),
                rate=dilation,
                batch_size=batch_size,
                filter_length=filter_length)

            for init in inits:
                init_ops.append(init)
            for push in pushs:
                push_ops.append(push)

            # local conditioning
            d += utils.linear(en, num_z, width * 2, name='cond_map_%d' % (i + 1))

            # gated cnn: first half of channels is the sigmoid gate, second
            # half is the tanh filter.
            assert d.get_shape().as_list()[2] % 2 == 0
            m = d.get_shape().as_list()[2] // 2
            d = tf.sigmoid(d[:, :, :m]) * tf.tanh(d[:, :, m:])

            # residuals
            l += utils.linear(d, width, width, name='res_%d' % (i + 1))

            # skips
            s += utils.linear(d, width, skip_width, name='skip_%d' % (i + 1))

        s = tf.nn.relu(s)
        s = (utils.linear(s, skip_width, skip_width, name='out1') + utils.linear(
            en, num_z, skip_width, name='cond_map_out1'))
        s = tf.nn.relu(s)

        ###
        # Compute the logits and get the loss.
        ###
        logits = utils.linear(s, skip_width, 256, name='logits')
        logits = tf.reshape(logits, [-1, 256])
        probs = tf.nn.softmax(logits, name='softmax')

        return {
            'init_ops': init_ops,
            'push_ops': push_ops,
            'predictions': probs,
            'encoding': encoding,
            'quantized_input': x_quantized,
        }
class Config(object):
    """Configuration that builds the full NSynth WaveNet autoencoder graph."""

    def __init__(self, train_path=None):
        # Training schedule: piecewise-constant learning rate keyed by step.
        self.num_iters = 200000
        self.learning_rate_schedule = {
            0: 2e-4,
            90000: 4e-4 / 3,
            120000: 6e-5,
            150000: 4e-5,
            180000: 2e-5,
            210000: 6e-6,
            240000: 2e-6,
        }
        # Autoencoder: temporal downsampling factor and embedding width.
        self.ae_hop_length = 512
        self.ae_bottleneck_width = 16
        self.train_path = train_path

    def get_batch(self, batch_size):
        """Return a training batch from the NSynth dataset (train_path required)."""
        assert self.train_path is not None
        data_train = reader.NSynthDataset(self.train_path, is_training=True)
        return data_train.get_wavenet_batch(batch_size, length=6144)

    @staticmethod
    def _condition(x, encoding):
        """Condition the input on the encoding.

        Args:
          x: The [mb, length, channels] float tensor input.
          encoding: The [mb, encoding_length, channels] float tensor encoding.

        Returns:
          The output after broadcasting the encoding to x's shape and adding them.
        """
        mb, length, channels = x.get_shape().as_list()
        enc_mb, enc_length, enc_channels = encoding.get_shape().as_list()
        assert enc_mb == mb
        assert enc_channels == channels

        # Broadcast the (shorter) encoding across x's time axis by viewing x
        # as [mb, enc_length, length/enc_length, channels].
        encoding = tf.reshape(encoding, [mb, enc_length, 1, channels])
        x = tf.reshape(x, [mb, enc_length, -1, channels])
        x += encoding
        x = tf.reshape(x, [mb, length, channels])
        x.set_shape([mb, length, channels])
        return x

    def build(self, inputs, is_training):
        """Build the graph for this configuration.

        Args:
          inputs: A dict of inputs. For training, should contain 'wav'.
          is_training: Whether we are training or not. Not used in this config.

        Returns:
          A dict of outputs that includes the 'predictions', 'loss', the 'encoding',
          the 'quantized_input', and whatever metrics we want to track for eval.
        """
        del is_training
        num_stages = 10
        num_layers = 30
        filter_length = 3
        width = 512
        skip_width = 256
        ae_num_stages = 10
        ae_num_layers = 30
        ae_filter_length = 3
        ae_width = 128

        # Encode the source with 8-bit Mu-Law.
        x = inputs['wav']
        x_quantized = utils.mu_law(x)
        x_scaled = tf.cast(x_quantized, tf.float32) / 128.0
        x_scaled = tf.expand_dims(x_scaled, 2)

        ###
        # The Non-Causal Temporal Encoder.
        ###
        en = masked.conv1d(
            x_scaled,
            causal=False,
            num_filters=ae_width,
            filter_length=ae_filter_length,
            name='ae_startconv')

        for num_layer in range(ae_num_layers):
            dilation = 2**(num_layer % ae_num_stages)
            d = tf.nn.relu(en)
            d = masked.conv1d(
                d,
                causal=False,
                num_filters=ae_width,
                filter_length=ae_filter_length,
                dilation=dilation,
                name='ae_dilatedconv_%d' % (num_layer + 1))
            d = tf.nn.relu(d)
            en += masked.conv1d(
                d,
                num_filters=ae_width,
                filter_length=1,
                name='ae_res_%d' % (num_layer + 1))

        # Project down to the bottleneck width and average-pool in time to
        # produce one embedding frame per ae_hop_length samples.
        en = masked.conv1d(
            en,
            num_filters=self.ae_bottleneck_width,
            filter_length=1,
            name='ae_bottleneck')
        en = masked.pool1d(en, self.ae_hop_length, name='ae_pool', mode='avg')
        encoding = en

        ###
        # The WaveNet Decoder.
        ###
        # Shift right so the prediction at t only sees samples < t (causality).
        l = masked.shift_right(x_scaled)
        l = masked.conv1d(
            l, num_filters=width, filter_length=filter_length, name='startconv')

        # Set up skip connections.
        s = masked.conv1d(
            l, num_filters=skip_width, filter_length=1, name='skip_start')

        # Residual blocks with skip connections.
        for i in range(num_layers):
            dilation = 2**(i % num_stages)
            d = masked.conv1d(
                l,
                num_filters=2 * width,
                filter_length=filter_length,
                dilation=dilation,
                name='dilatedconv_%d' % (i + 1))
            d = self._condition(d,
                                masked.conv1d(
                                    en,
                                    num_filters=2 * width,
                                    filter_length=1,
                                    name='cond_map_%d' % (i + 1)))

            # Gated activation: first half of channels gates the second half.
            assert d.get_shape().as_list()[2] % 2 == 0
            m = d.get_shape().as_list()[2] // 2
            d_sigmoid = tf.sigmoid(d[:, :, :m])
            d_tanh = tf.tanh(d[:, :, m:])
            d = d_sigmoid * d_tanh

            l += masked.conv1d(
                d, num_filters=width, filter_length=1, name='res_%d' % (i + 1))
            s += masked.conv1d(
                d, num_filters=skip_width, filter_length=1, name='skip_%d' % (i + 1))

        s = tf.nn.relu(s)
        s = masked.conv1d(s, num_filters=skip_width, filter_length=1, name='out1')
        s = self._condition(s,
                            masked.conv1d(
                                en,
                                num_filters=skip_width,
                                filter_length=1,
                                name='cond_map_out1'))
        s = tf.nn.relu(s)

        ###
        # Compute the logits and get the loss.
        ###
        logits = masked.conv1d(s, num_filters=256, filter_length=1, name='logits')
        logits = tf.reshape(logits, [-1, 256])
        probs = tf.nn.softmax(logits, name='softmax')
        # Targets are the mu-law bins shifted into [0, 255].
        x_indices = tf.cast(tf.reshape(x_quantized, [-1]), tf.int32) + 128
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=x_indices, name='nll'),
            0,
            name='loss')

        return {
            'predictions': probs,
            'loss': loss,
            'eval': {
                'nll': loss
            },
            'quantized_input': x_quantized,
            'encoding': encoding,
        }
def inv_mu_law(x, mu=255.0):
    """A TF-free (numpy) implementation of inverse Mu-Law companding.

    Parameters
    ----------
    x
        The Mu-Law samples to decode.
    mu
        The Mu that was used to encode these samples.

    Returns
    -------
    The decoded floating-point data.
    """
    samples = np.array(x).astype(np.float32)
    # Map the quantized bin (centered with +0.5) into [-1, 1].
    scaled = (samples + 0.5) * 2. / (mu + 1)
    # Invert the logarithmic companding curve.
    expanded = np.sign(scaled) / mu * ((1 + mu) ** np.abs(scaled) - 1)
    # Bin 0 decodes exactly to silence.
    return np.where(np.equal(samples, 0), samples, expanded)
def load_audio(wav_file, sample_length=64000):
    """Load a wav file and zero-pad/truncate it to `sample_length` samples.

    Parameters
    ----------
    wav_file : str
        Path of the wav file to read.
    sample_length : int, optional
        Length in samples of the returned array; longer audio is truncated,
        shorter audio is right-padded with zeros.

    Returns
    -------
    np.ndarray
        Audio of shape [1, sample_length].
    """
    samples = np.asarray(utils.load_audio(wav_file))[:sample_length]
    padded = np.zeros((1, sample_length))
    padded[0, :samples.shape[0]] = samples
    return padded
def load_nsynth(batch_size=1, sample_length=64000):
    """Build the full NSynth autoencoder graph (inference mode) on GPU 0.

    Parameters
    ----------
    batch_size : int, optional
        Number of audio examples per batch.
    sample_length : int, optional
        Number of samples per audio example.

    Returns
    -------
    dict
        The graph dict from `Config.build` plus the input placeholder 'X'
        (shape [batch_size, sample_length]).
    """
    config = Config()
    with tf.device('/gpu:0'):
        X = tf.placeholder(tf.float32, shape=[batch_size, sample_length])
        graph = config.build({"wav": X}, is_training=False)
        graph.update({'X': X})
    return graph
def load_fastgen_nsynth(batch_size=1, sample_length=64000):
    """Build the incremental (one-sample-per-step) synthesis graph.

    Parameters
    ----------
    batch_size : int, optional
        Number of audio streams generated in parallel.
    sample_length : int, optional
        Unused by the graph itself (kept for signature compatibility with
        `load_nsynth`).

    Returns
    -------
    dict
        The graph dict from `FastGenerationConfig.build` plus the input
        placeholder 'X' (shape [batch_size, 1] -- one sample per step).
    """
    config = FastGenerationConfig(batch_size)
    X = tf.placeholder(tf.float32, shape=[batch_size, 1])
    graph = config.build({"wav": X})
    graph.update({'X': X})
    return graph
def sample_categorical(pmf):
    """Sample from a categorical distribution.

    Args:
      pmf: Probability mass function. Output of a softmax over categories.
        Array of shape [batch_size, number of categories]. Rows sum to 1.
        A 1-D array is treated as a single batch element.

    Returns:
      idxs: Integer array of shape [batch_size, 1] holding the sampled
        category index for each row.
    """
    if pmf.ndim == 1:
        pmf = np.expand_dims(pmf, 0)
    batch_size = pmf.shape[0]
    cdf = np.cumsum(pmf, axis=1)
    # One uniform draw per row; the sampled category is where that draw
    # falls in the row's CDF.
    rand_vals = np.random.rand(batch_size)
    # BUGFIX: this previously returned float64 indices (np.zeros default),
    # contradicting the documented integer return; use an integer dtype.
    idxs = np.zeros([batch_size, 1], dtype=np.int64)
    for i in range(batch_size):
        idxs[i] = cdf[i].searchsorted(rand_vals[i])
    return idxs
def load_batch(files, sample_length=64000):
    """Load a batch of data from either .wav or .npy files.

    Args:
      files: A list of filepaths to .wav or .npy files
      sample_length: Maximum sample length

    Returns:
      batch_data: A padded array of audio or embeddings [batch, length, (dims)]
    """
    # The extension of the first file decides how the whole batch is loaded.
    is_npy = (os.path.splitext(files[0])[1] == ".npy")

    # Load the data
    batch_data = []
    for path in files:
        if is_npy:
            batch_data.append(np.load(path))
        else:
            batch_data.append(utils.load_audio(path, sample_length, sr=16000))
    max_length = max(item.shape[0] for item in batch_data)

    # Right-pad every item with zeros up to the longest one in the batch.
    for i, item in enumerate(batch_data):
        if item.shape[0] < max_length:
            if is_npy:
                padded = np.zeros([max_length, item.shape[1]])
                padded[:item.shape[0], :] = item
            else:
                padded = np.zeros([max_length])
                padded[:item.shape[0]] = item
            batch_data[i] = padded

    # Return arrays
    return np.array(batch_data)
def save_batch(batch_audio, batch_save_paths):
    """Write each audio array in the batch to its matching path as a 16 kHz wav."""
    for audio, name in zip(batch_audio, batch_save_paths):
        tf.logging.info("Saving: %s" % name)
        wavfile.write(name, 16000, audio)
def encode(wav_data, checkpoint_path, sample_length=64000):
    """Generate an array of embeddings from an array of audio.

    Args:
      wav_data: Numpy array [batch_size, sample_length]
      checkpoint_path: Location of the pretrained model.
      sample_length: The total length of the final wave file, padded with 0s.
    Returns:
      encoding: a [mb, 125, 16] encoding (for 64000 sample audio file).
    """
    if wav_data.ndim == 1:
        # Promote a single example to a batch of one.
        wav_data = np.expand_dims(wav_data, 0)
        batch_size = 1
    elif wav_data.ndim == 2:
        batch_size = wav_data.shape[0]

    # Load up the model for encoding and find the encoding of "wav_data"
    session_config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Graph().as_default(), tf.Session(config=session_config) as sess:
        # Trim the audio so its length is a multiple of the encoder hop size.
        hop_length = Config().ae_hop_length
        wav_data, sample_length = utils.trim_for_encoding(
            wav_data, sample_length, hop_length)
        net = load_nsynth(batch_size=batch_size, sample_length=sample_length)
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint_path)
        encodings = sess.run(net["encoding"], feed_dict={net["X"]: wav_data})
        return encodings
def synthesize(encodings,
               save_paths,
               hop_length=None,
               checkpoint_path="model.ckpt-200000",
               samples_per_save=1000):
    """Synthesize audio from an array of embeddings.

    Args:
      encodings: Numpy array with shape [batch_size, time, dim].
      save_paths: Iterable of output file names.
      hop_length: Samples generated per encoding frame; defaults to the
        autoencoder's hop length (512).
      checkpoint_path: Location of the pretrained model. [model.ckpt-200000]
      samples_per_save: Save files after every amount of generated samples.
    """
    if hop_length is None:
        hop_length = Config().ae_hop_length
    # Get lengths
    batch_size = encodings.shape[0]
    encoding_length = encodings.shape[1]
    total_length = encoding_length * hop_length

    session_config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Graph().as_default(), tf.Session(config=session_config) as sess:
        net = load_fastgen_nsynth(batch_size=batch_size)
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint_path)

        # initialize queues w/ 0s
        sess.run(net["init_ops"])

        # Regenerate the audio file sample by sample
        audio_batch = np.zeros((batch_size, total_length,), dtype=np.float32)
        audio = np.zeros([batch_size, 1])

        for sample_i in range(total_length):
            enc_i = sample_i // hop_length
            # One autoregressive step: feed the previous sample and the
            # current encoding frame; push_ops advance the queue state.
            pmf = sess.run(
                [net["predictions"], net["push_ops"]],
                feed_dict={
                    net["X"]: audio,
                    net["encoding"]: encodings[:, enc_i, :]
                })[0]
            sample_bin = sample_categorical(pmf)
            # Bins are in [0, 255]; shift to [-128, 127] before mu-law expand.
            audio = utils.inv_mu_law_numpy(sample_bin - 128)
            audio_batch[:, sample_i] = audio[:, 0]
            if sample_i % 100 == 0:
                tf.logging.info("Sample: %d" % sample_i)
            if sample_i % samples_per_save == 0:
                # Periodically checkpoint the partial audio to disk.
                save_batch(audio_batch, save_paths)

        save_batch(audio_batch, save_paths)
| 10,410 |
325 | <filename>PyFin/tests/Math/Accumulators/testAccumulatorsArithmetic.py
# -*- coding: utf-8 -*-
u"""
Created on 2015-7-27
@author: cheng.li
"""
import unittest
import copy
import tempfile
import pickle
import os
import math
import numpy as np
import pandas as pd
from scipy.stats import norm
from PyFin.Math.Accumulators.IAccumulators import Identity
from PyFin.Math.Accumulators.IAccumulators import Exp
from PyFin.Math.Accumulators.IAccumulators import Log
from PyFin.Math.Accumulators.IAccumulators import Sqrt
from PyFin.Math.Accumulators.IAccumulators import Sign
from PyFin.Math.Accumulators.IAccumulators import Abs
from PyFin.Math.Accumulators.IAccumulators import Pow
from PyFin.Math.Accumulators.IAccumulators import Acos
from PyFin.Math.Accumulators.IAccumulators import Acosh
from PyFin.Math.Accumulators.IAccumulators import Asin
from PyFin.Math.Accumulators.IAccumulators import Asinh
from PyFin.Math.Accumulators.IAccumulators import NormInv
from PyFin.Math.Accumulators.IAccumulators import IIF
from PyFin.Math.Accumulators.IAccumulators import Latest
from PyFin.Math.Accumulators.IAccumulators import Ceil
from PyFin.Math.Accumulators.IAccumulators import Floor
from PyFin.Math.Accumulators.IAccumulators import Round
from PyFin.Math.Accumulators.StatefulAccumulators import MovingAverage
from PyFin.Math.Accumulators.StatefulAccumulators import MovingVariance
from PyFin.Math.Accumulators.StatefulAccumulators import MovingMax
from PyFin.Math.Accumulators.StatefulAccumulators import MovingCorrelation
from PyFin.Math.Accumulators.StatelessAccumulators import Sum
from PyFin.Math.Accumulators.StatelessAccumulators import Average
from PyFin.Math.Accumulators.StatelessAccumulators import Min
from PyFin.Math.Accumulators.StatelessAccumulators import Max
class TestAccumulatorsArithmetic(unittest.TestCase):
def setUp(self):
    """Seed the RNG and build reproducible random sample series."""
    np.random.seed(0)
    self.sampleOpen = np.random.randn(10000)
    self.sampleClose = np.random.randn(10000)
    self.sampleRf = np.random.randn(10000)
def testAddedNanValue(self):
    # A NaN pushed into Max should be ignored, keeping the prior maximum.
    m = Max('x')
    m.push({'x': 10.0})
    m.push({'x': np.nan})
    self.assertAlmostEqual(10., m.value)
def testAccumulatorBasic(self):
    # The .value property should agree with the result() method.
    m = Max('x')
    m.push({'x': 10.0})
    self.assertAlmostEqual(m.result(), m.value)
def testPlusOperator(self):
    # Accumulator + accumulator should equal the sum of the two results at
    # every step.
    ma5 = MovingAverage(5, 'close')
    ma20 = MovingAverage(20, 'open')
    plusRes = ma5 + ma20

    for i, (open, close) in enumerate(zip(self.sampleOpen, self.sampleClose)):
        data = {'close': close, 'open': open}
        ma5.push(data)
        ma20.push(data)
        plusRes.push(data)

        expected = ma5.result() + ma20.result()
        calculated = plusRes.result()
        self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                               "expected: {1:f}\n"
                               "calculated: {2:f}".format(i, expected, calculated))
def testRPlusOperator(self):
    # scalar + accumulator (reflected add) should equal scalar + result.
    # NOTE(review): ma5 is pushed but never asserted against -- presumably
    # only exercising that unrelated accumulators don't interfere.
    ma5 = MovingAverage(5, 'close')
    ma20 = MovingAverage(20, 'close')
    plusRes = 5.0 + MovingAverage(20, 'close')

    for i, close in enumerate(self.sampleClose):
        data = {'close': close}
        ma5.push(data)
        ma20.push(data)
        plusRes.push(data)

        expected = 5.0 + ma20.result()
        calculated = plusRes.result()
        self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                               "expected: {1:f}\n"
                               "calculated: {2:f}".format(i, expected, calculated))
def testSubOperator(self):
    # Accumulator - accumulator should equal the difference of the results.
    ma5 = MovingAverage(5, 'close')
    sumTotal = Sum('open')
    subRes = MovingAverage(5, 'close') - Sum('open')

    for i, (open, close) in enumerate(zip(self.sampleOpen, self.sampleClose)):
        data = {'close': close, 'open': open}
        ma5.push(data)
        sumTotal.push(data)
        subRes.push(data)

        expected = ma5.result() - sumTotal.result()
        calculated = subRes.result()
        self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                               "expected: {1:f}\n"
                               "calculated: {2:f}".format(i, expected, calculated))
def testRSubOperator(self):
    # scalar - accumulator (reflected subtract) should equal scalar - result.
    # NOTE(review): sumTotal is pushed but never asserted against --
    # presumably only exercising that unrelated accumulators don't interfere.
    ma20 = MovingAverage(20, 'close')
    sumTotal = Sum('close')
    subRes = 5.0 - MovingAverage(20, 'close')

    for i, close in enumerate(self.sampleClose):
        data = {'close': close}
        ma20.push(data)
        sumTotal.push(data)
        subRes.push(data)

        expected = 5.0 - ma20.result()
        calculated = subRes.result()
        self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                               "expected: {1:f}\n"
                               "calculated: {2:f}".format(i, expected, calculated))
def testMultiplyOperator(self):
    """'*' on two accumulators must track the product of their results."""
    variance = MovingVariance(5, 'close')
    mean = Average('open')
    product = MovingVariance(5, 'close') * Average('open')
    for i, (open_, close_) in enumerate(zip(self.sampleOpen, self.sampleClose)):
        bar = {'close': close_, 'open': open_}
        for acc in (variance, mean, product):
            acc.push(bar)
        if i < 1:
            continue  # a moving variance needs at least two observations
        expected = variance.result() * mean.result()
        calculated = product.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testRMultiplyOperator(self):
    """scalar * accumulator (reflected __rmul__) must equal scalar * result().

    Fix: the original also built and pushed an Average('open') whose result
    was never read; the dead accumulator (and the then-unused 'open' field)
    is removed.
    """
    ma20 = MovingAverage(20, 'close')
    mulRes = 5.0 * MovingAverage(20, 'close')
    for i, close in enumerate(self.sampleClose):
        data = {'close': close}
        ma20.push(data)
        mulRes.push(data)
        expected = 5.0 * ma20.result()
        calculated = mulRes.result()
        self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                               "expected: {1:f}\n"
                               "calculated: {2:f}".format(i, expected, calculated))
def testDivOperator(self):
    """'/' on two accumulators must track the quotient of their results."""
    corr = MovingCorrelation(5, 'open', 'close')
    lowest = Min('open')
    quotient = Min('open') / MovingCorrelation(5, 'open', 'close')
    for i, (open_, close_) in enumerate(zip(self.sampleOpen, self.sampleClose)):
        bar = {'close': close_, 'open': open_}
        for acc in (corr, lowest, quotient):
            acc.push(bar)
        if i < 1:
            continue  # correlation is undefined on a single observation
        expected = lowest.result() / corr.result()
        calculated = quotient.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testRDivOperator(self):
    """scalar / accumulator (reflected __rtruediv__) must equal 5.0 / result()."""
    ma20 = MovingAverage(20, 'close')
    quotient = 5.0 / MovingAverage(20, 'close')
    for i, close_ in enumerate(self.sampleClose):
        bar = {'close': close_}
        ma20.push(bar)
        quotient.push(bar)
        expected = 5.0 / ma20.result()
        calculated = quotient.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testMultipleOperators(self):
    """A chained (a - b) / c expression must match the manual computation."""
    fast = MovingAverage(20, 'close')
    slow = MovingAverage(120, 'close')
    peak = MovingMax(50, 'open')
    composite = (MovingAverage(20, 'close') - MovingAverage(120, 'close')) / MovingMax(50, 'open')
    for i, (open_, close_) in enumerate(zip(self.sampleOpen, self.sampleClose)):
        bar = {'close': close_, 'open': open_}
        for acc in (fast, slow, peak, composite):
            acc.push(bar)
        expected = (fast.result() - slow.result()) / peak.result()
        calculated = composite.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testNegativeOperator(self):
    """Unary '-' on an accumulator must track the negated result."""
    base = MovingAverage(20, 'close')
    negated = -base
    for i, close_ in enumerate(self.sampleClose):
        bar = {'close': close_}
        base.push(bar)
        negated.push(bar)
        expected = -base.result()
        calculated = negated.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testCompoundedOperator(self):
    """Two spellings of 'moving average of a running max' must agree, and
    piping into a non-accumulator must raise ValueError."""
    # Reference: ma5 averages field 'x', which is fed manually below with the
    # running max, so it reproduces the compounded pipelines step by step.
    ma5 = MovingAverage(5, 'x')
    maxer = Max('close')
    # Spelling 1: '>>' pipes Max's output into the MovingAverage
    # (presumably keyed by the dependent field name 'max' — note the match).
    max5ma = Max('close') >> MovingAverage(5, 'max')
    # Spelling 2: nest the Max directly as the MovingAverage's input.
    max5ma2 = MovingAverage(5, Max('close'))
    for i, close in enumerate(self.sampleClose):
        data = {'close': close, 'open': 1.}
        maxer.push(data)
        # Feed the reference average with the max computed so far.
        data2 = {'x': maxer.result()}
        ma5.push(data2)
        max5ma.push(data)
        max5ma2.push(data)
        expected = ma5.result()
        calculated = max5ma.result()
        self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                               "expected: {1:f}\n"
                               "calculated: {2:f}".format(i, expected, calculated))
        calculated = max5ma2.result()
        self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                               "expected: {1:f}\n"
                               "calculated: {2:f}".format(i, expected, calculated))
    # The right-hand side of '>>' must be an accumulator; a plain callable
    # such as math.sqrt is rejected.
    with self.assertRaises(ValueError):
        _ = Max('close') >> math.sqrt
def testLessOrEqualOperators(self):
    """Max <= Min holds after one value, then fails once the max pulls ahead."""
    comparison = Max('x') <= Min('x')
    comparison.push(dict(x=1.0))
    self.assertEqual(True, comparison.result())
    comparison.push(dict(x=2.0))
    self.assertEqual(False, comparison.result())
def testLessOperator(self):
    """Min < Max fails while they coincide, then holds once they diverge."""
    comparison = Min('x') < Max('x')
    comparison.push(dict(x=1.0))
    self.assertEqual(False, comparison.result())
    comparison.push(dict(x=2.0))
    self.assertEqual(True, comparison.result())
def testGreaterOrEqualOperator(self):
    """Min >= Max holds after one value, then fails once they diverge."""
    comparison = Min('x') >= Max('x')
    comparison.push(dict(x=1.0))
    self.assertEqual(True, comparison.result())
    comparison.push(dict(x=2.0))
    self.assertEqual(False, comparison.result())
def testGreaterOperator(self):
    """Max > Min fails while they coincide, then holds once they diverge."""
    comparison = Max('x') > Min('x')
    comparison.push(dict(x=1.0))
    self.assertEqual(False, comparison.result())
    comparison.push(dict(x=2.0))
    self.assertEqual(True, comparison.result())
def testExpFunction(self):
    """Exp(acc) and acc >> Exp must both track math.exp of the result."""
    base = MovingAverage(5, 'close')
    wrapped = Exp(MovingAverage(5, 'close'))
    piped = MovingAverage(5, 'close') >> Exp
    for i, close_ in enumerate(self.sampleClose):
        bar = {'close': close_}
        base.push(bar)
        wrapped.push(bar)
        piped.push(bar)
        expected = math.exp(base.result())
        for holder in (wrapped, piped):
            calculated = holder.result()
            self.assertAlmostEqual(
                calculated, expected, 12,
                f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testLogFunction(self):
    """Log(acc) must track math.log of the result (inputs kept positive)."""
    base = MovingAverage(5, 'close')
    holder = Log(base)
    # exponentiate the fixture so every moving average is strictly positive
    for i, close_ in enumerate(np.exp(self.sampleClose)):
        bar = {'close': close_}
        base.push(bar)
        holder.push(bar)
        expected = math.log(base.result())
        calculated = holder.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testSignFunction(self):
    """Sign(acc) must be +1 for non-negative results and -1 otherwise."""
    base = MovingAverage(5, 'close')
    holder = Sign(base)
    for i, close_ in enumerate(self.sampleClose):
        bar = {'close': close_}
        base.push(bar)
        holder.push(bar)
        expected = 1 if base.result() >= 0 else -1
        calculated = holder.result()
        self.assertEqual(
            calculated, expected,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testSqrtFunction(self):
    """Sqrt(acc) must track math.sqrt of the result (inputs kept non-negative)."""
    base = MovingAverage(5, 'close')
    holder = Sqrt(base)
    # square the fixture so every moving average is non-negative
    for i, close_ in enumerate(np.square(self.sampleClose)):
        bar = {'close': close_}
        base.push(bar)
        holder.push(bar)
        expected = math.sqrt(base.result())
        calculated = holder.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testAbsFunction(self):
    """Abs(acc) must track abs() of the wrapped accumulator's result."""
    base = MovingAverage(5, 'close')
    holder = Abs(base)
    for i, close_ in enumerate(self.sampleClose):
        bar = {'close': close_}
        base.push(bar)
        holder.push(bar)
        expected = abs(base.result())
        calculated = holder.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testPowFunction(self):
    """Pow(acc, 3) must cube the wrapped (piped) accumulator's result."""
    pipeline = MovingAverage(5, 'close') >> Min
    holder = Pow(pipeline, 3)
    for i, close_ in enumerate(self.sampleClose):
        bar = {'close': close_}
        pipeline.push(bar)
        holder.push(bar)
        expected = math.pow(pipeline.result(), 3)
        calculated = holder.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testAcosFunction(self):
    """Acos(acc) must track math.acos (inputs mapped into [-1, 1] via cos)."""
    base = MovingAverage(5, 'close')
    holder = Acos(base)
    for i, close_ in enumerate(np.cos(self.sampleClose)):
        bar = {'close': close_}
        base.push(bar)
        holder.push(bar)
        expected = math.acos(base.result())
        calculated = holder.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testAcoshFunction(self):
    """Acosh(acc) must track math.acosh (inputs mapped to >= 1 via cosh)."""
    base = MovingAverage(5, 'close')
    holder = Acosh(base)
    for i, close_ in enumerate(np.cosh(self.sampleClose)):
        bar = {'close': close_}
        base.push(bar)
        holder.push(bar)
        expected = math.acosh(base.result())
        calculated = holder.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testAsinFunction(self):
    """Asin(acc) must track math.asin (inputs mapped into [-1, 1] via sin)."""
    base = MovingAverage(5, 'close')
    holder = Asin(base)
    for i, close_ in enumerate(np.sin(self.sampleClose)):
        bar = {'close': close_}
        base.push(bar)
        holder.push(bar)
        expected = math.asin(base.result())
        calculated = holder.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testAsinhFunction(self):
    """Asinh(acc) must track math.asinh of the wrapped result (any real)."""
    base = MovingAverage(5, 'close')
    holder = Asinh(base)
    for i, close_ in enumerate(self.sampleClose):
        bar = {'close': close_}
        base.push(bar)
        holder.push(bar)
        expected = math.asinh(base.result())
        calculated = holder.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testNormInvFunction(self):
    """NormInv should invert the standard normal CDF of the wrapped result:
    default accuracy to ~6 decimal places, fullAcc=True to ~12."""
    ma5 = MovingAverage(5, 'close')
    holder = NormInv(ma5)
    # Push CDF-transformed samples so the averages stay inside (0, 1),
    # NormInv's domain.
    sampleClose = norm.cdf(self.sampleClose)
    for i, close in enumerate(sampleClose):
        data = {'close': close}
        ma5.push(data)
        holder.push(data)
        expected = norm.ppf(ma5.result())
        calculated = holder.result()
        # default-accuracy mode: only 6 places are checked
        self.assertAlmostEqual(calculated, expected, 6, "at index {0:d}\n"
                               "expected: {1:f}\n"
                               "calculated: {2:f}".format(i, expected, calculated))
    # Second pass with the high-accuracy flag, now checked to 12 places.
    # NOTE(review): ma5 deliberately keeps its state from the first loop while
    # a fresh NormInv is wrapped around it — confirm this carry-over is
    # intended rather than resetting ma5.
    holder = NormInv(ma5, fullAcc=True)
    sampleClose = norm.cdf(self.sampleClose)
    for i, close in enumerate(sampleClose):
        data = {'close': close}
        ma5.push(data)
        holder.push(data)
        expected = norm.ppf(ma5.result())
        calculated = holder.result()
        self.assertAlmostEqual(calculated, expected, 12, "at index {0:d}\n"
                               "expected: {1:f}\n"
                               "calculated: {2:f}".format(i, expected, calculated))
def testCeilFunction(self):
    """Ceil(acc) must track math.ceil of the wrapped accumulator's result."""
    base = MovingAverage(5, 'close')
    holder = Ceil(base)
    for i, close_ in enumerate(self.sampleClose):
        bar = {'close': close_}
        base.push(bar)
        holder.push(bar)
        expected = math.ceil(base.result())
        calculated = holder.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testFloorFunction(self):
    """Floor(acc) must track math.floor of the wrapped accumulator's result."""
    base = MovingAverage(5, 'close')
    holder = Floor(base)
    for i, close_ in enumerate(self.sampleClose):
        bar = {'close': close_}
        base.push(bar)
        holder.push(bar)
        expected = math.floor(base.result())
        calculated = holder.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testRoundFunction(self):
    """Round(acc) must track the built-in round() of the wrapped result."""
    base = MovingAverage(5, 'close')
    holder = Round(base)
    for i, close_ in enumerate(self.sampleClose):
        bar = {'close': close_}
        base.push(bar)
        holder.push(bar)
        expected = round(base.result())
        calculated = holder.result()
        self.assertAlmostEqual(
            calculated, expected, 12,
            f"at index {i:d}\nexpected: {expected:f}\ncalculated: {calculated:f}")
def testArithmeticFunctionsDeepcopy(self):
    """copy.deepcopy of every arithmetic wrapper must preserve its cached value.

    Refactor: the original repeated the same push/deepcopy/assert sequence ten
    times; the sequence is extracted into a local helper and driven by a table,
    keeping the same wrappers, inputs and expected values.
    """
    def check(acc, value, expected):
        # one observation, then verify the deep copy carries the same value
        acc.push({'x': value})
        copied = copy.deepcopy(acc)
        self.assertAlmostEqual(expected, copied.value)

    check(Exp('x'), 1, math.exp(1))
    check(Log('x'), 1, math.log(1))
    check(Sqrt('x'), 1, math.sqrt(1))
    check(Pow('x', 2), -1., (-1.) ** 2)
    check(Abs('x'), -1., abs(-1.))
    check(Sign('x'), -1., -1.)
    check(Acos('x'), 1., math.acos(1.))
    check(Asin('x'), 1., math.asin(1.))
    check(Acosh('x'), 1., math.acosh(1.))
    check(Asinh('x'), 1., math.asinh(1.))
def testArithmeticFunctionsPickle(self):
    """Every arithmetic wrapper must survive a pickle round trip via a file.

    Refactor: the original repeated the same dump/load/assert/unlink sequence
    ten times; it is extracted into a helper driven by a table. Robustness
    fix: the temp file is now removed in a ``finally`` block, so a failing
    assertion no longer leaks the file.
    """
    def round_trip(acc, value):
        # seed the accumulator, pickle it to disk, reload, compare values
        acc.push({'x': value})
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            pickle.dump(acc, f)
        try:
            with open(f.name, 'rb') as f2:
                pickled = pickle.load(f2)
            self.assertAlmostEqual(acc.value, pickled.value)
        finally:
            os.unlink(f.name)

    cases = [(Exp('x'), 1), (Log('x'), 1), (Sqrt('x'), 1),
             (Pow('x', 2), -1.), (Abs('x'), -1.), (Sign('x'), -1.),
             (Acos('x'), 1.), (Asin('x'), 1.),
             (Acosh('x'), 1.), (Asinh('x'), 1.)]
    for acc, value in cases:
        round_trip(acc, value)
def testAccumulatorTransform(self):
    """transform() over a DataFrame must equal pandas' rolling mean and
    honor the requested result name."""
    window = 5
    ma = MovingAverage(window, 'close')
    frame = pd.DataFrame(self.sampleClose, columns=['close'])
    res = ma.transform(frame, name='my_factor')[window - 1:]
    expected = frame.rolling(window).mean()[window - 1:]['close']
    self.assertEqual(res.name, 'my_factor')
    np.testing.assert_array_almost_equal(res, expected)
def testIIFAccumulator(self):
    """IIF must pick 'close' while the condition holds and 'open' otherwise."""
    iif = IIF(Latest('rf') > 0, 'close', 'open')
    for i in range(len(self.sampleClose)):
        data = {'close': self.sampleClose[i],
                'open': self.sampleOpen[i],
                'rf': self.sampleRf[i]}
        iif.push(data)
        chosen = data['close'] if data['rf'] > 0 else data['open']
        self.assertAlmostEqual(iif.result(), chosen)
def testIdentityStr(self):
    """str() of a constant Identity prints just the number."""
    self.assertEqual('2.0', str(Identity(2.)))
def testLatestStr(self):
    """str() of Latest renders its quoted field name."""
    self.assertEqual("''\\text{roe}''", str(Latest('roe')))
def testExpStr(self):
    """str() of Exp renders a LaTeX \\exp expression.

    Fix: backslash doubled — ``"\\e"`` written as a single backslash is an
    invalid escape sequence (DeprecationWarning, SyntaxError in future
    Python); the string value is unchanged.
    """
    s = Exp('roe')
    self.assertEqual("\\exp(''\\text{roe}'')", str(s))
def testLogStr(self):
    """str() of Log renders a LaTeX \\ln expression.

    Fix: backslash doubled — ``"\\l"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = Log('roe')
    self.assertEqual("\\ln(''\\text{roe}'')", str(s))
def testSqrtStr(self):
    """str() of Sqrt renders a LaTeX \\sqrt expression.

    Fix: backslash doubled — ``"\\s"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = Sqrt('roe')
    self.assertEqual("\\sqrt{''\\text{roe}''}", str(s))
def testPowStr(self):
    """str() of Pow renders base ^ {exponent}."""
    self.assertEqual("''\\text{roe}'' ^ {3.0}", str(Pow('roe', 3)))
def testAbsStr(self):
    """str() of Abs renders LaTeX vertical bars."""
    self.assertEqual("\\left| ''\\text{roe}'' \\right|", str(Abs('roe')))
def testSignStr(self):
    """str() of Sign renders \\mathrm{sign}(...).

    Fix: backslash doubled — ``"\\m"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = Sign('roe')
    self.assertEqual("\\mathrm{sign}(''\\text{roe}'')", str(s))
def testAcosStr(self):
    """str() of Acos renders \\mathrm{ACos}(...).

    Fix: backslash doubled — ``"\\m"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = Acos('roe')
    self.assertEqual("\\mathrm{ACos}(''\\text{roe}'')", str(s))
def testAcoshStr(self):
    """str() of Acosh renders \\mathrm{ACosh}(...).

    Fix: backslash doubled — ``"\\m"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = Acosh('roe')
    self.assertEqual("\\mathrm{ACosh}(''\\text{roe}'')", str(s))
def testAsinStr(self):
    """str() of Asin renders \\mathrm{ASin}(...).

    Fix: backslash doubled — ``"\\m"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = Asin('roe')
    self.assertEqual("\\mathrm{ASin}(''\\text{roe}'')", str(s))
def testAsinhStr(self):
    """str() of Asinh renders \\mathrm{ASinh}(...).

    Fix: backslash doubled — ``"\\m"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = Asinh('roe')
    self.assertEqual("\\mathrm{ASinh}(''\\text{roe}'')", str(s))
def testNormInvStr(self):
    """str() of NormInv includes the fullAcc flag.

    Fix: backslash doubled — ``"\\m"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = NormInv('roe')
    self.assertEqual("\\mathrm{NormInv}(''\\text{roe}'', fullAcc=0)", str(s))
def testNegStr(self):
    """str() of a negated accumulator prefixes a minus sign.

    Fix: backslash doubled — ``"\\m"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = -Asinh('roe')
    self.assertEqual("-\\mathrm{ASinh}(''\\text{roe}'')", str(s))
def testAddedStr(self):
    """str() of a '+' combination joins both operands with ' + '."""
    combined = Latest('x') + Latest('y')
    self.assertEqual("''\\text{x}'' + ''\\text{y}''", str(combined))
def testMinusStr(self):
    """str() of a '-' combination joins both operands with ' - '."""
    combined = Latest('x') - Latest('y')
    self.assertEqual("''\\text{x}'' - ''\\text{y}''", str(combined))
def testMultiplyStr(self):
    """str() of '*' uses \\times; compound operands get parenthesized."""
    simple = Latest('x') * Latest('y')
    self.assertEqual("''\\text{x}'' \\times ''\\text{y}''", str(simple))
    compound = (Latest('x') + Latest('y')) * (Latest('x') - Latest('y'))
    self.assertEqual("(''\\text{x}'' + ''\\text{y}'') \\times (''\\text{x}'' - ''\\text{y}'')", str(compound))
def testDividedStr(self):
    """str() of '/' renders a LaTeX \\frac with both operands."""
    quotient = (Latest('x') + Latest('y')) / (Latest('x') - Latest('y'))
    self.assertEqual("\\frac{''\\text{x}'' + ''\\text{y}''}{''\\text{x}'' - ''\\text{y}''}", str(quotient))
def testLtOperatorStr(self):
    """str() of '<' renders \\lt between parenthesized operands.

    Fix: backslash doubled — ``"\\l"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = (Latest('x') + Latest('y')) < (Latest('x') - Latest('y'))
    self.assertEqual("(''\\text{x}'' + ''\\text{y}'') \\lt (''\\text{x}'' - ''\\text{y}'')", str(s))
def testLeOperatorStr(self):
    """str() of '<=' renders \\le between parenthesized operands.

    Fix: backslash doubled — ``"\\l"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = (Latest('x') * Latest('y')) <= (Latest('x') - Latest('y'))
    self.assertEqual("(''\\text{x}'' \\times ''\\text{y}'') \\le (''\\text{x}'' - ''\\text{y}'')", str(s))
def testGeOperatorStr(self):
    """str() of '>=' renders \\ge between parenthesized operands.

    Fix: backslash doubled — ``"\\g"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = (Latest('x') * Latest('y')) >= (Latest('x') - Latest('y'))
    self.assertEqual("(''\\text{x}'' \\times ''\\text{y}'') \\ge (''\\text{x}'' - ''\\text{y}'')", str(s))
def testGtOperatorStr(self):
    """str() of '>' renders \\gt between parenthesized operands.

    Fix: backslash doubled — ``"\\g"`` was an invalid escape sequence; the
    string value is unchanged.
    """
    s = (Latest('x') * Latest('y')) > (Latest('x') - Latest('y'))
    self.assertEqual("(''\\text{x}'' \\times ''\\text{y}'') \\gt (''\\text{x}'' - ''\\text{y}'')", str(s))
def testEqOperatorStr(self):
    """str() of '==' renders '=' between parenthesized operands."""
    comparison = (Latest('x') * Latest('y')) == (Latest('x') - Latest('y'))
    self.assertEqual("(''\\text{x}'' \\times ''\\text{y}'') = (''\\text{x}'' - ''\\text{y}'')", str(comparison))
def testNeqOperatorStr(self):
    """str() of '!=' renders \\neq between parenthesized operands."""
    comparison = (Latest('x') * Latest('y')) != (Latest('x') - Latest('y'))
    self.assertEqual("(''\\text{x}'' \\times ''\\text{y}'') \\neq (''\\text{x}'' - ''\\text{y}'')", str(comparison))
| 16,718 |
373 | <reponame>heatd/edk2-platforms<gh_stars>100-1000
/** @file
  Declarations for the PCH-LP HSIO (high-speed I/O) ChipsetInit tables,
  C stepping. The table data itself is defined in a companion source file
  (presumably the matching .c generated alongside this header — confirm).

Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#ifndef _PCH_LP_HSIO_CX_H_
#define _PCH_LP_HSIO_CX_H_

// Version stamp of the C-stepping ChipsetInit table.
#define PCH_LP_HSIO_VER_CX 0x34

// Raw ChipsetInit table blob, 1548 bytes.
extern UINT8 PchLpChipsetInitTable_Cx[1548];
// 120 HSIO sideband programming entries.
extern PCH_SBI_HSIO_TABLE_STRUCT PchLpHsio_Cx[120];
#endif //_PCH_LP_HSIO_CX_H_
848 | /*
* Copyright 2019 Xilinx Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <opencv2/opencv.hpp>
#include <string>
// Scale the input frame to the detector's working resolution, then draw one
// rectangle per detected face. Detection coordinates are fractions of the
// image, so they are multiplied by the resized frame's dimensions.
// Per-detection values are logged only when is_jpeg is set.
cv::Mat process_result(cv::Mat &m1, const vitis::ai::FaceDetectResult &result,
                       bool is_jpeg) {
  cv::Mat canvas;
  cv::resize(m1, canvas, cv::Size{result.width, result.height});
  for (const auto &face : result.rects) {
    LOG_IF(INFO, is_jpeg) << " " << face.score << " "  //
                          << face.x << " "             //
                          << face.y << " "             //
                          << face.width << " "         //
                          << face.height;
    const cv::Point top_left(face.x * canvas.cols, face.y * canvas.rows);
    const cv::Size box_size{(int)(face.width * canvas.cols),
                            (int)(face.height * canvas.rows)};
    cv::rectangle(canvas, cv::Rect{top_left, box_size}, 0xff);
  }
  return canvas;
}
| 661 |
1,350 | <gh_stars>1000+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.devtestlabs.implementation;
import com.azure.core.annotation.ServiceClient;
import com.azure.core.http.HttpHeaders;
import com.azure.core.http.HttpPipeline;
import com.azure.core.http.HttpResponse;
import com.azure.core.http.rest.Response;
import com.azure.core.management.AzureEnvironment;
import com.azure.core.management.exception.ManagementError;
import com.azure.core.management.exception.ManagementException;
import com.azure.core.management.polling.PollResult;
import com.azure.core.management.polling.PollerFactory;
import com.azure.core.util.Context;
import com.azure.core.util.logging.ClientLogger;
import com.azure.core.util.polling.AsyncPollResponse;
import com.azure.core.util.polling.LongRunningOperationStatus;
import com.azure.core.util.polling.PollerFlux;
import com.azure.core.util.serializer.SerializerAdapter;
import com.azure.core.util.serializer.SerializerEncoding;
import com.azure.resourcemanager.devtestlabs.fluent.ArmTemplatesClient;
import com.azure.resourcemanager.devtestlabs.fluent.ArtifactSourcesClient;
import com.azure.resourcemanager.devtestlabs.fluent.ArtifactsClient;
import com.azure.resourcemanager.devtestlabs.fluent.CostsClient;
import com.azure.resourcemanager.devtestlabs.fluent.CustomImagesClient;
import com.azure.resourcemanager.devtestlabs.fluent.DevTestLabsClient;
import com.azure.resourcemanager.devtestlabs.fluent.DisksClient;
import com.azure.resourcemanager.devtestlabs.fluent.EnvironmentsClient;
import com.azure.resourcemanager.devtestlabs.fluent.FormulasClient;
import com.azure.resourcemanager.devtestlabs.fluent.GalleryImagesClient;
import com.azure.resourcemanager.devtestlabs.fluent.GlobalSchedulesClient;
import com.azure.resourcemanager.devtestlabs.fluent.LabsClient;
import com.azure.resourcemanager.devtestlabs.fluent.NotificationChannelsClient;
import com.azure.resourcemanager.devtestlabs.fluent.OperationsClient;
import com.azure.resourcemanager.devtestlabs.fluent.PoliciesClient;
import com.azure.resourcemanager.devtestlabs.fluent.PolicySetsClient;
import com.azure.resourcemanager.devtestlabs.fluent.ProviderOperationsClient;
import com.azure.resourcemanager.devtestlabs.fluent.SchedulesClient;
import com.azure.resourcemanager.devtestlabs.fluent.SecretsClient;
import com.azure.resourcemanager.devtestlabs.fluent.ServiceFabricSchedulesClient;
import com.azure.resourcemanager.devtestlabs.fluent.ServiceFabricsClient;
import com.azure.resourcemanager.devtestlabs.fluent.ServiceRunnersClient;
import com.azure.resourcemanager.devtestlabs.fluent.UsersClient;
import com.azure.resourcemanager.devtestlabs.fluent.VirtualMachineSchedulesClient;
import com.azure.resourcemanager.devtestlabs.fluent.VirtualMachinesClient;
import com.azure.resourcemanager.devtestlabs.fluent.VirtualNetworksClient;
import java.io.IOException;
import java.lang.reflect.Type;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Map;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
/** Initializes a new instance of the DevTestLabsClientImpl type. */
@ServiceClient(builder = DevTestLabsClientBuilder.class)
public final class DevTestLabsClientImpl implements DevTestLabsClient {
    // NOTE: this class is produced by the AutoRest code generator (see the
    // file header); manual edits are normally lost on regeneration.
    private final ClientLogger logger = new ClientLogger(DevTestLabsClientImpl.class);

    /** The subscription ID. */
    private final String subscriptionId;

    /**
     * Gets The subscription ID.
     *
     * @return the subscriptionId value.
     */
    public String getSubscriptionId() {
        return this.subscriptionId;
    }

    /** server parameter. */
    private final String endpoint;

    /**
     * Gets server parameter.
     *
     * @return the endpoint value.
     */
    public String getEndpoint() {
        return this.endpoint;
    }

    /** Api Version. */
    private final String apiVersion;

    /**
     * Gets Api Version.
     *
     * @return the apiVersion value.
     */
    public String getApiVersion() {
        return this.apiVersion;
    }

    /** The HTTP pipeline to send requests through. */
    private final HttpPipeline httpPipeline;

    /**
     * Gets The HTTP pipeline to send requests through.
     *
     * @return the httpPipeline value.
     */
    public HttpPipeline getHttpPipeline() {
        return this.httpPipeline;
    }

    /** The serializer to serialize an object into a string. */
    private final SerializerAdapter serializerAdapter;

    /**
     * Gets The serializer to serialize an object into a string.
     *
     * @return the serializerAdapter value.
     */
    // package-private (no modifier) unlike the other accessors — presumably
    // only the sibling *ClientImpl classes in this package need it.
    SerializerAdapter getSerializerAdapter() {
        return this.serializerAdapter;
    }

    /** The default poll interval for long-running operation. */
    private final Duration defaultPollInterval;

    /**
     * Gets The default poll interval for long-running operation.
     *
     * @return the defaultPollInterval value.
     */
    public Duration getDefaultPollInterval() {
        return this.defaultPollInterval;
    }
    // ------------------------------------------------------------------
    // Generated operation-group accessors. Each final field below is
    // assigned in the constructor and exposed through a trivial getter.
    // ------------------------------------------------------------------

    /** The ProviderOperationsClient object to access its operations. */
    private final ProviderOperationsClient providerOperations;

    /**
     * Gets the ProviderOperationsClient object to access its operations.
     *
     * @return the ProviderOperationsClient object.
     */
    public ProviderOperationsClient getProviderOperations() {
        return this.providerOperations;
    }

    /** The LabsClient object to access its operations. */
    private final LabsClient labs;

    /**
     * Gets the LabsClient object to access its operations.
     *
     * @return the LabsClient object.
     */
    public LabsClient getLabs() {
        return this.labs;
    }

    /** The OperationsClient object to access its operations. */
    private final OperationsClient operations;

    /**
     * Gets the OperationsClient object to access its operations.
     *
     * @return the OperationsClient object.
     */
    public OperationsClient getOperations() {
        return this.operations;
    }

    /** The GlobalSchedulesClient object to access its operations. */
    private final GlobalSchedulesClient globalSchedules;

    /**
     * Gets the GlobalSchedulesClient object to access its operations.
     *
     * @return the GlobalSchedulesClient object.
     */
    public GlobalSchedulesClient getGlobalSchedules() {
        return this.globalSchedules;
    }

    /** The ArtifactSourcesClient object to access its operations. */
    private final ArtifactSourcesClient artifactSources;

    /**
     * Gets the ArtifactSourcesClient object to access its operations.
     *
     * @return the ArtifactSourcesClient object.
     */
    public ArtifactSourcesClient getArtifactSources() {
        return this.artifactSources;
    }

    /** The ArmTemplatesClient object to access its operations. */
    private final ArmTemplatesClient armTemplates;

    /**
     * Gets the ArmTemplatesClient object to access its operations.
     *
     * @return the ArmTemplatesClient object.
     */
    public ArmTemplatesClient getArmTemplates() {
        return this.armTemplates;
    }

    /** The ArtifactsClient object to access its operations. */
    private final ArtifactsClient artifacts;

    /**
     * Gets the ArtifactsClient object to access its operations.
     *
     * @return the ArtifactsClient object.
     */
    public ArtifactsClient getArtifacts() {
        return this.artifacts;
    }

    /** The CostsClient object to access its operations. */
    private final CostsClient costs;

    /**
     * Gets the CostsClient object to access its operations.
     *
     * @return the CostsClient object.
     */
    public CostsClient getCosts() {
        return this.costs;
    }

    /** The CustomImagesClient object to access its operations. */
    private final CustomImagesClient customImages;

    /**
     * Gets the CustomImagesClient object to access its operations.
     *
     * @return the CustomImagesClient object.
     */
    public CustomImagesClient getCustomImages() {
        return this.customImages;
    }

    /** The FormulasClient object to access its operations. */
    private final FormulasClient formulas;

    /**
     * Gets the FormulasClient object to access its operations.
     *
     * @return the FormulasClient object.
     */
    public FormulasClient getFormulas() {
        return this.formulas;
    }

    /** The GalleryImagesClient object to access its operations. */
    private final GalleryImagesClient galleryImages;

    /**
     * Gets the GalleryImagesClient object to access its operations.
     *
     * @return the GalleryImagesClient object.
     */
    public GalleryImagesClient getGalleryImages() {
        return this.galleryImages;
    }

    /** The NotificationChannelsClient object to access its operations. */
    private final NotificationChannelsClient notificationChannels;

    /**
     * Gets the NotificationChannelsClient object to access its operations.
     *
     * @return the NotificationChannelsClient object.
     */
    public NotificationChannelsClient getNotificationChannels() {
        return this.notificationChannels;
    }

    /** The PolicySetsClient object to access its operations. */
    private final PolicySetsClient policySets;

    /**
     * Gets the PolicySetsClient object to access its operations.
     *
     * @return the PolicySetsClient object.
     */
    public PolicySetsClient getPolicySets() {
        return this.policySets;
    }

    /** The PoliciesClient object to access its operations. */
    private final PoliciesClient policies;

    /**
     * Gets the PoliciesClient object to access its operations.
     *
     * @return the PoliciesClient object.
     */
    public PoliciesClient getPolicies() {
        return this.policies;
    }

    /** The SchedulesClient object to access its operations. */
    private final SchedulesClient schedules;

    /**
     * Gets the SchedulesClient object to access its operations.
     *
     * @return the SchedulesClient object.
     */
    public SchedulesClient getSchedules() {
        return this.schedules;
    }

    /** The ServiceRunnersClient object to access its operations. */
    private final ServiceRunnersClient serviceRunners;

    /**
     * Gets the ServiceRunnersClient object to access its operations.
     *
     * @return the ServiceRunnersClient object.
     */
    public ServiceRunnersClient getServiceRunners() {
        return this.serviceRunners;
    }

    /** The UsersClient object to access its operations. */
    private final UsersClient users;

    /**
     * Gets the UsersClient object to access its operations.
     *
     * @return the UsersClient object.
     */
    public UsersClient getUsers() {
        return this.users;
    }

    /** The DisksClient object to access its operations. */
    private final DisksClient disks;

    /**
     * Gets the DisksClient object to access its operations.
     *
     * @return the DisksClient object.
     */
    public DisksClient getDisks() {
        return this.disks;
    }

    /** The EnvironmentsClient object to access its operations. */
    private final EnvironmentsClient environments;

    /**
     * Gets the EnvironmentsClient object to access its operations.
     *
     * @return the EnvironmentsClient object.
     */
    public EnvironmentsClient getEnvironments() {
        return this.environments;
    }

    /** The SecretsClient object to access its operations. */
    private final SecretsClient secrets;

    /**
     * Gets the SecretsClient object to access its operations.
     *
     * @return the SecretsClient object.
     */
    public SecretsClient getSecrets() {
        return this.secrets;
    }

    /** The ServiceFabricsClient object to access its operations. */
    private final ServiceFabricsClient serviceFabrics;

    /**
     * Gets the ServiceFabricsClient object to access its operations.
     *
     * @return the ServiceFabricsClient object.
     */
    public ServiceFabricsClient getServiceFabrics() {
        return this.serviceFabrics;
    }

    /** The ServiceFabricSchedulesClient object to access its operations. */
    private final ServiceFabricSchedulesClient serviceFabricSchedules;

    /**
     * Gets the ServiceFabricSchedulesClient object to access its operations.
     *
     * @return the ServiceFabricSchedulesClient object.
     */
    public ServiceFabricSchedulesClient getServiceFabricSchedules() {
        return this.serviceFabricSchedules;
    }

    /** The VirtualMachinesClient object to access its operations. */
    private final VirtualMachinesClient virtualMachines;

    /**
     * Gets the VirtualMachinesClient object to access its operations.
     *
     * @return the VirtualMachinesClient object.
     */
    public VirtualMachinesClient getVirtualMachines() {
        return this.virtualMachines;
    }

    /** The VirtualMachineSchedulesClient object to access its operations. */
    private final VirtualMachineSchedulesClient virtualMachineSchedules;

    /**
     * Gets the VirtualMachineSchedulesClient object to access its operations.
     *
     * @return the VirtualMachineSchedulesClient object.
     */
    public VirtualMachineSchedulesClient getVirtualMachineSchedules() {
        return this.virtualMachineSchedules;
    }

    /** The VirtualNetworksClient object to access its operations. */
    private final VirtualNetworksClient virtualNetworks;

    /**
     * Gets the VirtualNetworksClient object to access its operations.
     *
     * @return the VirtualNetworksClient object.
     */
    public VirtualNetworksClient getVirtualNetworks() {
        return this.virtualNetworks;
    }
    /**
     * Initializes an instance of DevTestLabsClient client.
     *
     * @param httpPipeline The HTTP pipeline to send requests through.
     * @param serializerAdapter The serializer to serialize an object into a string.
     * @param defaultPollInterval The default poll interval for long-running operation.
     * @param environment The Azure environment.
     * @param subscriptionId The subscription ID.
     * @param endpoint server parameter.
     */
    DevTestLabsClientImpl(
        HttpPipeline httpPipeline,
        SerializerAdapter serializerAdapter,
        Duration defaultPollInterval,
        AzureEnvironment environment,
        String subscriptionId,
        String endpoint) {
        this.httpPipeline = httpPipeline;
        this.serializerAdapter = serializerAdapter;
        this.defaultPollInterval = defaultPollInterval;
        this.subscriptionId = subscriptionId;
        this.endpoint = endpoint;
        // NOTE(review): 'environment' is accepted for constructor parity but never
        // stored or read by this class as visible here.
        // The service API version is fixed for this generated client.
        this.apiVersion = "2018-09-15";
        // One sub-client per operation group; each shares this client's pipeline,
        // serializer and settings via the 'this' reference.
        this.providerOperations = new ProviderOperationsClientImpl(this);
        this.labs = new LabsClientImpl(this);
        this.operations = new OperationsClientImpl(this);
        this.globalSchedules = new GlobalSchedulesClientImpl(this);
        this.artifactSources = new ArtifactSourcesClientImpl(this);
        this.armTemplates = new ArmTemplatesClientImpl(this);
        this.artifacts = new ArtifactsClientImpl(this);
        this.costs = new CostsClientImpl(this);
        this.customImages = new CustomImagesClientImpl(this);
        this.formulas = new FormulasClientImpl(this);
        this.galleryImages = new GalleryImagesClientImpl(this);
        this.notificationChannels = new NotificationChannelsClientImpl(this);
        this.policySets = new PolicySetsClientImpl(this);
        this.policies = new PoliciesClientImpl(this);
        this.schedules = new SchedulesClientImpl(this);
        this.serviceRunners = new ServiceRunnersClientImpl(this);
        this.users = new UsersClientImpl(this);
        this.disks = new DisksClientImpl(this);
        this.environments = new EnvironmentsClientImpl(this);
        this.secrets = new SecretsClientImpl(this);
        this.serviceFabrics = new ServiceFabricsClientImpl(this);
        this.serviceFabricSchedules = new ServiceFabricSchedulesClientImpl(this);
        this.virtualMachines = new VirtualMachinesClientImpl(this);
        this.virtualMachineSchedules = new VirtualMachineSchedulesClientImpl(this);
        this.virtualNetworks = new VirtualNetworksClientImpl(this);
    }
    /**
     * Gets default client context.
     *
     * @return the default client context.
     */
    public Context getContext() {
        // No client-wide shared context; callers layer their own on top via mergeContext().
        return Context.NONE;
    }
/**
* Merges default client context with provided context.
*
* @param context the context to be merged with default client context.
* @return the merged context.
*/
public Context mergeContext(Context context) {
for (Map.Entry<Object, Object> entry : this.getContext().getValues().entrySet()) {
context = context.addData(entry.getKey(), entry.getValue());
}
return context;
}
    /**
     * Gets long running operation result.
     *
     * @param activationResponse the response of activation operation.
     * @param httpPipeline the http pipeline.
     * @param pollResultType type of poll result.
     * @param finalResultType type of final result.
     * @param context the context shared by all requests.
     * @param <T> type of poll result.
     * @param <U> type of final result.
     * @return poller flux for poll result and final result.
     */
    public <T, U> PollerFlux<PollResult<T>, U> getLroResult(
        Mono<Response<Flux<ByteBuffer>>> activationResponse,
        HttpPipeline httpPipeline,
        Type pollResultType,
        Type finalResultType,
        Context context) {
        // Delegate to the resource-manager PollerFactory, which polls the LRO
        // at this client's defaultPollInterval using the shared serializer.
        return PollerFactory
            .create(
                serializerAdapter,
                httpPipeline,
                pollResultType,
                finalResultType,
                defaultPollInterval,
                activationResponse,
                context);
    }
/**
* Gets the final result, or an error, based on last async poll response.
*
* @param response the last async poll response.
* @param <T> type of poll result.
* @param <U> type of final result.
* @return the final result, or an error.
*/
public <T, U> Mono<U> getLroFinalResultOrError(AsyncPollResponse<PollResult<T>, U> response) {
if (response.getStatus() != LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) {
String errorMessage;
ManagementError managementError = null;
HttpResponse errorResponse = null;
PollResult.Error lroError = response.getValue().getError();
if (lroError != null) {
errorResponse =
new HttpResponseImpl(
lroError.getResponseStatusCode(), lroError.getResponseHeaders(), lroError.getResponseBody());
errorMessage = response.getValue().getError().getMessage();
String errorBody = response.getValue().getError().getResponseBody();
if (errorBody != null) {
// try to deserialize error body to ManagementError
try {
managementError =
this
.getSerializerAdapter()
.deserialize(errorBody, ManagementError.class, SerializerEncoding.JSON);
if (managementError.getCode() == null || managementError.getMessage() == null) {
managementError = null;
}
} catch (IOException | RuntimeException ioe) {
logger.logThrowableAsWarning(ioe);
}
}
} else {
// fallback to default error message
errorMessage = "Long running operation failed.";
}
if (managementError == null) {
// fallback to default ManagementError
managementError = new ManagementError(response.getStatus().toString(), errorMessage);
}
return Mono.error(new ManagementException(errorMessage, errorResponse, managementError));
} else {
return response.getFinalResult();
}
}
private static final class HttpResponseImpl extends HttpResponse {
private final int statusCode;
private final byte[] responseBody;
private final HttpHeaders httpHeaders;
HttpResponseImpl(int statusCode, HttpHeaders httpHeaders, String responseBody) {
super(null);
this.statusCode = statusCode;
this.httpHeaders = httpHeaders;
this.responseBody = responseBody == null ? null : responseBody.getBytes(StandardCharsets.UTF_8);
}
public int getStatusCode() {
return statusCode;
}
public String getHeaderValue(String s) {
return httpHeaders.getValue(s);
}
public HttpHeaders getHeaders() {
return httpHeaders;
}
public Flux<ByteBuffer> getBody() {
return Flux.just(ByteBuffer.wrap(responseBody));
}
public Mono<byte[]> getBodyAsByteArray() {
return Mono.just(responseBody);
}
public Mono<String> getBodyAsString() {
return Mono.just(new String(responseBody, StandardCharsets.UTF_8));
}
public Mono<String> getBodyAsString(Charset charset) {
return Mono.just(new String(responseBody, charset));
}
}
}
| 7,767 |
848 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/grappler/optimizers/data/make_stateless.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(MakeStateless, Cache) {
  using test::function::NDef;

  // RangeDataset feeding a CacheDatasetV2 node whose third input ("handle")
  // is a resource; the optimizer should rewrite it to a stateless CacheDataset.
  GrapplerItem grappler_item;
  grappler_item.graph = test::function::GDef(
      {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
       NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
       NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
       NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
       NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_INT64}}),
       NDef("handle", "Const", {}, {{"value", 1}, {"dtype", DT_RESOURCE}}),
       graph_tests_utils::MakeCacheV2Node("cache", "range", "filename",
                                          "handle")},
      {});

  MakeStateless stateless_rewriter;
  GraphDef rewritten;
  TF_ASSERT_OK(stateless_rewriter.Optimize(nullptr, grappler_item, &rewritten));

  EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("cache", rewritten));
  const int cache_index = graph_utils::FindGraphNodeWithName("cache", rewritten);
  // The stateless op keeps only the input dataset and the filename.
  EXPECT_EQ(rewritten.node(cache_index).op(), "CacheDataset");
  EXPECT_EQ(rewritten.node(cache_index).input_size(), 2);
}
TEST(MakeStateless, Shuffle) {
  using test::function::NDef;

  // RangeDataset feeding a ShuffleDatasetV2 node whose third input ("handle")
  // is a resource; the optimizer should rewrite it to a stateless ShuffleDataset.
  GrapplerItem grappler_item;
  grappler_item.graph = test::function::GDef(
      {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
       NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
       NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
       NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
       NDef("buffer_size", "Const", {}, {{"value", 1}, {"dtype", DT_INT64}}),
       NDef("handle", "Const", {}, {{"value", 1}, {"dtype", DT_RESOURCE}}),
       graph_tests_utils::MakeShuffleV2Node("shuffle", "range", "buffer_size",
                                            "handle")},
      {});

  MakeStateless stateless_rewriter;
  GraphDef rewritten;
  TF_ASSERT_OK(stateless_rewriter.Optimize(nullptr, grappler_item, &rewritten));

  EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("shuffle", rewritten));
  const int shuffle_index =
      graph_utils::FindGraphNodeWithName("shuffle", rewritten);
  // The stateless op takes the input dataset, buffer_size and two seeds.
  EXPECT_EQ(rewritten.node(shuffle_index).op(), "ShuffleDataset");
  EXPECT_EQ(rewritten.node(shuffle_index).input_size(), 4);
}
} // namespace
} // namespace grappler
} // namespace tensorflow
| 1,284 |
458 | package com.example.gitnb.module.user;
import java.util.ArrayList;
import android.content.Context;
import android.graphics.drawable.Animatable;
import android.net.Uri;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.RecyclerView.ViewHolder;
import android.text.Editable;
import android.text.TextUtils;
import android.text.TextWatcher;
import android.view.KeyEvent;
import android.view.LayoutInflater;
import android.view.View;
import android.view.View.OnKeyListener;
import android.view.ViewGroup;
import com.example.gitnb.R;
import com.example.gitnb.model.Organization;
import com.example.gitnb.module.viewholder.LoadMoreViewHolder;
import com.example.gitnb.module.viewholder.OrganizationViewHolder;
import com.example.gitnb.module.viewholder.SearchViewHolder;
import com.facebook.drawee.backends.pipeline.Fresco;
import com.facebook.drawee.interfaces.DraweeController;
/**
 * RecyclerView adapter listing GitHub organizations, with an optional
 * search-box header row and an optional "load more" footer row.
 */
public class OrganizationListAdapter extends RecyclerView.Adapter<ViewHolder>{
    private Context mContext;
    /** View type for the search box header row. */
    private static final int TYPE_HEADER_VIEW = 2;
    /** View type for the "load more" footer row. */
    private static final int TYPE_FOOTER_VIEW = 1;
    /** View type for a regular organization row. */
    private static final int TYPE_NORMAL_VIEW = 0;
    /** Page size; a response smaller than this is treated as the last page. */
    private static final int PAGE_COUNT = 30;
    protected final LayoutInflater mInflater;
    private ArrayList<Organization> mOrgas;
    private OnItemClickListener mItemClickListener;
    private OnItemClickListener mLoadMoreClickListener;
    private OnItemClickListener mSearchClickListener;
    private boolean isShowLoadMore = false;
    private boolean isShowSearch = false;
    private boolean isLoadingMore = false;
    private String searchText = "";

    /** Click callback shared by item, load-more and search interactions. */
    public interface OnItemClickListener {
        void onItemClick(View view, int position);
    }

    public OrganizationListAdapter(Context context) {
        mContext = context;
        mInflater = LayoutInflater.from(mContext);
    }

    public void setShowLoadMore(boolean value){
        this.isShowLoadMore = value;
    }

    public void setShowSearch(boolean value){
        this.isShowSearch = value;
    }

    public void setOnItemClickListener(final OnItemClickListener mItemClickListener) {
        this.mItemClickListener = mItemClickListener;
    }

    public void setOnLoadMoreClickListener(final OnItemClickListener mLastItemClickListener) {
        this.mLoadMoreClickListener = mLastItemClickListener;
    }

    public void setOnSearchClickListener(final OnItemClickListener mSearchClickListener) {
        this.mSearchClickListener = mSearchClickListener;
    }

    public String getSearchText(){
        return searchText;
    }

    /**
     * Returns the organization backing the given adapter position, or null for
     * the header/footer rows (and when no data is loaded yet).
     */
    public Organization getItem(int position) {
        if(isShowSearch && position == 0){
            return null;
        }
        if(isShowLoadMore && position == getItemCount()-1){
            return null;
        }
        // Offset by one when the search header occupies position 0.
        return mOrgas == null ? null : mOrgas.get(position-(isShowSearch?1:0));
    }

    @Override
    public long getItemId(int position) {
        return position;
    }

    /** Replaces the data set with the first page of results. */
    public void update(ArrayList<Organization> data){
        // Show the footer only when a full page arrived (more pages may follow).
        isShowLoadMore = data != null && data.size() >= PAGE_COUNT;
        mOrgas = data;
        reset();
    }

    /** Appends a further page of results to the existing data set. */
    public void insertAtBack(ArrayList<Organization> data){
        if(isShowLoadMore){
            // A short page means we reached the end of the list.
            isShowLoadMore = data != null && data.size() >= PAGE_COUNT;
        }
        if (data != null && data.size() > 0){
            if (mOrgas == null) {
                // Guard: insertAtBack() may run before update() populated the list.
                mOrgas = new ArrayList<Organization>();
            }
            mOrgas.addAll(data);
        }
        reset();
    }

    /** Clears the "loading more" state and redraws the whole list. */
    public void reset(){
        this.isLoadingMore = false;
        notifyDataSetChanged();
    }

    @Override
    public int getItemCount() {
        int extraRows = 0;
        if(isShowLoadMore) extraRows++;
        if(isShowSearch) extraRows++;
        return (mOrgas == null ? 0 : mOrgas.size()) + extraRows;
    }

    @Override
    public int getItemViewType(int position) {
        if(isShowSearch && position == 0){
            return TYPE_HEADER_VIEW;
        }
        else if (isShowLoadMore && getItemCount() - 1 == position) { // footer
            return TYPE_FOOTER_VIEW;
        }
        return TYPE_NORMAL_VIEW;
    }

    @Override
    public ViewHolder onCreateViewHolder(ViewGroup viewgroup, int viewType) {
        if(viewType == TYPE_FOOTER_VIEW){
            View v = mInflater.inflate(R.layout.list_data_load_more,viewgroup,false);
            return new LoadMoreView(v);
        }
        else if(viewType == TYPE_HEADER_VIEW){
            View v = mInflater.inflate(R.layout.search,viewgroup,false);
            return new SearchView(v);
        }
        else{
            View v = mInflater.inflate(R.layout.organization_list_item,viewgroup,false);
            return new OrganizationView(v);
        }
    }

    @Override
    public void onBindViewHolder(ViewHolder vh, int position) {
        switch(getItemViewType(position)){
        case TYPE_FOOTER_VIEW:
            LoadMoreView loadMoreViewHolder = (LoadMoreView) vh;
            // Animated GIF shown while a "load more" request is in flight.
            Uri uri = (new Uri.Builder()).scheme("res").path(String.valueOf(R.drawable.github_loading)).build();
            DraweeController draweeController= Fresco.newDraweeControllerBuilder()
                    .setAutoPlayAnimations(isLoadingMore)
                    .setUri(uri)
                    .build();
            loadMoreViewHolder.loading_gif.setController(draweeController);
            break;
        case TYPE_NORMAL_VIEW:
            OrganizationView viewHolder = (OrganizationView) vh;
            Organization orga = getItem(position);
            if(orga != null){
                viewHolder.orga_avatar.setImageURI(Uri.parse(orga.avatar_url));
                viewHolder.orga_name.setText(orga.login);
            }
            // Rank is 1-based; the header already shifts positions by one.
            viewHolder.orga_rank.setText((isShowSearch ? position : position + 1) + ".");
            break;
        case TYPE_HEADER_VIEW:
            SearchView searchHolder = (SearchView) vh;
            if(!TextUtils.isEmpty(searchText)) {
                searchHolder.search_text.setText(searchText.toCharArray(), 0, searchText.length());
                searchHolder.clear_button.setVisibility(View.VISIBLE);
            }
            else{
                searchHolder.clear_button.setVisibility(View.INVISIBLE);
            }
            break;
        }
    }

    /** Row holder that forwards clicks to {@link #mItemClickListener}. */
    private class OrganizationView extends OrganizationViewHolder implements View.OnClickListener{
        public OrganizationView(View view) {
            super(view);
            view.setOnClickListener(this);
        }

        @Override
        public void onClick(View v) {
            if (mItemClickListener != null) {
                mItemClickListener.onItemClick(v, getLayoutPosition());
            }
        }
    }

    /** Footer holder that starts the loading animation and requests the next page. */
    private class LoadMoreView extends LoadMoreViewHolder implements View.OnClickListener{
        public LoadMoreView(View view) {
            super(view);
            view.setOnClickListener(this);
        }

        @Override
        public void onClick(View v) {
            DraweeController draweeController = loading_gif.getController();
            // getAnimatable() may be null before the controller is attached;
            // the original code would NPE here.
            if (draweeController != null) {
                Animatable animatable = draweeController.getAnimatable();
                if (animatable != null) {
                    animatable.start();
                }
            }
            isLoadingMore = true;
            if (mLoadMoreClickListener != null) {
                mLoadMoreClickListener.onItemClick(v, getLayoutPosition());
            }
        }
    }

    /** Header holder wiring the search box, its clear button and key handling. */
    private class SearchView extends SearchViewHolder{
        public SearchView(View view) {
            super(view);
            search_icon.setOnClickListener( new View.OnClickListener(){
                public void onClick(View v) {
                    if (mSearchClickListener != null) {
                        mSearchClickListener.onItemClick(v, getLayoutPosition());
                    }
                }
            });
            clear_button.setOnClickListener( new View.OnClickListener(){
                public void onClick(View v) {
                    search_text.setText("");
                    searchText = "";
                    clear_button.setVisibility(View.GONE);
                    if (mSearchClickListener != null) {
                        mSearchClickListener.onItemClick(v, getLayoutPosition());
                    }
                }
            });
            search_text.setOnKeyListener(new OnKeyListener(){
                @Override
                public boolean onKey(View v, int keyCode, KeyEvent event) {
                    if(keyCode == KeyEvent.KEYCODE_ENTER){
                        if (mSearchClickListener != null) {
                            mSearchClickListener.onItemClick(v, getLayoutPosition());
                        }
                        return true;
                    }
                    return false;
                }
            });
            search_text.addTextChangedListener(new TextWatcher(){
                @Override
                public void afterTextChanged(Editable arg0) {
                }

                @Override
                public void beforeTextChanged(CharSequence arg0, int arg1,
                        int arg2, int arg3) {
                }

                @Override
                public void onTextChanged(CharSequence s, int start, int before, int count){
                    if(s.length() > 0){
                        clear_button.setVisibility(View.VISIBLE);
                        searchText = s.toString();
                    }
                    else{
                        // Keep the cached query in sync when text is deleted via the
                        // keyboard (the original only reset it via the clear button).
                        searchText = "";
                        clear_button.setVisibility(View.INVISIBLE);
                    }
                }
            });
        }
    }
}
| 3,494 |
2,023 | <reponame>tdiprima/code
import threading
__author__ = "<NAME>"
class RWLock:
    """Synchronization object used in a solution of so-called second
    readers-writers problem. In this problem, many readers can simultaneously
    access a share, and a writer has an exclusive access to this share.
    Additionally, the following constraints should be met:
    1) no reader should be kept waiting if the share is currently opened for
    reading unless a writer is also waiting for the share,
    2) no writer should be kept waiting for the share longer than absolutely
    necessary.
    The implementation is based on [1, secs. 4.2.2, 4.2.6, 4.2.7]
    with a modification -- adding an additional lock (C{self.__readers_queue})
    -- in accordance with [2].
    Sources:
    [1] <NAME>: "The little book of semaphores", Version 2.1.5, 2008
    [2] <NAME>, <NAME>, <NAME>:
    "Concurrent Control with 'Readers' and 'Writers'",
    Communications of the ACM, 1971 (via [3])
    [3] http://en.wikipedia.org/wiki/Readers-writers_problem
    """
    def __init__(self):
        # "Light switches" (see [1, sec. 4.2.2]): the first thread entering a
        # group acquires the opposing exclusion lock, the last one releases it.
        self.__read_switch = _LightSwitch()
        self.__write_switch = _LightSwitch()
        self.__no_readers = threading.Lock()
        self.__no_writers = threading.Lock()
        self.__readers_queue = threading.Lock()
        """A lock giving an even higher priority to the writer in certain
        cases (see [2] for a discussion)"""
    def reader_acquire(self):
        # Passing through __readers_queue and __no_readers lets a waiting
        # writer bar newly arriving readers from overtaking it.
        self.__readers_queue.acquire()
        self.__no_readers.acquire()
        self.__read_switch.acquire(self.__no_writers)
        self.__no_readers.release()
        self.__readers_queue.release()
    def reader_release(self):
        # The last reader out re-enables writers (releases __no_writers).
        self.__read_switch.release(self.__no_writers)
    def writer_acquire(self):
        # First writer in blocks new readers; then wait for exclusive access.
        self.__write_switch.acquire(self.__no_readers)
        self.__no_writers.acquire()
    def writer_release(self):
        self.__no_writers.release()
        # The last writer out re-admits readers.
        self.__write_switch.release(self.__no_readers)
class _LightSwitch:
"""An auxiliary "light switch"-like object. The first thread turns on the
"switch", the last one turns it off (see [1, sec. 4.2.2] for details)."""
def __init__(self):
self.__counter = 0
self.__mutex = threading.Lock()
def acquire(self, lock):
self.__mutex.acquire()
self.__counter += 1
if self.__counter == 1:
lock.acquire()
self.__mutex.release()
def release(self, lock):
self.__mutex.acquire()
self.__counter -= 1
if self.__counter == 0:
lock.release()
self.__mutex.release()
##
## Unit testing code
## =================
##
import unittest
import threading
import time
import copy
class Writer(threading.Thread):
    """Worker thread that appends one item to a shared buffer while holding
    the writer side of an RWLock-style lock.

    Public attributes set while running:
      - C{entry_time}: time the thread entered the critical section
      - C{exit_time}: time the thread left the critical section
    """
    def __init__(self, buffer_, rw_lock, init_sleep_time, sleep_time, to_write):
        """
        @param buffer_: common buffer_ shared by the readers and writers
        @type buffer_: list
        @type rw_lock: L{RWLock}
        @param init_sleep_time: sleep time before doing any action
        @type init_sleep_time: C{float}
        @param sleep_time: sleep time while in critical section
        @type sleep_time: C{float}
        @param to_write: data that will be appended to the buffer
        """
        super(Writer, self).__init__()
        self._buffer = buffer_
        self._rw_lock = rw_lock
        self._init_sleep_time = init_sleep_time
        self._sleep_time = sleep_time
        self._to_write = to_write
        self.entry_time = None
        self.exit_time = None
    def run(self):
        time.sleep(self._init_sleep_time)
        self._rw_lock.writer_acquire()
        self.entry_time = time.time()
        time.sleep(self._sleep_time)
        self._buffer.append(self._to_write)
        self.exit_time = time.time()
        self._rw_lock.writer_release()
class Reader(threading.Thread):
    """Worker thread that deep-copies the shared buffer while holding the
    reader side of an RWLock-style lock.

    Public attributes set while running:
      - C{buffer_read}: deep copy of the buffer taken in the critical section
      - C{entry_time}: time the thread entered the critical section
      - C{exit_time}: time the thread left the critical section
    """
    def __init__(self, buffer_, rw_lock, init_sleep_time, sleep_time):
        """
        @param buffer_: common buffer shared by the readers and writers
        @type buffer_: list
        @type rw_lock: L{RWLock}
        @param init_sleep_time: sleep time before doing any action
        @type init_sleep_time: C{float}
        @param sleep_time: sleep time while in critical section
        @type sleep_time: C{float}
        """
        super(Reader, self).__init__()
        self._buffer = buffer_
        self._rw_lock = rw_lock
        self._init_sleep_time = init_sleep_time
        self._sleep_time = sleep_time
        self.buffer_read = None
        self.entry_time = None
        self.exit_time = None
    def run(self):
        time.sleep(self._init_sleep_time)
        self._rw_lock.reader_acquire()
        self.entry_time = time.time()
        time.sleep(self._sleep_time)
        self.buffer_read = copy.deepcopy(self._buffer)
        self.exit_time = time.time()
        self._rw_lock.reader_release()
class RWLockTestCase(unittest.TestCase):
    """Timing-based tests for RWLock.

    Each test launches Reader/Writer threads with staggered start delays and
    then checks the observed entry/exit order of their critical sections.
    Uses assertTrue instead of the deprecated ``assert_`` alias, which was
    removed in Python 3.12.
    """
    def test_readers_nonexclusive_access(self):
        (buffer_, rw_lock, threads) = self.__init_variables()

        threads.append(Reader(buffer_, rw_lock, 0, 0))
        threads.append(Writer(buffer_, rw_lock, 0.2, 0.4, 1))
        threads.append(Reader(buffer_, rw_lock, 0.3, 0.3))
        threads.append(Reader(buffer_, rw_lock, 0.5, 0))

        self.__start_and_join_threads(threads)

        ## The third reader should enter after the second one but it should
        ## exit before the second one exits
        ## (i.e. the readers should be in the critical section
        ## at the same time)
        self.assertEqual([], threads[0].buffer_read)
        self.assertEqual([1], threads[2].buffer_read)
        self.assertEqual([1], threads[3].buffer_read)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[2].entry_time <= threads[3].entry_time)
        self.assertTrue(threads[3].exit_time < threads[2].exit_time)
    def test_writers_exclusive_access(self):
        (buffer_, rw_lock, threads) = self.__init_variables()

        threads.append(Writer(buffer_, rw_lock, 0, 0.4, 1))
        threads.append(Writer(buffer_, rw_lock, 0.1, 0, 2))
        threads.append(Reader(buffer_, rw_lock, 0.2, 0))

        self.__start_and_join_threads(threads)

        ## The second writer should wait for the first one to exit
        self.assertEqual([1, 2], threads[2].buffer_read)
        self.assertTrue(threads[0].exit_time <= threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].exit_time)
    def test_writer_priority(self):
        (buffer_, rw_lock, threads) = self.__init_variables()

        threads.append(Writer(buffer_, rw_lock, 0, 0, 1))
        threads.append(Reader(buffer_, rw_lock, 0.1, 0.4))
        threads.append(Writer(buffer_, rw_lock, 0.2, 0, 2))
        threads.append(Reader(buffer_, rw_lock, 0.3, 0))
        threads.append(Reader(buffer_, rw_lock, 0.3, 0))

        self.__start_and_join_threads(threads)

        ## The second writer should go before the second and the third reader
        self.assertEqual([1], threads[1].buffer_read)
        self.assertEqual([1, 2], threads[3].buffer_read)
        self.assertEqual([1, 2], threads[4].buffer_read)
        self.assertTrue(threads[0].exit_time < threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[4].entry_time)
    def test_many_writers_priority(self):
        (buffer_, rw_lock, threads) = self.__init_variables()

        threads.append(Writer(buffer_, rw_lock, 0, 0, 1))
        threads.append(Reader(buffer_, rw_lock, 0.1, 0.6))
        threads.append(Writer(buffer_, rw_lock, 0.2, 0.1, 2))
        threads.append(Reader(buffer_, rw_lock, 0.3, 0))
        threads.append(Reader(buffer_, rw_lock, 0.4, 0))
        threads.append(Writer(buffer_, rw_lock, 0.5, 0.1, 3))

        self.__start_and_join_threads(threads)

        ## The two last writers should go first -- after the first reader and
        ## before the second and the third reader
        self.assertEqual([1], threads[1].buffer_read)
        self.assertEqual([1, 2, 3], threads[3].buffer_read)
        self.assertEqual([1, 2, 3], threads[4].buffer_read)
        self.assertTrue(threads[0].exit_time < threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[5].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[4].entry_time)
        self.assertTrue(threads[5].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[5].exit_time <= threads[4].entry_time)
    @staticmethod
    def __init_variables():
        # Fresh shared buffer, lock and thread list for each test.
        buffer_ = []
        rw_lock = RWLock()
        threads = []
        return (buffer_, rw_lock, threads)
    @staticmethod
    def __start_and_join_threads(threads):
        for t in threads:
            t.start()
        for t in threads:
            t.join()
| 3,220 |
1,489 | <reponame>kimbugp/django-activity-stream
'''
Decide on a JSONField implementation for action data.
On Django >= 3.1 the built-in ``django.db.models.JSONField`` is used.
On older versions, ``JSONField`` from the ``django-jsonfield-backport``
package is used instead.
Raises ImproperlyConfigured if USE_JSONFIELD is True, Django is older
than 3.1, and django-jsonfield-backport is not installed.
Falls back to a simple Django TextField if USE_JSONFIELD is False,
however that field will be removed by migration 0002 directly
afterwards.
'''
import django
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from actstream.settings import USE_JSONFIELD
__all__ = ('DataField', )
# Placeholder type; overridden below when USE_JSONFIELD is enabled.
DataField = models.TextField
if USE_JSONFIELD:
    if django.VERSION >= (3, 1):
        # JSONField is part of Django core since 3.1 (all database backends).
        from django.db.models import JSONField
        DataField = JSONField
    else:
        try:
            from django_jsonfield_backport.models import JSONField
            DataField = JSONField
        except ImportError:
            raise ImproperlyConfigured(
                'You must install django-jsonfield-backport, '
                'if you wish to use a JSONField on your actions '
                'and run Django < 3.1'
            )
| 448 |
14,793 | <filename>weixin-java-pay/src/main/java/com/github/binarywang/wxpay/service/impl/RedpackServiceImpl.java
package com.github.binarywang.wxpay.service.impl;
import com.github.binarywang.wxpay.bean.request.WxPayRedpackQueryRequest;
import com.github.binarywang.wxpay.bean.request.WxPaySendMiniProgramRedpackRequest;
import com.github.binarywang.wxpay.bean.request.WxPaySendRedpackRequest;
import com.github.binarywang.wxpay.bean.result.BaseWxPayResult;
import com.github.binarywang.wxpay.bean.result.WxPayRedpackQueryResult;
import com.github.binarywang.wxpay.bean.result.WxPaySendMiniProgramRedpackResult;
import com.github.binarywang.wxpay.bean.result.WxPaySendRedpackResult;
import com.github.binarywang.wxpay.constant.WxPayConstants;
import com.github.binarywang.wxpay.exception.WxPayException;
import com.github.binarywang.wxpay.service.RedpackService;
import com.github.binarywang.wxpay.service.WxPayService;
import lombok.RequiredArgsConstructor;
/**
 * Implementation of the WeChat Pay red packet (红包) service: sending regular,
 * split ("裂变") and mini-program red packets, and querying red packet records.
 *
 * @author <a href="https://github.com/binarywang"><NAME></a>
 * @date 2019-12-26
 */
@RequiredArgsConstructor
public class RedpackServiceImpl implements RedpackService {
  private final WxPayService payService;

  @Override
  public WxPaySendMiniProgramRedpackResult sendMiniProgramRedpack(WxPaySendMiniProgramRedpackRequest request)
    throws WxPayException {
    request.checkAndSign(this.payService.getConfig());

    String url = this.payService.getPayBaseUrl() + "/mmpaymkttransfers/sendminiprogramhb";
    String responseContent = this.payService.post(url, request.toXML(), true);
    WxPaySendMiniProgramRedpackResult result =
      BaseWxPayResult.fromXML(responseContent, WxPaySendMiniProgramRedpackResult.class);
    result.checkResult(this.payService, request.getSignType(), true);
    return result;
  }

  @Override
  public WxPaySendRedpackResult sendRedpack(WxPaySendRedpackRequest request) throws WxPayException {
    request.checkAndSign(this.payService.getConfig());

    // amtType is only set for split ("裂变") red packets, which use a different
    // endpoint (the original built the regular URL and then overwrote it).
    final String path = request.getAmtType() == null
      ? "/mmpaymkttransfers/sendredpack"
      : "/mmpaymkttransfers/sendgroupredpack";
    String url = this.payService.getPayBaseUrl() + path;
    String responseContent = this.payService.post(url, request.toXML(), true);
    final WxPaySendRedpackResult result = BaseWxPayResult.fromXML(responseContent, WxPaySendRedpackResult.class);
    result.checkResult(this.payService, request.getSignType(), true);
    return result;
  }

  @Override
  public WxPayRedpackQueryResult queryRedpack(String mchBillNo) throws WxPayException {
    // Convenience overload: build the request from the merchant bill number.
    WxPayRedpackQueryRequest request = new WxPayRedpackQueryRequest();
    request.setMchBillNo(mchBillNo);
    return this.queryRedpack(request);
  }

  @Override
  public WxPayRedpackQueryResult queryRedpack(WxPayRedpackQueryRequest request) throws WxPayException {
    // The red packet query API only supports the MCHT bill type.
    request.setBillType(WxPayConstants.BillType.MCHT);
    request.checkAndSign(this.payService.getConfig());

    String url = this.payService.getPayBaseUrl() + "/mmpaymkttransfers/gethbinfo";
    String responseContent = this.payService.post(url, request.toXML(), true);
    WxPayRedpackQueryResult result = BaseWxPayResult.fromXML(responseContent, WxPayRedpackQueryResult.class);
    result.checkResult(this.payService, request.getSignType(), true);
    return result;
  }
}
| 1,124 |
1,502 | <reponame>yhager/cpp-netlib
// Copyright <NAME> 2007.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef __NETWORK_PROTOCOL_HTTP_REQUEST_20070908_1_HPP__
#define __NETWORK_PROTOCOL_HTTP_REQUEST_20070908_1_HPP__
#include <boost/network/protocol/http/impl/request.hpp>
namespace boost {
namespace network {
namespace http {

// Applies a message directive to an HTTP request via operator<<, enabling the
// chained "request << directive1 << directive2" notation. A directive is any
// callable taking the request by reference.
template <class Tag, class Directive>
basic_request<Tag>& operator<<(basic_request<Tag>& message,
                               Directive const& directive) {
  directive(message);
  return message;
}
}  // namespace http
}  // namespace network
}  // namespace boost
#endif  // __NETWORK_PROTOCOL_HTTP_REQUEST_20070908_1_HPP__
| 316 |
695 | def f():
'''f'''
pass
# NOTE(review): these trivial definitions look like a parser/outline test
# fixture exercising various def/class shapes (one-liners, conditional defs,
# nested defs, nested classes) -- keep them minimal; confirm before editing.
def f1(): pass
f2 = f
if True:
    def g(): pass
else:
    def h(): pass
class C:
    def i(self): pass
    def j(self):
        def j2(self):
            pass
    class C2:
        def k(self): pass
| 135 |
369 | #include "pose_regression.h"
#include "pose_regression_parameter_search.h"
extern "C" {
// Copies a caller-provided 4x3 pose into the container's rigid pose.
// NOTE(review): iteration is i in [0,4), j in [0,3), so the pose appears to be
// a 4x3 affine transform (rotation rows plus translation) -- confirm against
// AffineXform3d's indexing convention.
void set_pose(HybridPredictionContainer* container, float** pose) {
  AffineXform3d* p = container->GetRigidPose();
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 3; j++) {
      (*p)[i][j] = pose[i][j];
    }
  }
}

// Writes a ground-truth pose into slot pose_id of an external pose list.
// The container must already be sized to hold pose_id.
void set_pose_gt(vector<AffineXform3d>* pose_container, int pose_id, float** pose) {
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 3; j++) {
      ((*pose_container)[pose_id])[i][j] = pose[i][j];
    }
  }
}

// Reads the container's rigid pose back into a caller-provided 4x3 array.
void get_pose(HybridPredictionContainer* container, float** pose) {
  AffineXform3d* p = container->GetRigidPose();
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 3; j++) {
      pose[i][j] = (*p)[i][j];
    }
  }
}
// Stores nk ground-truth 3D keypoint coordinates; resizes the container's
// keypoint list to exactly nk entries.
void set_point3D_gt(HybridPredictionContainer* container,
                    float** point3D_gt,
                    int nk) {
  vector<Keypoint>* keypoints = container->GetKeypoints();
  keypoints->resize(nk);
  for (int i = 0; i < nk; i++) {
    for (int j = 0; j < 3; j++) {
      (*keypoints)[i].point3D_gt[j] = point3D_gt[i][j];
    }
  }
}

// Reads nk ground-truth 3D keypoint coordinates back into point3D_gt.
// The container must hold at least nk keypoints.
void get_point3D_gt(HybridPredictionContainer* container,
                    float** point3D_gt,
                    int nk) {
  vector<Keypoint>* keypoints = container->GetKeypoints();
  for (int i = 0; i < nk; i++) {
    for (int j = 0; j < 3; j++) {
      point3D_gt[i][j] = (*keypoints)[i].point3D_gt[j];
    }
  }
}

// Stores nk predicted 2D keypoint coordinates; resizes the keypoint list.
void set_point2D_pred(HybridPredictionContainer* container,
                      float** point2D_pred,
                      int nk) {
  vector<Keypoint>* keypoints = container->GetKeypoints();
  keypoints->resize(nk);
  for (int i = 0; i < nk; i++) {
    for (int j = 0; j < 2; j++) {
      (*keypoints)[i].point2D_pred[j] = point2D_pred[i][j];
    }
  }
}

// Reads nk predicted 2D keypoint coordinates back into point2D_pred.
void get_point2D_pred(HybridPredictionContainer* container,
                      float** point2D_pred,
                      int nk) {
  vector<Keypoint>* keypoints = container->GetKeypoints();
  for (int i = 0; i < nk; i++) {
    for (int j = 0; j < 2; j++) {
      point2D_pred[i][j] = (*keypoints)[i].point2D_pred[j];
    }
  }
}
void set_point_inv_half_var(HybridPredictionContainer* container,
float** inv_half_var,
int nk) {
vector<Keypoint>* keypoints = container->GetKeypoints();
keypoints->resize(nk);
for (int i = 0; i < nk; i++) {
for (int j = 0; j < 2; j++) {
for (int k = 0; k < 2; k++) {
(*keypoints)[i].inv_half_var(j, k) = inv_half_var[i][j * 2 + k];
}
}
}
}
void get_point_inv_half_var(HybridPredictionContainer* container,
float** inv_half_var,
int nk) {
vector<Keypoint>* keypoints = container->GetKeypoints();
for (int i = 0; i < nk; i++) {
for (int j = 0; j < 2; j++) {
for (int k = 0; k < 2; k++) {
inv_half_var[i][j * 2 + k] = (*keypoints)[i].inv_half_var(j, k);
}
}
}
}
// Define the edge topology. `start_id`/`end_id` are 1-based keypoint
// indices (as supplied by the Python side); the trailing decrements convert
// them to the 0-based indices used internally.
void set_edge_ids(HybridPredictionContainer* container,
                  int* start_id,
                  int* end_id,
                  int ne) {
  vector<EdgeVector>* edges = container->GetEdgeVectors();
  edges->resize(ne);
  for (int i = 0; i < ne; i++) {
    (*edges)[i].start_id = start_id[i];
    (*edges)[i].end_id = end_id[i];
    // Convert from 1-based to 0-based indexing.
    (*edges)[i].start_id--;
    (*edges)[i].end_id--;
  }
}
// Set the predicted 2D edge vectors; vec_pred[i] holds the (x, y) pair for
// edge i. Resizes the edge list to `ne`.
void set_vec_pred(HybridPredictionContainer* container,
                  float** vec_pred,
                  int ne) {
  vector<EdgeVector>* edges = container->GetEdgeVectors();
  edges->resize(ne);
  for (int i = 0; i < ne; i++) {
    (*edges)[i].vec_pred[0] = vec_pred[i][0];
    (*edges)[i].vec_pred[1] = vec_pred[i][1];
  }
}
// Export the predicted 2D edge vectors into a caller-owned array.
void get_vec_pred(HybridPredictionContainer* container,
                  float** vec_pred,
                  int ne) {
  vector<EdgeVector>* edges = container->GetEdgeVectors();
  for (int i = 0; i < ne; i++) {
    vec_pred[i][0] = (*edges)[i].vec_pred[0];
    vec_pred[i][1] = (*edges)[i].vec_pred[1];
  }
}
// Set each edge's 2x2 inverse half-covariance matrix (row-major flattened,
// same layout as set_point_inv_half_var).
void set_edge_inv_half_var(HybridPredictionContainer* container,
                           float** inv_half_var,
                           int ne) {
  vector<EdgeVector>* edges = container->GetEdgeVectors();
  edges->resize(ne);
  for (int i = 0; i < ne; i++) {
    for (int j = 0; j < 2; j++) {
      for (int k = 0; k < 2; k++) {
        (*edges)[i].inv_half_var(j, k) = inv_half_var[i][j * 2 + k];
      }
    }
  }
}
// Export each edge's 2x2 inverse half-covariance matrix.
void get_edge_inv_half_var(HybridPredictionContainer* container,
                           float** inv_half_var,
                           int ne) {
  vector<EdgeVector>* edges = container->GetEdgeVectors();
  for (int i = 0; i < ne; i++) {
    for (int j = 0; j < 2; j++) {
      for (int k = 0; k < 2; k++) {
        inv_half_var[i][j * 2 + k] = (*edges)[i].inv_half_var(j, k);
      }
    }
  }
}
// Fill the symmetry-correspondence vectors. Each row of `qs1_cross_qs2` is a
// 3-vector per correspondence (presumably the cross product of the two
// symmetric image points -- confirm against the SymmetryCorres definition).
// Resizes the correspondence list to `ns`.
void set_qs1_cross_qs2(HybridPredictionContainer* container,
                       float** qs1_cross_qs2,
                       int ns) {
  vector<SymmetryCorres>* symcorres = container->GetSymmetryCorres();
  symcorres->resize(ns);
  for (int i = 0; i < ns; i++) {
    for (int j = 0; j < 3; j++) {
      (*symcorres)[i].qs1_cross_qs2[j] = qs1_cross_qs2[i][j];
    }
  }
}
// Export the symmetry-correspondence 3-vectors into a caller-owned array.
void get_qs1_cross_qs2(HybridPredictionContainer* container,
                       float** qs1_cross_qs2,
                       int ns) {
  vector<SymmetryCorres>* symcorres = container->GetSymmetryCorres();
  for (int i = 0; i < ns; i++) {
    for (int j = 0; j < 3; j++) {
      qs1_cross_qs2[i][j] = (*symcorres)[i].qs1_cross_qs2[j];
    }
  }
}
// Store per-correspondence symmetry weights, normalized so they sum to 1.
// Resizes the correspondence list to `ns`.
void set_symmetry_weight(HybridPredictionContainer* container,
                         float* weight,
                         int ns) {
  vector<SymmetryCorres>* symcorres = container->GetSymmetryCorres();
  symcorres->resize(ns);
  double sum = 0.0;
  for (int i = 0; i < ns; i++) {
    (*symcorres)[i].weight = weight[i];
    sum += weight[i];
  }
  // Normalize only when the total is positive. The original divided
  // unconditionally, filling the container with NaN/Inf when all weights
  // were zero (or ns == 0 callers aside, when the sum vanished).
  if (sum > 0.0) {
    for (int i = 0; i < ns; i++) {
      (*symcorres)[i].weight /= sum;
    }
  }
}
// Export the symmetry weights into a caller-owned array.
void get_symmetry_weight(HybridPredictionContainer* container,
                         float* weight,
                         int ns) {
  vector<SymmetryCorres>* symcorres = container->GetSymmetryCorres();
  for (int i = 0; i < ns; i++) {
    // Bug fix: the original read `weight[i] = (*symcorres)[i].weight =
    // weight[i];`, i.e. a getter that first overwrote the container's
    // weights with the (uninitialized) output buffer. Read-only now.
    weight[i] = (*symcorres)[i].weight;
  }
}
// Set the ground-truth reflection-plane normal from a caller-owned
// 3-element array.
void set_normal_gt(HybridPredictionContainer* container,
                   float* normal_gt) {
  Vector3d normal;
  for (int i = 0; i < 3; i++) {
    normal[i] = normal_gt[i];
  }
  container->SetReflectionPlaneNormal(normal);
}
// Compute an initial rigid pose from the container's intermediate
// predictions using the supplied initialization hyper-parameters, and write
// the result back into the container. Returns `predictions` for chaining.
HybridPredictionContainer* initialize_pose(HybridPredictionContainer* predictions,
                                           PRInitPara* pi_para) {
  AffineXform3d pose;
  PoseRegression pr;
  PoseRegressionPara para;
  para.gamma_edge = pi_para->gamma_edge;
  para.gamma_sym = pi_para->gamma_sym;
  // Bug fix: the original self-assigned `para.gamma_kpts = para.gamma_kpts`,
  // silently discarding the searched keypoint weight.
  para.gamma_kpts = pi_para->gamma_kpts;
  pr.InitializePose(*predictions, para, &pose);
  AffineXform3d* p = predictions->GetRigidPose();
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 3; j++) {
      (*p)[i][j] = pose[i][j];
    }
  }
  return predictions;
}
// Refine the container's current rigid pose using the supplied refinement
// hyper-parameters, writing the refined pose back into the container.
// Returns `predictions` for chaining.
HybridPredictionContainer* refine_pose(HybridPredictionContainer* predictions,
                                       PRRefinePara* pr_para) {
  AffineXform3d pose = *(predictions->GetRigidPose());
  PoseRegression pr;
  PoseRegressionPara para;
  para.beta_edge = pr_para->beta_edge;
  para.beta_sym = pr_para->beta_sym;
  // Bug fix: the original self-assigned `para.beta_kpts = para.beta_kpts`,
  // silently discarding the searched keypoint weight.
  para.beta_kpts = pr_para->beta_kpts;
  para.alpha_kpts = pr_para->alpha_kpts;
  para.alpha_edge = pr_para->alpha_edge;
  para.alpha_sym = pr_para->alpha_sym;
  pr.RefinePose(*predictions, para, &pose);
  AffineXform3d* p = predictions->GetRigidPose();
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 3; j++) {
      (*p)[i][j] = pose[i][j];
    }
  }
  return predictions;
}
// Search refinement hyper-parameters against a validation set of
// predictions and ground-truth poses. `diameter` is the object diameter;
// lambda_trans weights translation error relative to it. Returns a
// heap-allocated PRRefinePara the caller owns (freed via delete_container).
PRRefinePara* search_pose_refine(vector<HybridPredictionContainer>* predictions_para,
                                 vector<AffineXform3d>* poses_gt, int data_size, double diameter) {
  predictions_para->resize(data_size);
  poses_gt->resize(data_size);
  PoseRegressionParameterSearch pr_ps;
  ParameterSearchConfig ps_config;
  ps_config.lambda_trans = 4. / (diameter * diameter);
  PRRefinePara* pr_para = new PRRefinePara();
  pr_ps.Refinement(*predictions_para, *poses_gt, (const ParameterSearchConfig) ps_config, pr_para);
  return pr_para;
}
// Search initialization hyper-parameters against a validation set, then
// apply them to every validation example so the stored initial poses match
// the searched parameters. Returns a heap-allocated PRInitPara the caller
// owns (freed via delete_container).
PRInitPara* search_pose_initial(vector<HybridPredictionContainer>* predictions_para,
                                vector<AffineXform3d>* poses_gt, int data_size, double diameter) {
  predictions_para->resize(data_size);
  poses_gt->resize(data_size);
  PoseRegressionParameterSearch pr_ps;
  ParameterSearchConfig ps_config;
  // Translation-error weight, normalized by the object diameter.
  ps_config.lambda_trans = 4. / (diameter * diameter);
  PRInitPara* pi_para = new PRInitPara();
  pr_ps.Initialization(*predictions_para, *poses_gt, (const ParameterSearchConfig) ps_config, pi_para);
  // Set pose initial for the validation set using the searched parameters.
  // Signed index matches `data_size` and avoids the signed/unsigned
  // comparison warning of the original `unsigned id < data_size` loop.
  for (int id = 0; id < data_size; id++) {
    initialize_pose(&((*predictions_para)[id]), pi_para);
  }
  return pi_para;
}
// Allocate a prediction container pre-wired with the fixed 8-keypoint edge
// topology (all 28 = C(8,2) keypoint pairs, 1-based ids; set_edge_ids
// converts them to 0-based). Caller owns the returned pointer.
HybridPredictionContainer* new_container() {
  HybridPredictionContainer* hpc = new HybridPredictionContainer();
  int start_id[] = {2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 4, 5, 6, 7, 8, 5, 6, 7, 8, 6, 7, 8, 7, 8, 8};
  int end_id[] = {1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 7};
  set_edge_ids(hpc, start_id, end_id, 28);
  return hpc;
}
// a vector of intermediate predictions for the examples in the val set
// (currently sized to 50, each pre-wired with the fixed 28-edge topology)
vector<HybridPredictionContainer>* new_container_para() {
  vector<HybridPredictionContainer>* hpc_para = new vector<HybridPredictionContainer>();
  (*hpc_para).resize(50);
  int start_id[] = {2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 4, 5, 6, 7, 8, 5, 6, 7, 8, 6, 7, 8, 7, 8, 8};
  int end_id[] = {1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 7};
  for (unsigned id = 0; id < (*hpc_para).size(); ++id)
    set_edge_ids(&((*hpc_para)[id]), start_id, end_id, 28);
  return hpc_para;
}
// Return a borrowed pointer to element `container_id` of the validation
// vector; the vector retains ownership. `container_id` is not range-checked.
HybridPredictionContainer* get_prediction_container(vector<HybridPredictionContainer>* predictions_para,
                                                    int container_id) {
  HybridPredictionContainer *predictions;
  predictions = &((*predictions_para)[container_id]);
  return predictions;
}
// a vector of ground-truth poses for the examples in the val set
// (currently sized to 50, matching new_container_para)
vector<AffineXform3d>* new_container_pose() {
  vector<AffineXform3d>* poses = new vector<AffineXform3d>();
  (*poses).resize(50);
  return poses;
}
// Free every heap-allocated object handed out by the factory/search
// functions above; intended to be called once at teardown. Any argument may
// be NULL (delete on a null pointer is a no-op).
void delete_container(HybridPredictionContainer* container,
                      vector<HybridPredictionContainer>* c2,
                      vector<AffineXform3d>* c3,
                      PRRefinePara* c4,
                      PRInitPara* c5) {
  delete container;
  delete c2;
  delete c3;
  delete c4;
  delete c5;
}
}
| 5,747 |
3,777 | from socketio.handler import SocketIOHandler
import aj
import gevent.ssl
from aj.security.verifier import ClientCertificateVerificator
from OpenSSL import crypto
class RequestHandler(SocketIOHandler):
    """Socket.IO request handler that augments the WSGI environ with client
    TLS-certificate details and honors a reverse-proxy URL prefix.
    """

    def __init__(self, *args, **kwargs):
        SocketIOHandler.__init__(self, *args, **kwargs)
        # Default Socket.IO endpoint; handle_one_response() swaps this out
        # temporarily when an X-URL-Prefix header is present.
        self.server.resource = 'socket.io'

    def get_environ(self):
        """Extend the base environ with SSL_* keys describing the client cert.

        SSL_CLIENT_VALID / SSL_CLIENT_USER are always present;
        SSL_CLIENT_CERTIFICATE / SSL_CLIENT_DIGEST are only set when the
        connection is TLS and the peer actually presented a certificate.
        """
        env = SocketIOHandler.get_environ(self)
        env['SSL'] = isinstance(self.socket, gevent.ssl.SSLSocket)
        env['SSL_CLIENT_AUTH_FORCE'] = aj.config.data['ssl']['client_auth']['force']
        env['SSL_CLIENT_VALID'] = False
        env['SSL_CLIENT_USER'] = None
        if env['SSL']:
            # getpeercert(True) returns the raw DER bytes (or None when the
            # client sent no certificate).
            peer_cert = self.socket.getpeercert(True)
            if peer_cert:
                certificate = crypto.load_certificate(crypto.FILETYPE_PEM, gevent.ssl.DER_cert_to_PEM_cert(peer_cert))
                env['SSL_CLIENT_CERTIFICATE'] = certificate
                if certificate:
                    # Map the certificate to a local user; a falsy result
                    # means the cert is unknown/invalid.
                    user = ClientCertificateVerificator.get(aj.context).verify(certificate)
                    env['SSL_CLIENT_VALID'] = bool(user)
                    env['SSL_CLIENT_USER'] = user
                    env['SSL_CLIENT_DIGEST'] = certificate.digest('sha1')
        return env

    def handle_one_response(self):
        """Serve one request with the proxy prefix applied, then restore it."""
        prefix = self.environ.get('HTTP_X_URL_PREFIX', '')
        self.server.resource = (prefix + '/socket.io').strip('/')
        response = SocketIOHandler.handle_one_response(self)
        self.server.resource = 'socket.io'
        return response

    def _sendall(self, data):
        # The underlying socket expects bytes; transparently encode str.
        if isinstance(data, str):
            data = data.encode('utf-8')
        return SocketIOHandler._sendall(self, data)
| 769 |
1,144 | /*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CRASH_REPORTER_UDEV_COLLECTOR_H_
#define CRASH_REPORTER_UDEV_COLLECTOR_H_
#include <string>
#include <base/files/file_path.h>
#include <base/macros.h>
#include <gtest/gtest_prod.h> // for FRIEND_TEST
#include "crash_collector.h"
// Udev crash collector.
class UdevCollector : public CrashCollector {
 public:
  UdevCollector();

  ~UdevCollector() override;

  // The udev event string should be formatted as follows:
  //   "ACTION=[action]:KERNEL=[name]:SUBSYSTEM=[subsystem]"
  // The values don't have to be in any particular order. One or more of them
  // could be omitted, in which case it would be treated as a wildcard (*).
  // Returns true on successful collection.
  bool HandleCrash(const std::string& udev_event);

 protected:
  // Directory scanned for device coredump instances; overridable by tests.
  std::string dev_coredump_directory_;

 private:
  friend class UdevCollectorTest;

  // Process udev crash logs, collecting log files according to the config
  // file (crash_reporter_logs.conf).
  bool ProcessUdevCrashLogs(const base::FilePath& crash_directory,
                            const std::string& action,
                            const std::string& kernel,
                            const std::string& subsystem);
  // Process device coredump, collecting device coredump file.
  // |instance_number| is the kernel number of the virtual device for the
  // device coredump instance.
  bool ProcessDevCoredump(const base::FilePath& crash_directory,
                          int instance_number);
  // Copy device coredump file to crash directory, and perform necessary
  // coredump file management.
  bool AppendDevCoredump(const base::FilePath& crash_directory,
                         const base::FilePath& coredump_path,
                         int instance_number);
  // Clear the device coredump file by performing a dummy write to it.
  bool ClearDevCoredump(const base::FilePath& coredump_path);
  // Return the driver name of the device that generates the coredump.
  std::string GetFailingDeviceDriverName(int instance_number);

  // Mutator for unit testing.
  void set_log_config_path(const std::string& path) {
    log_config_path_ = base::FilePath(path);
  }

  DISALLOW_COPY_AND_ASSIGN(UdevCollector);
};
#endif // CRASH_REPORTER_UDEV_COLLECTOR_H_
| 961 |
11,216 | <gh_stars>1000+
{
"extends": ["next"],
"rules": {
"import/no-anonymous-default-export": "off",
"react/display-name": "off",
"@next/next/no-img-element": "off",
"jsx-a11y/alt-text": "off",
"@next/next/no-css-tags": "off",
"@next/next/no-page-custom-font": "off",
"@next/next/no-sync-scripts": "off",
"react/no-unescaped-entities": "off",
"@next/next/no-html-link-for-pages": "off",
"react/no-unknown-property": "off",
"@next/next/google-font-display": "off"
}
}
| 231 |
412 | /*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "libgraphdb/graphdbp.h"
#include <stdio.h>
#include <syslog.h>
#include "libcl/cl.h"
/* Log a printf-style message at level `lev`.
 *
 * Routing, in order of preference:
 *   - the handle's registered vlog callback, when one is installed;
 *   - otherwise format locally and send to syslog (opening it lazily the
 *     first time) and, for NULL handles or >= ERROR severity, to stderr.
 *
 * A NULL `graphdb` is allowed; the message then goes to stderr only.
 * Messages are silently dropped when `lev` is below the handle's loglevel.
 */
void graphdb_log(graphdb_handle* graphdb, cl_loglevel lev, char const* fmt,
                 ...) {
  /* NOTE(review): the filter reads `graphdb_loglevel >= lev` -- this assumes
     numerically larger levels are more verbose; confirm against cl_loglevel. */
  if (graphdb == NULL || graphdb->graphdb_loglevel >= lev) {
    va_list ap;
    va_start(ap, fmt);
    if (graphdb != NULL && graphdb->graphdb_vlog)
      (*graphdb->graphdb_vlog)(graphdb->graphdb_cl, lev, fmt, ap);
    else {
      char buf[10 * 1024]; /* vsnprintf truncates anything longer */
      vsnprintf(buf, sizeof buf, fmt, ap);
      if (graphdb != NULL) {
        if (!graphdb->graphdb_syslog_open) {
          openlog("graphdb", 0, LOG_USER);
          graphdb->graphdb_syslog_open = 1;
        }
        syslog(LOG_USER, "%s", buf);
      }
      /* print to stderr as well */
      if (graphdb == NULL || lev >= CL_LEVEL_ERROR)
        fprintf(stderr, "%s\n", buf);
    }
    va_end(ap);
  }
}
| 565 |
14,425 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.erasurecode.rawcoder.util;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Implementation of Galois field arithmetic with 2^p elements. The input must
* be unsigned integers. It's ported from HDFS-RAID, slightly adapted.
*/
@InterfaceAudience.Private
public class GaloisField {

  // Field size 256 is good for byte based system
  private static final int DEFAULT_FIELD_SIZE = 256;
  // primitive polynomial 1 + X^2 + X^3 + X^4 + X^8 (substitute 2)
  private static final int DEFAULT_PRIMITIVE_POLYNOMIAL = 285;
  // Cache of instances keyed by (fieldSize << 16 | primitivePolynomial).
  static private final Map<Integer, GaloisField> instances =
      new HashMap<Integer, GaloisField>();
  private final int[] logTable;      // logTable[x] = discrete log of x (x > 0)
  private final int[] powTable;      // powTable[i] = generator (2) raised to i
  private final int[][] mulTable;    // full multiplication table
  private final int[][] divTable;    // full division table; divisor 0 undefined
  private final int fieldSize;
  private final int primitivePeriod; // fieldSize - 1, order of the generator
  private final int primitivePolynomial;

  private GaloisField(int fieldSize, int primitivePolynomial) {
    assert fieldSize > 0;
    assert primitivePolynomial > 0;

    this.fieldSize = fieldSize;
    this.primitivePeriod = fieldSize - 1;
    this.primitivePolynomial = primitivePolynomial;
    logTable = new int[fieldSize];
    powTable = new int[fieldSize];
    mulTable = new int[fieldSize][fieldSize];
    divTable = new int[fieldSize][fieldSize];
    // Generate log/pow tables by repeatedly doubling (multiplying by the
    // generator 2) and reducing modulo the primitive polynomial.
    int value = 1;
    for (int pow = 0; pow < fieldSize - 1; pow++) {
      powTable[pow] = value;
      logTable[value] = pow;
      value = value * 2;
      if (value >= fieldSize) {
        value = value ^ primitivePolynomial;
      }
    }
    // building multiplication table
    for (int i = 0; i < fieldSize; i++) {
      for (int j = 0; j < fieldSize; j++) {
        if (i == 0 || j == 0) {
          mulTable[i][j] = 0;
          continue;
        }
        int z = logTable[i] + logTable[j];
        z = z >= primitivePeriod ? z - primitivePeriod : z;
        z = powTable[z];
        mulTable[i][j] = z;
      }
    }
    // building division table (column 0, division by zero, is left as 0 and
    // guarded by the assert in divide())
    for (int i = 0; i < fieldSize; i++) {
      for (int j = 1; j < fieldSize; j++) {
        if (i == 0) {
          divTable[i][j] = 0;
          continue;
        }
        int z = logTable[i] - logTable[j];
        z = z < 0 ? z + primitivePeriod : z;
        z = powTable[z];
        divTable[i][j] = z;
      }
    }
  }

  /**
   * Get the object performs Galois field arithmetics
   *
   * @param fieldSize size of the field
   * @param primitivePolynomial a primitive polynomial corresponds to the size
   * @return a shared, lazily constructed instance for the given parameters
   */
  public static GaloisField getInstance(int fieldSize,
                                        int primitivePolynomial) {
    int key = ((fieldSize << 16) & 0xFFFF0000)
        + (primitivePolynomial & 0x0000FFFF);
    GaloisField gf;
    synchronized (instances) {
      gf = instances.get(key);
      if (gf == null) {
        gf = new GaloisField(fieldSize, primitivePolynomial);
        instances.put(key, gf);
      }
    }
    return gf;
  }

  /**
   * Get the object performs Galois field arithmetic with default setting
   */
  public static GaloisField getInstance() {
    return getInstance(DEFAULT_FIELD_SIZE, DEFAULT_PRIMITIVE_POLYNOMIAL);
  }

  /**
   * Return number of elements in the field
   *
   * @return number of elements in the field
   */
  public int getFieldSize() {
    return fieldSize;
  }

  /**
   * Return the primitive polynomial in GF(2)
   *
   * @return primitive polynomial as a integer
   */
  public int getPrimitivePolynomial() {
    return primitivePolynomial;
  }

  /**
   * Compute the sum of two fields
   *
   * @param x input field
   * @param y input field
   * @return result of addition
   */
  public int add(int x, int y) {
    assert (x >= 0 && x < getFieldSize() && y >= 0 && y < getFieldSize());
    // Addition in GF(2^p) is bitwise XOR (and equals subtraction).
    return x ^ y;
  }

  /**
   * Compute the multiplication of two fields
   *
   * @param x input field
   * @param y input field
   * @return result of multiplication
   */
  public int multiply(int x, int y) {
    assert (x >= 0 && x < getFieldSize() && y >= 0 && y < getFieldSize());
    return mulTable[x][y];
  }

  /**
   * Compute the division of two fields
   *
   * @param x input field
   * @param y input field, must be nonzero
   * @return x/y
   */
  public int divide(int x, int y) {
    assert (x >= 0 && x < getFieldSize() && y > 0 && y < getFieldSize());
    return divTable[x][y];
  }

  /**
   * Compute power n of a field
   *
   * @param x input field
   * @param n power, assumed non-negative
   * @return x^n
   */
  public int power(int x, int n) {
    assert (x >= 0 && x < getFieldSize());
    if (n == 0) {
      return 1;
    }
    if (x == 0) {
      return 0;
    }
    // NOTE(review): logTable[x] * n can overflow int for very large n;
    // callers are assumed to pass moderate exponents.
    x = logTable[x] * n;
    if (x < primitivePeriod) {
      return powTable[x];
    }
    x = x % primitivePeriod;
    return powTable[x];
  }

  /**
   * Given a Vandermonde matrix V[i][j]=x[j]^i and vector y, solve for z such
   * that Vz=y. The output z will be placed in y.
   *
   * @param x the vector which describe the Vandermonde matrix
   * @param y right-hand side of the Vandermonde system equation. will be
   *          replaced the output in this vector
   */
  public void solveVandermondeSystem(int[] x, int[] y) {
    solveVandermondeSystem(x, y, x.length);
  }

  /**
   * Given a Vandermonde matrix V[i][j]=x[j]^i and vector y, solve for z such
   * that Vz=y. The output z will be placed in y.
   *
   * @param x the vector which describe the Vandermonde matrix
   * @param y right-hand side of the Vandermonde system equation. will be
   *          replaced the output in this vector
   * @param len consider x and y only from 0...len-1
   */
  public void solveVandermondeSystem(int[] x, int[] y, int len) {
    // Fix: both arrays are read at indices up to len-1, so they must supply
    // at least len entries (the original asserted x.length <= len, which is
    // exactly the condition that would cause an out-of-bounds access).
    assert (x.length >= len && y.length >= len);
    for (int i = 0; i < len - 1; i++) {
      for (int j = len - 1; j > i; j--) {
        y[j] = y[j] ^ mulTable[x[i]][y[j - 1]];
      }
    }
    for (int i = len - 1; i >= 0; i--) {
      for (int j = i + 1; j < len; j++) {
        y[j] = divTable[y[j]][x[j] ^ x[j - i - 1]];
      }
      for (int j = i; j < len - 1; j++) {
        y[j] = y[j] ^ y[j + 1];
      }
    }
  }

  /**
   * A "bulk" version to the solving of Vandermonde System
   */
  public void solveVandermondeSystem(int[] x, byte[][] y, int[] outputOffsets,
                                     int len, int dataLen) {
    int idx1, idx2;
    for (int i = 0; i < len - 1; i++) {
      for (int j = len - 1; j > i; j--) {
        for (idx2 = outputOffsets[j-1], idx1 = outputOffsets[j];
             idx1 < outputOffsets[j] + dataLen; idx1++, idx2++) {
          y[j][idx1] = (byte) (y[j][idx1] ^ mulTable[x[i]][y[j - 1][idx2] &
              0x000000FF]);
        }
      }
    }
    for (int i = len - 1; i >= 0; i--) {
      for (int j = i + 1; j < len; j++) {
        for (idx1 = outputOffsets[j];
             idx1 < outputOffsets[j] + dataLen; idx1++) {
          y[j][idx1] = (byte) (divTable[y[j][idx1] & 0x000000FF][x[j] ^
              x[j - i - 1]]);
        }
      }
      for (int j = i; j < len - 1; j++) {
        for (idx2 = outputOffsets[j+1], idx1 = outputOffsets[j];
             idx1 < outputOffsets[j] + dataLen; idx1++, idx2++) {
          y[j][idx1] = (byte) (y[j][idx1] ^ y[j + 1][idx2]);
        }
      }
    }
  }

  /**
   * A "bulk" version of the solveVandermondeSystem, using ByteBuffer.
   */
  public void solveVandermondeSystem(int[] x, ByteBuffer[] y, int len) {
    ByteBuffer p;
    int idx1, idx2;
    for (int i = 0; i < len - 1; i++) {
      for (int j = len - 1; j > i; j--) {
        p = y[j];
        for (idx1 = p.position(), idx2 = y[j-1].position();
             idx1 < p.limit(); idx1++, idx2++) {
          p.put(idx1, (byte) (p.get(idx1) ^ mulTable[x[i]][y[j-1].get(idx2) &
              0x000000FF]));
        }
      }
    }
    for (int i = len - 1; i >= 0; i--) {
      for (int j = i + 1; j < len; j++) {
        p = y[j];
        for (idx1 = p.position(); idx1 < p.limit(); idx1++) {
          p.put(idx1, (byte) (divTable[p.get(idx1) &
              0x000000FF][x[j] ^ x[j - i - 1]]));
        }
      }
      for (int j = i; j < len - 1; j++) {
        p = y[j];
        for (idx1 = p.position(), idx2 = y[j+1].position();
             idx1 < p.limit(); idx1++, idx2++) {
          p.put(idx1, (byte) (p.get(idx1) ^ y[j+1].get(idx2)));
        }
      }
    }
  }

  /**
   * Compute the multiplication of two polynomials. The index in the array
   * corresponds to the power of the entry. For example p[0] is the constant
   * term of the polynomial p.
   *
   * @param p input polynomial
   * @param q input polynomial
   * @return polynomial represents p*q
   */
  public int[] multiply(int[] p, int[] q) {
    int len = p.length + q.length - 1;
    int[] result = new int[len];
    for (int i = 0; i < len; i++) {
      result[i] = 0;
    }
    for (int i = 0; i < p.length; i++) {
      for (int j = 0; j < q.length; j++) {
        result[i + j] = add(result[i + j], multiply(p[i], q[j]));
      }
    }
    return result;
  }

  /**
   * Compute the remainder of a dividend and divisor pair. The index in the
   * array corresponds to the power of the entry. For example p[0] is the
   * constant term of the polynomial p.
   *
   * @param dividend dividend polynomial, the remainder will be placed
   *                 here when return
   * @param divisor divisor polynomial
   */
  public void remainder(int[] dividend, int[] divisor) {
    for (int i = dividend.length - divisor.length; i >= 0; i--) {
      int ratio = divTable[dividend[i +
          divisor.length - 1]][divisor[divisor.length - 1]];
      for (int j = 0; j < divisor.length; j++) {
        int k = j + i;
        dividend[k] = dividend[k] ^ mulTable[ratio][divisor[j]];
      }
    }
  }

  /**
   * Compute the sum of two polynomials. The index in the array corresponds to
   * the power of the entry. For example p[0] is the constant term of the
   * polynomial p.
   *
   * @param p input polynomial
   * @param q input polynomial
   * @return polynomial represents p+q
   */
  public int[] add(int[] p, int[] q) {
    int len = Math.max(p.length, q.length);
    int[] result = new int[len];
    for (int i = 0; i < len; i++) {
      if (i < p.length && i < q.length) {
        result[i] = add(p[i], q[i]);
      } else if (i < p.length) {
        result[i] = p[i];
      } else {
        result[i] = q[i];
      }
    }
    return result;
  }

  /**
   * Substitute x into polynomial p(x).
   *
   * @param p input polynomial
   * @param x input field
   * @return p(x)
   */
  public int substitute(int[] p, int x) {
    int result = 0;
    int y = 1; // running power x^i
    for (int i = 0; i < p.length; i++) {
      result = result ^ mulTable[p[i]][y];
      y = mulTable[x][y];
    }
    return result;
  }

  /**
   * A "bulk" version of the substitute.
   * Tends to be 2X faster than the "int" substitute in a loop.
   *
   * @param p input polynomial
   * @param q store the return result
   * @param x input field
   */
  public void substitute(byte[][] p, byte[] q, int x) {
    int y = 1;
    for (int i = 0; i < p.length; i++) {
      byte[] pi = p[i];
      for (int j = 0; j < pi.length; j++) {
        int pij = pi[j] & 0x000000FF;
        q[j] = (byte) (q[j] ^ mulTable[pij][y]);
      }
      y = mulTable[x][y];
    }
  }

  /**
   * A "bulk" version of the substitute.
   * Tends to be 2X faster than the "int" substitute in a loop.
   *
   * @param p input polynomial
   * @param offsets per-row start offsets into the rows of p
   * @param len number of bytes per row to process
   * @param q store the return result
   * @param offset start offset into q
   * @param x input field
   */
  public void substitute(byte[][] p, int[] offsets,
                         int len, byte[] q, int offset, int x) {
    int y = 1, iIdx, oIdx;
    for (int i = 0; i < p.length; i++) {
      byte[] pi = p[i];
      for (iIdx = offsets[i], oIdx = offset;
           iIdx < offsets[i] + len; iIdx++, oIdx++) {
        // A null row is treated as all zeros.
        int pij = pi != null ? pi[iIdx] & 0x000000FF : 0;
        q[oIdx] = (byte) (q[oIdx] ^ mulTable[pij][y]);
      }
      y = mulTable[x][y];
    }
  }

  /**
   * A "bulk" version of the substitute, using ByteBuffer.
   * Tends to be 2X faster than the "int" substitute in a loop.
   *
   * @param p input polynomial
   * @param q store the return result
   * @param x input field
   */
  public void substitute(ByteBuffer[] p, int len, ByteBuffer q, int x) {
    int y = 1, iIdx, oIdx;
    for (int i = 0; i < p.length; i++) {
      ByteBuffer pi = p[i];
      // A null buffer is treated as `len` zero bytes.
      int pos = pi != null ? pi.position() : 0;
      int limit = pi != null ? pi.limit() : len;
      for (oIdx = q.position(), iIdx = pos;
           iIdx < limit; iIdx++, oIdx++) {
        int pij = pi != null ? pi.get(iIdx) & 0x000000FF : 0;
        q.put(oIdx, (byte) (q.get(oIdx) ^ mulTable[pij][y]));
      }
      y = mulTable[x][y];
    }
  }

  /**
   * The "bulk" version of the remainder.
   * Warning: This function will modify the "dividend" inputs.
   */
  public void remainder(byte[][] dividend, int[] divisor) {
    // NOTE(review): the loop ordering (ratio recomputed inside the k loop)
    // mirrors the upstream HDFS-RAID port; preserved as-is.
    for (int i = dividend.length - divisor.length; i >= 0; i--) {
      for (int j = 0; j < divisor.length; j++) {
        for (int k = 0; k < dividend[i].length; k++) {
          int ratio = divTable[dividend[i + divisor.length - 1][k] &
              0x00FF][divisor[divisor.length - 1]];
          dividend[j + i][k] = (byte) ((dividend[j + i][k] & 0x00FF) ^
              mulTable[ratio][divisor[j]]);
        }
      }
    }
  }

  /**
   * The "bulk" version of the remainder.
   * Warning: This function will modify the "dividend" inputs.
   */
  public void remainder(byte[][] dividend, int[] offsets,
                        int len, int[] divisor) {
    int idx1, idx2;
    for (int i = dividend.length - divisor.length; i >= 0; i--) {
      for (int j = 0; j < divisor.length; j++) {
        for (idx2 = offsets[j + i], idx1 = offsets[i + divisor.length - 1];
             idx1 < offsets[i + divisor.length - 1] + len;
             idx1++, idx2++) {
          int ratio = divTable[dividend[i + divisor.length - 1][idx1] &
              0x00FF][divisor[divisor.length - 1]];
          dividend[j + i][idx2] = (byte) ((dividend[j + i][idx2] & 0x00FF) ^
              mulTable[ratio][divisor[j]]);
        }
      }
    }
  }

  /**
   * The "bulk" version of the remainder, using ByteBuffer.
   * Warning: This function will modify the "dividend" inputs.
   */
  public void remainder(ByteBuffer[] dividend, int[] divisor) {
    int idx1, idx2;
    ByteBuffer b1, b2;
    for (int i = dividend.length - divisor.length; i >= 0; i--) {
      for (int j = 0; j < divisor.length; j++) {
        b1 = dividend[i + divisor.length - 1];
        b2 = dividend[j + i];
        for (idx1 = b1.position(), idx2 = b2.position();
             idx1 < b1.limit(); idx1++, idx2++) {
          int ratio = divTable[b1.get(idx1) &
              0x00FF][divisor[divisor.length - 1]];
          b2.put(idx2, (byte) ((b2.get(idx2) & 0x00FF) ^
              mulTable[ratio][divisor[j]]));
        }
      }
    }
  }

  /**
   * Perform Gaussian elimination on the given matrix, reducing it to reduced
   * row echelon form in place. This matrix has to be a fat matrix (number of
   * rows less than number of columns, e.g. an augmented system).
   */
  public void gaussianElimination(int[][] matrix) {
    assert(matrix != null && matrix.length > 0 && matrix[0].length > 0
        && matrix.length < matrix[0].length);
    int height = matrix.length;
    int width = matrix[0].length;
    for (int i = 0; i < height; i++) {
      boolean pivotFound = false;
      // scan the column for a nonzero pivot and swap it to the diagonal.
      // Bug fix: the original tested matrix[i][j] (row i), contradicting the
      // row swap below; that could swap a zero onto the diagonal and divide
      // by zero. Scan rows j >= i of column i instead.
      for (int j = i; j < height; j++) {
        if (matrix[j][i] != 0) {
          int[] tmp = matrix[i];
          matrix[i] = matrix[j];
          matrix[j] = tmp;
          pivotFound = true;
          break;
        }
      }
      if (!pivotFound) {
        continue;
      }
      // normalize the pivot row, then eliminate the column below the pivot
      // (in GF(2^p), add == subtract, so add() eliminates).
      int pivot = matrix[i][i];
      for (int j = i; j < width; j++) {
        matrix[i][j] = divide(matrix[i][j], pivot);
      }
      for (int j = i + 1; j < height; j++) {
        int lead = matrix[j][i];
        for (int k = i; k < width; k++) {
          matrix[j][k] = add(matrix[j][k], multiply(lead, matrix[i][k]));
        }
      }
    }
    // back substitution: clear the entries above each pivot
    for (int i = height - 1; i >=0; i--) {
      for (int j = 0; j < i; j++) {
        int lead = matrix[j][i];
        for (int k = i; k < width; k++) {
          matrix[j][k] = add(matrix[j][k], multiply(lead, matrix[i][k]));
        }
      }
    }
  }
}
| 7,685 |
418 | <gh_stars>100-1000
"""Run mirge tool"""
import os
import sys
import shutil
import glob
from bcbio.distributed.transaction import tx_tmpdir
from bcbio.provenance import do
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.log import logger
from bcbio import utils
# Maps miRBase three-letter species prefixes to the species names that
# miRge2.0's -sp option expects.
SPS = {'hsa': 'human',
       'mmu': 'mouse',
       'rno': 'rat',
       'dre': 'zebrafish',
       'cel': 'nematode',
       'dme': 'fruitfly'}
def run(data):
    """Proxy function to run the tool.

    `data` is a list of per-sample lists as passed around by the bcbio
    pipeline; the first sample drives the shared configuration (work dir,
    species, tool paths). Returns the absolute paths of all files produced
    under the mirge output directory.
    """
    sample = data[0][0]
    work_dir = dd.get_work_dir(sample)
    out_dir = os.path.join(work_dir, "mirge")
    lib = _find_lib(sample)
    mirge = _find_mirge(sample)
    bowtie = _find_bowtie(sample)
    sps = dd.get_species(sample)
    species = SPS.get(sps, "")
    if not species:
        raise ValueError("species not supported (hsa, mmu, rno, dre, cel, dme): %s" % sps)
    if not lib:
        raise ValueError("-lib option is not set up in resources for mirge tool."
                         " Read above warnings lines.")
    # Only run once; an existing out_dir is treated as a completed run.
    if not utils.file_exists(out_dir):
        with tx_tmpdir() as tmp_dir:
            sample_file = _create_sample_file(data, tmp_dir)
            # NOTE: _cmd()'s template is filled from local variable names
            # (mirge, sample_file, tmp_dir, bowtie, lib, species) -- renaming
            # any of the locals above would break the command line.
            do.run(_cmd().format(**locals()), "Running miRge2.0.")
            shutil.move(tmp_dir, out_dir)
    return [os.path.abspath(fn) for fn in glob.glob(os.path.join(out_dir, "*", "*"))]
def _find_mirge(data):
    """Return the miRge2.0 executable path, or None when not installed."""
    try:
        return config_utils.get_program("miRge2.0", data)
    except config_utils.CmdNotFound:
        logger.warning("miRge2.0 is not found. Install it first, and try again.")
        return None
def _cmd():
cmd = "{mirge} annotate -s {sample_file} -o {tmp_dir} -pb {bowtie} {lib} -sp {species} -di -ai -gff"
return cmd
def _create_sample_file(data, out_dir):
"""from data list all the fastq files in a file"""
sample_file = os.path.join(out_dir, "sample_file.txt")
with open(sample_file, 'w') as outh:
for sample in data:
outh.write(sample[0]["clean_fastq"] + "\n")
return sample_file
def _find_bowtie(data):
    """Return the directory containing the bowtie binary, or None if missing."""
    try:
        bowtie = config_utils.get_program("bowtie", data)
        return os.path.dirname(bowtie)
    except config_utils.CmdNotFound:
        # Bug fix: the original called logger.watning (typo), which raised
        # AttributeError instead of warning on the missing-bowtie path.
        logger.warning("bowtie is not found. Install it first, and try again.")
        return None
def _find_lib(data):
    """Find mirge libs.

    Looks for a "-lib PATH" pair in resources:mirge:options and returns the
    full options string when the referenced path exists; otherwise warns (when
    no options are configured at all) and returns None.
    """
    options = " ".join(data.get('resources', {}).get('mirge', {}).get("options", ""))
    tokens = options.split()
    # Robustness fix: the original used options.split()[1], which only worked
    # when "-lib" was the very first token. Accept "-lib PATH" anywhere.
    if "-lib" in tokens:
        idx = tokens.index("-lib")
        if idx + 1 < len(tokens) and utils.file_exists(tokens[idx + 1]):
            return options
    if not options:
        logger.warning("miRge libraries not found. Follow these instructions to install them:")
        logger.warning("https://github.com/mhalushka/miRge#download-libraries")
        logger.warning("Then, pass -lib LIB_PATH with resourcces:mirge:options:[...]")
        logger.warning("More information: https://bcbio-nextgen.readthedocs.io/en/latest/contents/pipelines.html#smallrna-seq")
    return None
488 | /*
*
* Copyright (c) International Business Machines Corp., 2001
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* NAME
* fcntl17.c
*
* DESCRIPTION
* Check deadlock detection for file locking
*
* ALGORITHM
* The parent forks off 3 children. The parent controls the children
* with messages via pipes to create a delayed deadlock between the
* second and third child.
*
* USAGE
* fcntl17
*
* HISTORY
* 07/2001 Ported by <NAME>
* 04/2002 Minor fixes by <NAME> (testcase name
fcntl05 => fcntl17, check signal return for SIG_ERR)
*
* RESTRICTIONS
* None
*/
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include <fcntl.h>
#include <errno.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <inttypes.h>
#include "test.h"
#include "usctest.h"
char *TCID = "fcntl17";
int TST_TOTAL = 1;
extern int Tst_count;
#define STRINGSIZE 27
#define STRING "abcdefghijklmnopqrstuvwxyz\n"
#define STOP 0xFFF0
#define TIME_OUT 10
/* global variables */
int parent_pipe[2];		/* children -> parent: status/error codes */
int child_pipe1[2];		/* parent -> child 1: go-ahead / STOP messages */
int child_pipe2[2];		/* parent -> child 2: go-ahead / STOP messages */
int child_pipe3[2];		/* parent -> child 3: go-ahead / STOP messages */
int file_fd;			/* temp data file all locks are taken on */
pid_t parent_pid, child_pid1, child_pid2, child_pid3;
int child_stat;			/* scratch status word for waitpid() */
/* NOTE(review): positional initializers assume struct flock field order
 * l_type, l_whence, l_start, l_len, l_pid (as on Linux) — confirm on port. */
struct flock lock1 = { (short)F_WRLCK, (short)0, 2, 5, (short)0 };	/* child 1: 5 bytes @ offset 2 */
struct flock lock2 = { (short)F_WRLCK, (short)0, 9, 5, (short)0 };	/* child 2: 5 bytes @ offset 9 */
struct flock lock3 = { (short)F_WRLCK, (short)0, 17, 5, (short)0 };	/* child 3: 5 bytes @ offset 17 */
struct flock lock4 = { (short)F_WRLCK, (short)0, 17, 5, (short)0 };	/* child 2's 2nd lock: overlaps lock3 */
struct flock lock5 = { (short)F_WRLCK, (short)0, 2, 14, (short)0 };	/* child 3's 2nd lock: overlaps lock1+lock2 */
struct flock unlock = { (short)F_UNLCK, (short)0, 0, 0, (short)0 };	/* start 0, len 0: unlock whole file */
/* prototype declarations */
int setup();
void cleanup();
int parent_wait();
void parent_free();
void child_wait();
void child_free();
void do_child1();
void do_child2();
void do_child3();
int do_test(struct flock *, pid_t);
void stop_children();
void catch_child();
void catch_alarm();
char *str_type();
/*
 * setup - create control pipes, the temp data file and signal handlers.
 *
 * Returns 0 on success, 1 on any failure (the caller aborts the test).
 * Fix: the mkstemp() and write() failure paths previously fell through
 * and the test kept running with an unusable file descriptor.
 */
int setup()
{
	char *buf = STRING;
	char template[PATH_MAX];
	struct sigaction act;

	tst_sig(FORK, DEF_HANDLER, NULL);	/* capture signals */
	umask(0);
	TEST_PAUSE;		/* Pause if that option is specified */
	tst_tmpdir();		/* make temp dir and cd to it */
	if (pipe(parent_pipe) < 0) {
		tst_resm(TFAIL, "Couldn't create parent_pipe! errno = %d",
			 errno);
		return 1;
	}
	if (pipe(child_pipe1) < 0) {
		tst_resm(TFAIL, "Couldn't create child_pipe1! errno = %d",
			 errno);
		return 1;
	}
	if (pipe(child_pipe2) < 0) {
		tst_resm(TFAIL, "Couldn't create child_pipe2! errno = %d",
			 errno);
		return 1;
	}
	if (pipe(child_pipe3) < 0) {
		tst_resm(TFAIL, "Couldn't create child_pipe3! errno = %d",
			 errno);
		return 1;
	}
	parent_pid = getpid();
	snprintf(template, PATH_MAX, "fcntl17XXXXXX");
	if ((file_fd = mkstemp(template)) < 0) {
		tst_resm(TFAIL, "Couldn't open temp file! errno = %d", errno);
		return 1;	/* was missing: continuing would lock fd -1 */
	}
	if (write(file_fd, buf, STRINGSIZE) < 0) {
		tst_resm(TFAIL, "Couldn't write to temp file! errno = %d",
			 errno);
		return 1;	/* was missing: the byte-range locks need the data */
	}
	/* Alarm handler: fires if deadlock detection times out. */
	memset(&act, 0, sizeof(act));
	act.sa_handler = catch_alarm;
	sigemptyset(&act.sa_mask);
	sigaddset(&act.sa_mask, SIGALRM);
	if (sigaction(SIGALRM, &act, NULL) < 0) {
		tst_resm(TFAIL, "SIGALRM signal setup failed, errno: %d",
			 errno);
		return 1;
	}
	/* Child-death handler: any child death during the run is a failure. */
	memset(&act, 0, sizeof(act));
	act.sa_handler = catch_child;
	sigemptyset(&act.sa_mask);
	sigaddset(&act.sa_mask, SIGCLD);
	if (sigaction(SIGCLD, &act, NULL) < 0) {
		tst_resm(TFAIL, "SIGCLD signal setup failed, errno: %d", errno);
		return 1;
	}
	return 0;
}
/*
 * cleanup - release test resources and terminate.
 *
 * Closes the data file, removes the temp directory made by tst_tmpdir()
 * and exits through tst_exit(); this function does not return.
 */
void cleanup()
{
	close(file_fd);
	tst_rmdir();
	tst_exit();
}
/*
 * do_child1 - first child: take lock1 and later release it.
 *
 * Each step is gated by a go-ahead message from the parent on
 * child_pipe1; results (0 or errno) are reported via parent_free().
 * Releasing the lock in step 2 is what completes the deadlock cycle
 * between children 2 and 3.
 */
void do_child1()
{
	int err;
	/* Keep only this child's control read end and the parent pipe write end. */
	close(parent_pipe[0]);
	close(child_pipe1[1]);
	close(child_pipe2[0]);
	close(child_pipe2[1]);
	close(child_pipe3[0]);
	close(child_pipe3[1]);
	/* Step 1: take lock1 (5 bytes at offset 2) and report the outcome. */
	child_wait(child_pipe1[0]);
	tst_resm(TINFO, "child 1 starting");
	if (fcntl(file_fd, F_SETLK, &lock1) < 0) {
		err = errno;
		tst_resm(TINFO, "child 1 lock err %d", err);
		parent_free(err);
	} else {
		tst_resm(TINFO, "child 1 pid %d locked", getpid());
		parent_free(0);
	}
	/* Step 2: drop the lock, letting the blocked children deadlock. */
	child_wait(child_pipe1[0]);
	tst_resm(TINFO, "child 1 resuming");
	fcntl(file_fd, F_SETLK, &unlock);
	tst_resm(TINFO, "child 1 unlocked");
	/* Step 3: wait for the final message, then exit. */
	child_wait(child_pipe1[0]);
	tst_resm(TINFO, "child 1 exiting");
	exit(1);
}
/*
 * do_child2 - second child: take lock2, then block on lock4.
 *
 * Each step is gated by a go-ahead message from the parent on
 * child_pipe2; results (0 or errno) are reported via parent_free().
 * lock4 covers the same range as child 3's lock3, so the second
 * F_SETLKW blocks and forms one half of the deadlock cycle.
 */
void do_child2()
{
	int err;
	/* Keep only this child's control read end and the parent pipe write end. */
	close(parent_pipe[0]);
	close(child_pipe1[0]);
	close(child_pipe1[1]);
	close(child_pipe2[1]);
	close(child_pipe3[0]);
	close(child_pipe3[1]);
	/* Step 1: take lock2 (5 bytes at offset 9) and report the outcome. */
	child_wait(child_pipe2[0]);
	tst_resm(TINFO, "child 2 starting");
	if (fcntl(file_fd, F_SETLK, &lock2) < 0) {
		err = errno;
		tst_resm(TINFO, "child 2 lock err %d", err);
		parent_free(err);
	} else {
		tst_resm(TINFO, "child 2 pid %d locked", getpid());
		parent_free(0);
	}
	/* Step 2: blocking request for child 3's range; either succeeds
	 * eventually or fails with EDEADLK if this child is chosen. */
	child_wait(child_pipe2[0]);
	tst_resm(TINFO, "child 2 resuming");
	if (fcntl(file_fd, F_SETLKW, &lock4) < 0) {
		err = errno;
		tst_resm(TINFO, "child 2 lockw err %d", err);
		parent_free(err);
	} else {
		tst_resm(TINFO, "child 2 lockw locked");
		parent_free(0);
	}
	/* Step 3: wait for the final message, then exit. */
	child_wait(child_pipe2[0]);
	tst_resm(TINFO, "child 2 exiting");
	exit(1);
}
/*
 * do_child3 - third child: take lock3, then block on lock5.
 *
 * Each step is gated by a go-ahead message from the parent on
 * child_pipe3; results (0 or errno) are reported via parent_free().
 * lock5 covers the ranges held by children 1 and 2, so the second
 * F_SETLKW blocks and closes the deadlock cycle with child 2; the
 * kernel is expected to fail one of the two requests with EDEADLK.
 */
void do_child3()
{
	int err;
	/* Keep only this child's control read end and the parent pipe write end. */
	close(parent_pipe[0]);
	close(child_pipe1[0]);
	close(child_pipe1[1]);
	close(child_pipe2[0]);
	close(child_pipe2[1]);
	close(child_pipe3[1]);
	/* Step 1: take lock3 (5 bytes at offset 17) and report the outcome. */
	child_wait(child_pipe3[0]);
	tst_resm(TINFO, "child 3 starting");
	if (fcntl(file_fd, F_SETLK, &lock3) < 0) {
		err = errno;
		tst_resm(TINFO, "child 3 lock err %d", err);
		parent_free(err);
	} else {
		tst_resm(TINFO, "child 3 pid %d locked", getpid());
		parent_free(0);
	}
	/* Step 2: blocking request overlapping locks 1 and 2; expected to
	 * fail with EDEADLK once the cycle is complete. */
	child_wait(child_pipe3[0]);
	tst_resm(TINFO, "child 3 resuming");
	if (fcntl(file_fd, F_SETLKW, &lock5) < 0) {
		err = errno;
		tst_resm(TINFO, "child 3 lockw err %d", err);
		parent_free(err);
	} else {
		tst_resm(TINFO, "child 3 lockw locked");
		parent_free(0);
	}
	/* Step 3: wait for the final message, then exit. */
	child_wait(child_pipe3[0]);
	tst_resm(TINFO, "child 3 exiting");
	exit(1);
}
/*
 * do_test - verify which lock currently covers |lock|'s byte range.
 *
 * Probes the range with an F_RDLCK F_GETLK request; the kernel
 * overwrites the probe with a description of the conflicting lock, so
 * every field can then be compared against the expected |lock| and the
 * expected owner |pid|.
 *
 * Returns 0 when the installed lock matches, 1 on any mismatch or
 * fcntl() failure.
 */
int do_test(struct flock *lock, pid_t pid)
{
	struct flock fl;
	/* Probe as a read lock so any write lock in the range conflicts
	 * and gets reported back in fl. */
	fl.l_type = /* lock->l_type */ F_RDLCK;
	fl.l_whence = lock->l_whence;
	fl.l_start = lock->l_start;
	fl.l_len = lock->l_len;
	/* NOTE(review): l_pid is pid_t; the (short) cast looks stale but
	 * is harmless — the field is output-only for F_GETLK. */
	fl.l_pid = (short)0;
	if (fcntl(file_fd, F_GETLK, &fl) < 0) {
		tst_resm(TFAIL, "fcntl on file failed, errno =%d", errno);
		return 1;
	}
	if (fl.l_type != lock->l_type) {
		tst_resm(TFAIL, "lock type is wrong should be %s is %s",
			 str_type(lock->l_type), str_type(fl.l_type));
		return 1;
	}
	if (fl.l_whence != lock->l_whence) {
		tst_resm(TFAIL, "lock whence is wrong should be %d is %d",
			 lock->l_whence, fl.l_whence);
		return 1;
	}
	if (fl.l_start != lock->l_start) {
		tst_resm(TFAIL, "region starts in wrong place, "
			 "should be %"PRId64" is %"PRId64, (int64_t)lock->l_start, (int64_t)fl.l_start);
		return 1;
	}
	if (fl.l_len != lock->l_len) {
		tst_resm(TFAIL, "region length is wrong, should be %"PRId64" is %"PRId64,
			 (int64_t)lock->l_len, (int64_t)fl.l_len);
		return 1;
	}
	if (fl.l_pid != pid) {
		tst_resm(TFAIL, "locking pid is wrong, should be %d is %d",
			 pid, fl.l_pid);
		return 1;
	}
	return 0;
}
/*
 * str_type - printable name for an fcntl lock type.
 *
 * Returns a static string for the known types, or "BAD VALUE: <n>" for
 * anything else.  The fallback lives in a static buffer, so the result
 * is only valid until the next call and the function is not re-entrant.
 * Fix: the old 20-byte buffer + sprintf() overflowed for large negative
 * values ("BAD VALUE: -2147483648" needs 23 bytes); use a bigger buffer
 * and snprintf().
 */
char *str_type(int type)
{
	static char buf[32];

	switch (type) {
	case F_RDLCK:
		return ("F_RDLCK");
	case F_WRLCK:
		return ("F_WRLCK");
	case F_UNLCK:
		return ("F_UNLCK");
	default:
		snprintf(buf, sizeof(buf), "BAD VALUE: %d", type);
		return (buf);
	}
}
/*
 * parent_free - report a status word (0 or an errno value) to the parent.
 *
 * Exits the child on a short or failed write, since the test protocol
 * cannot continue without the message.
 */
void parent_free(int arg)
{
	ssize_t nwritten = write(parent_pipe[1], &arg, sizeof(arg));

	if (nwritten != (ssize_t)sizeof(arg)) {
		tst_resm(TFAIL, "couldn't send message to parent");
		exit(1);
	}
}
/*
 * parent_wait - block until a child reports a status word.
 *
 * Returns the value the child sent (0 or an errno value); on a failed
 * or short read it logs a failure and returns this process's errno.
 */
int parent_wait()
{
	int status;

	if (read(parent_pipe[0], &status, sizeof(status)) == sizeof(status))
		return (status);

	tst_resm(TFAIL, "parent_wait() failed");
	return (errno);
}
/*
 * child_free - send a control word (go-ahead or STOP) to a child.
 *
 * Exits the parent on a short or failed write, since a stuck child
 * would otherwise hang the test.
 */
void child_free(int fd, int arg)
{
	ssize_t nwritten = write(fd, &arg, sizeof(arg));

	if (nwritten != (ssize_t)sizeof(arg)) {
		tst_resm(TFAIL, "couldn't send message to child");
		exit(1);
	}
}
/*
 * child_wait - block until the parent sends a control word.
 *
 * Exits with status 0 when the word is STOP, exits with status 1 on a
 * read failure, and returns normally on any other (go-ahead) word.
 *
 * Fix: the comparison used to be "arg == (short)STOP".  STOP is 0xFFF0,
 * which as a short is -16 on common ABIs, while the parent sends the
 * full int value 0xFFF0 — so the STOP word could never match and the
 * children missed the shutdown request.  Compare as int instead.
 */
void child_wait(int fd)
{
	int arg;
	if (read(fd, &arg, sizeof(arg)) != sizeof(arg)) {
		tst_resm(TFAIL, "couldn't get message from parent");
		exit(1);
	} else if (arg == STOP) {
		exit(0);
	}
}
/*
 * stop_children - ask every child to exit, then reap.
 *
 * Restores default SIGCLD handling first so the exiting children do
 * not trigger catch_child(), then sends STOP down each control pipe.
 */
void stop_children()
{
	int stop_fds[3];
	int msg = STOP;
	int i;

	(void)signal(SIGCLD, (void (*)())SIG_DFL);

	stop_fds[0] = child_pipe1[1];
	stop_fds[1] = child_pipe2[1];
	stop_fds[2] = child_pipe3[1];
	for (i = 0; i < 3; i++)
		child_free(stop_fds[i], msg);

	wait(0);
}
/*
 * catch_child - SIGCLD handler.
 *
 * While the test is running no child should die (stop_children() resets
 * the handler before asking them to exit), so any SIGCLD here means an
 * unexpected child death: fail and exit through cleanup().
 */
void catch_child()
{
	tst_resm(TFAIL, "Unexpected death of child process");
	cleanup();
 /*NOTREACHED*/}
/*
 * catch_alarm - SIGALRM handler; fires when deadlock detection times out.
 *
 * Kills whichever children are still alive, reports the failure and
 * exits through cleanup().  Fix: the third branch reported "child 2"
 * instead of "child 3" (copy-paste error).
 */
void catch_alarm()
{
	/* Block SIGCHLD so the SIGKILLs below don't invoke catch_child(). */
	sighold(SIGCHLD);
	/*
	 * Timer has run out and the children have not detected the deadlock.
	 * Need to kill the kids and exit
	 */
	if (child_pid1 != 0 && (kill(child_pid1, SIGKILL)) < 0) {
		tst_resm(TFAIL, "Attempt to signal child 1 failed.");
	}
	if (child_pid2 != 0 && (kill(child_pid2, SIGKILL)) < 0) {
		tst_resm(TFAIL, "Attempt to signal child 2 failed.");
	}
	if (child_pid3 != 0 && (kill(child_pid3, SIGKILL)) < 0) {
		tst_resm(TFAIL, "Attempt to signal child 3 failed.");
	}
	tst_resm(TFAIL, "Alarm expired, deadlock not detected");
	tst_resm(TWARN, "You may need to kill child processes by hand");
	cleanup();
 /*NOTREACHED*/}
/*
 * main - drive the delayed-deadlock scenario.
 *
 * Forks three children that take disjoint write locks, then directs
 * children 2 and 3 to request each other's regions with F_SETLKW.
 * Releasing child 1's lock completes the cycle; the kernel must detect
 * the deadlock and fail one request with EDEADLK before the TIME_OUT
 * alarm fires.
 *
 * Fixes over the original:
 *  - removed a stray duplicate do_child3() call after the #endif;
 *  - lock-setup failures now consistently use TFAIL and set fail = 1
 *    (the first branch forgot fail, the second logged only TINFO);
 *  - parent_wait() returns the child's error code, so print that value
 *    instead of this process's unrelated errno;
 *  - the deadlock-failure message now says "child 3", matching the
 *    comment above it;
 *  - the watchdog alarm is cancelled once the deadlock result arrives.
 */
int main(int ac, char **av)
{
	int ans;
	int lc;			/* loop counter */
	char *msg;		/* message returned from parse_opts */
	int fail = 0;

	/* parse standard options */
	if ((msg = parse_opts(ac, av, (option_t *) NULL, NULL)) != (char *)NULL) {
		tst_brkm(TBROK, cleanup, "OPTION PARSING ERROR - %s", msg);
	}
#ifdef UCLINUX
	maybe_run_child(&do_child1, "nddddddddd", 1, &file_fd,
			&parent_pipe[0], &parent_pipe[1],
			&child_pipe1[0], &child_pipe1[1],
			&child_pipe2[0], &child_pipe2[1],
			&child_pipe3[0], &child_pipe3[1]);
	maybe_run_child(&do_child2, "nddddddddd", 2, &file_fd,
			&parent_pipe[0], &parent_pipe[1],
			&child_pipe1[0], &child_pipe1[1],
			&child_pipe2[0], &child_pipe2[1],
			&child_pipe3[0], &child_pipe3[1]);
	maybe_run_child(&do_child3, "nddddddddd", 3, &file_fd,
			&parent_pipe[0], &parent_pipe[1],
			&child_pipe1[0], &child_pipe1[1],
			&child_pipe2[0], &child_pipe2[1],
			&child_pipe3[0], &child_pipe3[1]);
#endif
	if (setup()) {		/* global test setup */
		tst_resm(TINFO, "setup failed");
		cleanup();
	 /*NOTREACHED*/}
	/* check for looping state if -i option is given */
	for (lc = 0; TEST_LOOPING(lc); lc++) {
		/* reset Tst_count in case we are looping */
		Tst_count = 0;
		tst_resm(TINFO, "Enter preparation phase");
		if ((child_pid1 = FORK_OR_VFORK()) == 0) {	/* first child */
#ifdef UCLINUX
			if (self_exec(av[0], "nddddddddd", 1, file_fd,
				      parent_pipe[0], parent_pipe[1],
				      child_pipe1[0], child_pipe1[1],
				      child_pipe2[0], child_pipe2[1],
				      child_pipe3[0], child_pipe3[1]) < 0) {
				perror("self_exec failed, child 1");
				cleanup();
			}
#else
			do_child1();
#endif
		} else if (child_pid1 < 0) {
			perror("Fork failed: child 1");
			cleanup();
		 /*NOTREACHED*/}
		/* parent */
		if ((child_pid2 = fork()) == 0) {	/* second child */
#ifdef UCLINUX
			if (self_exec(av[0], "nddddddddd", 2, file_fd,
				      parent_pipe[0], parent_pipe[1],
				      child_pipe1[0], child_pipe1[1],
				      child_pipe2[0], child_pipe2[1],
				      child_pipe3[0], child_pipe3[1]) < 0) {
				perror("self_exec failed, child 2");
				cleanup();
			}
#else
			do_child2();
#endif
		} else if (child_pid2 < 0) {
			perror("Fork failed: child 2");
			if ((kill(child_pid1, SIGKILL)) < 0) {
				tst_resm(TFAIL, "Attempt to signal child "
					 "1 failed");
			}
			cleanup();
		 /*NOTREACHED*/}
		/* parent */
		if ((child_pid3 = fork()) == 0) {	/* third child */
#ifdef UCLINUX
			if (self_exec(av[0], "nddddddddd", 3, file_fd,
				      parent_pipe[0], parent_pipe[1],
				      child_pipe1[0], child_pipe1[1],
				      child_pipe2[0], child_pipe2[1],
				      child_pipe3[0], child_pipe3[1]) < 0) {
				perror("self_exec failed, child 3");
				cleanup();
			}
#else
			do_child3();
#endif
		} else if (child_pid3 < 0) {
			perror("Fork failed: child 3");
			if ((kill(child_pid1, SIGKILL)) < 0) {
				tst_resm(TFAIL, "Attempt to signal child "
					 "1 failed");
			}
			if ((kill(child_pid2, SIGKILL)) < 0) {
				tst_resm(TFAIL, "Attempt to signal child 2 "
					 "failed");
			}
			cleanup();
		 /*NOTREACHED*/}
		/* parent: keep only the control write ends and the status
		 * read end. */
		close(parent_pipe[1]);
		close(child_pipe1[0]);
		close(child_pipe2[0]);
		close(child_pipe3[0]);
		tst_resm(TINFO, "Exit preparation phase");
/* //block1: */
		tst_resm(TINFO, "Enter block 1");
		fail = 0;
		/*
		 * child 1 puts first lock (bytes 2-7)
		 */
		child_free(child_pipe1[1], 0);
		if ((ans = parent_wait()) != 0) {
			tst_resm(TFAIL, "didn't set first child's lock, "
				 "error: %d", ans);
			fail = 1;
		}
		if (do_test(&lock1, child_pid1)) {
			tst_resm(TINFO, "do_test failed child 1");
			fail = 1;
		}
		/*
		 * child 2 puts second lock (bytes 9-14)
		 */
		child_free(child_pipe2[1], 0);
		if ((ans = parent_wait()) != 0) {
			tst_resm(TFAIL, "didn't set second child's lock, "
				 "error: %d", ans);
			fail = 1;
		}
		if (do_test(&lock2, child_pid2)) {
			tst_resm(TINFO, "do_test failed child 2");
			fail = 1;
		}
		/*
		 * child 3 puts third lock (bytes 17-22)
		 */
		child_free(child_pipe3[1], 0);
		if ((ans = parent_wait()) != 0) {
			tst_resm(TFAIL, "didn't set third child's lock, "
				 "error: %d", ans);
			fail = 1;
		}
		if (do_test(&lock3, child_pid3)) {
			tst_resm(TINFO, "do_test failed child 3");
			fail = 1;
		}
		/*
		 * child 2 tries to lock same range as
		 * child 3's first lock.
		 */
		child_free(child_pipe2[1], 0);
		/*
		 * child 3 tries to lock same range as
		 * child 1 and child 2's first locks.
		 */
		child_free(child_pipe3[1], 0);
		/*
		 * Tell child 1 to release its lock. This should cause a
		 * delayed deadlock between child 2 and child 3.
		 */
		child_free(child_pipe1[1], 0);
		/*
		 * Setup an alarm to go off in case the deadlock is not
		 * detected
		 */
		alarm(TIME_OUT);
		/*
		 * should get a message from child 3 telling that its
		 * second lock EDEADLOCK
		 */
		if ((ans = parent_wait()) != EDEADLK) {
			tst_resm(TFAIL, "child 3 didn't deadlock, "
				 "returned: %d", ans);
			fail = 1;
		}
		/* Deadlock result received; cancel the watchdog. */
		alarm(0);
		/*
		 * Double check that lock 2 and lock 3 are still right
		 */
		do_test(&lock2, child_pid2);
		do_test(&lock3, child_pid3);
		stop_children();
		if (fail) {
			tst_resm(TINFO, "Block 1 FAILED");
		} else {
			tst_resm(TINFO, "Block 1 PASSED");
		}
		tst_resm(TINFO, "Exit block 1");
	}
	waitpid(child_pid1, &child_stat, 0);
	waitpid(child_pid2, &child_stat, 0);
	waitpid(child_pid3, &child_stat, 0);
	cleanup();
	return 0;
}
| 6,724 |
310 | <reponame>dreeves/usesthis<filename>gear/hardware/p/potenza.json
{
"name": "Potenza",
"description": "A gamer's desktop PC.",
"url": "https://www.maingear.com/custom/desktops/potenza/"
} | 76 |
396 | <gh_stars>100-1000
/*
Copyright 2018 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef RESONANCE_AUDIO_BASE_SIMD_UTILS_H_
#define RESONANCE_AUDIO_BASE_SIMD_UTILS_H_
#include <cstddef>
#include <cstdint>
namespace vraudio {
// Checks if the pointer provided is correctly aligned for SIMD.
//
// @param pointer Pointer to check.
// @return True if the pointer is correctly aligned.
bool IsAligned(const float* pointer);
bool IsAligned(const int16_t* pointer);
// Rounds a number of frames up to the next aligned memory address
// based on |memory_alignment_bytes|. This allows for aligning offset pointers
// into a single chunk of allocated memory.
//
// @param length Number of samples before the desired offset pointer.
// @param type_size_bytes Size of the type of each entry in the array.
// @param memory_alignment_bytes Number of bytes to which an address is aligned.
// @return Number of samples into the memory chunk to ensure aligned memory.
size_t FindNextAlignedArrayIndex(size_t length, size_t type_size_bytes,
size_t memory_alignment_bytes);
// Adds a float array |input_a| to another float array |input_b| and stores the
// result in |output|.
//
// @param length Number of floats.
// @param input_a Pointer to the first float in input_a array.
// @param input_b Pointer to the first float in input_b array.
// @param output Pointer to the first float in output array.
void AddPointwise(size_t length, const float* input_a, const float* input_b,
float* output);
// Subtracts a float array pointwise from another float array and stores the
// result in |output|.
//
// @param length Number of floats.
// @param input_a Pointer to the first float in input_a array.
// @param input_b Pointer to the first float in input_b array.
// @param output Pointer to the first float in output array.
void SubtractPointwise(size_t length, const float* input_a,
const float* input_b, float* output);
// Pointwise multiplies a float array |input_a| with another float array
// |input_b| and stores the result in |output|.
//
// @param length Number of floats.
// @param input_a Pointer to the first float in input_a array.
// @param input_b Pointer to the first float in input_b array.
// @param output Pointer to the first float in output array.
void MultiplyPointwise(size_t length, const float* input_a,
const float* input_b, float* output);
// Pointwise multiplies a float array |input_a| with another float array
// |input_b| and adds the result onto |accumulator|.
//
// @param length Number of floats.
// @param input_a Pointer to the first float in input_a array.
// @param input_b Pointer to the first float in input_b array.
// @param accumulator Pointer to the first float in accumulator array.
void MultiplyAndAccumulatePointwise(size_t length, const float* input_a,
const float* input_b, float* accumulator);
// Multiplies a float array |input| by a scalar |gain| over |length| samples.
//
// @param length Number of floats.
// @param gain Scalar value with which to multiply the input.
// @param input Pointer to the first float in input array.
// @param output Pointer to the first float in output array.
void ScalarMultiply(size_t length, float gain, const float* input,
float* output);
// Multiplies a float array |input| by a scalar |gain| over |length| samples and
// adds the result onto |accumulator|.
//
// @param length Number of floats.
// @param gain Scalar value with which to multiply the input.
// @param input Pointer to the first float in input array.
// @param accumulator Pointer to the first float in accumulator array.
void ScalarMultiplyAndAccumulate(size_t length, float gain, const float* input,
float* accumulator);
// Calculates an approximate reciprocal square root.
//
// @param length Number of floats.
// @param input Pointer to the first float in input array.
// @param output Pointer to the first float in output array.
void ReciprocalSqrt(size_t length, const float* input, float* output);
// Calculates an approximate square root.
//
// @param length Number of floats.
// @param input Pointer to the first float in input array.
// @param output Pointer to the first float in output array.
void Sqrt(size_t length, const float* input, float* output);
// Calculates the approximate magnitudes of interleaved complex numbers.
//
// @param length Number of complex numbers in the input array,
// (i.e. half its length).
// @param input Pointer to the first float in input array. Length: 2 * |length|.
// @param output Pointer to the first float in output array, Length: |length|.
void ApproxComplexMagnitude(size_t length, const float* input, float* output);
// Calculates the complex values in interleaved format (real, imaginary), from a
// vector of magnitudes and of sines and cosines of phase.
//
// @param length Number of total entries (real & imaginary) in the input array.
// @param magnitude Pointer to the first float in the magnitude array, Length:
// |length| / 2
// @param cos_phase Pointer to the first float in the cosine phase array,
// Length: |length| / 2
// @param sin_phase Pointer to the first float in the sine phase array, Length:
// |length| / 2
// @param complex_interleaved_format_output Pointer to the first float in the
// output array. Length: |length|.
void ComplexInterleavedFormatFromMagnitudeAndSinCosPhase(
size_t length, const float* magnitude, const float* cos_phase,
const float* sin_phase, float* complex_interleaved_format_output);
// Generates an identical left and right pair of stereo channels from a mono
// input channel, where each channel is the mono channel times 1/sqrt(2).
//
// @param length Number of floats.
// @param mono Pointer to the first float in an input mono array.
// @param left Pointer to the first float in the left output array.
// @param right Pointer to the first float in the right output array.
void StereoFromMonoSimd(size_t length, const float* mono, float* left,
float* right);
// Generates a mono downmix from a pair of stereo channels, where the output is
// equal to the sum of the two inputs times 1/sqrt(2).
//
// @param length Number of floats.
// @param left Pointer to the first float in the left input array.
// @param right Pointer to the first float in the right input array.
// @param mono Pointer to the first float in an output mono array.
void MonoFromStereoSimd(size_t length, const float* left, const float* right,
float* mono);
// Converts an array of 32 bit float input to clamped 16 bit int output.
//
// @param length Number of floats in the input array and int16_ts in the output.
// @param input Float array.
// @param output Int array.
void Int16FromFloat(size_t length, const float* input, int16_t* output);
// Converts an array of 16 bit int input to 32 bit float output.
//
// @param length Number of int16_ts in the input array and floats in the output.
// @param input Int array.
// @param output Float array.
void FloatFromInt16(size_t length, const int16_t* input, float* output);
// Interleaves a pair of mono buffers of int_16 data into a stereo buffer.
//
// @param length Number of frames per mono channel. The interleaved buffer must
// be twice this size.
// @param channel_0 Input buffer of mono data for the first channel.
// @param channel_1 Input buffer of mono data for the second channel.
// @param interleaved_buffer Output buffer of stereo interleaved data.
void InterleaveStereo(size_t length, const int16_t* channel_0,
const int16_t* channel_1, int16_t* interleaved_buffer);
// Interleaves a pair of mono buffers of float data into a stereo buffer.
//
// @param length Number of frames per mono channel. The interleaved buffer must
// be twice this size.
// @param channel_0 Input buffer of mono data for the first channel.
// @param channel_1 Input buffer of mono data for the second channel.
// @param interleaved_buffer Output buffer of stereo interleaved data.
void InterleaveStereo(size_t length, const float* channel_0,
const float* channel_1, float* interleaved_buffer);
// Interleaves a pair of mono buffers of float data into a stereo buffer of
// int16_t data.
//
// @param length Number of frames per mono channel. The interleaved buffer must
// be twice this size.
// @param channel_0 Input buffer of mono data for the first channel (float).
// @param channel_1 Input buffer of mono data for the second channel (float).
// @param interleaved_buffer Output buffer of stereo interleaved data (int16_t).
void InterleaveStereo(size_t length, const float* channel_0,
const float* channel_1, int16_t* interleaved_buffer);
// Deinterleaves a stereo buffer of int16_t data into a pair of mono buffers.
//
// @param length Number of frames per mono channel. The interleaved buffer must
// be twice this size.
// @param interleaved_buffer Input buffer of stereo interleaved data.
// @param channel_0 Output buffer of mono data for the first channel.
// @param channel_1 Output buffer of mono data for the second channel.
void DeinterleaveStereo(size_t length, const int16_t* interleaved_buffer,
int16_t* channel_0, int16_t* channel_1);
// Deinterleaves a stereo buffer of float data into a pair of mono buffers.
//
// @param length Number of frames per mono channel. The interleaved buffer must
// be twice this size.
// @param interleaved_buffer Input buffer of stereo interleaved data.
// @param channel_0 Output buffer of mono data for the first channel.
// @param channel_1 Output buffer of mono data for the second channel.
void DeinterleaveStereo(size_t length, const float* interleaved_buffer,
float* channel_0, float* channel_1);
// Deinterleaves a stereo buffer of int16_t data into a pair of mono float
// buffers, performing the int16 to floating point conversion.
//
// @param length Number of frames per mono channel. The interleaved buffer must
// be twice this size.
// @param interleaved_buffer Input buffer of stereo interleaved data (int16_t).
// @param channel_0 Output buffer of mono data for the first channel (float).
// @param channel_1 Output buffer of mono data for the second channel (float).
void DeinterleaveStereo(size_t length, const int16_t* interleaved_buffer,
float* channel_0, float* channel_1);
// Interleaves four mono buffers of int16_t data into a quad buffer.
//
// @param length Number of frames per mono channel. The interleaved buffer must
// be four times this size and the workspace must be five times this size.
// @param channel_0 Input buffer of mono data for the first channel.
// @param channel_1 Input buffer of mono data for the second channel.
// @param channel_2 Input buffer of mono data for the third channel.
// @param channel_3 Input buffer of mono data for the fourth channel.
// @param workspace Aligned buffer of 5 * |length| samples in length.
// @param interleaved_buffer Output buffer of quad interleaved data.
void InterleaveQuad(size_t length, const int16_t* channel_0,
const int16_t* channel_1, const int16_t* channel_2,
const int16_t* channel_3, int16_t* workspace,
int16_t* interleaved_buffer);
// Interleaves four mono buffers of float data into a quad buffer.
//
// @param length Number of frames per mono channel. The interleaved buffer must
// be four times this size and the workspace must be five times this size.
// @param channel_0 Input buffer of mono data for the first channel.
// @param channel_1 Input buffer of mono data for the second channel.
// @param channel_2 Input buffer of mono data for the third channel.
// @param channel_3 Input buffer of mono data for the fourth channel.
// @param workspace Aligned buffer of 5 * |length| samples in length.
// @param interleaved_buffer Output buffer of quad interleaved data.
void InterleaveQuad(size_t length, const float* channel_0,
const float* channel_1, const float* channel_2,
const float* channel_3, float* workspace,
float* interleaved_buffer);
// Deinterleaves a quad buffer of int16_t data into four mono buffers.
//
// @param length Number of frames per mono channel. The interleaved buffer must
// be four times this size and the workspace must be five times this size.
// @param interleaved_buffer Input buffer of quad interleaved data.
// @param workspace Aligned buffer of 5 * |length| samples in length.
// @param channel_0 Output buffer of mono data for the first channel.
// @param channel_1 Output buffer of mono data for the second channel.
// @param channel_2 Output buffer of mono data for the third channel.
// @param channel_3 Output buffer of mono data for the fourth channel.
void DeinterleaveQuad(size_t length, const int16_t* interleaved_buffer,
int16_t* workspace, int16_t* channel_0,
int16_t* channel_1, int16_t* channel_2,
int16_t* channel_3);
// Deinterleaves a quad buffer of float data into four mono buffers.
//
// @param length Number of frames per mono channel. The interleaved buffer must
// be four times this size and the workspace must be five times this size.
// @param interleaved_buffer Input buffer of quad interleaved data.
// @param workspace Aligned buffer of 5 * |length| samples in length.
// @param channel_0 Output buffer of mono data for the first channel.
// @param channel_1 Output buffer of mono data for the second channel.
// @param channel_2 Output buffer of mono data for the third channel.
// @param channel_3 Output buffer of mono data for the fourth channel.
void DeinterleaveQuad(size_t length, const float* interleaved_buffer,
float* workspace, float* channel_0, float* channel_1,
float* channel_2, float* channel_3);
} // namespace vraudio
#endif // RESONANCE_AUDIO_BASE_SIMD_UTILS_H_
| 4,531 |
3,730 | <reponame>jackywyz/jvm.go<filename>test/testclasses/src/main/java/stdlib/net/UrlTest.java
package stdlib.net;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.net.URL;
import java.nio.charset.StandardCharsets;
/**
 * Smoke test for {@link java.net.URL}: fetches a page over HTTP and prints it.
 */
public class UrlTest {

    public static void main(String[] args) throws Exception {
        URL url = new URL("http://cn.bing.com");
        try (InputStream is = url.openStream()) {
            // Fix: available() only reports bytes already buffered and a
            // single read() may return fewer bytes than requested, so the
            // original "new byte[is.available()]" + one read() could print a
            // truncated body. Read to EOF instead.
            ByteArrayOutputStream body = new ByteArrayOutputStream();
            byte[] chunk = new byte[8192];
            int n;
            while ((n = is.read(chunk)) != -1) {
                body.write(chunk, 0, n);
            }
            // Decode with an explicit charset rather than the platform default.
            System.out.println(new String(body.toByteArray(), StandardCharsets.UTF_8));
        }
        System.out.println("OK!");
    }
}
| 231 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Erquinvillers","circ":"1ère circonscription","dpt":"Oise","inscrits":144,"abs":79,"votants":65,"blancs":5,"nuls":2,"exp":58,"res":[{"nuance":"LR","nom":"M. <NAME>","voix":37},{"nuance":"REM","nom":"M. <NAME>","voix":21}]} | 112 |
335 | <reponame>Safal08/Hacktoberfest-1<filename>N/News_noun.json
{
"word": "News",
"definitions": [
"Newly received or noteworthy information, especially about recent events.",
"A broadcast or published report of news.",
"Information not previously known to (someone)",
"A person or thing considered interesting enough to be reported in the news."
],
"parts-of-speech": "Noun"
} | 148 |
2,151 | <filename>services/core/java/com/android/server/NativeDaemonEvent.java
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.server;
import android.util.Slog;
import com.google.android.collect.Lists;
import java.io.FileDescriptor;
import java.util.ArrayList;
/**
* Parsed event from native side of {@link NativeDaemonConnector}.
*/
public class NativeDaemonEvent {
// TODO: keep class ranges in sync with ResponseCode.h
// TODO: swap client and server error ranges to roughly mirror HTTP spec
    /** Command sequence number this event responds to; -1 for unsolicited events. */
    private final int mCmdNumber;
    /** Numeric response/event code; the hundreds digit selects the class (see isClass*()). */
    private final int mCode;
    /** Payload portion of the event: the raw line minus code/cmdNumber/marker prefix. */
    private final String mMessage;
    /** Complete unparsed event line as received from the daemon. */
    private final String mRawEvent;
    /** Log-safe form of the event; sensitive payloads are replaced with "{}". */
    private final String mLogMessage;
    /** Lazily parsed, unescaped fields of mRawEvent; populated by getField(). */
    private String[] mParsed;
    /** File descriptors delivered alongside the event, if any. */
    private FileDescriptor[] mFdList;
    /**
     * Creates an event; instances are built only via {@link #parseRawEvent}.
     *
     * @param cmdNumber command sequence number, or -1 for unsolicited events
     * @param code numeric response/event code
     * @param message parsed payload portion of the event
     * @param rawEvent complete raw line as received from the daemon
     * @param logMessage log-safe form of rawEvent (sensitive payload redacted)
     * @param fdList file descriptors delivered with the event
     */
    private NativeDaemonEvent(int cmdNumber, int code, String message,
                              String rawEvent, String logMessage, FileDescriptor[] fdList) {
        mCmdNumber = cmdNumber;
        mCode = code;
        mMessage = message;
        mRawEvent = rawEvent;
        mLogMessage = logMessage;
        mParsed = null;
        mFdList = fdList;
    }
static public final String SENSITIVE_MARKER = "{{sensitive}}";
    /** @return the command sequence number, or -1 for unsolicited events. */
    public int getCmdNumber() {
        return mCmdNumber;
    }
    /** @return the numeric response/event code. */
    public int getCode() {
        return mCode;
    }
    /** @return the event payload (raw event minus the code/cmdNumber prefix). */
    public String getMessage() {
        return mMessage;
    }
    /** @return the file descriptors delivered with the event, if any. */
    public FileDescriptor[] getFileDescriptors() {
        return mFdList;
    }
    /**
     * @return the complete unparsed event line; prefer the structured
     *         accessors ({@link #getCode()}, {@link #getMessage()}).
     */
    @Deprecated
    public String getRawEvent() {
        return mRawEvent;
    }
    /** Returns the log-safe form of the event (sensitive payloads shown as "{}"). */
    @Override
    public String toString() {
        return mLogMessage;
    }
    /**
     * Test if event represents a partial response which is continued in
     * additional subsequent events (codes 100-199).
     */
    public boolean isClassContinue() {
        return mCode >= 100 && mCode < 200;
    }
    /**
     * Test if event represents a command success (codes 200-299).
     */
    public boolean isClassOk() {
        return mCode >= 200 && mCode < 300;
    }
    /**
     * Test if event represents a remote native daemon error (codes 400-499).
     */
    public boolean isClassServerError() {
        return mCode >= 400 && mCode < 500;
    }
    /**
     * Test if event represents a command syntax or argument error (codes 500-599).
     */
    public boolean isClassClientError() {
        return mCode >= 500 && mCode < 600;
    }
    /**
     * Test if event represents an unsolicited event from native daemon
     * (codes 600-699); such events carry no command number.
     */
    public boolean isClassUnsolicited() {
        return isClassUnsolicited(mCode);
    }
    /** @return true when {@code code} is in the unsolicited-event class (600-699). */
    private static boolean isClassUnsolicited(int code) {
        return code >= 600 && code < 700;
    }
    /**
     * Verify this event matches the given code.
     *
     * @param code the expected response code
     * @throws IllegalStateException if {@link #getCode()} doesn't match.
     */
    public void checkCode(int code) {
        if (mCode != code) {
            throw new IllegalStateException("Expected " + code + " but was: " + this);
        }
    }
/**
* Parse the given raw event into {@link NativeDaemonEvent} instance.
*
* @throws IllegalArgumentException when line doesn't match format expected
* from native side.
*/
public static NativeDaemonEvent parseRawEvent(String rawEvent, FileDescriptor[] fdList) {
final String[] parsed = rawEvent.split(" ");
if (parsed.length < 2) {
throw new IllegalArgumentException("Insufficient arguments");
}
int skiplength = 0;
final int code;
try {
code = Integer.parseInt(parsed[0]);
skiplength = parsed[0].length() + 1;
} catch (NumberFormatException e) {
throw new IllegalArgumentException("problem parsing code", e);
}
int cmdNumber = -1;
if (isClassUnsolicited(code) == false) {
if (parsed.length < 3) {
throw new IllegalArgumentException("Insufficient arguemnts");
}
try {
cmdNumber = Integer.parseInt(parsed[1]);
skiplength += parsed[1].length() + 1;
} catch (NumberFormatException e) {
throw new IllegalArgumentException("problem parsing cmdNumber", e);
}
}
String logMessage = rawEvent;
if (parsed.length > 2 && parsed[2].equals(SENSITIVE_MARKER)) {
skiplength += parsed[2].length() + 1;
logMessage = parsed[0] + " " + parsed[1] + " {}";
}
final String message = rawEvent.substring(skiplength);
return new NativeDaemonEvent(cmdNumber, code, message, rawEvent, logMessage, fdList);
}
/**
 * Filter the given {@link NativeDaemonEvent} list, returning
 * {@link #getMessage()} for any events matching the requested code.
 */
public static String[] filterMessageList(NativeDaemonEvent[] events, int matchCode) {
    final ArrayList<String> matching = Lists.newArrayList();
    for (int i = 0; i < events.length; i++) {
        final NativeDaemonEvent event = events[i];
        if (event.getCode() == matchCode) {
            matching.add(event.getMessage());
        }
    }
    return matching.toArray(new String[matching.size()]);
}
/**
 * Find the Nth field of the event.
 *
 * This ignores any code or cmdNum; the first return value is given for N=0.
 * Also understands "\"quoted\" multiword responses" and treats them as a single field.
 *
 * @param n zero-based field index, not counting the code and command number
 * @return the requested field, or {@code null} if the event has no such field
 */
public String getField(int n) {
    // Lazily tokenize the raw event on first access.
    if (mParsed == null) {
        mParsed = unescapeArgs(mRawEvent);
    }
    n += 2; // skip code and command#
    // Use >= so that an index equal to the array length returns null instead
    // of letting mParsed[n] throw ArrayIndexOutOfBoundsException (the original
    // '>' comparison was off by one).
    if (n >= mParsed.length) return null;
    return mParsed[n];
}
/**
 * Split a raw native daemon event line into its individual arguments.
 *
 * <p>Arguments are separated by spaces; an argument may be wrapped in double
 * quotes to contain spaces, and the sequences {@code \\} and {@code \"} are
 * treated as escapes and unescaped in the returned values.</p>
 *
 * @param rawEvent the raw event line as received from the daemon
 * @return the parsed arguments, including the leading code/cmdNum fields
 */
public static String[] unescapeArgs(String rawEvent) {
    final boolean DEBUG_ROUTINE = false;
    final String LOGTAG = "unescapeArgs";
    final ArrayList<String> parsed = new ArrayList<String>();
    final int length = rawEvent.length();
    int current = 0; // read cursor into rawEvent
    int wordEnd = -1; // exclusive end index of the word being scanned
    boolean quoted = false; // whether the current word started with a double quote
    if (DEBUG_ROUTINE) Slog.e(LOGTAG, "parsing '" + rawEvent + "'");
    if (rawEvent.charAt(current) == '\"') {
        quoted = true;
        current++;
    }
    while (current < length) {
        // find the end of the word: space for bare words, closing quote for quoted ones
        char terminator = quoted ? '\"' : ' ';
        wordEnd = current;
        while (wordEnd < length && rawEvent.charAt(wordEnd) != terminator) {
            if (rawEvent.charAt(wordEnd) == '\\') {
                // skip the escaped char
                ++wordEnd;
            }
            ++wordEnd;
        }
        // a trailing escape may have pushed wordEnd one past the end of the string
        if (wordEnd > length) wordEnd = length;
        String word = rawEvent.substring(current, wordEnd);
        current += word.length();
        if (!quoted) {
            word = word.trim();
        } else {
            current++; // skip the trailing quote
        }
        // unescape stuff within the word
        word = word.replace("\\\\", "\\");
        word = word.replace("\\\"", "\"");
        if (DEBUG_ROUTINE) Slog.e(LOGTAG, "found '" + word + "'");
        parsed.add(word);
        // find the beginning of the next word - either of these options
        int nextSpace = rawEvent.indexOf(' ', current);
        int nextQuote = rawEvent.indexOf(" \"", current);
        if (DEBUG_ROUTINE) {
            Slog.e(LOGTAG, "nextSpace=" + nextSpace + ", nextQuote=" + nextQuote);
        }
        // a quote immediately after the next space starts a new quoted word
        if (nextQuote > -1 && nextQuote <= nextSpace) {
            quoted = true;
            current = nextQuote + 2;
        } else {
            quoted = false;
            if (nextSpace > -1) {
                current = nextSpace + 1;
            }
        } // else we just start the next word after the current and read til the end
        if (DEBUG_ROUTINE) {
            Slog.e(LOGTAG, "next loop - current=" + current +
                    ", length=" + length + ", quoted=" + quoted);
        }
    }
    return parsed.toArray(new String[parsed.size()]);
}
}
| 3,687 |
10,225 | <gh_stars>1000+
package io.quarkus.funqy.runtime.bindings.knative.events;
import io.quarkus.funqy.runtime.FunqyServerResponse;
import io.smallrye.mutiny.Uni;
/**
 * Default {@link FunqyServerResponse} implementation for the Knative Events
 * binding: a simple mutable holder for the function's asynchronous output.
 */
public class FunqyResponseImpl implements FunqyServerResponse {

    /** The function's output, wrapped in a lazy asynchronous stream. */
    protected Uni<?> output;

    @Override
    public void setOutput(Uni<?> output) {
        this.output = output;
    }

    @Override
    public Uni<?> getOutput() {
        return output;
    }
}
| 168 |
357 | /**
* This file is part of Eclipse Steady.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 SAP SE or an SAP affiliate company and Eclipse Steady contributors
*/
package org.eclipse.steady.backend.repo;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import javax.persistence.EntityNotFoundException;
import javax.persistence.PersistenceException;
import javax.validation.constraints.NotNull;
import org.eclipse.steady.backend.model.AffectedConstructChange;
import org.eclipse.steady.backend.model.AffectedLibrary;
import org.eclipse.steady.backend.model.Application;
import org.eclipse.steady.backend.model.Bug;
import org.eclipse.steady.backend.model.ConstructChange;
import org.eclipse.steady.backend.model.ConstructChangeInDependency;
import org.eclipse.steady.backend.model.ConstructId;
import org.eclipse.steady.backend.model.Dependency;
import org.eclipse.steady.backend.model.GoalExecution;
import org.eclipse.steady.backend.model.Library;
import org.eclipse.steady.backend.model.LibraryId;
import org.eclipse.steady.backend.model.Path;
import org.eclipse.steady.backend.model.Space;
import org.eclipse.steady.backend.model.TouchPoint;
import org.eclipse.steady.backend.model.Trace;
import org.eclipse.steady.backend.model.VulnerableDependency;
import org.eclipse.steady.backend.util.ReferenceUpdater;
import org.eclipse.steady.shared.enums.ConstructType;
import org.eclipse.steady.shared.enums.PathSource;
import org.eclipse.steady.shared.enums.VulnDepOrigin;
import org.eclipse.steady.shared.json.JacksonUtil;
import org.eclipse.steady.shared.json.model.ExemptionSet;
import org.eclipse.steady.shared.json.model.IExemption;
import org.eclipse.steady.shared.util.StopWatch;
import org.eclipse.steady.shared.util.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.transaction.annotation.Transactional;
import com.google.common.collect.Lists;
/**
* <p>ApplicationRepositoryImpl class.</p>
*/
public class ApplicationRepositoryImpl implements ApplicationRepositoryCustom {
private static Logger log = LoggerFactory.getLogger(ApplicationRepositoryImpl.class);
@Autowired ApplicationRepository appRepository;
@Autowired LibraryRepository libRepository;
@Autowired LibraryIdRepository libIdRepository;
@Autowired BugRepository bugRepository;
@Autowired SpaceRepository spaceRepository;
@Autowired TenantRepository tenantRepository;
@Autowired AffectedLibraryRepository affLibRepository;
@Autowired DependencyRepository depRepository;
@Autowired TracesRepository traceRepository;
@Autowired PathRepository pathRepository;
@Autowired GoalExecutionRepository gexeRepository;
@Autowired V_AppVulndepRepository v_appVulnDepRepository;
@Autowired ReferenceUpdater refUpdater;
/**
 * {@inheritDoc}
 *
 * <p>Creates the application if it does not exist yet, otherwise merges the
 * provided state into the managed entity: previously computed analysis results
 * on matching dependencies are carried over, nested constructs and libraries
 * are replaced by their managed counterparts, and the dependency tree is saved
 * parent-first.</p>
 */
@Transactional
public Application customSave(Application _app) {
    final StopWatch sw = new StopWatch("Save app " + _app).start();
    final String group = _app.getMvnGroup();
    final String artifact = _app.getArtifact();
    final String version = _app.getVersion();
    // Does it already exist?
    Application managed_app = null;
    try {
        managed_app =
            ApplicationRepository.FILTER.findOne(
                this.appRepository.findByGAV(group, artifact, version, _app.getSpace()));
        managed_app.setModifiedAt(Calendar.getInstance());
        // Carry over ids and previously computed results (traces, reachable
        // constructs, touch points) into the provided dependencies.
        _app = this.updateDependencies(managed_app, _app);
        sw.lap("Updated refs to nested deps of existing application' dependencies");
    } catch (EntityNotFoundException e1) {
        // if the application does not exist, we create an empty one so that we can later add the
        // dependencies incrementally
        Application new_app = new Application(group, artifact, version);
        new_app.setSpace(_app.getSpace());
        managed_app = this.appRepository.save(new_app);
        sw.lap(
            "Saved new empty application ["
                + _app
                + "], next steps will be to save constructs and dependencies");
    }
    // Copy the managed entity's identity and audit fields onto the provided instance.
    _app.setId(managed_app.getId());
    _app.setCreatedAt(managed_app.getCreatedAt());
    _app.setModifiedAt(managed_app.getModifiedAt());
    _app.setLastScan(managed_app.getLastScan());
    _app.setLastVulnChange(managed_app.getLastVulnChange());
    // Update refs to independent entities
    _app.setConstructs(refUpdater.saveNestedConstructIds(_app.getConstructs()));
    sw.lap("Updated refs to nested constructs");
    _app = this.updateLibraries(_app);
    sw.lap("Updated refs to nested libs");
    // Parents must be saved before children, hence order by depth first.
    _app.orderDependenciesByDepth();
    _app = this.saveDependencyTree(_app);
    sw.lap("Saved and updated refs to dependencies' (including parents)");
    // Save (if existing, saves the updated fields, otherwise it should all already be there)
    try {
        managed_app = this.appRepository.save(_app);
        sw.stop();
    } catch (Exception e) {
        sw.stop(e);
        throw new PersistenceException(
            "Error while saving application " + _app + ": " + e.getMessage());
    }
    return managed_app;
}
/**
 * <p>Saves the dependency tree ensuring that no transient entities are used for parent
 * dependencies: each dependency's parent is first replaced by its already-managed
 * counterpart, then the dependency itself is saved. Note that after the save operation
 * the entity may still only be handled by Hibernate and not yet persisted; Hibernate
 * guarantees the id will be set correctly once it is, linking the parent to its entity.</p>
 *
 * @return a {@link org.eclipse.steady.backend.model.Application} object.
 */
private Application saveDependencyTree(@NotNull Application _app) {
    for (final Dependency dep : _app.getDependencies()) {
        final Dependency parent = dep.getParent();
        if (parent != null) {
            dep.setParent(getManagedParent(parent, _app));
        }
        this.depRepository.save(dep);
    }
    return _app;
}
/**
 * <p>Given a dependency parent <code>_dep</code>, retrieves the corresponding managed
 * dependency (with database id) from the plain list of all dependencies of the
 * application <code>_app</code>. Resolves the ancestor chain recursively so the whole
 * parent path is managed before the lookup.</p>
 *
 * @param _dep the (possibly transient) parent dependency to resolve
 * @param _app the application whose dependency collection is searched
 * @return a {@link org.eclipse.steady.backend.model.Dependency} object.
 * @throws PersistenceException if no matching dependency exists in the application
 */
private Dependency getManagedParent(Dependency _dep, Application _app) {
    // Resolve ancestors first so the entire parent chain is managed.
    if (_dep.getParent() != null) _dep.setParent(getManagedParent(_dep.getParent(), _app));
    for (Dependency d : _app.getDependencies()) {
        if (d.equalLibParentRelPath(_dep)) return d;
    }
    // Fixed unbalanced bracket in the message (was "lib " ... "]").
    throw new PersistenceException(
        "Error while saving parent dependency on lib ["
            + _dep.getLib()
            + "] of application "
            + _app
            + ": parent does not exist in application collection");
}
/**
 * <p>Updates the provided dependencies of the provided application with managed ones of the
 * managed application if the same dependency exists in both.</p>
 *
 * <p>Two dependencies are considered the same when library, parent and relative path all
 * match ({@code equalLibParentRelPath}); in that case the database id, the managed parent,
 * and previously computed analysis results (traced flag, reachable constructs, touch
 * points, traces) are copied onto the provided dependency so they survive the save.</p>
 *
 * @param _managed_app the application as currently persisted
 * @param _provided_app the application as uploaded by the client
 * @return a {@link org.eclipse.steady.backend.model.Application} object.
 */
private Application updateDependencies(
    @NotNull Application _managed_app, @NotNull Application _provided_app) {
    for (Dependency provided_dep : _provided_app.getDependencies()) {
        for (Dependency managed_dep : _managed_app.getDependencies()) {
            // if a dependency with the same lib parent and relPath exists, we update it
            // with the newly provided values of transitive, declared, etc.
            // and we keep the previously computed values for traces, reachable constructs, etc.
            if (provided_dep.equalLibParentRelPath(managed_dep)) {
                provided_dep.setId(managed_dep.getId());
                // as they have the same parent, we have to take also the existing parent to get its db id
                provided_dep.setParent(managed_dep.getParent());
                provided_dep.setTraced(managed_dep.getTraced());
                provided_dep.setReachableConstructIds(managed_dep.getReachableConstructIds());
                provided_dep.setTouchPoints((Set<TouchPoint>) managed_dep.getTouchPoints());
                provided_dep.setTraces(managed_dep.getTraces());
                break;
            }
            // if no dependency with the same lib parent and relPath exists, we do not look for a
            // library with same lib only but we consider it a new dependency
            // here we check whether it has a parent to see whether it already exists among the managed
            // entities
            // else if(provided_dep.getParent()!=null){
            // updateParents(provided_dep,_managed_app);
            // }
        }
    }
    return _provided_app;
}
/**
 * Replaces the transient {@link Library} of every dependency (including parents)
 * of the given application with its managed counterpart.
 *
 * @throws PersistenceException if a library lookup fails unexpectedly
 */
private Application updateLibraries(@NotNull Application _provided_app)
    throws PersistenceException {
    for (final Dependency dependency : _provided_app.getDependencies()) {
        this.updateLibrary(dependency, _provided_app);
    }
    return _provided_app;
}
// Recursively swaps the dependency's (and its ancestors') library for the managed entity.
private void updateLibrary(Dependency _dep, Application _provided_app) {
    final Dependency parent = _dep.getParent();
    if (parent != null) {
        updateLibrary(parent, _provided_app);
    }
    _dep.setLib(findManagedLibrary(_dep, _provided_app));
}
/**
 * Looks up the managed {@link Library} matching the dependency's digest. If the
 * library is not known yet (first time it is seen), an incomplete placeholder
 * is created and returned.
 *
 * @param _dep the dependency whose library shall be resolved
 * @param _provided_app the owning application (used for error messages only)
 * @return the managed library entity
 * @throws PersistenceException if the lookup fails for any other reason
 */
private Library findManagedLibrary(Dependency _dep, Application _provided_app) {
    Library managed_lib = null;
    try {
        managed_lib =
            LibraryRepository.FILTER.findOne(
                this.libRepository.findByDigest(_dep.getLib().getDigest()));
    } catch (EntityNotFoundException e) {
        // Expected for libraries seen for the first time: log and create a placeholder.
        ApplicationRepositoryImpl.log.error(
            "EntityNotFoundException while finding dependency library ["
                + _dep.getLib().getDigest()
                + "] of application "
                + _provided_app
                + ": "
                + e.getMessage());
        managed_lib = this.libRepository.saveIncomplete(_dep.getLib());
    } catch (Exception e) {
        // Preserve the original exception as cause instead of discarding it
        // (the previous code only kept e.getMessage(), losing the stack trace).
        throw new PersistenceException(
            "Error while saving dependency on lib ["
                + _dep.getLib()
                + "] of application "
                + _provided_app
                + ": "
                + e.getMessage(),
            e);
    }
    return managed_lib;
}
/**
 * {@inheritDoc}
 *
 * Delegates to {@link #updateFlags(VulnerableDependency, Boolean)} for every element.
 */
public void updateFlags(TreeSet<VulnerableDependency> _vd_list, Boolean _withChangeList) {
    for (final VulnerableDependency vuln_dep : _vd_list) {
        this.updateFlags(vuln_dep, _withChangeList);
    }
}
/**
 * {@inheritDoc}
 *
 * Calls {@link VulnerableDependency#setTraced(int)} and {@link VulnerableDependency#setReachable(int)}
 * depending on the following conditions: if the {@link Bug} has a change list, it is checked
 * whether vulnerable constructs were traced during dynamic analysis or found reachable during
 * static analysis. If it does not have a change list, it is checked whether any of the
 * library's constructs has been traced or found reachable.
 */
@Override
public void updateFlags(VulnerableDependency _vd, Boolean _withChangeList) {
    if (_withChangeList) {
        // 1) Bugs WITH change list
        _vd.setVulnDepOrigin(VulnDepOrigin.CC);
        // 1.1) Dynamic analysis: traced if any vulnerable construct was executed
        final List<Trace> traces =
            traceRepository.findVulnerableTracesOfLibraries(
                _vd.getDep().getApp(), _vd.getDep().getLib(), _vd.getBugId());
        _vd.setTraced(traces.isEmpty() ? 0 : 1);
        // 1.2) Static analysis: reachable if any non-X2C path was found
        final List<Path> paths =
            pathRepository.findPathsForLibraryBug(
                _vd.getDep().getApp(), _vd.getDep().getLib(), _vd.getBugId());
        boolean reachable = false;
        for (final Path path : paths) {
            if (path.getSource() != PathSource.X2C) {
                reachable = true;
                break;
            }
        }
        _vd.setReachable(reachable ? 1 : 0);
    } else {
        // 2) Bugs WITHOUT change list: fall back to library-level information
        _vd.setVulnDepOrigin(VulnDepOrigin.AFFLIBID);
        // 2.1) Dynamic analysis
        _vd.setTraced(_vd.getDep().isTraced() ? 1 : 0);
        // 2.2) Static analysis
        final Collection<ConstructId> reachable_cids = _vd.getDep().getReachableConstructIds();
        _vd.setReachable(reachable_cids != null && !reachable_cids.isEmpty() ? 1 : 0);
    }
}
/**
 * {@inheritDoc}
 *
 * Returns a {@link SortedSet} of all {@link Application}s of the given {@link Space},
 * optionally skipping applications without constructs and dependencies.
 */
@Override
public SortedSet<Application> getApplications(
    boolean _skip_empty, String _space, long _asOfTimestamp) {
    final StopWatch timer = new StopWatch("Read all apps of space [" + _space + "]").start();
    final List<Application> apps =
        _skip_empty
            ? this.appRepository.findNonEmptyApps(_space, _asOfTimestamp)
            : this.appRepository.findAllApps(_space, _asOfTimestamp);
    timer.stop();
    // Natural ordering of Application, same as adding to an empty TreeSet.
    return new TreeSet<Application>(apps);
}
/**
 * {@inheritDoc}
 *
 * <p>Builds the detailed view of a single vulnerable dependency: loads the bug,
 * computes the affected flag according to the vulnerability's origin (construct
 * changes, affected library id, or a bundled library/library id), and decorates
 * every construct change of the bug with per-construct information (traced,
 * reachable, contained in the archive, affected).</p>
 */
@Override
public VulnerableDependency getVulnerableDependencyBugDetails(
    Application a,
    String digest,
    String bugid,
    VulnDepOrigin origin,
    String bundledDigest,
    String bundledGroup,
    String bundledArtifact,
    String bundledVersion)
    throws EntityNotFoundException {
    // Bug
    final Bug bug = BugRepository.FILTER.findOne(bugRepository.findByBugId(bugid));
    this.bugRepository.updateCachedCveData(bug, false);
    // Create the vuln dependency (flags to be set afterwards)
    VulnerableDependency vd =
        new VulnerableDependency(
            DependencyRepository.FILTER.findOne(depRepository.findByAppAndLib(a, digest)), bug);
    // Required for setting the flags for all elements of the change list (if any)
    List<Trace> trace_list = new ArrayList<Trace>();
    List<Path> path_list = new ArrayList<Path>();
    List<ConstructId> cidList = new ArrayList<ConstructId>();
    List<AffectedConstructChange> aff_ccList = new ArrayList<AffectedConstructChange>();
    // set affected flag and trace.reachable (if applicable)
    if (origin.equals(VulnDepOrigin.CC) || origin.equals(VulnDepOrigin.AFFLIBID)) {
        // Vulnerability concerns the dependency's own library
        this.affLibRepository.computeAffectedLib(vd, vd.getDep().getLib());
        if (vd.getBug().countConstructChanges() > 0) this.updateFlags(vd, true);
        else this.updateFlags(vd, false);
        // Required for setting the flags for all elements of the change list (if any)
        trace_list =
            traceRepository.findVulnerableTracesOfLibraries(
                vd.getDep().getApp(), vd.getDep().getLib(), vd.getBugId());
        path_list =
            pathRepository.findPathsForLibraryBug(
                vd.getDep().getApp(), vd.getDep().getLib(), vd.getBugId());
        cidList =
            libRepository.findBuggyConstructIds(
                vd.getDep().getLib().getDigest(), vd.getBug().getBugId());
        aff_ccList = affLibRepository.findByBugAndLib(vd.getBug(), vd.getDep().getLib());
    } else if (origin.equals(VulnDepOrigin.BUNDLEDCC)) {
        // Vulnerability comes from a library re-bundled inside the dependency,
        // identified by its digest (construct-change based)
        Library library =
            LibraryRepository.FILTER.findOne(this.libRepository.findByDigest(bundledDigest));
        this.affLibRepository.computeAffectedLib(vd, library);
        vd.setVulnDepOrigin(VulnDepOrigin.BUNDLEDCC);
        vd.setBundledLib(library);
        cidList = libRepository.findBuggyConstructIds(library.getDigest(), vd.getBug().getBugId());
        aff_ccList = affLibRepository.findByBugAndLib(vd.getBug(), library);
    } else if (origin.equals(VulnDepOrigin.BUNDLEDAFFLIBID)) {
        // Vulnerability comes from a re-bundled library identified only by its GAV
        LibraryId libraryId =
            LibraryIdRepository.FILTER.findOne(
                this.libIdRepository.findBySecondaryKey(
                    bundledGroup, bundledArtifact, bundledVersion));
        Boolean affected =
            this.affLibRepository.isBugLibIdAffected(vd.getBug().getBugId(), libraryId);
        vd.setAffectedVersion(affected ? 1 : 0);
        vd.setAffectedVersionConfirmed(1);
        vd.setVulnDepOrigin(VulnDepOrigin.BUNDLEDAFFLIBID);
        vd.setBundledLibId(libraryId);
    }
    // Decorate each construct change of the bug with per-construct details
    List<ConstructChangeInDependency> constructsList = new ArrayList<ConstructChangeInDependency>();
    for (ConstructChange c : vd.getBug().getConstructChanges()) {
        if (c.getConstructId().getType() == ConstructType.CONS
            || c.getConstructId().getType() == ConstructType.METH
            || c.getConstructId().getType() == ConstructType.INIT) {
            // Executable constructs: traced/reachable information applies
            ConstructChangeInDependency ccd = new ConstructChangeInDependency(c);
            Boolean found = false;
            for (Trace t : trace_list) {
                if (t.getConstructId().equals(c.getConstructId())) {
                    found = true;
                    ccd.setTraced(true);
                    ccd.setTrace(t);
                    break;
                }
            }
            if (!found && vd.getTracedConfirmed() == 1) ccd.setTraced(false);
            found = false;
            // TODO also store the paths?
            for (Path p : path_list) {
                if (p.getEndConstructId().equals(c.getConstructId())) {
                    // if(p.getEndConstructId().equals(c.getConstructId())||p.getStartConstructId().equals(c.getConstructId()))
                    if (p.getSource() != PathSource.X2C) {
                        found = true;
                        ccd.setReachable(true);
                        break;
                    }
                }
            }
            if (!found && vd.getReachableConfirmed() == 1) ccd.setReachable(false);
            if (cidList.contains(c.getConstructId())) {
                ccd.setInArchive(true);
            } else ccd.setInArchive(false);
            // Merge in the per-construct results computed by vulas:check-version
            for (AffectedConstructChange aff_cc : aff_ccList) {
                if (aff_cc.getCc().equals(c)) {
                    ccd.setAffected(aff_cc.getAffected());
                    ccd.setClassInArchive(aff_cc.getClassInArchive());
                    ccd.setEqualChangeType(aff_cc.getEqualChangeType());
                    ccd.setOverall_change(aff_cc.getOverall_chg());
                    if (ccd.getInArchive() != aff_cc.getInArchive()) {
                        log.warn(
                            "Conclusion of construct In archive from backend is "
                                + ccd.getInArchive()
                                + " while vulas:check-version concluded "
                                + aff_cc.getInArchive());
                    }
                }
            }
            constructsList.add(ccd);
        } else { // if (c.getConstructId().getType()== ConstructType.CLAS){
            // Non-executable constructs (e.g. classes): only archive containment applies
            ConstructChangeInDependency ccd = new ConstructChangeInDependency(c);
            if (cidList.contains(c.getConstructId())) {
                ccd.setInArchive(true);
            } else ccd.setInArchive(false);
            constructsList.add(ccd);
        }
    }
    vd.setConstructList(constructsList);
    return vd;
}
/**
 * {@inheritDoc}
 *
 * Deletes all the traces and paths collected during the various analyses.
 * If requested, also deletes the goal execution history.
 */
@Override
public void deleteAnalysisResults(Application _app, boolean _clean_goal_history) {
    // Traces
    this.traceRepository.deleteAllTracesForApp(_app);
    // Paths: clear the serialized path before deleting the rows
    final List<Path> app_paths = this.pathRepository.findByApp(_app);
    for (final Path path : app_paths) {
        path.setPath(null);
        this.pathRepository.save(path);
    }
    this.pathRepository.deleteAllPathsForApp(_app);
    // Goal execution history (optional)
    if (_clean_goal_history) {
        for (final GoalExecution gexe : this.gexeRepository.findByApp(_app)) {
            this.gexeRepository.delete(gexe);
        }
    }
}
/**
 * {@inheritDoc}
 *
 * Finds all {@link VulnerableDependency}s for a given {@link Space} and {@link Application}.
 *
 * <p>Vulnerable dependencies are collected from four sources and merged (insertion
 * order matters, as earlier entries win in the {@link TreeSet}):
 * (1) joining the application's dependencies with bugs' construct changes,
 * (2) construct-change bugs of libraries re-bundled inside a dependency,
 * (3) affected-version bugs of re-bundled library ids, and
 * (4) affected-version bugs of the dependencies' own library ids.
 * Optionally, exemption information from the latest goal execution's
 * configuration is attached to each result.</p>
 */
@Override
@Transactional(readOnly = true)
public TreeSet<VulnerableDependency> findAppVulnerableDependencies(
    Application _app, boolean _add_exemption_info, boolean _log) {
    final StopWatch sw = new StopWatch("Query vulnerable dependencies for application " + _app);
    if (_log) sw.start();
    // 1) Join over construct changes (origin=cc)
    final TreeSet<VulnerableDependency> vd_list_cc =
        this.appRepository.findJPQLVulnerableDependenciesByGAV(
            _app.getMvnGroup(), _app.getArtifact(), _app.getVersion(), _app.getSpace());
    if (_log) sw.lap("Found [" + vd_list_cc.size() + "] through joining constructs");
    // to improve performances we use a native query to get the affected version (having moved to
    // SQL the computation of the affected version source having priority.
    // further improvements could be:
    // embedding the native query into the JPQL one to only get the bugs according to the requested
    // flags, e.g., only affected or only historical ones.
    this.affLibRepository.computeAffectedLib(vd_list_cc);
    this.updateFlags(vd_list_cc, true);
    // 2) Retrieve vuln w/ cc for bundled libids
    TreeSet<VulnerableDependency> vd_list_bundled_cc = new TreeSet<VulnerableDependency>();
    List<Object[]> bundledDigests = this.libRepository.findBundledLibByApp(_app);
    if (_log) sw.lap("Found [" + bundledDigests.size() + "] libs digest for bundled libids.");
    for (Object[] e : bundledDigests) {
        // e[0] = dependency id, e[1] = bundled library id (native query result)
        Dependency depWithBundledLibId =
            this.depRepository.findById(((BigInteger) e[0]).longValue()).orElse(null);
        Library bundledDigest =
            this.libRepository.findById(((BigInteger) e[1]).longValue()).orElse(null);
        List<Bug> vulns_cc = this.bugRepository.findByLibrary(bundledDigest);
        for (Bug b : vulns_cc) {
            VulnerableDependency vulndep = new VulnerableDependency(depWithBundledLibId, b);
            vulndep.setVulnDepOrigin(VulnDepOrigin.BUNDLEDCC);
            this.affLibRepository.computeAffectedLib(vulndep, bundledDigest);
            vulndep.setBundledLibId(bundledDigest.getLibraryId());
            vulndep.setBundledLib(bundledDigest);
            vd_list_bundled_cc.add(vulndep);
        }
        // for bundled libraries we do not have traced and reachable information so we skip the
        // query
        // this.updateFlags(vd_list_bundled_cc, true);
    }
    // 3) Retrieve vuln w/o cc for bundled libids
    TreeSet<VulnerableDependency> vd_list_bundled_av = new TreeSet<VulnerableDependency>();
    List<Object[]> bundledLibIds = this.libIdRepository.findBundledLibIdByApp(_app);
    for (Object[] e : bundledLibIds) {
        Dependency depWithBundledLibId =
            this.depRepository.findById(((BigInteger) e[0]).longValue()).orElse(null);
        LibraryId bundledLibId =
            this.libIdRepository.findById(((BigInteger) e[1]).longValue()).orElse(null);
        // Bugs whose affected-version assessment for the bundled libid is "affected"
        List<Bug> vulns_av_true = this.bugRepository.findByLibId(bundledLibId, true);
        for (Bug b : vulns_av_true) {
            VulnerableDependency vulndep = new VulnerableDependency(depWithBundledLibId, b);
            vulndep.setVulnDepOrigin(VulnDepOrigin.BUNDLEDAFFLIBID);
            // An explicit assessment of the re-bundling artifact overrides the bundled one
            Boolean rebundlingAffected =
                this.affLibRepository.isBugLibAffected(
                    b.getBugId(), depWithBundledLibId.getLib().getDigest());
            if (rebundlingAffected == null)
                rebundlingAffected =
                    this.affLibRepository.isBugLibIdAffected(
                        b.getBugId(), depWithBundledLibId.getLib().getLibraryId());
            if (rebundlingAffected != null && !rebundlingAffected) vulndep.setAffectedVersion(0);
            else vulndep.setAffectedVersion(1);
            vulndep.setAffectedVersionConfirmed(1);
            vulndep.setBundledLibId(bundledLibId);
            vd_list_bundled_av.add(vulndep);
        }
        // Bugs assessed as "not affected" are included as confirmed non-affected entries
        List<Bug> vulns_av_false = this.bugRepository.findByLibId(bundledLibId, false);
        for (Bug b : vulns_av_false) {
            VulnerableDependency vulndep = new VulnerableDependency(depWithBundledLibId, b);
            vulndep.setVulnDepOrigin(VulnDepOrigin.BUNDLEDAFFLIBID);
            vulndep.setAffectedVersion(0);
            vulndep.setAffectedVersionConfirmed(1);
            vulndep.setBundledLibId(bundledLibId);
            vd_list_bundled_av.add(vulndep);
        }
        // for bundled libraries we do not have traced and reachable information so we skip the
        // query
        // this.updateFlags(vd_list_bundled_cc, true);
    }
    if (_log) {
        sw.lap("Found [" + vd_list_bundled_cc.size() + "] vulns w/ cc through bundled library ids");
        sw.lap("Found [" + vd_list_bundled_av.size() + "] vulns w/o cc through bundled library ids");
    }
    // 4) Join over libids to identify vulnerable dependencies based on affectedLibraries that were
    // created based on the GAV
    final TreeSet<VulnerableDependency> vd_list_libid =
        this.appRepository.findJPQLVulnerableDependenciesByGAVAndAffVersion(
            _app.getMvnGroup(), _app.getArtifact(), _app.getVersion(), _app.getSpace());
    // compute affected flag (note that the computation keeps into account the existence of
    // affectedLibs defined for the library digest)
    this.affLibRepository.computeAffectedLib(vd_list_libid);
    if (_log) sw.lap("Found [" + vd_list_libid.size() + "] through joining libids");
    // this.affLibRepository.computeAffectedLib(vd_list_libid); this is not required for vulns w/o
    // cc as we select by affectedVersions.affected (in this way we save the additional queries to
    // get the affected flag afterwards)
    this.updateFlags(vd_list_libid, false);
    // Merge bugs found joining constructs and libids
    final TreeSet<VulnerableDependency> vd_all = new TreeSet<VulnerableDependency>();
    vd_all.addAll(
        vd_list_cc); // this must be done before the add for bundled vulndeps to ensure that we do
    // not consider as bundledcc the cases where the code signature was not
    // altered.
    vd_all.addAll(vd_list_bundled_cc);
    vd_all.addAll(vd_list_bundled_av);
    vd_all.addAll(
        vd_list_libid); // this must be added after the vd_list_bundled_av, to ensure that we get
    // the info that a pair dep,bug comes out of something rebundled, though we
    // overwrite it as FP at the level of the rebundling artifact
    // Read exemption info from configuration and enrich vuln dep
    if (_add_exemption_info) {
        final GoalExecution latest = this.gexeRepository.findLatestGoalExecution(_app, null);
        if (latest != null) {
            final ExemptionSet exempts = ExemptionSet.createFromMap(latest.getConfigurationMap());
            for (VulnerableDependency vd : vd_all) {
                // Convert to the shared model to evaluate exemption applicability
                org.eclipse.steady.shared.json.model.VulnerableDependency svd =
                    (org.eclipse.steady.shared.json.model.VulnerableDependency)
                        JacksonUtil.fromTo(
                            vd,
                            null,
                            org.eclipse.steady.backend.model.view.Views.class,
                            org.eclipse.steady.shared.json.model.VulnerableDependency.class);
                final IExemption exempt = exempts.getApplicableExemption(svd);
                if (exempt != null) {
                    vd.setExemption(exempt);
                }
            }
        }
    }
    if (_log) sw.stop();
    return vd_all;
}
// Following stmts were meant to read configuration settings from database
// Fast select
/*final List<Property> properties = this.gexeRepository.findExecutionConfiguration(_app, PropertySource.GOAL_CONFIG, EXCLUDED_SCOPES);
if(properties!=null && properties.size()==1) {
final String[] values = properties.get(0).getPropertyValueAsArray();
excluded_scopes.addAll(Scope.fromStringArray(values));
}*/
// Fast select
/*final List<Property> properties = this.gexeRepository.findExecutionConfiguration(_app, PropertySource.GOAL_CONFIG, EXCLUDED_BUGS);
if(properties!=null && properties.size()==1) {
final String[] values = properties.get(0).getPropertyValueAsArray();
for(String v: values) {
final String setting = EXCLUDED_BUGS + "." + v;
final List<Property> texts = this.gexeRepository.findExecutionConfiguration(_app, PropertySource.GOAL_CONFIG, setting);
if(texts!=null && texts.size()==1)
excluded_bugs.put(v.toLowerCase(), texts.get(0).getPropertyValue());
else
excluded_bugs.put(v.toLowerCase(), "No reason provided, please add using configuration parameter [" + setting + "]");
}
}*/
/**
 * <p>For the given bug identifiers, returns a map from application id to a
 * per-bug map indicating whether the application is affected by that bug.
 * Malformed result rows are logged and skipped.</p>
 *
 * @param _bugs an array of {@link java.lang.String} objects.
 * @return a {@link java.util.HashMap} object.
 */
public HashMap<Long, HashMap<String, Boolean>> findAffectedApps(String[] _bugs) {
    final HashMap<Long, HashMap<String, Boolean>> affected_apps =
        new HashMap<Long, HashMap<String, Boolean>>();
    final List<Object[]> rows = this.v_appVulnDepRepository.findAffectedApps(_bugs);
    if (rows == null || rows.isEmpty()) {
        return affected_apps;
    }
    for (final Object[] row : rows) {
        try {
            final Long app_id = ((BigInteger) row[0]).longValue();
            final String bugid = (String) row[1];
            final Boolean affected = (Boolean) row[2];
            affected_apps
                .computeIfAbsent(app_id, key -> new HashMap<String, Boolean>())
                .put(bugid, affected);
        } catch (ClassCastException e) {
            log.error(
                "Cannot cast ["
                    + StringUtil.join(row, ", ")
                    + "] of types ["
                    + row[0].getClass().getSimpleName()
                    + ", "
                    + row[1].getClass().getSimpleName()
                    + ", "
                    + row[2].getClass().getSimpleName()
                    + "] to [BigInt, String, Boolean]");
        } catch (ArrayIndexOutOfBoundsException e) {
            log.error(
                "Cannot cast, too few elements in object array, [" + row.length + "] instead of [3]");
        }
    }
    return affected_apps;
}
/**
 * {@inheritDoc}
 *
 * Refreshes the last-vulnerability-change timestamp of all applications that
 * contain any construct touched by the given construct changes.
 */
@Transactional
public void refreshVulnChangebyChangeList(Collection<ConstructChange> _listOfConstructChanges) {
    final StopWatch sw = new StopWatch("Started refresh app vulnChange by CC list").start();
    final List<ConstructId> construct_ids = new ArrayList<ConstructId>();
    for (final ConstructChange change : _listOfConstructChanges) {
        construct_ids.add(change.getConstructId());
    }
    final List<Application> apps = appRepository.findAppsByCC(construct_ids);
    sw.lap("LastVulnChange by CC, [" + apps.size() + "] apps to be refreshed");
    // Partition to stay below PostgreSQL's limit of 32767 bind variables per statement.
    for (final List<Application> batch : Lists.partition(apps, 30000)) {
        appRepository.updateAppLastVulnChange(batch);
    }
    sw.stop();
}
/**
 * {@inheritDoc}
 *
 * Refreshes the last-vulnerability-change timestamp of all applications that
 * depend on the library id of the given affected-library assessment.
 */
@Transactional
public void refreshVulnChangebyAffLib(AffectedLibrary _affLib) {
    final StopWatch sw = new StopWatch("Started refresh app vulnChange by addLib").start();
    final List<Application> apps = new ArrayList<Application>();
    // Only assessments with a library id and an actual affected verdict trigger a refresh.
    if (_affLib != null && _affLib.getLibraryId() != null && _affLib.getAffected() != null) {
        apps.addAll(appRepository.findAppsByAffLib(_affLib.getLibraryId()));
    }
    sw.lap("LastVulnChange by AffLib, [" + apps.size() + "] apps to be refreshed");
    // Partition to stay below PostgreSQL's limit of 32767 bind variables per statement.
    for (final List<Application> batch : Lists.partition(apps, 30000)) {
        appRepository.updateAppLastVulnChange(batch);
    }
    sw.stop();
}
/**
 * {@inheritDoc}
 *
 * Records "now" as the application's latest scan time and persists the change.
 */
public void refreshLastScanbyApp(Application _app) {
    final Calendar now = Calendar.getInstance();
    _app.setLastScan(now);
    appRepository.save(_app);
}
}
| 12,443 |
17,275 | <gh_stars>1000+
/*
* The MIT License
*
* Copyright (c) 2004-2009, Sun Microsystems, Inc., <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.slaves;
import edu.umd.cs.findbugs.annotations.CheckForNull;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import hudson.DescriptorExtensionList;
import hudson.Extension;
import hudson.ExtensionPoint;
import hudson.Util;
import hudson.init.InitMilestone;
import hudson.init.Initializer;
import hudson.model.Actionable;
import hudson.model.Computer;
import hudson.model.Describable;
import hudson.model.Descriptor;
import hudson.model.Label;
import hudson.model.Node;
import hudson.model.Slave;
import hudson.security.ACL;
import hudson.security.AccessControlled;
import hudson.security.Permission;
import hudson.security.PermissionScope;
import hudson.slaves.NodeProvisioner.PlannedNode;
import hudson.util.DescriptorList;
import java.util.Collection;
import java.util.Objects;
import java.util.concurrent.Future;
import jenkins.model.Jenkins;
import org.kohsuke.accmod.Restricted;
import org.kohsuke.accmod.restrictions.DoNotUse;
import org.kohsuke.stapler.DataBoundConstructor;
/**
* Creates {@link Node}s to dynamically expand/shrink the agents attached to Hudson.
*
* <p>
* Put another way, this class encapsulates different communication protocols
* needed to start a new agent programmatically.
*
* <h2>Notes for implementers</h2>
* <h3>Automatically delete idle agents</h3>
* Nodes provisioned from a cloud do not automatically get released just because it's created from {@link Cloud}.
* Doing so requires a use of {@link RetentionStrategy}. Instantiate your {@link Slave} subtype with something
* like {@link CloudSlaveRetentionStrategy} so that it gets automatically deleted after some idle time.
*
* <h3>Freeing an external resource when an agent is removed</h3>
* Whether you do auto scale-down or not, you often want to release an external resource tied to a cloud-allocated
* agent when it is removed.
*
* <p>
* To do this, have your {@link Slave} subtype remember the necessary handle (such as EC2 instance ID)
* as a field. Such fields need to survive the user-initiated re-configuration of {@link Slave}, so you'll need to
* expose it in your {@link Slave} {@code configure-entries.jelly} and read it back in through {@link DataBoundConstructor}.
*
* <p>
* You then implement your own {@link Computer} subtype, override {@link Slave#createComputer()}, and instantiate
* your own {@link Computer} subtype with this handle information.
*
* <p>
* Finally, override {@link Computer#onRemoved()} and use the handle to talk to the "cloud" and de-allocate
* the resource (such as shutting down a virtual machine.) {@link Computer} needs to own this handle information
* because by the time this happens, a {@link Slave} object is already long gone.
*
* <h3>Views</h3>
*
* Since version 2.64, Jenkins clouds are visualized in UI. Implementations can provide {@code top} or {@code main} view
* to be presented at the top of the page or at the bottom respectively. In the middle, actions have their {@code summary}
* views displayed. Actions further contribute to {@code sidepanel} with {@code box} views. All mentioned views are
* optional to preserve backward compatibility.
*
* @author <NAME>
* @see NodeProvisioner
* @see AbstractCloudImpl
*/
public abstract class Cloud extends Actionable implements ExtensionPoint, Describable<Cloud>, AccessControlled {

    /**
     * Uniquely identifies this {@link Cloud} instance among other instances in {@link jenkins.model.Jenkins#clouds}.
     *
     * This is expected to be short ID-like string that does not contain any character unsafe as variable name or
     * URL path token.
     */
    public final String name;

    protected Cloud(String name) {
        this.name = name;
    }

    @Override
    public String getDisplayName() {
        // The unique name doubles as the UI display name.
        return name;
    }

    /**
     * Get URL of the cloud.
     *
     * @since 2.64
     * @return Jenkins relative URL.
     */
    public @NonNull String getUrl() {
        return "cloud/" + name;
    }

    @Override
    public @NonNull String getSearchUrl() {
        return getUrl();
    }

    @Override
    public ACL getACL() {
        // Delegates to the globally configured authorization strategy.
        return Jenkins.get().getAuthorizationStrategy().getACL(this);
    }

    /**
     * Provisions new {@link Node}s from this cloud.
     *
     * <p>
     * {@link NodeProvisioner} performs a trend analysis on the load,
     * and when it determines that it <b>really</b> needs to bring up
     * additional nodes, this method is invoked.
     *
     * <p>
     * The implementation of this method asynchronously starts
     * node provisioning.
     *
     * @param label
     *      The label that indicates what kind of nodes are needed now.
     *      Newly launched node needs to have this label.
     *      Only those {@link Label}s that this instance returned true
     *      from the {@link #canProvision(Label)} method will be passed here.
     *      This parameter is null if Hudson needs to provision a new {@link Node}
     *      for jobs that don't have any tie to any label.
     * @param excessWorkload
     *      Number of total executors needed to meet the current demand.
     *      Always ≥ 1. For example, if this is 3, the implementation
     *      should launch 3 agents with 1 executor each, or 1 agent with
     *      3 executors, etc.
     * @return
     *      {@link PlannedNode}s that represent asynchronous {@link Node}
     *      provisioning operations. Can be empty but must not be null.
     *      {@link NodeProvisioner} will be responsible for adding the resulting {@link Node}s
     *      into Hudson via {@link jenkins.model.Jenkins#addNode(Node)}, so a {@link Cloud} implementation
     *      just needs to return {@link PlannedNode}s that each contain an object that implements {@link Future}.
     *      When the {@link Future} has completed its work, {@link Future#get} will be called to obtain the
     *      provisioned {@link Node} object.
     * @deprecated Use {@link #provision(CloudState, int)} instead.
     */
    @Deprecated
    public Collection<PlannedNode> provision(Label label, int excessWorkload) {
        // Bridge to the newer CloudState overload. Util.ifOverridden only
        // dispatches to the subclass when it actually overrides the new
        // signature, which prevents infinite mutual recursion between the
        // two provision(...) overloads.
        return Util.ifOverridden(() -> provision(new CloudState(label, 0), excessWorkload),
                Cloud.class,
                getClass(),
                "provision",
                CloudState.class,
                int.class);
    }

    /**
     * Provisions new {@link Node}s from this cloud.
     *
     * <p>
     * {@link NodeProvisioner} performs a trend analysis on the load,
     * and when it determines that it <b>really</b> needs to bring up
     * additional nodes, this method is invoked.
     *
     * <p>
     * The implementation of this method asynchronously starts
     * node provisioning.
     *
     * @param state the current state.
     * @param excessWorkload
     *      Number of total executors needed to meet the current demand.
     *      Always ≥ 1. For example, if this is 3, the implementation
     *      should launch 3 agents with 1 executor each, or 1 agent with
     *      3 executors, etc.
     * @return
     *      {@link PlannedNode}s that represent asynchronous {@link Node}
     *      provisioning operations. Can be empty but must not be null.
     *      {@link NodeProvisioner} will be responsible for adding the resulting {@link Node}s
     *      into Hudson via {@link jenkins.model.Jenkins#addNode(Node)}, so a {@link Cloud} implementation
     *      just needs to return {@link PlannedNode}s that each contain an object that implements {@link Future}.
     *      When the {@link Future} has completed its work, {@link Future#get} will be called to obtain the
     *      provisioned {@link Node} object.
     */
    public Collection<PlannedNode> provision(CloudState state, int excessWorkload) {
        // Default implementation forwards to the deprecated overload so that
        // legacy subclasses that only override provision(Label, int) keep working.
        return provision(state.getLabel(), excessWorkload);
    }

    /**
     * Returns true if this cloud is capable of provisioning new nodes for the given label.
     * @deprecated Use {@link #canProvision(CloudState)} instead.
     */
    @Deprecated
    public boolean canProvision(Label label) {
        // Same overriding-detection bridge as provision(Label, int) above.
        return Util.ifOverridden(() -> canProvision(new CloudState(label, 0)),
                Cloud.class,
                getClass(),
                "canProvision",
                CloudState.class);
    }

    /**
     * Returns true if this cloud is capable of provisioning new nodes for the given label.
     */
    public boolean canProvision(CloudState state) {
        return canProvision(state.getLabel());
    }

    @Override
    public Descriptor<Cloud> getDescriptor() {
        return Jenkins.get().getDescriptorOrDie(getClass());
    }

    /**
     * All registered {@link Cloud} implementations.
     *
     * @deprecated as of 1.286
     *      Use {@link #all()} for read access, and {@link Extension} for registration.
     */
    @Deprecated
    public static final DescriptorList<Cloud> ALL = new DescriptorList<>(Cloud.class);

    /**
     * Returns all the registered {@link Cloud} descriptors.
     */
    public static DescriptorExtensionList<Cloud,Descriptor<Cloud>> all() {
        return Jenkins.get().getDescriptorList(Cloud.class);
    }

    // Scope object shared by permissions that apply to individual clouds.
    private static final PermissionScope PERMISSION_SCOPE = new PermissionScope(Cloud.class);

    /**
     * Permission constant to control mutation operations on {@link Cloud}.
     *
     * This includes provisioning a new node, as well as removing it.
     */
    public static final Permission PROVISION = new Permission(
            Computer.PERMISSIONS, "Provision", Messages._Cloud_ProvisionPermission_Description(), Jenkins.ADMINISTER, PERMISSION_SCOPE
    );

    @SuppressFBWarnings(value = "RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT", justification = "to guard against potential future compiler optimizations")
    @Initializer(before = InitMilestone.SYSTEM_CONFIG_LOADED)
    @Restricted(DoNotUse.class)
    public static void registerPermissions() {
        // Pending JENKINS-17200, ensure that the above permissions have been registered prior to
        // allowing plugins to adapt the system configuration, which may depend on these permissions
        // having been registered. Since this method is static and since it follows the above
        // construction of static permission objects (and therefore their calls to
        // PermissionGroup#register), there is nothing further to do in this method. We call
        // Objects.hash() to guard against potential future compiler optimizations.
        Objects.hash(PERMISSION_SCOPE, PROVISION);
    }

    /**
     * Parameter object for {@link hudson.slaves.Cloud}.
     * @since 2.259
     */
    public static final class CloudState {
        /**
         * The label under consideration.
         */
        @CheckForNull
        private final Label label;
        /**
         * The additional planned capacity for this {@link #label} and provisioned by previous strategies during the
         * current updating of the {@link NodeProvisioner}.
         */
        private final int additionalPlannedCapacity;

        public CloudState(@CheckForNull Label label, int additionalPlannedCapacity) {
            this.label = label;
            this.additionalPlannedCapacity = additionalPlannedCapacity;
        }

        /**
         * The label under consideration.
         */
        @CheckForNull
        public Label getLabel() {
            return label;
        }

        /**
         * The additional planned capacity for this {@link #getLabel()} and provisioned by previous strategies during
         * the current updating of the {@link NodeProvisioner}.
         */
        public int getAdditionalPlannedCapacity() {
            return additionalPlannedCapacity;
        }
    }
}
| 4,407 |
1,253 | //
// FILE: DAC8554.cpp
// AUTHOR: <NAME>
// PURPOSE: Arduino library for DAC8554 SPI Digital Analog Convertor
// VERSION: 0.2.4
// URL: https://github.com/RobTillaart/DAC8554
//
// HISTORY:
// 0.1.0: 2017-12-19 initial version
// 0.1.2 2020-04-06 minor refactor, readme.md
// 0.1.3 2020-06-07 fix library.json
// 0.1.4 2020-07-20 fix URL's in demo's; MIT license; minor edits
// 0.2.0 2020-12-18 add Arduino-CI + unit test
// 0.2.1 2021-01-10 fix slave select hardware SPI + getValue() + getPowerDownMode().
// fix unit test.
// 0.2.2 2021-06-02 compile ESP32
// 0.2.3 2021-08-29 add support for HSPI / VSPI ESP32 ++
// 0.2.4 2021-12-15 update library.json, license, unit test, minor edits
#include "DAC8554.h"
// Load-command bits OR-ed into the configuration byte together with the
// device address and channel bits. Semantics per command name; exact frame
// layout should be confirmed against the TI DAC8554 datasheet.
#define DAC8554_BUFFER_WRITE 0x00   // store value in the channel buffer only
#define DAC8554_SINGLE_WRITE 0x10   // write and update the selected channel
#define DAC8554_ALL_WRITE 0x20      // write channel buffer and update all outputs
#define DAC8554_BROADCAST 0x30      // address every DAC8554 on the bus
// Hardware-SPI constructor: only the select pin and the 2-bit device address
// are needed; clock and data are driven by the SPI peripheral.
DAC8554::DAC8554(uint8_t slaveSelect, uint8_t address)
{
  _hwSPI   = true;
  _address = (address & 0x03) << 6;  // A1/A0 address bits land in bits 7..6
  _select  = slaveSelect;
}
// Software-SPI (bit-bang) constructor: caller supplies the data, clock and
// select pins explicitly, plus the 2-bit device address.
DAC8554::DAC8554(uint8_t spiData, uint8_t spiClock, uint8_t slaveSelect, uint8_t address)
{
  _hwSPI   = false;
  _address = (address & 0x03) << 6;  // A1/A0 address bits land in bits 7..6
  _dataOut = spiData;
  _clock   = spiClock;
  _select  = slaveSelect;
}
// initializes the SPI
// and sets internal state
// Must be called once from setup() before any value/power-down calls.
void DAC8554::begin()
{
  pinMode(_select, OUTPUT);
  digitalWrite(_select, HIGH);  // deselect the device until a transfer starts

  _spi_settings = SPISettings(_SPIspeed, MSBFIRST, SPI_MODE1);

  if(_hwSPI)
  {
#if defined(ESP32)
    // ESP32 offers two hardware SPI buses; pin mapping is fixed here and can
    // be changed afterwards with setGPIOpins().
    if (_useHSPI)      //  HSPI
    {
      mySPI = new SPIClass(HSPI);
      mySPI->end();
      mySPI->begin(14, 12, 13, _select);   //  CLK=14  MISO=12  MOSI=13
    }
    else               //  VSPI
    {
      mySPI = new SPIClass(VSPI);
      mySPI->end();
      mySPI->begin(18, 19, 23, _select);   //  CLK=18  MISO=19  MOSI=23
    }
#else              //  generic hardware SPI
    mySPI = &SPI;
    mySPI->end();
    mySPI->begin();
#endif
    delay(1);
  }
  else                 //  software SPI
  {
    pinMode(_dataOut, OUTPUT);
    pinMode(_clock, OUTPUT);
    digitalWrite(_dataOut, LOW);
    digitalWrite(_clock, LOW);
  }

  // Reset the cached per-channel state (power-down register + last value).
  for (uint8_t i = 0; i < 4; i++)
  {
    _register[i] = 0;
    _value[i] = 0;
  }
}
#if defined(ESP32)
// Re-route the ESP32 hardware SPI bus to custom GPIO pins.
// NOTE(review): also overwrites the software-SPI pin fields set in the
// constructor — confirm that is intended when mixing modes.
void DAC8554::setGPIOpins(uint8_t clk, uint8_t miso, uint8_t mosi, uint8_t select)
{
  _clock   = clk;
  _dataOut = mosi;
  _select  = select;
  pinMode(_select, OUTPUT);
  digitalWrite(_select, HIGH);  // keep the device deselected while re-wiring

  mySPI->end();  //  disable SPI
  mySPI->begin(clk, miso, mosi, select);
}
#endif
//////////////////////////////////////////////////////////////////////
//
// SETVALUE
//
// channel = 0, 1, 2, 3
// value = 0..65535
// Store <value> in the buffer of <channel> without updating the output pin.
// channel = 0..3, value = 0..65535.
void DAC8554::bufferValue(uint8_t channel, uint16_t value)
{
  uint8_t cfg = _address | DAC8554_BUFFER_WRITE | (channel << 1);
  writeDevice(cfg, value);
}
// Set <channel> (0..3) to <value> (0..65535) and update all channel outputs.
// Out-of-range channels are ignored: the original code indexed the 4-entry
// _value[] cache unchecked, which wrote out of bounds for channel > 3.
void DAC8554::setValue(uint8_t channel, uint16_t value)
{
  if (channel > 3) return;  // guard: DAC8554 has exactly 4 channels
  _value[channel] = value;
  uint8_t configRegister = _address;
  configRegister |= DAC8554_ALL_WRITE;
  configRegister |= (channel << 1);
  writeDevice(configRegister, value);
}
// channel = 0, 1, 2, 3 depending on type
// returns 0..65535
// Returns the last value cached by setValue()/setSingleValue(); the DAC
// cannot be read back over SPI, so this is software state only.
uint16_t DAC8554::getValue(uint8_t channel)
{
  return _value[channel];
}
// Write <value> to <channel> and update only that channel's output.
void DAC8554::setSingleValue(uint8_t channel, uint16_t value)
{
  _value[channel] = value;
  uint8_t cfg = _address | DAC8554_SINGLE_WRITE | (channel << 1);
  writeDevice(cfg, value);
}
//////////////////////////////////////////////////////////////////////
//
// POWERDOWN
//
// channel = 0, 1, 2, 3
// powerDownMode =
// DAC8554_POWERDOWN_NORMAL 0x00
// DAC8554_POWERDOWN_1K 0x40
// DAC8554_POWERDOWN_100K 0x80
// DAC8554_POWERDOWN_HIGH_IMP 0xC0
//
// Buffer a power-down mode for <channel> without applying it yet.
// powerDownMode is one of the DAC8554_POWERDOWN_* constants (upper 2 bits).
void DAC8554::bufferPowerDown(uint8_t channel, uint8_t powerDownMode)
{
  _register[channel] = powerDownMode;
  uint8_t  cfg    = _address | DAC8554_BUFFER_WRITE | (channel << 1);
  uint16_t pdBits = (powerDownMode & 0xC0) << 8;  // mode bits -> D15..D14
  writeDevice(cfg, pdBits);
}
// Apply a power-down mode to <channel> and update all channel outputs.
void DAC8554::setPowerDown(uint8_t channel, uint8_t powerDownMode)
{
  _register[channel] = powerDownMode;
  uint8_t  cfg    = _address | DAC8554_ALL_WRITE | (channel << 1);
  uint16_t pdBits = (powerDownMode & 0xC0) << 8;  // mode bits -> D15..D14
  writeDevice(cfg, pdBits);
}
// Apply a power-down mode to <channel> only.
void DAC8554::setSinglePowerDown(uint8_t channel, uint8_t powerDownMode)
{
  _register[channel] = powerDownMode;
  uint8_t  cfg    = _address | DAC8554_SINGLE_WRITE | (channel << 1);
  uint16_t pdBits = (powerDownMode & 0xC0) << 8;  // mode bits -> D15..D14
  writeDevice(cfg, pdBits);
}
// Returns the last power-down mode cached for <channel> (software state;
// the hardware register is not read back).
uint8_t DAC8554::getPowerDownMode(uint8_t channel)
{
  return _register[channel];  // slightly different than DAC8552   rrrrrr
}
//////////////////////////////////////////////////////////////////////
//
// BROADCAST (addresses all 8554 on "SPI-bus")
//
// Tell every DAC8554 on the bus to load its buffered values.
void DAC8554::broadcastBuffer()
{
  writeDevice(DAC8554_BROADCAST, 0);
}
void DAC8554::broadcastValue(uint16_t value)
{
uint8_t configRegister = DAC8554_BROADCAST;
configRegister |= 0x04; // TODO magic number
writeDevice(configRegister, value);
}
void DAC8554::broadcastPowerDown(uint8_t powerDownMode)
{
uint8_t configRegister = DAC8554_BROADCAST;
configRegister |= 0x05; // TODO magic number
uint16_t value = (powerDownMode & 0xC0) << 8;
writeDevice(configRegister, value);
}
// Change the SPI clock speed (Hz) used for subsequent transfers.
// The cached SPISettings are rebuilt so the next transaction picks it up.
void DAC8554::setSPIspeed(uint32_t speed)
{
  _SPIspeed = speed;
  _spi_settings = SPISettings(_SPIspeed, MSBFIRST, SPI_MODE1);
}  // fixed: removed stray ';' after the function body
//////////////////////////////////////////////////////////////////
//
// PRIVATE
//
// Send one 24-bit frame to the device: the configuration byte followed by
// the 16-bit value, MSB first, with the select line held low for the frame.
void DAC8554::writeDevice(uint8_t configRegister, uint16_t value)
{
  digitalWrite(_select, LOW);
  if (_hwSPI)
  {
    mySPI->beginTransaction(_spi_settings);
    mySPI->transfer(configRegister);
    mySPI->transfer(value >> 8);
    mySPI->transfer(value & 0xFF);
    mySPI->endTransaction();  // fixed: was a double semicolon ';;'
  }
  else // Software SPI
  {
    swSPI_transfer(configRegister);
    swSPI_transfer(value >> 8);
    swSPI_transfer(value & 0xFF);
  }
  digitalWrite(_select, HIGH);
}
// simple one mode version
void DAC8554::swSPI_transfer(uint8_t value)
{
uint8_t clk = _clock;
uint8_t dao = _dataOut;
for (uint8_t mask = 0x80; mask; mask >>= 1)
{
digitalWrite(dao,(value & mask));
digitalWrite(clk, HIGH);
digitalWrite(clk, LOW);
}
}
// -- END OF FILE --
| 2,694 |
14,325 | <gh_stars>1000+
package org.apache.hadoop.seaweed.ftp.service;
import org.apache.ftpserver.DataConnectionConfiguration;
import org.apache.ftpserver.DataConnectionConfigurationFactory;
import org.apache.ftpserver.FtpServer;
import org.apache.ftpserver.FtpServerFactory;
import org.apache.ftpserver.command.CommandFactoryFactory;
import org.apache.ftpserver.listener.ListenerFactory;
import org.apache.hadoop.seaweed.ftp.service.filesystem.HdfsFileSystemManager;
import org.apache.hadoop.seaweed.ftp.service.filesystem.HdfsOverFtpSystem;
import org.apache.hadoop.seaweed.ftp.users.HdfsUserManager;
import org.apache.log4j.Logger;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import java.io.File;
/**
* reference: https://github.com/iponweb/hdfs-over-ftp
*/
@Component
public class HFtpService {
private static Logger log = Logger.getLogger(HFtpService.class);
@Value("${ftp.port}")
private int port = 0;
@Value("${ftp.passive-address}")
private String passiveAddress;
@Value("${ftp.passive-ports}")
private String passivePorts;
@Value("${hdfs.uri}")
private String hdfsUri;
@Value("${seaweedFs.enable}")
private boolean seaweedFsEnable;
@Value("${seaweedFs.access}")
private String seaweedFsAccess;
@Value("${seaweedFs.replication}")
private String seaweedFsReplication;
private FtpServer ftpServer = null;
public void startServer() throws Exception {
log.info("Starting HDFS-Over-Ftp server. port: " + port + " passive-address: " + passiveAddress + " passive-ports: " + passivePorts + " hdfs-uri: " + hdfsUri);
HdfsOverFtpSystem.setHdfsUri(hdfsUri);
HdfsOverFtpSystem.setSeaweedFsEnable(seaweedFsEnable);
HdfsOverFtpSystem.setSeaweedFsAccess(seaweedFsAccess);
HdfsOverFtpSystem.setSeaweedFsReplication(seaweedFsReplication);
FtpServerFactory server = new FtpServerFactory();
server.setFileSystem(new HdfsFileSystemManager());
ListenerFactory factory = new ListenerFactory();
factory.setPort(port);
DataConnectionConfigurationFactory dccFactory = new DataConnectionConfigurationFactory();
dccFactory.setPassiveAddress("0.0.0.0");
dccFactory.setPassivePorts(passivePorts);
dccFactory.setPassiveExternalAddress(passiveAddress);
DataConnectionConfiguration dcc = dccFactory.createDataConnectionConfiguration();
factory.setDataConnectionConfiguration(dcc);
server.addListener("default", factory.createListener());
HdfsUserManager userManager = new HdfsUserManager();
final File file = loadResource("/users.properties");
userManager.setFile(file);
server.setUserManager(userManager);
CommandFactoryFactory cmFact = new CommandFactoryFactory();
cmFact.setUseDefaultCommands(true);
server.setCommandFactory(cmFact.createCommandFactory());
// start the server
ftpServer = server.createServer();
ftpServer.start();
}
public void stopServer() {
log.info("Stopping Hdfs-Over-Ftp server. port: " + port + " passive-address: " + passiveAddress + " passive-ports: " + passivePorts + " hdfs-uri: " + hdfsUri);
ftpServer.stop();
}
public boolean statusServer() {
try {
return !ftpServer.isStopped();
}catch (Exception e) {
return false;
}
}
private static File loadResource(String resourceName) {
return new File(System.getProperty("user.dir") + resourceName);
}
} | 1,124 |
406 | <filename>tests/widgets/tooltip.py
import ttkbootstrap as ttk
from ttkbootstrap.constants import *
from ttkbootstrap.tooltip import ToolTip
# Demo window exercising ToolTip in its default and styled variants.
app = ttk.Window(size=(400, 100))

# Button carrying the stock tooltip (default yellow/black styling).
default_button = ttk.Button(app, text="default tooltip")
default_button.pack(side=LEFT, padx=10, pady=10, fill=X, expand=YES)
ToolTip(
    default_button,
    text="This is the default tooltip. This yellow background and black foreground will be applied by default.",
)

# Button whose tooltip styling is overridden via the `bootstyle` keyword.
styled_button = ttk.Button(app, text="styled tooltip")
styled_button.pack(side=LEFT, padx=10, pady=10, fill=X, expand=YES)
ToolTip(
    styled_button,
    text="This is a styled tooltip. You can change this style by using the `bootstyle` parameter with label style keywords.",
    bootstyle="danger-inverse",
)

app.mainloop()
| 252 |
443 | """Tests for utils."""
import unittest
import torch
from chemicalx.utils import segment_softmax
class TestPipeline(unittest.TestCase):
    """Tests for :func:`chemicalx.utils.segment_softmax`."""

    def test_segment_softmax(self):
        """Segment softmax matches manually computed per-segment values.

        Two segments of two logits each are softmaxed independently with
        temperature 2; the result is compared element-wise against
        pre-computed reference values within a small tolerance.
        """
        logit = torch.FloatTensor([-0.5, -2.5, 0.5, 1.5])
        number_of_segments = torch.LongTensor([2])
        segmentation_index = torch.LongTensor([0, 0, 1, 1])
        index = torch.LongTensor([0, 1, 2, 3])
        temperature = torch.LongTensor([2, 2, 2, 2])
        truth = torch.FloatTensor([0.7311, 0.2689, 0.3775, 0.6225])
        segment_s = segment_softmax(logit, number_of_segments, segmentation_index, index, temperature)
        difference = torch.sum(torch.abs(truth - segment_s))
        # Use a unittest assertion method (not a bare `assert`) so a failure
        # reports the actual difference; the original docstring also wrongly
        # described this test as a setUp method.
        self.assertLess(difference.item(), 0.001)
| 335 |
1,199 | <filename>2009/eblib/tracer.py
#-------------------------------------------------------------------------------
# tracer.py
#
# A decorator for tracing function calls.
# Runs with both Python 2 and 3.
#
# Loosely based on:
# - http://paulbutler.org/archives/python-debugging-with-decorators/
#
# <NAME> (<EMAIL>)
# This code is in the public domain
# Last modified: August 2012
#-------------------------------------------------------------------------------
import sys
from functools import wraps
class TraceCalls(object):
    """ Use as a decorator on functions that should be traced. Several
        functions can be decorated - they will all be indented according
        to their call depth.
    """
    def __init__(self, stream=sys.stdout, indent_step=2, show_ret=False):
        """Create the tracer.

        stream: file-like object the trace lines are written to.
        indent_step: number of spaces added per call-depth level.
        show_ret: when True, also write a '--> <ret>' line per call.
        """
        self.stream = stream
        self.indent_step = indent_step
        self.show_ret = show_ret

        # This is a class attribute since we want to share the indentation
        # level between different traced functions, in case they call
        # each other.
        TraceCalls.cur_indent = 0

    def __call__(self, fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            indent = ' ' * TraceCalls.cur_indent
            argstr = ', '.join(
                [repr(a) for a in args] +
                ["%s=%s" % (a, repr(b)) for a, b in kwargs.items()])
            self.stream.write('%s%s(%s)\n' % (indent, fn.__name__, argstr))

            TraceCalls.cur_indent += self.indent_step
            try:
                # Bug fix: restore the indent even if fn raises, so one
                # failing call does not permanently skew the indentation of
                # every subsequent trace line.
                ret = fn(*args, **kwargs)
            finally:
                TraceCalls.cur_indent -= self.indent_step

            if self.show_ret:
                self.stream.write('%s--> %s\n' % (indent, ret))
            return ret
        return wrapper
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # Demo 1: two mutually recursive functions share one indentation level,
    # so the trace shows the alternating iseven/isodd call chain.
    @TraceCalls()
    def iseven(n):
        return True if n == 0 else isodd(n - 1)

    @TraceCalls()
    def isodd(n):
        return False if n == 0 else iseven(n - 1)

    print(iseven(7))

    # Demo 2: wider indent step plus return-value reporting.
    @TraceCalls(indent_step=4, show_ret=True)
    def flatten(lst):
        # Recursively flatten arbitrarily nested lists into a single list.
        if isinstance(lst, list):
            return sum((flatten(item) for item in lst), [])
        else:
            return [lst]
    list(flatten([1, 2, [3, [4, 5], 6, [7, [9], 12]], 4, [6, 9]]))
// Depth-first generation of balanced parenthesis strings.
// temp  - prefix built so far
// a     - '(' characters still available to place
// n     - ')' characters still available to place
// Invariant a <= n must hold; otherwise the prefix can never balance.
void paranthesis(vector<string> &s, string temp, int a, int n){
    if (a > n)
        return;                               // more '(' left than ')' -> dead branch
    if (a == 0) {
        // No opens left: the only valid completion is all remaining closes.
        s.push_back(temp + string(n, ')'));
        return;
    }
    paranthesis(s, temp + "(", a - 1, n);     // place an opening bracket first
    paranthesis(s, temp + ")", a, n - 1);     // then try a closing bracket
}
// Returns every well-formed combination of A pairs of parentheses, in the
// order produced by the open-bracket-first recursion above.
vector<string> Solution::generateParenthesis(int A) {
    vector<string> result;
    paranthesis(result, string(), A, A);
    return result;
}
2,260 | // Autogenerated by gameplay-luagen
#ifndef LUA_SCRIPT_H_
#define LUA_SCRIPT_H_

namespace gameplay
{

// Registers the Script bindings with the Lua script engine.
// NOTE(review): this file is generator output (gameplay-luagen);
// regenerate it rather than editing by hand.
void luaRegister_Script();

}

#endif
4,537 | <reponame>zagrev/mesos
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef __PROVISIONER_APPC_PATHS_HPP__
#define __PROVISIONER_APPC_PATHS_HPP__
#include <string>
#include <stout/hashmap.hpp>
#include <stout/try.hpp>
#include <mesos/appc/spec.hpp>
namespace mesos {
namespace internal {
namespace slave {
namespace appc {
namespace paths {

// The appc store file system layout is as follows:
//
// <store_dir> ('--appc_store_dir' flag)
// |--staging (contains temp directories for staging downloads)
// |
// |--images (stores validated images)
//    |--<image_id> (in the form of "sha512-<128_character_hash_sum>")
//       |--manifest
//       |--rootfs
//          |--... (according to the ACI spec)
//
// TODO(xujyan): The staging directory is unused for now (it's
// externally managed) but implemented to illustrate the need for a
// separate 'images' directory. Complete the layout diagram when the
// staging directory is utilized by the provisioner.

// Returns the staging directory: <store_dir>/staging.
std::string getStagingDir(const std::string& storeDir);

// Returns the validated-images directory: <store_dir>/images.
std::string getImagesDir(const std::string& storeDir);

// Returns the directory of one image: <store_dir>/images/<image_id>.
std::string getImagePath(
    const std::string& storeDir,
    const std::string& imageId);

// Returns the image's root file system: .../<image_id>/rootfs.
std::string getImageRootfsPath(
    const std::string& storeDir,
    const std::string& imageId);

// Returns the image's manifest file: .../<image_id>/manifest.
std::string getImageManifestPath(
    const std::string& storeDir,
    const std::string& imageId);

} // namespace paths {
} // namespace appc {
} // namespace slave {
} // namespace internal {
} // namespace mesos {
| 701 |
14,668 | <filename>ash/quick_pair/message_stream/message_stream_lookup_impl.h<gh_stars>1000+
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef ASH_QUICK_PAIR_MESSAGE_STREAM_MESSAGE_STREAM_LOOKUP_IMPL_H_
#define ASH_QUICK_PAIR_MESSAGE_STREAM_MESSAGE_STREAM_LOOKUP_IMPL_H_
#include "ash/quick_pair/message_stream/message_stream_lookup.h"
#include <string>
#include "ash/quick_pair/message_stream/message_stream.h"
#include "base/containers/flat_map.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
#include "base/scoped_observation.h"
#include "device/bluetooth/bluetooth_adapter.h"
namespace device {
class BluetoothDevice;
class BluetoothSocket;
} // namespace device
namespace ash {
namespace quick_pair {
// Maintains one MessageStream per connected Fast Pair device: observes the
// Bluetooth adapter, opens an RFCOMM channel when a device connects, and
// tears the stream down when it disconnects or is removed.
class MessageStreamLookupImpl : public MessageStreamLookup,
                                public device::BluetoothAdapter::Observer {
 public:
  MessageStreamLookupImpl();
  MessageStreamLookupImpl(const MessageStreamLookupImpl&) = delete;
  MessageStreamLookupImpl& operator=(const MessageStreamLookupImpl&) = delete;
  ~MessageStreamLookupImpl() override;

  void AddObserver(MessageStreamLookup::Observer* observer) override;
  void RemoveObserver(MessageStreamLookup::Observer* observer) override;

  // Returns the stream for |device_address|, or null if none exists.
  MessageStream* GetMessageStream(const std::string& device_address) override;

 private:
  // device::BluetoothAdapter::Observer
  void DeviceConnectedStateChanged(device::BluetoothAdapter* adapter,
                                   device::BluetoothDevice* device,
                                   bool is_now_connected) override;
  void DeviceRemoved(device::BluetoothAdapter* adapter,
                     device::BluetoothDevice* device) override;

  // Helper functions to create and remove message stream objects and open and
  // close RFCOMM channels based on whether the device is connected or
  // disconnected from the adapter.
  void CreateMessageStream(device::BluetoothDevice* device);
  void RemoveMessageStream(const std::string& device_address);

  // Create RFCOMM connection callbacks.
  void OnConnected(std::string device_address,
                   scoped_refptr<device::BluetoothSocket> socket);
  void OnConnectError(const std::string& error_message);

  // Internal method called by BluetoothAdapterFactory to provide the adapter
  // object.
  void OnGetAdapter(scoped_refptr<device::BluetoothAdapter> adapter);

  base::ObserverList<MessageStreamLookup::Observer> observers_;
  // Streams keyed by device MAC address.
  base::flat_map<std::string, std::unique_ptr<MessageStream>> message_streams_;
  scoped_refptr<device::BluetoothAdapter> adapter_;
  base::ScopedObservation<device::BluetoothAdapter,
                          device::BluetoothAdapter::Observer>
      adapter_observation_{this};
  base::WeakPtrFactory<MessageStreamLookupImpl> weak_ptr_factory_{this};
};
} // namespace quick_pair
} // namespace ash
#endif // ASH_QUICK_PAIR_MESSAGE_STREAM_MESSAGE_STREAM_LOOKUP_IMPL_H_
| 1,048 |
777 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "core/input/MouseEventManager.h"
#include "core/clipboard/DataObject.h"
#include "core/clipboard/DataTransfer.h"
#include "core/dom/Element.h"
#include "core/dom/ElementTraversal.h"
#include "core/dom/TaskRunnerHelper.h"
#include "core/editing/FrameSelection.h"
#include "core/editing/SelectionController.h"
#include "core/events/DragEvent.h"
#include "core/events/MouseEvent.h"
#include "core/frame/FrameView.h"
#include "core/frame/LocalFrame.h"
#include "core/frame/Settings.h"
#include "core/html/HTMLCanvasElement.h"
#include "core/input/EventHandler.h"
#include "core/input/EventHandlingUtil.h"
#include "core/input/InputDeviceCapabilities.h"
#include "core/input/KeyboardEventManager.h"
#include "core/layout/HitTestResult.h"
#include "core/layout/api/LayoutViewItem.h"
#include "core/page/AutoscrollController.h"
#include "core/page/DragController.h"
#include "core/page/DragState.h"
#include "core/page/FocusController.h"
#include "core/paint/PaintLayer.h"
#include "core/svg/SVGDocumentExtensions.h"
#include "platform/geometry/FloatQuad.h"
namespace blink {
namespace {
// Returns a copy of |mouseEvent| whose hit region is set to the id of the
// canvas control that |node| backs, when |node| lives in a canvas subtree
// and is not the canvas itself. Otherwise returns |mouseEvent| unchanged.
PlatformMouseEvent mouseEventWithRegion(Node* node,
                                        const PlatformMouseEvent& mouseEvent) {
  if (!node->isElementNode())
    return mouseEvent;

  Element* targetElement = toElement(node);
  if (!targetElement->isInCanvasSubtree())
    return mouseEvent;

  HTMLCanvasElement* enclosingCanvas =
      Traversal<HTMLCanvasElement>::firstAncestorOrSelf(*targetElement);
  // In this case, the event target is canvas and mouse rerouting doesn't
  // happen.
  if (enclosingCanvas == targetElement)
    return mouseEvent;

  PlatformMouseEvent reroutedEvent = mouseEvent;
  reroutedEvent.setRegion(enclosingCanvas->getIdFromControl(targetElement));
  return reroutedEvent;
}
// The amount of time to wait before sending a fake mouse event triggered
// during a scroll.
const double kFakeMouseMoveInterval = 0.1;

// Per-platform drag hysteresis: how far (in pixels) the pointer must move
// with the button held before a drag starts, and how long a press must last
// before text dragging begins.
// TODO(crbug.com/653490): Read these values from the OS.
#if OS(MACOSX)
const int kDragThresholdX = 3;
const int kDragThresholdY = 3;
const TimeDelta kTextDragDelay = TimeDelta::FromSecondsD(0.15);
#else
const int kDragThresholdX = 4;
const int kDragThresholdY = 4;
const TimeDelta kTextDragDelay = TimeDelta::FromSecondsD(0.0);
#endif

} // namespace

// Input modality that initiated a drag-and-drop gesture.
enum class DragInitiator { Mouse, Touch };
MouseEventManager::MouseEventManager(LocalFrame& frame,
                                     ScrollManager& scrollManager)
    : m_frame(frame),
      m_scrollManager(scrollManager),
      // Timer posts on the user-interaction task runner of |frame| and fires
      // fakeMouseMoveEventTimerFired after scrolls.
      m_fakeMouseMoveEventTimer(
          TaskRunnerHelper::get(TaskType::UserInteraction, &frame),
          this,
          &MouseEventManager::fakeMouseMoveEventTimerFired) {
  clear();
}

// Resets all per-interaction state to its idle defaults and cancels any
// pending fake-mouse-move timer. Called from the constructor and whenever
// the tracked input state must be discarded.
void MouseEventManager::clear() {
  m_nodeUnderMouse = nullptr;
  m_mousePressNode = nullptr;
  m_mouseDownMayStartAutoscroll = false;
  m_mouseDownMayStartDrag = false;
  m_capturesDragging = false;
  // Position is unknown until the first real mouse event arrives.
  m_isMousePositionUnknown = true;
  m_lastKnownMousePosition = IntPoint();
  m_lastKnownMouseGlobalPosition = IntPoint();
  m_mousePressed = false;
  m_clickCount = 0;
  m_clickNode = nullptr;
  m_mouseDownPos = IntPoint();
  m_mouseDownTimestamp = TimeTicks();
  m_mouseDown = PlatformMouseEvent();
  m_svgPan = false;
  m_dragStartPos = LayoutPoint();
  m_fakeMouseMoveEventTimer.stop();
}
MouseEventManager::~MouseEventManager() = default;

// Oilpan tracing: visit all garbage-collected members so they stay alive
// while this manager references them.
DEFINE_TRACE(MouseEventManager) {
  visitor->trace(m_frame);
  visitor->trace(m_scrollManager);
  visitor->trace(m_nodeUnderMouse);
  visitor->trace(m_mousePressNode);
  visitor->trace(m_clickNode);
  SynchronousMutationObserver::trace(visitor);
}
// Dispatcher that fires the mouseover/mouseout/mouseenter/mouseleave
// boundary events when the node under the mouse changes. The base class
// drives the ordering; this subclass supplies the concrete event types and
// forwards each dispatch to the owning MouseEventManager.
MouseEventManager::MouseEventBoundaryEventDispatcher::
    MouseEventBoundaryEventDispatcher(
        MouseEventManager* mouseEventManager,
        const PlatformMouseEvent* platformMouseEvent,
        EventTarget* exitedTarget)
    : m_mouseEventManager(mouseEventManager),
      m_platformMouseEvent(platformMouseEvent),
      m_exitedTarget(exitedTarget) {}

void MouseEventManager::MouseEventBoundaryEventDispatcher::dispatchOut(
    EventTarget* target,
    EventTarget* relatedTarget) {
  // mouseout carries the canvas hit-region of the exited target.
  dispatch(
      target, relatedTarget, EventTypeNames::mouseout,
      mouseEventWithRegion(m_exitedTarget->toNode(), *m_platformMouseEvent),
      false);
}

void MouseEventManager::MouseEventBoundaryEventDispatcher::dispatchOver(
    EventTarget* target,
    EventTarget* relatedTarget) {
  dispatch(target, relatedTarget, EventTypeNames::mouseover,
           *m_platformMouseEvent, false);
}

void MouseEventManager::MouseEventBoundaryEventDispatcher::dispatchLeave(
    EventTarget* target,
    EventTarget* relatedTarget,
    bool checkForListener) {
  // mouseleave does not bubble; |checkForListener| lets the base class skip
  // nodes without listeners as an optimization.
  dispatch(
      target, relatedTarget, EventTypeNames::mouseleave,
      mouseEventWithRegion(m_exitedTarget->toNode(), *m_platformMouseEvent),
      checkForListener);
}

void MouseEventManager::MouseEventBoundaryEventDispatcher::dispatchEnter(
    EventTarget* target,
    EventTarget* relatedTarget,
    bool checkForListener) {
  dispatch(target, relatedTarget, EventTypeNames::mouseenter,
           *m_platformMouseEvent, checkForListener);
}

AtomicString
MouseEventManager::MouseEventBoundaryEventDispatcher::getLeaveEvent() {
  return EventTypeNames::mouseleave;
}

AtomicString
MouseEventManager::MouseEventBoundaryEventDispatcher::getEnterEvent() {
  return EventTypeNames::mouseenter;
}

// Common funnel: every boundary event ends up here and is dispatched via
// the owning MouseEventManager.
void MouseEventManager::MouseEventBoundaryEventDispatcher::dispatch(
    EventTarget* target,
    EventTarget* relatedTarget,
    const AtomicString& type,
    const PlatformMouseEvent& platformMouseEvent,
    bool checkForListener) {
  m_mouseEventManager->dispatchMouseEvent(target, type, platformMouseEvent,
                                          relatedTarget, checkForListener);
}
// Emits the full set of boundary events (out/leave then over/enter) for a
// pointer transition from |exitedTarget| to |enteredTarget|.
void MouseEventManager::sendBoundaryEvents(
    EventTarget* exitedTarget,
    EventTarget* enteredTarget,
    const PlatformMouseEvent& mousePlatformEvent) {
  MouseEventBoundaryEventDispatcher boundaryEventDispatcher(
      this, &mousePlatformEvent, exitedTarget);
  boundaryEventDispatcher.sendBoundaryEvents(exitedTarget, enteredTarget);
}
// Creates and dispatches a DOM MouseEvent of |mouseEventType| at |target|.
// When |checkForListener| is set, the event is only built and fired if the
// target actually has a listener registered for that type. Returns the
// dispatch outcome, or NotHandled when nothing was dispatched.
WebInputEventResult MouseEventManager::dispatchMouseEvent(
    EventTarget* target,
    const AtomicString& mouseEventType,
    const PlatformMouseEvent& mouseEvent,
    EventTarget* relatedTarget,
    bool checkForListener) {
  // Nothing to do without a node-backed target.
  if (!target || !target->toNode())
    return WebInputEventResult::NotHandled;
  if (checkForListener && !target->hasEventListeners(mouseEventType))
    return WebInputEventResult::NotHandled;
  Node* targetNode = target->toNode();
  // Only click-like events carry the accumulated click count.
  const bool clickLike = mouseEventType == EventTypeNames::mouseup ||
                         mouseEventType == EventTypeNames::mousedown ||
                         mouseEventType == EventTypeNames::click ||
                         mouseEventType == EventTypeNames::auxclick ||
                         mouseEventType == EventTypeNames::dblclick;
  const int clickCount = clickLike ? m_clickCount : 0;
  MouseEvent* event = MouseEvent::create(
      mouseEventType, targetNode->document().domWindow(), mouseEvent,
      clickCount, relatedTarget ? relatedTarget->toNode() : nullptr);
  return EventHandlingUtil::toWebInputEventResult(
      target->dispatchEvent(event));
}
// Updates the node-under-mouse bookkeeping (firing boundary events as a
// side effect) and then dispatches |eventType| at the resulting node.
WebInputEventResult MouseEventManager::setMousePositionAndDispatchMouseEvent(
    Node* targetNode,
    const AtomicString& eventType,
    const PlatformMouseEvent& platformMouseEvent) {
  // If the target node is a text node, dispatch on the parent node.
  if (targetNode && targetNode->isTextNode())
    targetNode = FlatTreeTraversal::parent(*targetNode);
  setNodeUnderMouse(targetNode, platformMouseEvent);
  return dispatchMouseEvent(m_nodeUnderMouse, eventType, platformMouseEvent,
                            nullptr);
}
// After a mouseup, fires a click (or auxclick for non-left buttons) when the
// press and release landed on nodes sharing a common flat-tree ancestor and
// no context menu is about to appear.
WebInputEventResult MouseEventManager::dispatchMouseClickIfNeeded(
    const MouseEventWithHitTestResults& mev) {
  // We only prevent click event when the click may cause contextmenu to popup.
  // However, we always send auxclick.
  bool contextMenuEvent = !RuntimeEnabledFeatures::auxclickEnabled() &&
                          mev.event().pointerProperties().button ==
                              WebPointerProperties::Button::Right;
#if OS(MACOSX)
  // FIXME: The Mac port achieves the same behavior by checking whether the
  // context menu is currently open in WebPage::mouseEvent(). Consider merging
  // the implementations.
  if (mev.event().pointerProperties().button ==
          WebPointerProperties::Button::Left &&
      mev.event().getModifiers() & PlatformEvent::CtrlKey)
    contextMenuEvent = true;
#endif
  WebInputEventResult clickEventResult = WebInputEventResult::NotHandled;
  const bool shouldDispatchClickEvent =
      m_clickCount > 0 && !contextMenuEvent && mev.innerNode() && m_clickNode &&
      mev.innerNode()->canParticipateInFlatTree() &&
      m_clickNode->canParticipateInFlatTree() &&
      !(m_frame->eventHandler().selectionController().hasExtendedSelection() &&
        isLinkSelection(mev));
  if (shouldDispatchClickEvent) {
    Node* clickTargetNode = nullptr;
    // Updates distribution because a 'mouseup' event listener can make the
    // tree dirty at dispatchMouseEvent() invocation above.
    // Unless distribution is updated, commonAncestor would hit ASSERT.
    if (m_clickNode == mev.innerNode()) {
      clickTargetNode = m_clickNode;
      clickTargetNode->updateDistribution();
    } else if (m_clickNode->document() == mev.innerNode()->document()) {
      // Press and release hit different nodes in the same document: the
      // click goes to their common ancestor.
      m_clickNode->updateDistribution();
      mev.innerNode()->updateDistribution();
      clickTargetNode = mev.innerNode()->commonAncestor(
          *m_clickNode, EventHandlingUtil::parentForClickEvent);
    }
    if (clickTargetNode) {
      clickEventResult = dispatchMouseEvent(
          clickTargetNode, !RuntimeEnabledFeatures::auxclickEnabled() ||
                                   (mev.event().pointerProperties().button ==
                                    WebPointerProperties::Button::Left)
                               ? EventTypeNames::click
                               : EventTypeNames::auxclick,
          mev.event(), nullptr);
    }
  }
  return clickEventResult;
}
// Timer callback: replays the last known pointer position as a synthetic
// mousemove so that hover/cursor state catches up after a scroll.
void MouseEventManager::fakeMouseMoveEventTimerFired(TimerBase* timer) {
  TRACE_EVENT0("input", "MouseEventManager::fakeMouseMoveEventTimerFired");
  DCHECK(timer == &m_fakeMouseMoveEventTimer);
  DCHECK(!m_mousePressed);
  if (m_isMousePositionUnknown)
    return;
  FrameView* view = m_frame->view();
  if (!view)
    return;
  if (!m_frame->page() || !m_frame->page()->focusController().isActive())
    return;
  // Don't dispatch a synthetic mouse move event if the mouse cursor is not
  // visible to the user.
  if (!m_frame->page()->isCursorVisible())
    return;
  PlatformMouseEvent fakeMouseMoveEvent(
      m_lastKnownMousePosition, m_lastKnownMouseGlobalPosition,
      WebPointerProperties::Button::NoButton, PlatformEvent::MouseMoved, 0,
      static_cast<PlatformEvent::Modifiers>(
          KeyboardEventManager::getCurrentModifierState()),
      PlatformMouseEvent::RealOrIndistinguishable, TimeTicks::Now(),
      WebPointerProperties::PointerType::Mouse);
  Vector<PlatformMouseEvent> coalescedEvents;
  m_frame->eventHandler().handleMouseMoveEvent(fakeMouseMoveEvent,
                                               coalescedEvents);
}
// Cancels any pending synthetic mousemove scheduled by
// dispatchFakeMouseMoveEventSoon().
void MouseEventManager::cancelFakeMouseMoveEvent() {
  m_fakeMouseMoveEventTimer.stop();
}
// Records |target| as the node under the mouse and notifies frames, layers,
// and DOM listeners about the transition (content-area enter/exit plus
// boundary events). The exit notifications run before the enter ones.
void MouseEventManager::setNodeUnderMouse(
    Node* target,
    const PlatformMouseEvent& platformMouseEvent) {
  Node* lastNodeUnderMouse = m_nodeUnderMouse;
  m_nodeUnderMouse = target;
  PaintLayer* layerForLastNode =
      EventHandlingUtil::layerForNode(lastNodeUnderMouse);
  PaintLayer* layerForNodeUnderMouse =
      EventHandlingUtil::layerForNode(m_nodeUnderMouse.get());
  Page* page = m_frame->page();
  if (lastNodeUnderMouse &&
      (!m_nodeUnderMouse ||
       m_nodeUnderMouse->document() != m_frame->document())) {
    // The mouse has moved between frames.
    if (LocalFrame* frame = lastNodeUnderMouse->document().frame()) {
      if (FrameView* frameView = frame->view())
        frameView->mouseExitedContentArea();
    }
  } else if (page && (layerForLastNode &&
                      (!layerForNodeUnderMouse ||
                       layerForNodeUnderMouse != layerForLastNode))) {
    // The mouse has moved between layers.
    if (ScrollableArea* scrollableAreaForLastNode =
            EventHandlingUtil::associatedScrollableArea(layerForLastNode))
      scrollableAreaForLastNode->mouseExitedContentArea();
  }
  if (m_nodeUnderMouse &&
      (!lastNodeUnderMouse ||
       lastNodeUnderMouse->document() != m_frame->document())) {
    // The mouse has moved between frames.
    if (LocalFrame* frame = m_nodeUnderMouse->document().frame()) {
      if (FrameView* frameView = frame->view())
        frameView->mouseEnteredContentArea();
    }
  } else if (page && (layerForNodeUnderMouse &&
                      (!layerForLastNode ||
                       layerForNodeUnderMouse != layerForLastNode))) {
    // The mouse has moved between layers.
    if (ScrollableArea* scrollableAreaForNodeUnderMouse =
            EventHandlingUtil::associatedScrollableArea(layerForNodeUnderMouse))
      scrollableAreaForNodeUnderMouse->mouseEnteredContentArea();
  }
  // A node from a different document must not be used as the boundary-event
  // "exited" target for this frame.
  if (lastNodeUnderMouse &&
      lastNodeUnderMouse->document() != m_frame->document()) {
    lastNodeUnderMouse = nullptr;
  }
  sendBoundaryEvents(lastNodeUnderMouse, m_nodeUnderMouse, platformMouseEvent);
}
// DOM mutation callback: drops the pending click target when its subtree is
// about to be removed (but not when the click node itself is the container
// whose children are going away).
void MouseEventManager::nodeChildrenWillBeRemoved(ContainerNode& container) {
  if (container == m_clickNode)
    return;
  if (!container.isShadowIncludingInclusiveAncestorOf(m_clickNode.get()))
    return;
  m_clickNode = nullptr;
}
// DOM mutation callback for single-node removal.
void MouseEventManager::nodeWillBeRemoved(Node& nodeToBeRemoved) {
  if (nodeToBeRemoved.isShadowIncludingInclusiveAncestorOf(m_clickNode.get())) {
    // We don't dispatch click events if the mousedown node is removed
    // before a mouseup event. It is compatible with IE and Firefox.
    m_clickNode = nullptr;
  }
}
// Returns the node currently tracked as being under the mouse (may be null).
Node* MouseEventManager::getNodeUnderMouse() {
  return m_nodeUnderMouse;
}
// Applies mouse-driven focus rules for a press: walks up from the node under
// the mouse to the nearest mouse-focusable element and moves focus to it (or
// blurs the current focus when none is found).
WebInputEventResult MouseEventManager::handleMouseFocus(
    const HitTestResult& hitTestResult,
    InputDeviceCapabilities* sourceCapabilities) {
  // If clicking on a frame scrollbar, do not mess up with content focus.
  if (hitTestResult.scrollbar() && !m_frame->contentLayoutItem().isNull()) {
    if (hitTestResult.scrollbar()->getScrollableArea() ==
        m_frame->contentLayoutItem().getScrollableArea())
      return WebInputEventResult::NotHandled;
  }
  // The layout needs to be up to date to determine if an element is focusable.
  m_frame->document()->updateStyleAndLayoutIgnorePendingStylesheets();
  Element* element = nullptr;
  if (m_nodeUnderMouse) {
    element = m_nodeUnderMouse->isElementNode()
                  ? toElement(m_nodeUnderMouse)
                  : m_nodeUnderMouse->parentOrShadowHostElement();
  }
  // Walk up to the nearest mouse-focusable ancestor; bail out early if an
  // already-focused focusable element is encountered on the way.
  for (; element; element = element->parentOrShadowHostElement()) {
    if (element->isFocusable() && element->isFocusedElementInDocument())
      return WebInputEventResult::NotHandled;
    if (element->isMouseFocusable())
      break;
  }
  DCHECK(!element || element->isMouseFocusable());
  // To fix <rdar://problem/4895428> Can't drag selected ToDo, we don't focus
  // a node on mouse down if it's selected and inside a focused node. It will
  // be focused if the user does a mouseup over it, however, because the
  // mouseup will set a selection inside it, which will call
  // FrameSelection::setFocusedNodeIfNeeded.
  if (element && m_frame->selection().isRange()) {
    // TODO(yosin) We should not create |Range| object for calling
    // |isNodeFullyContained()|.
    if (createRange(
            m_frame->selection().selection().toNormalizedEphemeralRange())
            ->isNodeFullyContained(*element) &&
        element->isDescendantOf(m_frame->document()->focusedElement()))
      return WebInputEventResult::NotHandled;
  }
  // Only change the focus when clicking scrollbars if it can be transferred
  // to a mouse focusable node.
  if (!element && hitTestResult.scrollbar())
    return WebInputEventResult::HandledSystem;
  if (Page* page = m_frame->page()) {
    // If focus shift is blocked, we eat the event. Note we should never
    // clear swallowEvent if the page already set it (e.g., by canceling
    // default behavior).
    if (element) {
      if (slideFocusOnShadowHostIfNecessary(*element))
        return WebInputEventResult::HandledSystem;
      if (!page->focusController().setFocusedElement(
              element, m_frame,
              FocusParams(SelectionBehaviorOnFocus::None, WebFocusTypeMouse,
                          sourceCapabilities)))
        return WebInputEventResult::HandledSystem;
    } else {
      // We call setFocusedElement even with !element in order to blur
      // current focus element when a link is clicked; this is expected by
      // some sites that rely on onChange handlers running from form
      // fields before the button click is processed.
      if (!page->focusController().setFocusedElement(
              nullptr, m_frame,
              FocusParams(SelectionBehaviorOnFocus::None, WebFocusTypeNone,
                          sourceCapabilities)))
        return WebInputEventResult::HandledSystem;
    }
  }
  return WebInputEventResult::NotHandled;
}
// For a shadow host that delegates focus, redirects focus to the first
// focusable element inside its shadow tree. Returns true when focus was
// handled here (either already inside the host or slid to an inner element).
bool MouseEventManager::slideFocusOnShadowHostIfNecessary(
    const Element& element) {
  if (element.authorShadowRoot() &&
      element.authorShadowRoot()->delegatesFocus()) {
    Document* doc = m_frame->document();
    if (element.isShadowIncludingInclusiveAncestorOf(doc->focusedElement())) {
      // If the inner element is already focused, do nothing.
      return true;
    }
    // If the host has a focusable inner element, focus it. Otherwise, the host
    // takes focus.
    Page* page = m_frame->page();
    DCHECK(page);
    Element* found =
        page->focusController().findFocusableElementInShadowHost(element);
    if (found && element.isShadowIncludingInclusiveAncestorOf(found)) {
      // Use WebFocusTypeForward instead of WebFocusTypeMouse here to mean the
      // focus has slid.
      found->focus(FocusParams(SelectionBehaviorOnFocus::Reset,
                               WebFocusTypeForward, nullptr));
      return true;
    }
  }
  return false;
}
// Records the bookkeeping for a fresh mouse press: position, timestamp, and
// the flags that later decide whether a drag or autoscroll may start.
void MouseEventManager::handleMousePressEventUpdateStates(
    const PlatformMouseEvent& mouseEvent) {
  cancelFakeMouseMoveEvent();
  m_mousePressed = true;
  m_capturesDragging = true;
  setLastKnownMousePosition(mouseEvent);
  m_mouseDownMayStartDrag = false;
  m_mouseDownMayStartAutoscroll = false;
  m_mouseDownTimestamp = mouseEvent.timestamp();
  if (FrameView* view = m_frame->view()) {
    m_mouseDownPos = view->rootFrameToContents(mouseEvent.position());
  } else {
    // Without a view we cannot compute a meaningful press position, so the
    // pending click is abandoned.
    invalidateClick();
  }
}
// True until the first real mouse event establishes a position.
bool MouseEventManager::isMousePositionUnknown() {
  return m_isMousePositionUnknown;
}
// Last pointer position, in root-frame coordinates.
IntPoint MouseEventManager::lastKnownMousePosition() {
  return m_lastKnownMousePosition;
}
// Caches both the frame-local and global pointer positions from |event|.
void MouseEventManager::setLastKnownMousePosition(
    const PlatformMouseEvent& event) {
  m_isMousePositionUnknown = false;
  m_lastKnownMousePosition = event.position();
  m_lastKnownMouseGlobalPosition = event.globalPosition();
}
// Schedules (or re-schedules) the one-shot timer that will synthesize a
// mousemove at the last known position once scrolling settles.
void MouseEventManager::dispatchFakeMouseMoveEventSoon() {
  if (m_mousePressed)
    return;
  if (m_isMousePositionUnknown)
    return;
  // Reschedule the timer, to prevent dispatching mouse move events
  // during a scroll. This avoids a potential source of scroll jank.
  m_fakeMouseMoveEventTimer.startOneShot(kFakeMouseMoveInterval,
                                         BLINK_FROM_HERE);
}
// Same as above, but only when the cached pointer position lies inside
// |quad| (content coordinates) — e.g. a region that just moved under it.
void MouseEventManager::dispatchFakeMouseMoveEventSoonInQuad(
    const FloatQuad& quad) {
  FrameView* view = m_frame->view();
  if (!view)
    return;
  if (!quad.containsPoint(view->rootFrameToContents(m_lastKnownMousePosition)))
    return;
  dispatchFakeMouseMoveEventSoon();
}
// Default handling for a mouse press after DOM dispatch: sets up drag and
// selection state, handles SVG pan mode, focuses the document view, and
// routes single/double/triple clicks to the SelectionController.
WebInputEventResult MouseEventManager::handleMousePressEvent(
    const MouseEventWithHitTestResults& event) {
  TRACE_EVENT0("blink", "MouseEventManager::handleMousePressEvent");
  // Reset drag state.
  dragState().m_dragSrc = nullptr;
  cancelFakeMouseMoveEvent();
  m_frame->document()->updateStyleAndLayoutIgnorePendingStylesheets();
  if (FrameView* frameView = m_frame->view()) {
    if (frameView->isPointInScrollbarCorner(event.event().position()))
      return WebInputEventResult::NotHandled;
  }
  bool singleClick = event.event().clickCount() <= 1;
  // A drag may only begin from a single click that is neither extending a
  // selection nor starting a link selection.
  m_mouseDownMayStartDrag =
      singleClick && !isLinkSelection(event) && !isExtendingSelection(event);
  m_frame->eventHandler().selectionController().handleMousePressEvent(event);
  m_mouseDown = event.event();
  if (m_frame->document()->isSVGDocument() &&
      m_frame->document()->accessSVGExtensions().zoomAndPanEnabled()) {
    // Shift+single-click in a pannable SVG document starts pan mode.
    if (event.event().shiftKey() && singleClick) {
      m_svgPan = true;
      m_frame->document()->accessSVGExtensions().startPan(
          m_frame->view()->rootFrameToContents(event.event().position()));
      return WebInputEventResult::HandledSystem;
    }
  }
  // We don't do this at the start of mouse down handling,
  // because we don't want to do it until we know we didn't hit a widget.
  if (singleClick)
    focusDocumentView();
  Node* innerNode = event.innerNode();
  m_mousePressNode = innerNode;
  m_frame->document()->setSequentialFocusNavigationStartingPoint(innerNode);
  m_dragStartPos = event.event().position();
  bool swallowEvent = false;
  m_mousePressed = true;
  if (event.event().clickCount() == 2) {
    swallowEvent = m_frame->eventHandler()
                       .selectionController()
                       .handleMousePressEventDoubleClick(event);
  } else if (event.event().clickCount() >= 3) {
    swallowEvent = m_frame->eventHandler()
                       .selectionController()
                       .handleMousePressEventTripleClick(event);
  } else {
    swallowEvent = m_frame->eventHandler()
                       .selectionController()
                       .handleMousePressEventSingleClick(event);
  }
  // Autoscroll may start either to extend a selection or to scroll a
  // programmatically-scrollable box under the press.
  m_mouseDownMayStartAutoscroll =
      m_frame->eventHandler().selectionController().mouseDownMayStartSelect() ||
      (m_mousePressNode && m_mousePressNode->layoutBox() &&
       m_mousePressNode->layoutBox()->canBeProgramaticallyScrolled());
  return swallowEvent ? WebInputEventResult::HandledSystem
                      : WebInputEventResult::NotHandled;
}
// Default handling for a mouse release: stops any in-progress autoscroll and
// lets the SelectionController finalize the selection.
WebInputEventResult MouseEventManager::handleMouseReleaseEvent(
    const MouseEventWithHitTestResults& event) {
  AutoscrollController* controller = m_scrollManager->autoscrollController();
  if (controller && controller->autoscrollInProgress())
    m_scrollManager->stopAutoscroll();
  return m_frame->eventHandler().selectionController().handleMouseReleaseEvent(
             event, m_dragStartPos)
             ? WebInputEventResult::HandledSystem
             : WebInputEventResult::NotHandled;
}
// Extends the selection toward the last known mouse position mid-drag.
void MouseEventManager::updateSelectionForMouseDrag() {
  m_frame->eventHandler().selectionController().updateSelectionForMouseDrag(
      m_mousePressNode, m_dragStartPos, m_lastKnownMousePosition);
}
// Attempts to start a drag from a touch gesture (long-press drag-and-drop)
// by synthesizing the equivalent mouse press/move events. Returns true when
// a drag actually began.
bool MouseEventManager::handleDragDropIfPossible(
    const GestureEventWithHitTestResults& targetedEvent) {
  if (m_frame->settings() && m_frame->settings()->getTouchDragDropEnabled() &&
      m_frame->view()) {
    const WebGestureEvent& gestureEvent = targetedEvent.event();
    unsigned modifiers = gestureEvent.modifiers();
    // TODO(mustaq): Suppressing long-tap MouseEvents could break
    // drag-drop. Will do separately because of the risk. crbug.com/606938.
    PlatformMouseEvent mouseDownEvent(
        gestureEvent, WebPointerProperties::Button::Left,
        PlatformEvent::MousePressed, 1,
        static_cast<PlatformEvent::Modifiers>(modifiers |
                                              PlatformEvent::LeftButtonDown),
        PlatformMouseEvent::FromTouch, TimeTicks::Now(),
        WebPointerProperties::PointerType::Mouse);
    m_mouseDown = mouseDownEvent;
    PlatformMouseEvent mouseDragEvent(
        gestureEvent, WebPointerProperties::Button::Left,
        PlatformEvent::MouseMoved, 1,
        static_cast<PlatformEvent::Modifiers>(modifiers |
                                              PlatformEvent::LeftButtonDown),
        PlatformMouseEvent::FromTouch, TimeTicks::Now(),
        WebPointerProperties::PointerType::Mouse);
    HitTestRequest request(HitTestRequest::ReadOnly);
    MouseEventWithHitTestResults mev =
        EventHandlingUtil::performMouseEventHitTest(m_frame, request,
                                                    mouseDragEvent);
    m_mouseDownMayStartDrag = true;
    dragState().m_dragSrc = nullptr;
    m_mouseDownPos =
        m_frame->view()->rootFrameToContents(mouseDragEvent.position());
    return handleDrag(mev, DragInitiator::Touch);
  }
  return false;
}
// Gives this manager's frame the focused-view status within its page.
void MouseEventManager::focusDocumentView() {
  Page* page = m_frame->page();
  if (!page)
    return;
  page->focusController().focusDocumentView(m_frame);
}
// Handles a mouse move while a button is (believed to be) held: tries to
// start a drag first, otherwise extends the selection and kicks off
// selection autoscroll where appropriate.
WebInputEventResult MouseEventManager::handleMouseDraggedEvent(
    const MouseEventWithHitTestResults& event) {
  TRACE_EVENT0("blink", "MouseEventManager::handleMouseDraggedEvent");
  // While resetting m_mousePressed here may seem out of place, it turns out
  // to be needed to handle some bugs^Wfeatures in Blink mouse event handling:
  // 1. Certain elements, such as <embed>, capture mouse events. They do not
  //    bubble back up. One way for a <embed> to start capturing mouse events
  //    is on a mouse press. The problem is the <embed> node only starts
  //    capturing mouse events *after* m_mousePressed for the containing frame
  //    has already been set to true. As a result, the frame's EventHandler
  //    never sees the mouse release event, which is supposed to reset
  //    m_mousePressed... so m_mousePressed ends up remaining true until the
  //    event handler finally gets another mouse released event. Oops.
  // 2. Dragging doesn't start until after a mouse press event, but a drag
  //    that ends as a result of a mouse release does not send a mouse release
  //    event. As a result, m_mousePressed also ends up remaining true until
  //    the next mouse release event seen by the EventHandler.
  if (event.event().pointerProperties().button !=
      WebPointerProperties::Button::Left)
    m_mousePressed = false;
  if (!m_mousePressed)
    return WebInputEventResult::NotHandled;
  if (handleDrag(event, DragInitiator::Mouse))
    return WebInputEventResult::HandledSystem;
  Node* targetNode = event.innerNode();
  if (!targetNode)
    return WebInputEventResult::NotHandled;
  LayoutObject* layoutObject = targetNode->layoutObject();
  if (!layoutObject) {
    // Allow dragging over a <select> listbox whose option has no layout
    // object of its own.
    Node* parent = FlatTreeTraversal::parent(*targetNode);
    if (!parent)
      return WebInputEventResult::NotHandled;
    layoutObject = parent->layoutObject();
    if (!layoutObject || !layoutObject->isListBox())
      return WebInputEventResult::NotHandled;
  }
  m_mouseDownMayStartDrag = false;
  m_frame->eventHandler().selectionController().handleMouseDraggedEvent(
      event, m_mouseDownPos, m_dragStartPos, m_mousePressNode.get(),
      m_lastKnownMousePosition);
  // The call into handleMouseDraggedEvent may have caused a re-layout,
  // so get the LayoutObject again.
  layoutObject = targetNode->layoutObject();
  if (layoutObject && m_mouseDownMayStartAutoscroll &&
      !m_scrollManager->middleClickAutoscrollInProgress() &&
      !m_frame->selection().selectedHTMLForClipboard().isEmpty()) {
    if (AutoscrollController* controller =
            m_scrollManager->autoscrollController()) {
      controller->startAutoscrollForSelection(layoutObject);
      m_mouseDownMayStartAutoscroll = false;
    }
  }
  return WebInputEventResult::HandledSystem;
}
// Core drag-start state machine: resolves the draggable source node at the
// press position, applies the movement threshold (mouse-initiated drags
// only), and attempts to start the drag. Returns true when no further
// default handling (e.g. selection) should occur.
bool MouseEventManager::handleDrag(const MouseEventWithHitTestResults& event,
                                   DragInitiator initiator) {
  DCHECK(event.event().type() == PlatformEvent::MouseMoved);
  // Callers must protect the reference to FrameView, since this function may
  // dispatch DOM events, causing page/FrameView to go away.
  DCHECK(m_frame);
  DCHECK(m_frame->view());
  if (!m_frame->page())
    return false;
  if (m_mouseDownMayStartDrag) {
    HitTestRequest request(HitTestRequest::ReadOnly);
    HitTestResult result(request, m_mouseDownPos);
    m_frame->contentLayoutItem().hitTest(result);
    Node* node = result.innerNode();
    if (node) {
      // A quick movement after the press resolves selection drags
      // immediately; a slower one lets the selection settle first.
      DragController::SelectionDragPolicy selectionDragPolicy =
          event.event().timestamp() - m_mouseDownTimestamp < kTextDragDelay
              ? DragController::DelayedSelectionDragResolution
              : DragController::ImmediateSelectionDragResolution;
      dragState().m_dragSrc = m_frame->page()->dragController().draggableNode(
          m_frame, node, m_mouseDownPos, selectionDragPolicy,
          dragState().m_dragType);
    } else {
      dragState().m_dragSrc = nullptr;
    }
    if (!dragState().m_dragSrc)
      m_mouseDownMayStartDrag = false;  // no element is draggable
  }
  if (!m_mouseDownMayStartDrag) {
    return initiator == DragInitiator::Mouse &&
           !m_frame->eventHandler()
                .selectionController()
                .mouseDownMayStartSelect() &&
           !m_mouseDownMayStartAutoscroll;
  }
  // We are starting a text/image/url drag, so the cursor should be an arrow
  // FIXME <rdar://7577595>: Custom cursors aren't supported during drag and
  // drop (default to pointer).
  m_frame->view()->setCursor(pointerCursor());
  if (initiator == DragInitiator::Mouse &&
      !dragThresholdExceeded(event.event().position())) {
    dragState().m_dragSrc = nullptr;
    return true;
  }
  // Once we're past the drag threshold, we don't want to treat this gesture as
  // a click.
  invalidateClick();
  if (!tryStartDrag(event)) {
    // Something failed to start the drag, clean up.
    clearDragDataTransfer();
    dragState().m_dragSrc = nullptr;
  }
  m_mouseDownMayStartDrag = false;
  // Whether or not the drag actually started, no more default handling (like
  // selection).
  return true;
}
// Creates a writable DataTransfer for a new drag-and-drop session.
DataTransfer* MouseEventManager::createDraggingDataTransfer() const {
  return DataTransfer::create(DataTransfer::DragAndDrop, DataTransferWritable,
                              DataObject::create());
}
// Populates the drag data, fires 'dragstart', and asks the DragController to
// begin the platform drag. Returns false when any step declines or fails.
bool MouseEventManager::tryStartDrag(
    const MouseEventWithHitTestResults& event) {
  // The DataTransfer would only be non-empty if we missed a dragEnd.
  // Clear it anyway, just to make sure it gets numbified.
  clearDragDataTransfer();
  dragState().m_dragDataTransfer = createDraggingDataTransfer();
  DragController& dragController = m_frame->page()->dragController();
  if (!dragController.populateDragDataTransfer(m_frame, dragState(),
                                               m_mouseDownPos))
    return false;
  // If dispatching dragstart brings about another mouse down -- one way
  // this will happen is if a DevTools user breaks within a dragstart
  // handler and then clicks on the suspended page -- the drag state is
  // reset. Hence, need to check if this particular drag operation can
  // continue even if dispatchEvent() indicates no (direct) cancellation.
  // Do that by checking if m_dragSrc is still set.
  m_mouseDownMayStartDrag =
      dispatchDragSrcEvent(EventTypeNames::dragstart, m_mouseDown) ==
          WebInputEventResult::NotHandled &&
      !m_frame->selection().isInPasswordField() && dragState().m_dragSrc;
  // Invalidate the clipboard here to prevent any further pasteboard writing,
  // for security. The drag image can still be changed as we drag, but not
  // the pasteboard data.
  dragState().m_dragDataTransfer->setAccessPolicy(DataTransferImageWritable);
  if (m_mouseDownMayStartDrag) {
    // Dispatching the event could cause Page to go away. Make sure it's still
    // valid before trying to use DragController.
    if (m_frame->page() &&
        dragController.startDrag(m_frame, dragState(), event.event(),
                                 m_mouseDownPos))
      return true;
    // Drag was canned at the last minute - we owe m_dragSrc a DRAGEND event
    dispatchDragSrcEvent(EventTypeNames::dragend, event.event());
  }
  return false;
}
// Returns if we should continue "default processing", i.e., whether
// eventhandler canceled.
WebInputEventResult MouseEventManager::dispatchDragSrcEvent(
    const AtomicString& eventType,
    const PlatformMouseEvent& event) {
  return dispatchDragEvent(eventType, dragState().m_dragSrc.get(), event,
                           dragState().m_dragDataTransfer.get());
}
// Builds and dispatches a DOM DragEvent of |eventType| at |dragTarget|.
// dragleave/dragend are not cancelable per the HTML drag-and-drop model.
WebInputEventResult MouseEventManager::dispatchDragEvent(
    const AtomicString& eventType,
    Node* dragTarget,
    const PlatformMouseEvent& event,
    DataTransfer* dataTransfer) {
  FrameView* view = m_frame->view();
  // FIXME: We might want to dispatch a dragleave even if the view is gone.
  if (!view)
    return WebInputEventResult::NotHandled;
  const bool cancelable = eventType != EventTypeNames::dragleave &&
                          eventType != EventTypeNames::dragend;
  DragEvent* me = DragEvent::create(
      eventType, true, cancelable, m_frame->document()->domWindow(), 0,
      event.globalPosition().x(), event.globalPosition().y(),
      event.position().x(), event.position().y(), event.movementDelta().x(),
      event.movementDelta().y(), event.getModifiers(), 0,
      MouseEvent::platformModifiersToButtons(event.getModifiers()), nullptr,
      event.timestamp(), dataTransfer, event.getSyntheticEventType());
  return EventHandlingUtil::toWebInputEventResult(
      dragTarget->dispatchEvent(me));
}
// Revokes access to the drag DataTransfer and drops its drag image.
void MouseEventManager::clearDragDataTransfer() {
  if (dragState().m_dragDataTransfer) {
    dragState().m_dragDataTransfer->clearDragImage();
    dragState().m_dragDataTransfer->setAccessPolicy(DataTransferNumb);
  }
}
// Finishes a drag session: notifies the source of the final drop operation
// via 'dragend' and tears down the drag state.
void MouseEventManager::dragSourceEndedAt(const PlatformMouseEvent& event,
                                          DragOperation operation) {
  if (dragState().m_dragSrc) {
    dragState().m_dragDataTransfer->setDestinationOperation(operation);
    // The return value is ignored because dragend is not cancelable.
    dispatchDragSrcEvent(EventTypeNames::dragend, event);
  }
  clearDragDataTransfer();
  dragState().m_dragSrc = nullptr;
  // In case the drag was ended due to an escape key press we need to ensure
  // that consecutive mousemove events don't reinitiate the drag and drop.
  m_mouseDownMayStartDrag = false;
}
// Process-wide drag state shared across frames (only one drag at a time).
DragState& MouseEventManager::dragState() {
  DEFINE_STATIC_LOCAL(DragState, state, (new DragState));
  return state;
}
// True when the pointer has moved far enough from the press position (in
// content coordinates) for the gesture to count as a drag.
bool MouseEventManager::dragThresholdExceeded(
    const IntPoint& dragLocationInRootFrame) const {
  FrameView* view = m_frame->view();
  if (!view)
    return false;
  IntPoint dragLocation = view->rootFrameToContents(dragLocationInRootFrame);
  IntSize delta = dragLocation - m_mouseDownPos;
  // WebKit's drag thresholds depend on the type of object being dragged. If we
  // want to revive that behavior, we can multiply the threshold constants with
  // a number based on dragState().m_dragType.
  return abs(delta.width()) >= kDragThresholdX ||
         abs(delta.height()) >= kDragThresholdY;
}
// Resets the press/drag heuristic flags without touching positions or nodes.
void MouseEventManager::clearDragHeuristicState() {
  // Used to prevent mouseMoveEvent from initiating a drag before
  // the mouse is pressed again.
  m_mousePressed = false;
  m_capturesDragging = false;
  m_mouseDownMayStartDrag = false;
  m_mouseDownMayStartAutoscroll = false;
}
// Feeds the current position to SVG pan mode; a release ends the pan.
// Returns true while pan mode consumed the event.
bool MouseEventManager::handleSvgPanIfNeeded(bool isReleaseEvent) {
  if (!m_svgPan)
    return false;
  m_svgPan = !isReleaseEvent;
  m_frame->document()->accessSVGExtensions().updatePan(
      m_frame->view()->rootFrameToContents(m_lastKnownMousePosition));
  return true;
}
// Abandons the pending click (e.g. once a drag threshold is crossed).
void MouseEventManager::invalidateClick() {
  m_clickCount = 0;
  m_clickNode = nullptr;
}
// --- Simple accessors for state shared with EventHandler and friends. ---
bool MouseEventManager::mousePressed() {
  return m_mousePressed;
}
void MouseEventManager::setMousePressed(bool mousePressed) {
  m_mousePressed = mousePressed;
}
bool MouseEventManager::capturesDragging() const {
  return m_capturesDragging;
}
void MouseEventManager::setCapturesDragging(bool capturesDragging) {
  m_capturesDragging = capturesDragging;
}
Node* MouseEventManager::mousePressNode() {
  return m_mousePressNode;
}
void MouseEventManager::setMousePressNode(Node* node) {
  m_mousePressNode = node;
}
// Also retargets the SynchronousMutationObserver context to the click
// node's document so removal notifications are delivered.
void MouseEventManager::setClickNode(Node* node) {
  setContext(node ? node->ownerDocument() : nullptr);
  m_clickNode = node;
}
void MouseEventManager::setClickCount(int clickCount) {
  m_clickCount = clickCount;
}
bool MouseEventManager::mouseDownMayStartDrag() {
  return m_mouseDownMayStartDrag;
}
} // namespace blink
| 12,817 |
504 | package org.dayatang.security.domain;
/**
* Created by yyang on 15/7/27.
*/
/**
 * Runtime exception for failures in the security domain layer.
 *
 * <p>NOTE(review): this class shadows {@link java.lang.SecurityException};
 * code in other packages must import it explicitly or it will silently pick
 * up the JDK type — confirm this naming is intentional.
 */
public class SecurityException extends RuntimeException {
    /** Creates an exception with no message or cause. */
    public SecurityException() {
    }
    /** @param message detail message describing the failure */
    public SecurityException(String message) {
        super(message);
    }
    /**
     * @param message detail message describing the failure
     * @param cause underlying exception that triggered this one
     */
    public SecurityException(String message, Throwable cause) {
        super(message, cause);
    }
    /** @param cause underlying exception that triggered this one */
    public SecurityException(Throwable cause) {
        super(cause);
    }
    /**
     * Full-control constructor mirroring
     * {@link RuntimeException#RuntimeException(String, Throwable, boolean, boolean)}.
     *
     * @param enableSuppression whether suppressed exceptions are recorded
     * @param writableStackTrace whether the stack trace should be writable
     */
    public SecurityException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
        super(message, cause, enableSuppression, writableStackTrace);
    }
}
| 206 |
335 | {
"word": "Trisaccharide",
"definitions": [
"Any of the class of sugars whose molecules contain three monosaccharide molecules."
],
"parts-of-speech": "Noun"
} | 72 |
454 | <gh_stars>100-1000
"""
Models for the django-analytical Django application.
This application currently does not use models.
"""
| 36 |
479 | <reponame>jeremyvdw/aurora<gh_stars>100-1000
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.aurora.scheduler.configuration.executor;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.util.Map;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.aurora.gen.apiConstants;
import org.apache.aurora.scheduler.configuration.executor.ExecutorSettingsLoader.ExecutorConfigException;
import org.apache.aurora.scheduler.mesos.TestExecutorSettings;
import org.apache.mesos.v1.Protos.Volume;
import org.apache.mesos.v1.Protos.Volume.Mode;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
 * Tests for {@code ExecutorSettingsLoader}: verifies that single- and
 * multi-executor JSON resources parse into the expected
 * name-to-{@link ExecutorConfig} maps, and that malformed or incomplete
 * input raises {@link ExecutorConfigException}.
 */
public class ExecutorSettingsLoaderTest {
  private static final String EXECUTOR_NAME = apiConstants.AURORA_EXECUTOR_NAME;
  // Expected parse result for test-single-executor.json: one executor with
  // a read-only observer volume and a read-write host mount.
  private static final Map<String, ExecutorConfig> THERMOS_CONFIG_SINGLE =
      ImmutableMap.<String, ExecutorConfig>builder().put(
          EXECUTOR_NAME,
          new ExecutorConfig(
              TestExecutorSettings.THERMOS_CONFIG.getExecutor(),
              ImmutableList.of(
                  Volume.newBuilder()
                      .setHostPath("/path/to/observer_root")
                      .setContainerPath("/path/to/observer_root")
                      .setMode(Mode.RO)
                      .build(),
                  Volume.newBuilder()
                      .setHostPath("/host")
                      .setContainerPath("/container")
                      .setMode(Mode.RW)
                      .build()),
              TestExecutorSettings.THERMOS_TASK_PREFIX)).build();
  // Expected parse result for test-multiple-executor.json: the single-executor
  // map above plus a second, independently configured executor entry.
  private static final Map<String, ExecutorConfig> THERMOS_CONFIG_MULTI =
      ImmutableMap.<String, ExecutorConfig>builder()
          .putAll(THERMOS_CONFIG_SINGLE)
          .put(EXECUTOR_NAME + "_2",
              new ExecutorConfig(
                  TestExecutorSettings.THERMOS_CONFIG
                      .getExecutor()
                      .toBuilder()
                      .setName(EXECUTOR_NAME + "_2").build(),
                  ImmutableList.of(
                      Volume.newBuilder()
                          .setHostPath("/path/to/observer_root2")
                          .setContainerPath("/path/to/observer_root2")
                          .setMode(Mode.RO)
                          .build(),
                      Volume.newBuilder()
                          .setHostPath("/host2")
                          .setContainerPath("/container2")
                          .setMode(Mode.RW)
                          .build()),
                  TestExecutorSettings.THERMOS_TASK_PREFIX + "2-")).build();
  // Reads a classpath resource next to this test class and parses it.
  private Map<String, ExecutorConfig> loadResource(String name) throws ExecutorConfigException {
    return ExecutorSettingsLoader.read(
        new InputStreamReader(getClass().getResourceAsStream(name), Charsets.UTF_8));
  }
  private void assertParsedResult(Map<String, ExecutorConfig> expected, String file)
      throws ExecutorConfigException {
    assertEquals(expected, loadResource(file));
  }
  @Test
  public void testParseSingle() throws Exception {
    assertParsedResult(THERMOS_CONFIG_SINGLE, "test-single-executor.json");
  }
  @Test
  public void testParseMultiple() throws Exception {
    assertParsedResult(THERMOS_CONFIG_MULTI, "test-multiple-executor.json");
  }
  @Test(expected = ExecutorConfigException.class)
  public void testInvalidJson() throws Exception {
    ExecutorSettingsLoader.read(new StringReader("this is not json"));
  }
  @Test(expected = ExecutorConfigException.class)
  public void testMissingField() throws Exception {
    loadResource("test-missing-field.json");
  }
}
| 1,822 |
2,659 | <filename>src/storage/binlog.h
/*
* Copyright 2021 4Paradigm
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
#include <string>
#include "log/log_reader.h"
#include "log/log_writer.h"
#include "storage/table.h"
typedef ::openmldb::base::Skiplist<uint32_t, uint64_t, ::openmldb::base::DefaultComparator> LogParts;
namespace openmldb {
namespace storage {
// Replays a table's write-ahead binlog so its in-memory state can be rebuilt
// after restart. Construction only records the segment index and directory;
// the actual replay happens in RecoverFromBinlog().
class Binlog {
 public:
    // log_part: index of binlog segments (raw pointer; ownership presumably
    //           stays with the caller — TODO confirm).
    // binlog_path: directory holding the binlog files.
    Binlog(LogParts* log_part, const std::string& binlog_path);
    ~Binlog() = default;

    // Applies binlog entries to `table`, starting after `offset`, and writes
    // the last offset seen into `latest_offset`. Returns true on success.
    bool RecoverFromBinlog(std::shared_ptr<Table> table, uint64_t offset,
                           uint64_t& latest_offset);  // NOLINT

 private:
    LogParts* log_part_;    // segment index passed at construction (not owned)
    std::string log_path_;  // binlog directory passed at construction
};
} // namespace storage
} // namespace openmldb
| 434 |
435 | #
# This file is part of pySMT.
#
# Copyright 2014 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Interpolator(object):
    """Abstract base class for Craig-interpolation engines.

    Instances can be used as context managers: leaving the ``with`` block
    destroys the interpolator and releases its resources.
    """

    def __init__(self):
        # Set to True once exit() has released the underlying resources.
        self._destroyed = False

    def binary_interpolant(self, a, b):
        """Return a binary interpolant for the pair (a, b).

        The interpolant exists when And(a, b) is unsatisfiable; when
        And(a, b) is satisfiable, None is returned instead.
        """
        raise NotImplementedError

    def sequence_interpolant(self, formulas):
        """Return a sequence interpolant for the conjunction of formulas.

        Returns None when the conjunction is satisfiable.
        """
        raise NotImplementedError

    def __enter__(self):
        """Enter a context (``with`` statement); yields the interpolator."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exit a context (``with`` statement).

        The default behaviour is to explicitly destroy the interpolator so
        that its associated resources are freed.
        """
        self.exit()

    def exit(self):
        """Destroy the solver and close associated resources (idempotent)."""
        if self._destroyed:
            return
        self._exit()
        self._destroyed = True

    def _exit(self):
        """Subclass hook that performs the actual resource release."""
        raise NotImplementedError
| 663 |
679 | <filename>main/sfx2/source/doc/graphhelp.hxx
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#include <com/sun/star/uno/Reference.hxx>
#include <com/sun/star/io/XStream.hpp>
#include <rtl/ustring.hxx>
class SvMemoryStream;
class GDIMetaFile;
class BitmapEx;
// Static-only helper collection for converting GDI metafiles between formats
// and for producing document thumbnail streams. Declarations only; see the
// corresponding implementation file for behavior details.
class GraphicHelper
{
    // Combines rOverlay onto rBmpEx at rOverlayRect into rReturn.
    // NOTE(review): exact compositing semantics not visible here — confirm
    // against the implementation.
    static sal_Bool mergeBitmaps_Impl( const BitmapEx& rBmpEx, const BitmapEx& rOverlay,
                                       const Rectangle& rOverlayRect, BitmapEx& rReturn );

    // Renders rMtf into a thumbnail bitmap no larger than nMaximumExtent,
    // optionally compositing pOverlay at pOverlayRect.
    static sal_Bool createThumb_Impl( const GDIMetaFile& rMtf,
                                      sal_uInt32 nMaximumExtent,
                                      BitmapEx& rBmpEx,
                                      const BitmapEx* pOverlay = NULL,
                                      const Rectangle* pOverlayRect = NULL );

public:
    // Serializes pGDIMeta into an in-memory stream in the requested format;
    // caller presumably takes ownership of the returned stream — TODO confirm.
    static SvMemoryStream* getFormatStrFromGDI_Impl( const GDIMetaFile* pGDIMeta, sal_uInt32 nFormat );

    // Converts pGDIMeta to a platform enhanced-metafile handle (void* to keep
    // platform types out of the header).
    static void* getEnhMetaFileFromGDI_Impl( const GDIMetaFile* pGDIMeta );

    // Converts pGDIMeta to a platform Windows-metafile handle, scaled to aMetaSize.
    static void* getWinMetaFileFromGDI_Impl( const GDIMetaFile* pGDIMeta, const Size& aMetaSize );

    // Whether the current platform supports native metafile handles.
    static sal_Bool supportsMetaFileHandle_Impl();

    // Writes a thumbnail for pMetaFile into xStream; bSigned selects the
    // signed-document variant.
    static sal_Bool getThumbnailFormatFromGDI_Impl(
        GDIMetaFile* pMetaFile,
        sal_Bool bSigned,
        const ::com::sun::star::uno::Reference< ::com::sun::star::io::XStream >& xStream );

    // Writes the signed-document thumbnail for aBitmap into xStream.
    static sal_Bool getSignedThumbnailFormatFromBitmap_Impl(
        const BitmapEx& aBitmap,
        const ::com::sun::star::uno::Reference< ::com::sun::star::io::XStream >& xStream );

    // Maps a document-factory short name (and template flag) to the resource
    // id of its replacement thumbnail.
    static sal_uInt16 getThumbnailReplacementIDByFactoryName_Impl( const ::rtl::OUString& aFactoryShortName,
                                                                   sal_Bool bIsTemplate );

    // Writes the replacement thumbnail identified by nResID into xStream.
    static sal_Bool getThumbnailReplacement_Impl(
        sal_Int32 nResID,
        const ::com::sun::star::uno::Reference< ::com::sun::star::io::XStream >& xStream );
};
| 838 |
1,299 | <filename>tika-parsers/tika-parsers-standard/tika-parsers-standard-modules/tika-parser-microsoft-module/src/main/java/org/apache/tika/parser/microsoft/onenote/fsshttpb/streamobj/DataElementPackage.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tika.parser.microsoft.onenote.fsshttpb.streamobj;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.tika.exception.TikaException;
public class DataElementPackage extends StreamObject {
    public List<DataElement> dataElements = new ArrayList<>();
    public byte reserved;

    /**
     * Initializes a new instance of the DataElementPackage class with the
     * corresponding stream-object header type.
     */
    public DataElementPackage() {
        super(StreamObjectTypeHeaderStart.DataElementPackage);
    }

    /**
     * De-serializes this element: reads the single reserved byte, then
     * consumes every DataElement that follows in the byte array.
     *
     * @param byteArray     A Byte array
     * @param currentIndex  Start position
     * @param lengthOfItems The length of the items
     */
    @Override
    protected void deserializeItemsFromByteArray(byte[] byteArray, AtomicInteger currentIndex,
                                                 int lengthOfItems)
            throws TikaException, IOException {
        // The package body itself carries exactly one byte (the reserved byte).
        if (lengthOfItems != 1) {
            throw new StreamObjectParseErrorException(currentIndex.get(), "DataElementPackage",
                    "Stream object over-parse error", null);
        }

        reserved = byteArray[currentIndex.getAndIncrement()];

        // Collect trailing DataElements until tryGetCurrent stops matching.
        List<DataElement> parsed = new ArrayList<>();
        AtomicReference<DataElement> holder = new AtomicReference<>();
        while (StreamObject.tryGetCurrent(byteArray, currentIndex, holder, DataElement.class)) {
            parsed.add(holder.get());
        }
        this.dataElements = parsed;
    }

    /**
     * Serializes this element: one reserved byte followed by every contained
     * DataElement.
     *
     * @param byteList A Byte list
     * @return The number of elements actually contained in the list
     */
    @Override
    protected int serializeItemsToByteList(List<Byte> byteList) throws TikaException, IOException {
        // Add the reserved byte (always written as zero).
        byteList.add((byte) 0);

        for (DataElement element : dataElements) {
            byteList.addAll(element.serializeToByteList());
        }
        return 1;
    }
}
| 1,088 |
406 | /* ************************************************************************
* Copyright 2013 Advanced Micro Devices, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ************************************************************************/
// action.transpose.nonsquare.cpp provides the entry points of "baking"
// nonsquare inplace transpose kernels called in plan.cpp.
// the actual kernel string generation is provided by generator.transpose.cpp
#include "stdafx.h"
#include <math.h>
#include <iomanip>
#include "generator.transpose.h"
#include "action.transpose.h"
#include "generator.stockham.h"
#include "action.h"
// Bakes the non-square in-place transpose kernel for `plan`: builds the
// generation signature, generates kernel source into the FFTRepo cache, and
// compiles it. Failures are reported through `err` (constructors cannot
// return a status); each step bails out early and leaves `err` set.
FFTGeneratedTransposeNonSquareAction::FFTGeneratedTransposeNonSquareAction(clfftPlanHandle plHandle, FFTPlan * plan, cl_command_queue queue, clfftStatus & err)
    : FFTTransposeNonSquareAction(plHandle, plan, queue, err)
{
    if (err != CLFFT_SUCCESS)
    {
        // FFTTransposeNonSquareAction() failed, exit
        fprintf(stderr, "FFTTransposeNonSquareAction() failed!\n");
        return;
    }

    // Initialize the FFTAction::FFTKernelGenKeyParams member
    err = this->initParams();
    if (err != CLFFT_SUCCESS)
    {
        fprintf(stderr, "FFTGeneratedTransposeNonSquareAction::initParams() failed!\n");
        return;
    }

    FFTRepo &fftRepo = FFTRepo::getInstance();

    // Generate the kernel source and register it (plus entry points) in the repo.
    err = this->generateKernel(fftRepo, queue);
    if (err != CLFFT_SUCCESS)
    {
        fprintf(stderr, "FFTGeneratedTransposeNonSquareAction::generateKernel failed\n");
        return;
    }

    err = compileKernels(queue, plHandle, plan);
    if (err != CLFFT_SUCCESS)
    {
        fprintf(stderr, "FFTGeneratedTransposeNonSquareAction::compileKernels failed\n");
        return;
    }

    err = CLFFT_SUCCESS;
}
bool FFTGeneratedTransposeNonSquareAction::buildForwardKernel()
{
clfftLayout inputLayout = this->getSignatureData()->fft_inputLayout;
clfftLayout outputLayout = this->getSignatureData()->fft_outputLayout;
bool r2c_transform = (inputLayout == CLFFT_REAL);
bool c2r_transform = (outputLayout == CLFFT_REAL);
bool real_transform = (r2c_transform || c2r_transform);
return (!real_transform) || r2c_transform;
}
bool FFTGeneratedTransposeNonSquareAction::buildBackwardKernel()
{
clfftLayout inputLayout = this->getSignatureData()->fft_inputLayout;
clfftLayout outputLayout = this->getSignatureData()->fft_outputLayout;
bool r2c_transform = (inputLayout == CLFFT_REAL);
bool c2r_transform = (outputLayout == CLFFT_REAL);
bool real_transform = (r2c_transform || c2r_transform);
return (!real_transform) || c2r_transform;
}
// These strings represent the names that are used as strKernel parameters,
// i.e. the argument identifiers emitted into the generated kernel source.
// The Real*/Imag* pairs presumably correspond to planar buffers and the
// Complex* names to interleaved buffers — confirm against the generator.
const std::string pmRealIn("pmRealIn");
const std::string pmImagIn("pmImagIn");
const std::string pmRealOut("pmRealOut");
const std::string pmImagOut("pmImagOut");
const std::string pmComplexIn("pmComplexIn");
const std::string pmComplexOut("pmComplexOut");
// Copies the relevant FFTPlan fields into this action's kernel-generation
// signature (FFTKernelGenKeyParams). The signature both keys the kernel
// cache and drives source generation, so the exact set of fields copied
// here determines when kernels can be reused.
clfftStatus FFTGeneratedTransposeNonSquareAction::initParams()
{
    this->signature.fft_precision = this->plan->precision;
    this->signature.fft_placeness = this->plan->placeness;
    this->signature.fft_inputLayout = this->plan->inputLayout;
    this->signature.fft_outputLayout = this->plan->outputLayout;
    this->signature.fft_3StepTwiddle = false;
    this->signature.nonSquareKernelType = this->plan->nonSquareKernelType;

    this->signature.fft_realSpecial = this->plan->realSpecial;

    this->signature.transOutHorizontal = this->plan->transOutHorizontal; // using the twiddle front flag to specify horizontal write
    // we do this so as to reuse flags in FFTKernelGenKeyParams
    // and to avoid making a new one

    ARG_CHECK(this->plan->inStride.size() == this->plan->outStride.size());

    if (CLFFT_INPLACE == this->signature.fft_placeness)
    {
        // If this is an in-place transform the
        // input and output layout
        // *MUST* be the same.
        //
        ARG_CHECK(this->signature.fft_inputLayout == this->signature.fft_outputLayout)

        // NOTE(review): unlike the square variant, the in-place stride-equality
        // check is disabled here — presumably intentional for non-square plans;
        // confirm before re-enabling.
        /* for (size_t u = this->plan->inStride.size(); u-- > 0; )
        {
            ARG_CHECK(this->plan->inStride[u] == this->plan->outStride[u]);
        }*/
    }

    // Copy per-dimension lengths and strides; the final stride slot carries
    // the batch distance (iDist/oDist).
    this->signature.fft_DataDim = this->plan->length.size() + 1;
    int i = 0;
    for (i = 0; i < (this->signature.fft_DataDim - 1); i++)
    {
        this->signature.fft_N[i] = this->plan->length[i];
        this->signature.fft_inStride[i] = this->plan->inStride[i];
        this->signature.fft_outStride[i] = this->plan->outStride[i];
    }
    this->signature.fft_inStride[i] = this->plan->iDist;
    this->signature.fft_outStride[i] = this->plan->oDist;

    // large1D != 0 marks this transpose as a stage of a 3-step large-1D FFT,
    // which requires twiddle multiplication inside the transpose.
    if (this->plan->large1D != 0) {
        ARG_CHECK(this->signature.fft_N[0] != 0)
        //ToDo:ENABLE ASSERT
        // ARG_CHECK((this->plan->large1D % this->signature.fft_N[0]) == 0)
        this->signature.fft_3StepTwiddle = true;
        //ToDo:ENABLE ASSERT
        // ARG_CHECK(this->plan->large1D == (this->signature.fft_N[1] * this->signature.fft_N[0]));
    }

    // Query the devices in this context for their local memory sizes
    // How we generate a kernel depends on the *minimum* LDS size for all devices.
    //
    const FFTEnvelope * pEnvelope = NULL;
    OPENCL_V(this->plan->GetEnvelope(&pEnvelope), _T("GetEnvelope failed"));
    BUG_CHECK(NULL != pEnvelope);

    // TODO: Since I am going with a 2D workgroup size now, I need a better check than this 1D use
    // Check: CL_DEVICE_MAX_WORK_GROUP_SIZE/CL_KERNEL_WORK_GROUP_SIZE
    // CL_DEVICE_MAX_WORK_ITEM_SIZES
    this->signature.fft_R = 1; // Dont think i'll use
    this->signature.fft_SIMD = pEnvelope->limit_WorkGroupSize; // Use devices maximum workgroup size

    //Set callback if specified
    if (this->plan->hasPreCallback)
    {
        this->signature.fft_hasPreCallback = true;
        this->signature.fft_preCallback = this->plan->preCallback;
    }
    if (this->plan->hasPostCallback)
    {
        this->signature.fft_hasPostCallback = true;
        this->signature.fft_postCallback = this->plan->postCallbackParam;
    }
    this->signature.limit_LocalMemSize = this->plan->envelope.limit_LocalMemSize;

    this->signature.transposeMiniBatchSize = this->plan->transposeMiniBatchSize;
    this->signature.nonSquareKernelOrder = this->plan->nonSquareKernelOrder;
    this->signature.transposeBatchSize = this->plan->batchsize;

    return CLFFT_SUCCESS;
}
// Fixed local work-group size used when enqueueing the generated kernels.
static const size_t lwSize = 256;
// Tile-shape factor passed to the transpose generator; the LDS checks in
// generateKernel() size shared memory as (16 * reShapeFactor)^2 elements.
static const size_t reShapeFactor = 2;
// OpenCL does not take unicode strings as input, so this routine returns only ASCII strings
// Feed this generator the FFTPlan, and it returns the generated program as a string
//
// Generates the kernel source for this non-square transpose plan (transpose
// kernel or general swap kernel depending on nonSquareKernelType), verifies
// any callback's LDS request fits alongside the kernel's own LDS needs, and
// registers the program text plus forward/backward entry-point names in the
// FFTRepo cache keyed by device and context.
clfftStatus FFTGeneratedTransposeNonSquareAction::generateKernel(FFTRepo& fftRepo, const cl_command_queue commQueueFFT)
{
    std::string programCode;
    std::string kernelFuncName;//applied to swap kernel for now

    if (this->signature.nonSquareKernelType == NON_SQUARE_TRANS_TRANSPOSE_BATCHED_LEADING)
    {
        //Requested local memory size by callback must not exceed the device LDS limits after factoring the LDS size required by transpose kernel
        if (this->signature.fft_hasPreCallback && this->signature.fft_preCallback.localMemSize > 0)
        {
            assert(!this->signature.fft_hasPostCallback);

            bool validLDSSize = false;
            size_t requestedCallbackLDS = 0;

            requestedCallbackLDS = this->signature.fft_preCallback.localMemSize;
            // Transpose kernel LDS footprint: 2 * elementSize * (16*reShapeFactor)^2 bytes.
            validLDSSize = ((2 * this->plan->ElementSize() * 16 * reShapeFactor * 16 * reShapeFactor) + requestedCallbackLDS) < this->plan->envelope.limit_LocalMemSize;

            if(!validLDSSize)
            {
                fprintf(stderr, "Requested local memory size not available\n");
                return CLFFT_INVALID_ARG_VALUE;
            }
        }
        OPENCL_V(clfft_transpose_generator::genTransposeKernelLeadingDimensionBatched(this->signature, programCode, lwSize, reShapeFactor), _T("genTransposeKernel() failed!"));
    }
    else if (this->signature.nonSquareKernelType == NON_SQUARE_TRANS_TRANSPOSE_BATCHED)
    {
        //pre call back check
        //Requested local memory size by callback must not exceed the device LDS limits after factoring the LDS size required by transpose kernel
        if (this->signature.fft_hasPreCallback && this->signature.fft_preCallback.localMemSize > 0)
        {
            assert(!this->signature.fft_hasPostCallback);

            bool validLDSSize = false;
            size_t requestedCallbackLDS = 0;

            requestedCallbackLDS = this->signature.fft_preCallback.localMemSize;
            validLDSSize = ((2 * this->plan->ElementSize() * 16 * reShapeFactor * 16 * reShapeFactor) + requestedCallbackLDS) < this->plan->envelope.limit_LocalMemSize;

            if (!validLDSSize)
            {
                fprintf(stderr, "Requested local memory size not available\n");
                return CLFFT_INVALID_ARG_VALUE;
            }
        }
        OPENCL_V(clfft_transpose_generator::genTransposeKernelBatched(this->signature, programCode, lwSize, reShapeFactor), _T("genTransposeKernel() failed!"));
    }
    else
    {
        //pre-callback is possible in swap kernel now
        if (this->signature.fft_hasPreCallback && this->signature.fft_preCallback.localMemSize > 0)
        {
            assert(!this->signature.fft_hasPostCallback);

            bool validLDSSize = false;
            size_t requestedCallbackLDS = 0;

            requestedCallbackLDS = this->signature.fft_preCallback.localMemSize;
            //LDS usage of swap lines is exactly 2 lines
            size_t lineSize = (this->signature.fft_N[0]) < (this->signature.fft_N[1]) ? this->signature.fft_N[0] : this->signature.fft_N[1];
            validLDSSize = ((2 * this->plan->ElementSize() * lineSize) + requestedCallbackLDS) < this->plan->envelope.limit_LocalMemSize;

            if (!validLDSSize)
            {
                fprintf(stderr, "Requested local memory size not available\n");
                return CLFFT_INVALID_ARG_VALUE;
            }
        }
        //here we should decide generate what kind of swap kernel. 1:2 and 1:3 probably need different swap kernels
        /*
        if (this->signature.fft_N[0] == 2 * this->signature.fft_N[1] || 2 * this->signature.fft_N[0] == this->signature.fft_N[1])
        {
            OPENCL_V(clfft_transpose_generator::genSwapKernel(this->signature, programCode, kernelFuncName, lwSize, reShapeFactor), _T("genSwapKernel() failed!"));
        }
        else
        {
            OPENCL_V(clfft_transpose_generator::genSwapKernelGeneral(this->signature, programCode, kernelFuncName, lwSize, reShapeFactor), _T("genSwapKernel() failed!"));
        }
        */
        //general swap kernel takes care of all ratio
        OPENCL_V(clfft_transpose_generator::genSwapKernelGeneral(this->signature, programCode, kernelFuncName, lwSize, reShapeFactor), _T("genSwapKernel() failed!"));
    }
    //std::cout << programCode << std::endl;

    // The repo caches program text per (device, context); look both up from the queue.
    cl_int status = CL_SUCCESS;
    cl_device_id Device = NULL;
    status = clGetCommandQueueInfo(commQueueFFT, CL_QUEUE_DEVICE, sizeof(cl_device_id), &Device, NULL);
    OPENCL_V(status, _T("clGetCommandQueueInfo failed"));

    cl_context QueueContext = NULL;
    status = clGetCommandQueueInfo(commQueueFFT, CL_QUEUE_CONTEXT, sizeof(cl_context), &QueueContext, NULL);
    OPENCL_V(status, _T("clGetCommandQueueInfo failed"));

    OPENCL_V(fftRepo.setProgramCode(Transpose_NONSQUARE, this->getSignatureData(), programCode, Device, QueueContext), _T("fftRepo.setclString() failed!"));

    // Register the forward/backward kernel entry-point names that match the
    // generated source; the twiddle variants carry _tw_fwd/_tw_back suffixes.
    if (this->signature.nonSquareKernelType == NON_SQUARE_TRANS_TRANSPOSE_BATCHED_LEADING)
    {
        // Note: See genFunctionPrototype( )
        if (this->signature.fft_3StepTwiddle)
        {
            OPENCL_V(fftRepo.setProgramEntryPoints(Transpose_NONSQUARE, this->getSignatureData(), "transpose_nonsquare_tw_fwd", "transpose_nonsquare_tw_back", Device, QueueContext), _T("fftRepo.setProgramEntryPoint() failed!"));
        }
        else
        {
            OPENCL_V(fftRepo.setProgramEntryPoints(Transpose_NONSQUARE, this->getSignatureData(), "transpose_nonsquare", "transpose_nonsquare", Device, QueueContext), _T("fftRepo.setProgramEntryPoint() failed!"));
        }
    }
    else if(this->signature.nonSquareKernelType == NON_SQUARE_TRANS_TRANSPOSE_BATCHED)
    {
        //for non square we do twiddling in swap kernel
        /*
        if (this->signature.fft_3StepTwiddle && (this->signature.transposeMiniBatchSize == 1))
        {
            OPENCL_V(fftRepo.setProgramEntryPoints(Transpose_NONSQUARE, this->getSignatureData(), "transpose_square_tw_fwd", "transpose_square_tw_back", Device, QueueContext), _T("fftRepo.setProgramEntryPoint() failed!"));
        }
        else
        {
            OPENCL_V(fftRepo.setProgramEntryPoints(Transpose_NONSQUARE, this->getSignatureData(), "transpose_square", "transpose_square", Device, QueueContext), _T("fftRepo.setProgramEntryPoint() failed!"));
        }
        */
        OPENCL_V(fftRepo.setProgramEntryPoints(Transpose_NONSQUARE, this->getSignatureData(), "transpose_square", "transpose_square", Device, QueueContext), _T("fftRepo.setProgramEntryPoint() failed!"));
    }
    else
    {
        if (this->signature.fft_3StepTwiddle)//if miniBatchSize > 1 twiddling is done in swap kernel
        {
            std::string kernelFwdFuncName = kernelFuncName + "_tw_fwd";
            std::string kernelBwdFuncName = kernelFuncName + "_tw_back";
            OPENCL_V(fftRepo.setProgramEntryPoints(Transpose_NONSQUARE, this->getSignatureData(), kernelFwdFuncName.c_str(), kernelBwdFuncName.c_str(), Device, QueueContext), _T("fftRepo.setProgramEntryPoint() failed!"));
        }
        else
            OPENCL_V(fftRepo.setProgramEntryPoints(Transpose_NONSQUARE, this->getSignatureData(), kernelFuncName.c_str(), kernelFuncName.c_str(), Device, QueueContext), _T("fftRepo.setProgramEntryPoint() failed!"));
    }
    return CLFFT_SUCCESS;
}
// Computes the global and local NDRange sizes used to enqueue the generated
// transpose or swap kernel, based on the kernel type chosen for this plan.
clfftStatus FFTGeneratedTransposeNonSquareAction::getWorkSizes(std::vector< size_t >& globalWS, std::vector< size_t >& localWS)
{
    size_t wg_slice;

    // The non-square transpose is decomposed around square tiles of the
    // smaller dimension; dim_ratio is how many such tiles span the bigger one.
    size_t smaller_dim = (this->signature.fft_N[0] < this->signature.fft_N[1]) ? this->signature.fft_N[0] : this->signature.fft_N[1];
    size_t bigger_dim = (this->signature.fft_N[0] >= this->signature.fft_N[1]) ? this->signature.fft_N[0] : this->signature.fft_N[1];
    size_t dim_ratio = bigger_dim / smaller_dim;

    size_t global_item_size;

    if (this->signature.nonSquareKernelType == NON_SQUARE_TRANS_TRANSPOSE_BATCHED_LEADING)
    {
        // Slices of width 16*reShapeFactor covering the smaller dim (rounded up).
        if (smaller_dim % (16 * reShapeFactor) == 0)
            wg_slice = smaller_dim / 16 / reShapeFactor;
        else
            wg_slice = (smaller_dim / (16 * reShapeFactor)) + 1;

        // Triangular tile count (wg_slice*(wg_slice+1)/2) with 16x16 items per tile.
        global_item_size = wg_slice*(wg_slice + 1) / 2 * 16 * 16 * this->plan->batchsize;

        for (int i = 2; i < this->signature.fft_DataDim - 1; i++)
        {
            global_item_size *= this->signature.fft_N[i];
        }

        /*Push the data required for the transpose kernels*/
        globalWS.clear();
        // NOTE(review): the inner else-if can never fire inside this branch
        // (the outer if already fixed the kernel type) — harmless but dead.
        if(this->signature.nonSquareKernelType == NON_SQUARE_TRANS_TRANSPOSE_BATCHED_LEADING)
            globalWS.push_back(global_item_size * dim_ratio);
        else if (this->signature.nonSquareKernelType == NON_SQUARE_TRANS_TRANSPOSE_BATCHED)
            globalWS.push_back(global_item_size);

        localWS.clear();
        localWS.push_back(lwSize);
    }
    else if (this->signature.nonSquareKernelType == NON_SQUARE_TRANS_TRANSPOSE_BATCHED)
    {
        if (smaller_dim % (16 * reShapeFactor) == 0)
            wg_slice = smaller_dim / 16 / reShapeFactor;
        else
            wg_slice = (smaller_dim / (16 * reShapeFactor)) + 1;

        global_item_size = wg_slice*(wg_slice + 1) / 2 * 16 * 16 * this->plan->batchsize;

        for (int i = 2; i < this->plan->length.size(); i++)
        {
            global_item_size *= this->plan->length[i];
        }

        /*Push the data required for the transpose kernels*/
        globalWS.clear();
        globalWS.push_back(global_item_size);

        localWS.clear();
        localWS.push_back(lwSize);
    }
    else
    {
        /*Now calculate the data for the swap kernels */
        // general swap kernel takes care of all ratio. need clean up here
        // NOTE(review): '&& 0' keeps this specialized 1:2-ratio path
        // permanently disabled (see the cleanup note above); the general
        // branch below handles all ratios.
        if(dim_ratio == 2 && 0){
            //1:2 ratio
            size_t input_elm_size_in_bytes;
            switch (this->signature.fft_precision)
            {
            case CLFFT_SINGLE:
            case CLFFT_SINGLE_FAST:
                input_elm_size_in_bytes = 4;
                break;
            case CLFFT_DOUBLE:
            case CLFFT_DOUBLE_FAST:
                input_elm_size_in_bytes = 8;
                break;
            default:
                return CLFFT_TRANSPOSED_NOTIMPLEMENTED;
            }

            switch (this->signature.fft_outputLayout)
            {
            case CLFFT_COMPLEX_INTERLEAVED:
            case CLFFT_COMPLEX_PLANAR:
                input_elm_size_in_bytes *= 2;
                break;
            case CLFFT_REAL:
                break;
            default:
                return CLFFT_TRANSPOSED_NOTIMPLEMENTED;
            }
            size_t max_elements_loaded = AVAIL_MEM_SIZE / input_elm_size_in_bytes;
            size_t num_elements_loaded;
            size_t local_work_size_swap, num_grps_pro_row;

            if ((max_elements_loaded >> 1) > smaller_dim)
            {
                local_work_size_swap = (smaller_dim < 256) ? smaller_dim : 256;
                num_elements_loaded = smaller_dim;
                num_grps_pro_row = 1;
            }
            else
            {
                num_grps_pro_row = (smaller_dim << 1) / max_elements_loaded;
                num_elements_loaded = max_elements_loaded >> 1;
                local_work_size_swap = (num_elements_loaded < 256) ? num_elements_loaded : 256;
            }

            size_t num_reduced_row;
            size_t num_reduced_col;

            if (this->signature.fft_N[1] == smaller_dim)
            {
                num_reduced_row = smaller_dim;
                num_reduced_col = 2;
            }
            else
            {
                num_reduced_row = 2;
                num_reduced_col = smaller_dim;
            }

            size_t *cycle_map = new size_t[num_reduced_row * num_reduced_col * 2];
            /* The memory required by cycle_map cannot exceed 2 times row*col by design*/
            clfft_transpose_generator::get_cycles(cycle_map, num_reduced_row, num_reduced_col);

            global_item_size = local_work_size_swap * num_grps_pro_row * cycle_map[0] * this->plan->batchsize;

            for (int i = 2; i < this->signature.fft_DataDim - 1; i++)
            {
                global_item_size *= this->signature.fft_N[i];
            }
            delete[] cycle_map;

            globalWS.push_back(global_item_size);
            localWS.push_back(local_work_size_swap);
        }
        else
        {
            //if (dim_ratio == 2 || dim_ratio == 3 || dim_ratio == 5 || dim_ratio == 10)
            if (dim_ratio % 2 == 0 || dim_ratio % 3 == 0 || dim_ratio % 5 == 0 || dim_ratio % 10 == 0)
            {
                size_t local_work_size_swap = 256;
                std::vector<std::vector<size_t> > permutationTable;
                clfft_transpose_generator::permutation_calculation(dim_ratio, smaller_dim, permutationTable);
                size_t global_item_size;
                // NOTE(review): both arms of this if/else compute the same
                // expression — possibly a leftover from an abandoned special
                // case for large1D plans.
                if(this->plan->large1D && (dim_ratio > 1))
                    global_item_size = (permutationTable.size() + 2) * local_work_size_swap * this->plan->batchsize;
                else
                    global_item_size = (permutationTable.size() + 2) * local_work_size_swap * this->plan->batchsize;
                //for (int i = 2; i < this->plan->length.size(); i++)
                //    global_item_size *= this->plan->length[i];

                // Cap per-workgroup LDS by repeatedly dividing out factors of
                // 2/3/5; any other prime factor above the cap is unsupported.
                size_t LDS_per_WG = smaller_dim;
                while (LDS_per_WG > 1024)//avoiding using too much lds memory. the biggest LDS memory we will allocate would be 1024*sizeof(float2/double2)*2
                {
                    if (LDS_per_WG % 2 == 0)
                    {
                        LDS_per_WG /= 2;
                        continue;
                    }
                    if (LDS_per_WG % 3 == 0)
                    {
                        LDS_per_WG /= 3;
                        continue;
                    }
                    if (LDS_per_WG % 5 == 0)
                    {
                        LDS_per_WG /= 5;
                        continue;
                    }
                    return CLFFT_NOTIMPLEMENTED;
                }
                size_t WG_per_line = smaller_dim / LDS_per_WG;
                global_item_size *= WG_per_line;
                globalWS.push_back(global_item_size);
                localWS.push_back(local_work_size_swap);
            }
            else
                return CLFFT_NOTIMPLEMENTED;
        }
    }
    return CLFFT_SUCCESS;
}
// Bakes the square in-place transpose kernel for `plan`: builds the
// generation signature, generates kernel source into the FFTRepo cache, and
// compiles it. Failures are reported through `err` (constructors cannot
// return a status); each step bails out early and leaves `err` set.
FFTGeneratedTransposeSquareAction::FFTGeneratedTransposeSquareAction(clfftPlanHandle plHandle, FFTPlan * plan, cl_command_queue queue, clfftStatus & err)
    : FFTTransposeSquareAction(plHandle, plan, queue, err)
{
    if (err != CLFFT_SUCCESS)
    {
        // FFTTransposeSquareAction() failed, exit
        fprintf(stderr, "FFTTransposeSquareAction() failed!\n");
        return;
    }

    // Initialize the FFTAction::FFTKernelGenKeyParams member
    err = this->initParams();
    if (err != CLFFT_SUCCESS)
    {
        fprintf(stderr, "FFTGeneratedTransposeSquareAction::initParams() failed!\n");
        return;
    }

    FFTRepo &fftRepo = FFTRepo::getInstance();

    // Generate the kernel source and register it in the repo.
    err = this->generateKernel(fftRepo, queue);
    if (err != CLFFT_SUCCESS)
    {
        fprintf(stderr, "FFTGeneratedTransposeSquareAction::generateKernel failed\n");
        return;
    }

    err = compileKernels(queue, plHandle, plan);
    if (err != CLFFT_SUCCESS)
    {
        fprintf(stderr, "FFTGeneratedTransposeSquareAction::compileKernels failed\n");
        return;
    }

    err = CLFFT_SUCCESS;
}
bool FFTGeneratedTransposeSquareAction::buildForwardKernel()
{
clfftLayout inputLayout = this->getSignatureData()->fft_inputLayout;
clfftLayout outputLayout = this->getSignatureData()->fft_outputLayout;
bool r2c_transform = (inputLayout == CLFFT_REAL);
bool c2r_transform = (outputLayout == CLFFT_REAL);
bool real_transform = (r2c_transform || c2r_transform);
return (!real_transform) || r2c_transform;
}
bool FFTGeneratedTransposeSquareAction::buildBackwardKernel()
{
clfftLayout inputLayout = this->getSignatureData()->fft_inputLayout;
clfftLayout outputLayout = this->getSignatureData()->fft_outputLayout;
bool r2c_transform = (inputLayout == CLFFT_REAL);
bool c2r_transform = (outputLayout == CLFFT_REAL);
bool real_transform = (r2c_transform || c2r_transform);
return (!real_transform) || c2r_transform;
}
/* square action */
// Copies the relevant FFTPlan fields into this action's kernel-generation
// signature (FFTKernelGenKeyParams). Unlike the non-square variant, the
// in-place stride-equality and large1D divisibility checks are active here.
clfftStatus FFTGeneratedTransposeSquareAction::initParams()
{
    this->signature.fft_precision = this->plan->precision;
    this->signature.fft_placeness = this->plan->placeness;
    this->signature.fft_inputLayout = this->plan->inputLayout;
    this->signature.fft_outputLayout = this->plan->outputLayout;
    this->signature.fft_3StepTwiddle = false;

    this->signature.fft_realSpecial = this->plan->realSpecial;

    this->signature.transOutHorizontal = this->plan->transOutHorizontal; // using the twiddle front flag to specify horizontal write
    // we do this so as to reuse flags in FFTKernelGenKeyParams
    // and to avoid making a new one

    ARG_CHECK(this->plan->inStride.size() == this->plan->outStride.size());

    if (CLFFT_INPLACE == this->signature.fft_placeness)
    {
        // If this is an in-place transform the
        // input and output layout, dimensions and strides
        // *MUST* be the same.
        //
        ARG_CHECK(this->signature.fft_inputLayout == this->signature.fft_outputLayout)

        for (size_t u = this->plan->inStride.size(); u-- > 0; )
        {
            ARG_CHECK(this->plan->inStride[u] == this->plan->outStride[u]);
        }
    }

    // Copy per-dimension lengths and strides; the final stride slot carries
    // the batch distance (iDist/oDist).
    this->signature.fft_DataDim = this->plan->length.size() + 1;
    int i = 0;
    for (i = 0; i < (this->signature.fft_DataDim - 1); i++)
    {
        this->signature.fft_N[i] = this->plan->length[i];
        this->signature.fft_inStride[i] = this->plan->inStride[i];
        this->signature.fft_outStride[i] = this->plan->outStride[i];
    }
    this->signature.fft_inStride[i] = this->plan->iDist;
    this->signature.fft_outStride[i] = this->plan->oDist;

    // large1D != 0 marks this transpose as a stage of a 3-step large-1D FFT,
    // which requires twiddle multiplication inside the transpose.
    if (this->plan->large1D != 0) {
        ARG_CHECK(this->signature.fft_N[0] != 0)
        ARG_CHECK((this->plan->large1D % this->signature.fft_N[0]) == 0)
        this->signature.fft_3StepTwiddle = true;
        ARG_CHECK(this->plan->large1D == (this->signature.fft_N[1] * this->signature.fft_N[0]));
    }

    // Query the devices in this context for their local memory sizes
    // How we generate a kernel depends on the *minimum* LDS size for all devices.
    //
    const FFTEnvelope * pEnvelope = NULL;
    OPENCL_V(this->plan->GetEnvelope(&pEnvelope), _T("GetEnvelope failed"));
    BUG_CHECK(NULL != pEnvelope);

    // TODO: Since I am going with a 2D workgroup size now, I need a better check than this 1D use
    // Check: CL_DEVICE_MAX_WORK_GROUP_SIZE/CL_KERNEL_WORK_GROUP_SIZE
    // CL_DEVICE_MAX_WORK_ITEM_SIZES
    this->signature.fft_R = 1; // Dont think i'll use
    this->signature.fft_SIMD = pEnvelope->limit_WorkGroupSize; // Use devices maximum workgroup size

    //Set callback if specified
    if (this->plan->hasPreCallback)
    {
        this->signature.fft_hasPreCallback = true;
        this->signature.fft_preCallback = this->plan->preCallback;
    }
    if (this->plan->hasPostCallback)
    {
        this->signature.fft_hasPostCallback = true;
        this->signature.fft_postCallback = this->plan->postCallbackParam;
    }
    this->signature.limit_LocalMemSize = this->plan->envelope.limit_LocalMemSize;

    this->signature.transposeMiniBatchSize = this->plan->transposeMiniBatchSize;
    this->signature.transposeBatchSize = this->plan->batchsize;

    return CLFFT_SUCCESS;
}
// OpenCL does not take unicode strings as input, so this routine returns only ASCII strings
// Feed this generator the FFTPlan, and it returns the generated program as a string
// Builds the OpenCL source for the square-transpose kernel and registers both
// the program text and its entry-point names in fftRepo, keyed by this
// action's signature and the queue's device/context.
clfftStatus FFTGeneratedTransposeSquareAction::generateKernel(FFTRepo& fftRepo, const cl_command_queue commQueueFFT)
{
    // Requested local memory size by callback must not exceed the device LDS
    // limits after factoring the LDS size required by main FFT kernel
    if ((this->signature.fft_hasPreCallback && this->signature.fft_preCallback.localMemSize > 0) ||
        (this->signature.fft_hasPostCallback && this->signature.fft_postCallback.localMemSize > 0))
    {
        // A plan may carry LDS for a pre- OR a post-callback here, not both.
        assert(!(this->signature.fft_hasPreCallback && this->signature.fft_hasPostCallback));
        bool validLDSSize = false;
        size_t requestedCallbackLDS = 0;
        if (this->signature.fft_hasPreCallback && this->signature.fft_preCallback.localMemSize > 0)
            requestedCallbackLDS = this->signature.fft_preCallback.localMemSize;
        else if (this->signature.fft_hasPostCallback && this->signature.fft_postCallback.localMemSize > 0)
            requestedCallbackLDS = this->signature.fft_postCallback.localMemSize;
        // LDS needed by the transpose itself (two square tiles of
        // 16*reShapeFactor elements per side) plus the callback's request.
        validLDSSize = ((2 * this->plan->ElementSize() * 16 * reShapeFactor * 16 * reShapeFactor) + requestedCallbackLDS) < this->plan->envelope.limit_LocalMemSize;
        if (!validLDSSize)
        {
            fprintf(stderr, "Requested local memory size not available\n");
            return CLFFT_INVALID_ARG_VALUE;
        }
    }
    std::string programCode;
    OPENCL_V(clfft_transpose_generator::genTransposeKernelBatched(this->signature, programCode, lwSize, reShapeFactor), _T("GenerateTransposeKernel() failed!"));
    // The repo keys compiled programs by device + context, so fetch both from
    // the command queue that will run the kernel.
    cl_int status = CL_SUCCESS;
    cl_device_id Device = NULL;
    status = clGetCommandQueueInfo(commQueueFFT, CL_QUEUE_DEVICE, sizeof(cl_device_id), &Device, NULL);
    OPENCL_V(status, _T("clGetCommandQueueInfo failed"));
    cl_context QueueContext = NULL;
    status = clGetCommandQueueInfo(commQueueFFT, CL_QUEUE_CONTEXT, sizeof(cl_context), &QueueContext, NULL);
    OPENCL_V(status, _T("clGetCommandQueueInfo failed"));
    OPENCL_V(fftRepo.setProgramCode(Transpose_SQUARE, this->getSignatureData(), programCode, Device, QueueContext), _T("fftRepo.setclString() failed!"));
    // Note: See genFunctionPrototype( )
    if (this->signature.fft_3StepTwiddle)
    {
        // Three-step (large 1D) plans fold twiddle factors into the transpose,
        // so forward and backward transforms need distinct entry points.
        OPENCL_V(fftRepo.setProgramEntryPoints(Transpose_SQUARE, this->getSignatureData(), "transpose_square_tw_fwd", "transpose_square_tw_back", Device, QueueContext), _T("fftRepo.setProgramEntryPoint() failed!"));
    }
    else
    {
        OPENCL_V(fftRepo.setProgramEntryPoints(Transpose_SQUARE, this->getSignatureData(), "transpose_square", "transpose_square", Device, QueueContext), _T("fftRepo.setProgramEntryPoint() failed!"));
    }
    return CLFFT_SUCCESS;
}
// Computes the NDRange for the square-transpose kernel.  The matrix is covered
// by a wg_slice x wg_slice grid of tiles, of which only the lower triangle
// (including the diagonal) is launched; each tile uses a 16x16 work group,
// replicated per batch and per extra dimension.
clfftStatus FFTGeneratedTransposeSquareAction::getWorkSizes(std::vector< size_t >& globalWS, std::vector< size_t >& localWS)
{
    const size_t tileWidth = 16 * reShapeFactor;

    // Tiles per row/column: round up when N is not a multiple of the tile width.
    size_t wg_slice = this->signature.fft_N[0] / tileWidth;
    if (this->signature.fft_N[0] % tileWidth != 0)
    {
        ++wg_slice;
    }

    // Triangular number of tiles * 256 work items per tile * batch count.
    size_t global_item_size = wg_slice * (wg_slice + 1) / 2 * 16 * 16 * this->plan->batchsize;

    // Fold in any dimensions beyond the first two.
    for (int dim = 2; dim < this->signature.fft_DataDim - 1; ++dim)
    {
        global_item_size *= this->signature.fft_N[dim];
    }

    globalWS.clear();
    globalWS.push_back(global_item_size);

    localWS.clear();
    localWS.push_back(lwSize);

    return CLFFT_SUCCESS;
}
| 11,352 |
577 | <reponame>santiagojenny6675/PlainReader
//
// PRArticle.h
// PlainReader
//
// Created by guo on 10/24/14.
// Copyright (c) 2014 guojiubo. All rights reserved.
//
#import <Foundation/Foundation.h>
typedef NS_ENUM(NSUInteger, PRArticleCacheStatus) {
PRArticleCacheStatusNone,
PRArticleCacheStatusCached,
PRArticleCacheStatusFailed
};
@interface PRArticle : NSObject
@property (nonatomic, copy) NSNumber *articleId;
@property (nonatomic, copy) NSString *title;
@property (nonatomic, copy) NSString *source;
@property (nonatomic, copy) NSString *summary;
@property (nonatomic, copy) NSString *pubTime;
@property (nonatomic, copy) NSString *content;
@property (nonatomic, copy) NSNumber *commentCount;
@property (nonatomic, copy) NSString *sn;
@property (nonatomic, copy) NSString *thumb;
@property (nonatomic, copy) NSNumber *read;
@property (nonatomic, copy) NSNumber *cacheStatus;
@property (nonatomic, readonly, copy) NSString *formattedTime;
@property (nonatomic, readonly) NSArray *imgSrcs;
- (BOOL)isStarred;
- (NSString *)toHTML;
@end
| 363 |
403 | <reponame>igormskru/code<gh_stars>100-1000
package com.camunda.demo.oop;
import java.io.StringReader;
import java.util.logging.Logger;
import javax.inject.Inject;
import javax.inject.Named;
import javax.xml.bind.JAXB;
import net.webservicex.GlobalWeather;
@Named("weatherAdapter")
public class WeatherAdapter {

  private static Logger log = Logger.getLogger(WeatherAdapter.class.getName());

  // NOTE(review): @Named without a value resolves by field name ("order") —
  // confirm a producer exposes the Order bean under that name.
  @Inject
  @Named
  private Order order;

  /**
   * Fetches the current weather for the order's city from the GlobalWeather
   * SOAP service (country hard-coded to "Germany"), unmarshals the XML payload
   * and stores the temperature back on the order.
   */
  public void getWeather() {
    String weatherResponse = new GlobalWeather().getGlobalWeatherSoap().getWeather(order.getCity(), "Germany");

    log.info("### retrieved weather response :" + weatherResponse);

    // The service returns the weather document as an XML string; parse it
    // into the typed response before reading the temperature.
    WeatherResponse weather = JAXB.unmarshal(new StringReader(weatherResponse), WeatherResponse.class);
    order.setWeatherInfo(weather.getTemperature());
  }
}
| 289 |
1,032 | <gh_stars>1000+
[{"fonts":[{"asset":"fonts/MaterialIcons-Regular.ttf"}],"family":"MaterialIcons"},{"family":"Roboto","fonts":[{"weight":100,"asset":"fonts/Roboto/Roboto-Thin.ttf"},{"weight":300,"asset":"fonts/Roboto/Roboto-Light.ttf"},{"weight":400,"asset":"fonts/Roboto/Roboto-Regular.ttf"},{"weight":500,"asset":"fonts/Roboto/Roboto-Medium.ttf"},{"weight":700,"asset":"fonts/Roboto/Roboto-Bold.ttf"},{"weight":900,"asset":"fonts/Roboto/Roboto-Black.ttf"}]},{"family":"packages/cupertino_icons/CupertinoIcons","fonts":[{"asset":"packages/cupertino_icons/assets/CupertinoIcons.ttf"}]}] | 215 |
2,502 | <gh_stars>1000+
package com.shuzijun.leetcode.plugin.utils;
import com.github.promeg.pinyinhelper.Pinyin;
import com.shuzijun.leetcode.plugin.model.CodeTypeEnum;
import com.shuzijun.leetcode.plugin.model.Config;
import com.shuzijun.leetcode.plugin.model.Constant;
import com.shuzijun.leetcode.plugin.setting.PersistentConfig;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateFormatUtils;
import java.util.Date;
/**
 * Provide static tool class, StringUtils document reference <a href="https://commons.apache.org/proper/commons-lang/apidocs/org/apache/commons/lang3/StringUtils.html">doc</a>
 *
 * <p>All helpers are static; the class additionally inherits the full Apache
 * Commons Lang {@code StringUtils} API through its superclass.</p>
 *
 * @author shuzijun
 */
public class VelocityTool extends StringUtils {

    // English words substituted for a leading digit, since generated
    // identifiers must not begin with a number (e.g. "2sum" -> "TwoSum").
    private static String[] numsAry = {"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"};

    /**
     * Fill 0 on the left to reach a fixed length
     *
     * @param s      the string to pad
     * @param length the desired minimum length
     * @return s left-padded with '0' characters, or s unchanged if already long enough
     */
    public static String leftPadZeros(String s, int length) {
        if (s.length() >= length) {
            return s;
        }
        int nPads = length - s.length();
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < nPads; ++i) {
            sb.append('0');
        }
        sb.append(s);
        return sb.toString();
    }

    /**
     * Convert characters to camel case (initial letter capitalized)
     *
     * <p>Spaces are normalized to underscores; '_' and '-' act as word
     * separators and are removed while the following character is upper-cased.
     * A leading digit is replaced by its English word.</p>
     *
     * @param underscoreName raw name, possibly containing spaces/underscores/hyphens
     * @return the UpperCamelCase form (blank input is returned unchanged)
     */
    public static String camelCaseName(String underscoreName) {
        if (isNotBlank(underscoreName)) {
            underscoreName = underscoreName.replace(" ", "_");
            StringBuilder result = new StringBuilder();
            if (isNumeric(underscoreName.substring(0, 1))) {
                // Replace a leading digit with its word; the inserted "-" keeps
                // the next original character treated as a new word.
                underscoreName = numsAry[Integer.valueOf(underscoreName.substring(0, 1))] + "-" + underscoreName.substring(1);
            }
            boolean first = true;   // capitalize the very first emitted character
            boolean flag = false;   // true immediately after a separator
            for (int i = 0; i < underscoreName.length(); i++) {
                char ch = underscoreName.charAt(i);
                if ('_' == ch || '-' == ch) {
                    flag = true;
                } else {
                    if (flag || first) {
                        result.append(Character.toUpperCase(ch));
                        flag = false;
                        first = false;
                    } else {
                        result.append(ch);
                    }
                }
            }
            return result.toString();
        } else {
            return underscoreName;
        }
    }

    /**
     * Convert characters to camel case (lower case at the beginning)
     *
     * <p>Identical to {@link #camelCaseName(String)} except that the first
     * character is copied as-is instead of being upper-cased.</p>
     *
     * @param underscoreName raw name
     * @return the lowerCamelCase form (blank input is returned unchanged)
     */
    public static String smallCamelCaseName(String underscoreName) {
        if (isNotBlank(underscoreName)) {
            underscoreName = underscoreName.replace(" ", "_");
            StringBuilder result = new StringBuilder();
            if (isNumeric(underscoreName.substring(0, 1))) {
                underscoreName = numsAry[Integer.valueOf(underscoreName.substring(0, 1))] + "-" + underscoreName.substring(1);
            }
            boolean first = false;  // unlike camelCaseName: do not capitalize the first char
            boolean flag = false;   // true immediately after a separator
            for (int i = 0; i < underscoreName.length(); i++) {
                char ch = underscoreName.charAt(i);
                if ('_' == ch || '-' == ch) {
                    flag = true;
                } else {
                    if (flag || first) {
                        result.append(Character.toUpperCase(ch));
                        flag = false;
                        first = false;
                    } else {
                        result.append(ch);
                    }
                }
            }
            return result.toString();
        } else {
            return underscoreName;
        }
    }

    /**
     * Convert characters to snake style
     *
     * <p>Separators ('_'/'-') are normalized to '_' and each upper-case letter
     * is lowered with a '_' prefix.</p>
     * <p>NOTE(review): a leading upper-case letter produces a leading
     * underscore ("Foo" -> "_foo") — confirm this is intended.</p>
     *
     * @param underscoreName raw name
     * @return the snake_case form (blank input is returned unchanged)
     */
    public static String snakeCaseName(String underscoreName) {
        if (isNotBlank(underscoreName)) {
            underscoreName = underscoreName.replace(" ", "_");
            StringBuilder result = new StringBuilder();
            for (int i = 0, j = underscoreName.length(); i < j; i++) {
                char ch = underscoreName.charAt(i);
                if ('_' == ch || '-' == ch) {
                    // Collapse the separator with the next character (lowered);
                    // a trailing separator is dropped entirely.
                    if (i + 1 < j) {
                        result.append("_").append(Character.toLowerCase(underscoreName.charAt(i + 1)));
                        i = i + 1;
                    }
                } else if (Character.isUpperCase(ch)) {
                    result.append("_").append(Character.toLowerCase(underscoreName.charAt(i)));
                } else {
                    result.append(ch);
                }
            }
            return result.toString();
        } else {
            return underscoreName;
        }
    }

    /**
     * Get the current time formatted as "yyyy-MM-dd HH:mm:ss"
     *
     * @return the formatted current time
     */
    public static String date() {
        return date("yyyy-MM-dd HH:mm:ss");
    }

    /**
     * Get the current time
     *
     * @param format a date pattern accepted by {@code DateFormatUtils}
     * @return the current time rendered with the given pattern
     */
    public static String date(String format) {
        return DateFormatUtils.format(new Date(), format);
    }

    /**
     * Get start tag
     *
     * <p>The marker is prefixed with the line-comment token of the currently
     * configured code type so it can be embedded in generated source files.</p>
     *
     * @return the "submit region begins" marker comment
     */
    public static String SUBMIT_REGION_BEGIN() {
        Config config = PersistentConfig.getInstance().getInitConfig();
        String codeType = config.getCodeType();
        CodeTypeEnum codeTypeEnum = CodeTypeEnum.getCodeTypeEnum(codeType);
        return codeTypeEnum.getComment() + Constant.SUBMIT_REGION_BEGIN;
    }

    /**
     * Get end tag
     *
     * @return the "submit region ends" marker comment for the configured code type
     */
    public static String SUBMIT_REGION_END() {
        Config config = PersistentConfig.getInstance().getInitConfig();
        String codeType = config.getCodeType();
        CodeTypeEnum codeTypeEnum = CodeTypeEnum.getCodeTypeEnum(codeType);
        return codeTypeEnum.getComment() + Constant.SUBMIT_REGION_END;
    }

    /**
     * Convert Chinese characters to Pinyin and remove all spaces
     *
     * <p>Each Chinese character becomes its capitalized pinyin syllable; all
     * other characters are copied through untouched.</p>
     *
     * @param str input text, possibly mixing Chinese and other characters
     * @return the romanized string ("" when the input is blank)
     */
    public static String toPinyinAndTrims(String str) {
        if (isBlank(str)) {
            return "";
        }
        str = replace(str, " ", "");
        StringBuilder sb = new StringBuilder();
        for (char c : str.toCharArray()) {
            if (Pinyin.isChinese(c)) {
                String pinYin = Pinyin.toPinyin(c);
                sb.append(camelCaseName(pinYin.toLowerCase()));
            } else {
                sb.append(c);
            }
        }
        return sb.toString();
    }
}
| 3,633 |
5,169 | {
"name": "KNMExternalViewController",
"version": "1.0.0",
"summary": "A view controller that loads its contents from another storyboard.",
"description": "Use this view controller when you would like to split your project into multiple storyboards.\nPut the view controller into your main storyboard and specify the name of the storyboard and\nscene you would like to jump to when loading the view controller. KNMExternalViewController\nthen automatically loads the contents when shown on the screen.\n",
"homepage": "https://github.com/konoma/ios-external-view-controller",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"platforms": {
"ios": "7.0"
},
"source": {
"git": "https://github.com/konoma/ios-external-view-controller.git",
"tag": "1.0.0"
},
"source_files": "Sources/**/*.{h,m}",
"public_header_files": "Sources/**/*.h",
"ios": {
"frameworks": [
"Foundation",
"UIKit"
]
},
"requires_arc": true
}
| 360 |
4,812 | <filename>lib/CodeGen/MachineRegionInfo.cpp
//===- lib/Codegen/MachineRegionInfo.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineRegionInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/RegionInfoImpl.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Pass.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "machine-region-info"
using namespace llvm;
STATISTIC(numMachineRegions, "The # of machine regions");
STATISTIC(numMachineSimpleRegions, "The # of simple machine regions");
namespace llvm {
template class RegionBase<RegionTraits<MachineFunction>>;
template class RegionNodeBase<RegionTraits<MachineFunction>>;
template class RegionInfoBase<RegionTraits<MachineFunction>>;
} // end namespace llvm
//===----------------------------------------------------------------------===//
// MachineRegion implementation
MachineRegion::MachineRegion(MachineBasicBlock *Entry, MachineBasicBlock *Exit,
MachineRegionInfo* RI,
MachineDominatorTree *DT, MachineRegion *Parent) :
RegionBase<RegionTraits<MachineFunction>>(Entry, Exit, RI, DT, Parent) {}
MachineRegion::~MachineRegion() = default;
//===----------------------------------------------------------------------===//
// MachineRegionInfo implementation
MachineRegionInfo::MachineRegionInfo() = default;
MachineRegionInfo::~MachineRegionInfo() = default;
// Bumps the region statistics counters; called once per region as the tree is
// constructed.
void MachineRegionInfo::updateStatistics(MachineRegion *R) {
  ++numMachineRegions;

  // TODO: Slow. Should only be enabled if -stats is used.
  if (R->isSimple())
    ++numMachineSimpleRegions;
}
// Rebuilds the region tree for function F from scratch.  The dominator-based
// analyses are owned by the caller; this class only stores the pointers.
void MachineRegionInfo::recalculate(MachineFunction &F,
                                    MachineDominatorTree *DT_,
                                    MachinePostDominatorTree *PDT_,
                                    MachineDominanceFrontier *DF_) {
  DT = DT_;
  PDT = PDT_;
  DF = DF_;

  // The top-level region spans the whole function: entry block, no exit block.
  MachineBasicBlock *Entry = GraphTraits<MachineFunction*>::getEntryNode(&F);
  TopLevelRegion = new MachineRegion(Entry, nullptr, this, DT, nullptr);
  updateStatistics(TopLevelRegion);
  calculate(F);
}
//===----------------------------------------------------------------------===//
// MachineRegionInfoPass implementation
//
MachineRegionInfoPass::MachineRegionInfoPass() : MachineFunctionPass(ID) {
initializeMachineRegionInfoPassPass(*PassRegistry::getPassRegistry());
}
MachineRegionInfoPass::~MachineRegionInfoPass() = default;
// Pass entry point: discards any previous result and recomputes region info
// from the dominator / post-dominator / dominance-frontier analyses.
bool MachineRegionInfoPass::runOnMachineFunction(MachineFunction &F) {
  releaseMemory();

  auto DT = &getAnalysis<MachineDominatorTree>();
  auto PDT = &getAnalysis<MachinePostDominatorTree>();
  auto DF = &getAnalysis<MachineDominanceFrontier>();

  RI.recalculate(F, DT, PDT, DF);

  LLVM_DEBUG(RI.dump());

  // Analysis pass: the machine IR is never modified.
  return false;
}
void MachineRegionInfoPass::releaseMemory() {
RI.releaseMemory();
}
void MachineRegionInfoPass::verifyAnalysis() const {
// Only do verification when user wants to, otherwise this expensive check
// will be invoked by PMDataManager::verifyPreservedAnalysis when
// a regionpass (marked PreservedAll) finish.
if (MachineRegionInfo::VerifyRegionInfo)
RI.verifyAnalysis();
}
void MachineRegionInfoPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<MachineDominatorTree>();
AU.addRequired<MachinePostDominatorTree>();
AU.addRequired<MachineDominanceFrontier>();
MachineFunctionPass::getAnalysisUsage(AU);
}
void MachineRegionInfoPass::print(raw_ostream &OS, const Module *) const {
RI.print(OS);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineRegionInfoPass::dump() const {
RI.dump();
}
#endif
char MachineRegionInfoPass::ID = 0;
char &MachineRegionInfoPassID = MachineRegionInfoPass::ID;
INITIALIZE_PASS_BEGIN(MachineRegionInfoPass, DEBUG_TYPE,
"Detect single entry single exit regions", true, true)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineDominanceFrontier)
INITIALIZE_PASS_END(MachineRegionInfoPass, DEBUG_TYPE,
"Detect single entry single exit regions", true, true)
// Create methods available outside of this file, to use them
// "include/llvm/LinkAllPasses.h". Otherwise the pass would be deleted by
// the link time optimization.
namespace llvm {
FunctionPass *createMachineRegionInfoPass() {
return new MachineRegionInfoPass();
}
} // end namespace llvm
| 1,554 |
9,959 | <gh_stars>1000+
@lombok.EqualsAndHashCode(onlyExplicitlyIncluded = true)
class EqualsAndHashCodeExplicitInclude {
int x;
}
| 48 |
902 | <reponame>lxmwq/apidoc
# SPDX-License-Identifier: MIT
const x = "//\""
### line1
const y = "/**\""
const c = 'c'
"""
line1
line2
line3
"""
| 77 |
390 | {
"header": [
{ "title": "Getting started", "url": "/#getting-started", "blank": false },
{ "title": "Documentation", "url": "/documentation.html", "blank": false },
{ "title": "Examples", "url": "/examples.html", "blank": false }
],
"footer": [
{ "title": "Documentation", "url": "/documentation.html" },
{ "title": "Project on Github", "url": "https://github.com/max-barry/google-drive-cms", "blank": true },
{ "title": "More by <EMAIL>", "url": "https://mxbry.com", "blank": true }
]
}
| 226 |
14,668 | // Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_AUDIO_ALSA_ALSA_UTIL_H_
#define MEDIA_AUDIO_ALSA_ALSA_UTIL_H_
#include <alsa/asoundlib.h>
#include <string>
#include "media/base/media_export.h"
namespace media {
class AlsaWrapper;
}
namespace alsa_util {
// When opening ALSA devices, |period_us| is the size of a packet and
// |buffer_us| is the size of the ring buffer, which consists of multiple
// packets. In capture devices, the latency relies more on |period_us|, and thus
// one may require more details upon the value implicitly set by ALSA.
MEDIA_EXPORT snd_pcm_t* OpenCaptureDevice(media::AlsaWrapper* wrapper,
const char* device_name,
int channels,
int sample_rate,
snd_pcm_format_t pcm_format,
int buffer_us,
int period_us);
snd_pcm_t* OpenPlaybackDevice(media::AlsaWrapper* wrapper,
const char* device_name,
int channels,
int sample_rate,
snd_pcm_format_t pcm_format,
int buffer_us);
int CloseDevice(media::AlsaWrapper* wrapper, snd_pcm_t* handle);
snd_mixer_t* OpenMixer(media::AlsaWrapper* wrapper,
const std::string& device_name);
void CloseMixer(media::AlsaWrapper* wrapper,
snd_mixer_t* mixer,
const std::string& device_name);
snd_mixer_elem_t* LoadCaptureMixerElement(media::AlsaWrapper* wrapper,
snd_mixer_t* mixer);
} // namespace alsa_util
#endif // MEDIA_AUDIO_ALSA_ALSA_UTIL_H_
| 1,005 |
2,842 | /* Copyright (c) 2018-2020, Arm Limited and Contributors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 the "License";
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <json.hpp>
namespace vkb
{
namespace graphing
{
// A single node in a graph description.  All node state (title, style, any
// extra data) is collected into the JSON `attributes` object, which the
// graphing front end consumes.
class Node
{
  public:
	Node(){};

	// id uniquely identifies the node; title is its display label; style is an
	// optional style name; data carries arbitrary additional attributes.
	Node(size_t id, const char *title, const char *style = NULL, const nlohmann::json &data = {});

	// Reinterprets an opaque handle as an integer so it can serve as a stable,
	// JSON-serializable node identifier.
	template <typename T>
	static std::uintptr_t handle_to_uintptr_t(T handle)
	{
		return reinterpret_cast<std::uintptr_t>(reinterpret_cast<void *>(handle));
	}

	nlohmann::json attributes;
};
} // namespace graphing
} // namespace vkb | 352 |
305 | <gh_stars>100-1000
from __future__ import division
from builtins import range
from past.utils import old_div
def bokeh_plot(u, t, legends, u_e, t_e, I, w, t_range, filename):
    """
    Make plots for u vs t using the Bokeh library.
    u and t are lists (several experiments can be compared).
    legends contain legend strings for the various u,t pairs.
    Each plot has u vs t and the exact solution u_e vs t_e.

    I is the amplitude (used to fix the y-range to +/-1.2*I), w the angular
    frequency, t_range the initial x-range shown, and filename the output
    HTML file written by Bokeh.
    """
    import numpy as np
    import bokeh.plotting as plt
    plt.output_file(filename, mode='cdn', title='Comparison')
    # Assume that all t arrays have the same range
    t_fine = np.linspace(0, t[0][-1], 1001)  # fine mesh for u_e
    tools = 'pan,wheel_zoom,box_zoom,reset,'\
            'save,box_select,lasso_select'
    u_range = [-1.2*I, 1.2*I]
    font_size = '8pt'
    p = []  # list of all individual plots
    # The first figure establishes the x/y ranges shared by all other figures.
    p_ = plt.figure(
        width=300, plot_height=250, title=legends[0],
        x_axis_label='t', y_axis_label='u',
        x_range=t_range, y_range=u_range, tools=tools,
        title_text_font_size=font_size)
    p_.xaxis.axis_label_text_font_size=font_size
    p_.yaxis.axis_label_text_font_size=font_size
    p_.line(t[0], u[0], line_color='blue')
    p_.line(t_e, u_e, line_color='red', line_dash='4 4')
    p.append(p_)
    # Remaining figures link their ranges to the first so pan/zoom is synced.
    for i in range(1, len(t)):
        p_ = plt.figure(
            width=300, plot_height=250, title=legends[i],
            x_axis_label='t', y_axis_label='u',
            x_range=p[0].x_range, y_range=p[0].y_range, tools=tools,
            title_text_font_size=font_size)
        p_.xaxis.axis_label_text_font_size=font_size
        p_.yaxis.axis_label_text_font_size=font_size
        p_.line(t[i], u[i], line_color='blue')
        p_.line(t_e, u_e, line_color='red', line_dash='4 4')
        p.append(p_)
    # Arrange in grid with 3 plots per row
    grid = [[]]
    for i, p_ in enumerate(p):
        grid[-1].append(p_)
        if (i+1) % 3 == 0:
            # New row
            grid.append([])
    plot = plt.gridplot(grid, toolbar_location='left')
    plt.save(plot)
    plt.show(plot)
def demo_bokeh():
    """Plot numerical and exact solution of sinousoidal shape.

    Compares the exact oscillator solution against the closed-form solution
    of the discrete scheme for several time-step resolutions, and renders
    the comparison grid to tmp.html via bokeh_plot.
    """
    import numpy as np

    def u_exact(t):
        # Exact solution: u(t) = I*cos(w*t).
        return I*np.cos(w*t)

    def u_numerical(t):
        # The discrete scheme reproduces a cosine with a slightly shifted
        # frequency w_tilde.  Note: dt is read from the enclosing scope at
        # call time (it is set inside the loop below before each call).
        w_tilde = (old_div(2.,dt))*np.arcsin(w*dt/2.)
        return I*np.cos(w_tilde*t)

    I = 1              # Amplitude
    w = 1.0            # Angular frequency
    P = 2*np.pi/w      # Period of signal
    num_steps_per_period = [5, 10, 20, 40, 80]
    num_periods = 40
    T = num_periods*P  # End time of signal
    t_e = np.linspace(0, T, 1001)  # Fine mesh for u_exact
    u_e = u_exact(t_e)
    u = []
    t = []
    legends = []
    # Make a series of numerical solutions with different time steps
    for n in num_steps_per_period:
        dt = old_div(P,n)  # Time step length
        t_ = np.linspace(0, T, num_periods*n+1)
        u_ = u_numerical(t_)
        u.append(u_)
        t.append(t_)
        legends.append('# time steps per period: %d' % n)
    bokeh_plot(u, t, legends, u_e, t_e,
               I=1, w=w, t_range=[0, 4*P],
               filename='tmp.html')
demo_bokeh()
| 1,543 |
2,605 | import contextlib
import json
import os
from colorama import Fore
import fasttext
from plugin import plugin
FILE_PATH = os.path.abspath(os.path.dirname(__file__))
@plugin("detect lang")
def detect_language(jarvis, s):
    """
    Detects the language of an input string

    Prompts until non-empty text is entered, runs the bundled fastText
    language-identification model on it, and reports the result through
    generate_response.
    """
    while s == "":
        s = jarvis.input("Enter text \n")
    model = open_model()
    output = model.predict(s)
    generate_response(jarvis, output)
def generate_response(jarvis, output):
    """
    Generates response based on the probability of a predicted language

    output is a fastText predict() result: a (labels, scores) pair where
    labels look like "__label__en".
    """
    score = output[1][0]
    label = output[0][0]
    code_to_lang = open_languages()
    # fastText labels have the form "__label__<code>"; keep only the code.
    lang_code = label.split('_')[-1]
    language = code_to_lang[lang_code]
    # Confidence tiers: >0.5 confident, >0.25 tentative, otherwise unknown.
    if score > 0.5:
        jarvis.say('The language of the text is ' + language, Fore.GREEN)
    elif score > 0.25:
        jarvis.say("I'm not sure, but the language might be " + language, Fore.YELLOW)
    else:
        jarvis.say("I couldn't identify the language", Fore.BLUE)
def open_model():
    """
    Opens a language detector model and disables warnings

    Loads the compressed fastText lid.176.ftz model shipped with the plugin.
    fastText's load-time messages are routed to stdout (via eprint = print)
    and stdout is redirected to devnull, silencing them.
    """
    model_path = os.path.join(FILE_PATH, "../data/lid.176.ftz")
    fasttext.FastText.eprint = print
    with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
        model = fasttext.load_model(model_path)
    return model
def open_languages():
    """Load the language-code -> language-name mapping bundled with the plugin."""
    mapping_path = os.path.join(FILE_PATH, "../data/code_to_lang.json")
    with open(mapping_path, 'r') as handle:
        return json.load(handle)
| 615 |
539 | <filename>booknlp/common/sequence_layered_reader.py
import os
def read_tagset(filename):
    """Load a tag -> integer-id mapping from a two-column TSV file."""
    with open(filename) as handle:
        rows = (line.rstrip().split("\t") for line in handle)
        return {cols[0]: int(cols[1]) for cols in rows}
def read_filenames(filename):
    """Read a TSV manifest of (input path, output path) pairs.

    Lines without exactly two tab-separated fields are skipped.  Returns two
    parallel lists: input paths and output paths.
    """
    pairs = []
    with open(filename) as handle:
        for raw in handle:
            fields = raw.rstrip().split("\t")
            if len(fields) == 2:
                pairs.append(tuple(fields))
    inpaths = [src for src, _ in pairs]
    outpaths = [dst for _, dst in pairs]
    return inpaths, outpaths
def read_booknlp(path, model):
sentences=[]
original_sentences=[]
sentence=[]
sentence.append(["[CLS]"])
orig_sentence=[]
orig_sentence.append(("[CLS]", -1))
length=0
max_sentence_length=500
with open(path, encoding="utf-8") as file:
header=file.readline().split("\t")
s_idx=header.index("sentenceID")
t_idx=header.index("tokenId")
w_idx=header.index("originalWord")
lastSentence=None
for line in file:
cols=line.rstrip().split("\t")
s_id=cols[s_idx]
t_id=cols[t_idx]
w=cols[w_idx]
toks=model.tokenizer.tokenize(w)
if s_id != lastSentence or length + len(toks) > max_sentence_length:
if len(sentence) > 0:
sentence.append(["[SEP]"])
sentences.append(sentence)
orig_sentence.append((-1, "[SEP]"))
original_sentences.append(orig_sentence)
sentence=[]
sentence.append(["[CLS]"])
orig_sentence=[]
orig_sentence.append((-1, "[CLS]"))
length=0
length+=len(toks)
sentence.append([w])
orig_sentence.append((t_id, w))
lastSentence=s_id
if len(sentence) > 1:
sentence.append(["[SEP]"])
sentences.append(sentence)
orig_sentence.append((-1, "[SEP]"))
original_sentences.append(orig_sentence)
return sentences, original_sentences
def read_annotations(filename, tagset, labeled):
    """ Read tsv data and return sentences and [word, tag, sentenceID, filename] list

    Each non-empty line is a word plus five tab-separated layer labels; a
    blank line ends a sentence.  Every sentence is wrapped in [CLS]/[SEP]
    padding rows whose label slots carry the ignore index -100.  When
    labeled is False, all label slots are written as 0 (inference mode).
    """
    with open(filename, encoding="utf-8") as f:
        sentence = []
        # NOTE(review): "-100 -1" evaluates to -101 (a comma looks missing),
        # and these padding rows have 9 fields while the data rows below have
        # 8 — verify the intended padding layout against the consumer before
        # changing it.
        sentence.append(["[CLS]", -100, -100, -100, -100, -100, -100 -1, -1, None])
        sentences = []
        sentenceID=0
        for line in f:
            if len(line) > 0:
                if line == '\n':
                    # Blank line: close the current sentence, start a new one.
                    sentenceID+=1
                    sentence.append(["[SEP]", -100, -100, -100, -100, -100, -100 -1, -1, None])
                    if len(sentence) > 2:
                        sentences.append(sentence)
                    sentence = []
                    sentence.append(["[CLS]", -100, -100, -100, -100, -100, -100 -1, -1, None])
                else:
                    data=[]
                    split_line = line.rstrip().split('\t')
                    data.append(split_line[0])
                    # Five stacked label layers, mapped through the tagset only
                    # when gold labels are available.
                    data.append(tagset[split_line[1]] if labeled else 0)
                    data.append(tagset[split_line[2]] if labeled else 0)
                    data.append(tagset[split_line[3]] if labeled else 0)
                    data.append(tagset[split_line[4]] if labeled else 0)
                    data.append(tagset[split_line[5]] if labeled else 0)
                    data.append(sentenceID)
                    data.append(filename)
                    sentence.append(data)
        # Flush the trailing sentence if the file did not end with a blank line.
        sentence.append(["[SEP]", -100, -100, -100, -100, -100, -100 -1, -1, None])
        if len(sentence) > 2:
            sentences.append(sentence)
        return sentences
def prepare_annotations_from_file(filename, tagset, labeled=True):
    """ Read a single file of annotations, returning:
    -- a list of sentences, each a list of [word, label, sentenceID, filename]
    """
    return list(read_annotations(filename, tagset, labeled))
def prepare_annotations_from_folder(folder, tagset, labeled=True):
    """ Read folder of annotations, returning:
    -- a list of sentences, each a list of [word, label, sentenceID, filename]
    """
    sentences = []
    for name in os.listdir(folder):
        print(name)
        sentences.extend(read_annotations(os.path.join(folder, name), tagset, labeled))
    print("num sentences: %s" % len(sentences))
    return sentences
| 1,589 |
310 | <filename>gear/software/b/beepbox.json<gh_stars>100-1000
{
"name": "BeepBox",
"description": "A web-based musical creation toy.",
"url": "https://beepbox.co/"
}
| 65 |
713 | <reponame>pferraro/infinispan
package org.infinispan.persistence.jdbc.configuration;
import static org.infinispan.persistence.jdbc.configuration.TimestampColumnConfiguration.TIMESTAMP_COLUMN_NAME;
import static org.infinispan.persistence.jdbc.configuration.TimestampColumnConfiguration.TIMESTAMP_COLUMN_TYPE;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.attributes.AttributeSet;
/**
 * Builder for {@link TimestampColumnConfiguration}, describing the name and
 * SQL type of the table column that stores entry timestamps in the JDBC store.
 */
public class TimestampColumnConfigurationBuilder implements Builder<TimestampColumnConfiguration> {

   private final AttributeSet attributes;

   TimestampColumnConfigurationBuilder() {
      attributes = TimestampColumnConfiguration.attributeSet();
   }

   /**
    * Sets the name of the database column used for timestamps.
    */
   public TimestampColumnConfigurationBuilder dataColumnName(String dataColumnName) {
      attributes.attribute(TIMESTAMP_COLUMN_NAME).set(dataColumnName);
      return this;
   }

   /**
    * Sets the SQL type of the database column used for timestamps.
    */
   public TimestampColumnConfigurationBuilder dataColumnType(String dataColumnType) {
      attributes.attribute(TIMESTAMP_COLUMN_TYPE).set(dataColumnType);
      return this;
   }

   @Override
   public void validate() {
      // Delegates column name/type validation to the shared table helper.
      TableManipulationConfigurationBuilder.validateIfSet(attributes, TIMESTAMP_COLUMN_NAME, TIMESTAMP_COLUMN_TYPE);
   }

   @Override
   public TimestampColumnConfiguration create() {
      return new TimestampColumnConfiguration(attributes.protect());
   }

   @Override
   public Builder<?> read(TimestampColumnConfiguration template) {
      attributes.read(template.attributes());
      return this;
   }
}
| 464 |
335 | <gh_stars>100-1000
{
"word": "Fling",
"definitions": [
"Throw or hurl forcefully.",
"Move or push (something) suddenly or violently.",
"Start or engage in (an activity or enterprise) with great energy and enthusiasm.",
"Go quickly and angrily."
],
"parts-of-speech": "Verb"
} | 125 |
3,531 | <filename>src/extra/libs/png/lv_png.h
/**
* @file lv_png.h
*
*/
#ifndef LV_PNG_H
#define LV_PNG_H
#ifdef __cplusplus
extern "C" {
#endif
/*********************
* INCLUDES
*********************/
#include "../../../lv_conf_internal.h"
#if LV_USE_PNG
/*********************
* DEFINES
*********************/
/**********************
* TYPEDEFS
**********************/
/**********************
* GLOBAL PROTOTYPES
**********************/
/**
* Register the PNG decoder functions in LVGL
*/
void lv_png_init(void);
/**********************
* MACROS
**********************/
#endif /*LV_USE_PNG*/
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /*LV_PNG_H*/
| 276 |
765 | <reponame>dan-zheng/llvm-project
//===-- RNBServices.cpp -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Created by <NAME> on 3/21/08.
//
//===----------------------------------------------------------------------===//
#include "RNBServices.h"
#include "CFString.h"
#include "DNBLog.h"
#include "MacOSX/CFUtils.h"
#include <CoreFoundation/CoreFoundation.h>
#include <libproc.h>
#include <sys/sysctl.h>
#include <unistd.h>
#include <vector>
// For now only SpringBoard has a notion of "Applications" that it can list for
// us.
// So we have to use the SpringBoard API's here.
#if defined(WITH_SPRINGBOARD) || defined(WITH_BKS)
#include <SpringBoardServices/SpringBoardServices.h>
#endif
// From DNB.cpp
size_t GetAllInfos(std::vector<struct kinfo_proc> &proc_infos);
// Appends one CFDictionary per visible, attachable process to
// plistMutableArray.  Each entry carries the pid, a "frontmost" flag (always
// false here), the executable path (when resolvable) and a display name.
// When all_users is false, only processes owned by the caller's real uid are
// listed.  Returns 0 on success, -1 when the output array is NULL.
int GetProcesses(CFMutableArrayRef plistMutableArray, bool all_users) {
  if (plistMutableArray == NULL)
    return -1;

  // Running as root, get all processes
  std::vector<struct kinfo_proc> proc_infos;
  const size_t num_proc_infos = GetAllInfos(proc_infos);
  if (num_proc_infos > 0) {
    const pid_t our_pid = getpid();
    const uid_t our_uid = getuid();
    uint32_t i;
    CFAllocatorRef alloc = kCFAllocatorDefault;

    for (i = 0; i < num_proc_infos; i++) {
      struct kinfo_proc &proc_info = proc_infos[i];

      bool kinfo_user_matches;
      // Special case, if lldb is being run as root we can attach to anything.
      if (all_users)
        kinfo_user_matches = true;
      else
        kinfo_user_matches = proc_info.kp_eproc.e_pcred.p_ruid == our_uid;

      const pid_t pid = proc_info.kp_proc.p_pid;
      // Skip zombie processes and processes with unset status
      if (!kinfo_user_matches || // User is acceptable
          pid == our_pid ||      // Skip this process
          pid == 0 ||            // Skip kernel (kernel pid is zero)
          proc_info.kp_proc.p_stat ==
              SZOMB || // Zombies are bad, they like brains...
          proc_info.kp_proc.p_flag & P_TRACED ||   // Being debugged?
          proc_info.kp_proc.p_flag & P_WEXIT ||    // Working on exiting?
          proc_info.kp_proc.p_flag &
              P_TRANSLATED) // Skip translated ppc (Rosetta)
        continue;

      // Create a new mutable dictionary for each application
      CFReleaser<CFMutableDictionaryRef> appInfoDict(
          ::CFDictionaryCreateMutable(alloc, 0, &kCFTypeDictionaryKeyCallBacks,
                                      &kCFTypeDictionaryValueCallBacks));

      // Get the process id for the app (if there is one)
      const int32_t pid_int32 = pid;
      CFReleaser<CFNumberRef> pidCFNumber(
          ::CFNumberCreate(alloc, kCFNumberSInt32Type, &pid_int32));
      ::CFDictionarySetValue(appInfoDict.get(), DTSERVICES_APP_PID_KEY,
                             pidCFNumber.get());

      // Set a boolean to indicate if this is the front most
      ::CFDictionarySetValue(appInfoDict.get(), DTSERVICES_APP_FRONTMOST_KEY,
                             kCFBooleanFalse);

      // Default display name is the (possibly truncated) p_comm; replaced by
      // the basename of the full executable path when proc_pidpath succeeds.
      const char *pid_basename = proc_info.kp_proc.p_comm;
      char proc_path_buf[PATH_MAX];

      int return_val = proc_pidpath(pid, proc_path_buf, PATH_MAX);
      if (return_val > 0) {
        // Okay, now search backwards from that to see if there is a
        // slash in the name. Note, even though we got all the args we don't
        // care
        // because the list data is just a bunch of concatenated null terminated
        // strings
        // so strrchr will start from the end of argv0.
        pid_basename = strrchr(proc_path_buf, '/');
        if (pid_basename) {
          // Skip the '/'
          ++pid_basename;
        } else {
          // We didn't find a directory delimiter in the process argv[0], just
          // use what was in there
          pid_basename = proc_path_buf;
        }
        CFString cf_pid_path(proc_path_buf);
        if (cf_pid_path.get())
          ::CFDictionarySetValue(appInfoDict.get(), DTSERVICES_APP_PATH_KEY,
                                 cf_pid_path.get());
      }
      if (pid_basename && pid_basename[0]) {
        CFString pid_name(pid_basename);
        ::CFDictionarySetValue(appInfoDict.get(),
                               DTSERVICES_APP_DISPLAY_NAME_KEY, pid_name.get());
      }

      // Append the application info to the plist array
      ::CFArrayAppendValue(plistMutableArray, appInfoDict.get());
    }
  }
  return 0;
}
// Serialize the list of applications/processes to an XML property list in
// |plist|. With SpringBoard/BackBoard support compiled in, a non-root user
// gets the SpringBoard application list (with frontmost flag, icon path and
// localized name); root, or builds without SpringBoard, fall back to the
// kinfo_proc-based GetProcesses(). Returns 0 on success, -2 if the plist
// serialized to an empty buffer, -3 if serialization failed.
int ListApplications(std::string &plist, bool opt_runningApps,
                     bool opt_debuggable) {
  int result = -1;

  CFAllocatorRef alloc = kCFAllocatorDefault;
  // Create a mutable array that we can populate. Specify zero so it can be of
  // any size.
  CFReleaser<CFMutableArrayRef> plistMutableArray(
      ::CFArrayCreateMutable(alloc, 0, &kCFTypeArrayCallBacks));

  const uid_t our_uid = getuid();

#if defined(WITH_SPRINGBOARD) || defined(WITH_BKS)
  if (our_uid == 0) {
    // Running as root: SpringBoard is not needed, list every process.
    bool all_users = true;
    result = GetProcesses(plistMutableArray.get(), all_users);
  } else {
    CFReleaser<CFStringRef> sbsFrontAppID(
        ::SBSCopyFrontmostApplicationDisplayIdentifier());
    CFReleaser<CFArrayRef> sbsAppIDs(::SBSCopyApplicationDisplayIdentifiers(
        opt_runningApps, opt_debuggable));

    // Need to check the return value from SBSCopyApplicationDisplayIdentifiers.
    CFIndex count = sbsAppIDs.get() ? ::CFArrayGetCount(sbsAppIDs.get()) : 0;
    CFIndex i = 0;
    for (i = 0; i < count; i++) {
      CFStringRef displayIdentifier =
          (CFStringRef)::CFArrayGetValueAtIndex(sbsAppIDs.get(), i);

      // Create a new mutable dictionary for each application
      CFReleaser<CFMutableDictionaryRef> appInfoDict(
          ::CFDictionaryCreateMutable(alloc, 0, &kCFTypeDictionaryKeyCallBacks,
                                      &kCFTypeDictionaryValueCallBacks));

      // Get the process id for the app (if there is one)
      pid_t pid = INVALID_NUB_PROCESS;
      if (::SBSProcessIDForDisplayIdentifier((CFStringRef)displayIdentifier,
                                             &pid) == true) {
        CFReleaser<CFNumberRef> pidCFNumber(
            ::CFNumberCreate(alloc, kCFNumberSInt32Type, &pid));
        ::CFDictionarySetValue(appInfoDict.get(), DTSERVICES_APP_PID_KEY,
                               pidCFNumber.get());
      }

      // Set a boolean to indicate if this is the front most
      if (sbsFrontAppID.get() && displayIdentifier &&
          (::CFStringCompare(sbsFrontAppID.get(), displayIdentifier, 0) ==
           kCFCompareEqualTo))
        ::CFDictionarySetValue(appInfoDict.get(), DTSERVICES_APP_FRONTMOST_KEY,
                               kCFBooleanTrue);
      else
        ::CFDictionarySetValue(appInfoDict.get(), DTSERVICES_APP_FRONTMOST_KEY,
                               kCFBooleanFalse);

      CFReleaser<CFStringRef> executablePath(
          ::SBSCopyExecutablePathForDisplayIdentifier(displayIdentifier));
      if (executablePath.get() != NULL) {
        ::CFDictionarySetValue(appInfoDict.get(), DTSERVICES_APP_PATH_KEY,
                               executablePath.get());
      }

      CFReleaser<CFStringRef> iconImagePath(
          ::SBSCopyIconImagePathForDisplayIdentifier(displayIdentifier));
      if (iconImagePath.get() != NULL) {
        ::CFDictionarySetValue(appInfoDict.get(), DTSERVICES_APP_ICON_PATH_KEY,
                               iconImagePath.get());
      }

      CFReleaser<CFStringRef> localizedDisplayName(
          ::SBSCopyLocalizedApplicationNameForDisplayIdentifier(
              displayIdentifier));
      if (localizedDisplayName.get() != NULL) {
        ::CFDictionarySetValue(appInfoDict.get(),
                               DTSERVICES_APP_DISPLAY_NAME_KEY,
                               localizedDisplayName.get());
      }
      // Append the application info to the plist array
      ::CFArrayAppendValue(plistMutableArray.get(), appInfoDict.get());
    }
  }
#else // #if defined (WITH_SPRINGBOARD) || defined (WITH_BKS)
  // When root, show all processes
  bool all_users = (our_uid == 0);
  GetProcesses(plistMutableArray.get(), all_users);
#endif

  CFReleaser<CFDataRef> plistData(
      ::CFPropertyListCreateXMLData(alloc, plistMutableArray.get()));

  // write plist to service port
  if (plistData.get() != NULL) {
    CFIndex size = ::CFDataGetLength(plistData.get());
    const UInt8 *bytes = ::CFDataGetBytePtr(plistData.get());
    if (bytes != NULL && size > 0) {
      plist.assign((const char *)bytes, size);
      return 0; // Success
    } else {
      DNBLogError("empty application property list.");
      result = -2;
    }
  } else {
    DNBLogError("serializing task list.");
    result = -3;
  }

  return result;
}
| 3,886 |
#include "const_fold.h"
#include <cmath>
#include "sir/util/cloning.h"
#include "sir/util/irtools.h"
#define BINOP(o) \
[](auto x, auto y) -> auto { return x o y; }
#define UNOP(o) \
[](auto x) -> auto { return o x; }
namespace seq {
namespace ir {
namespace transform {
namespace folding {
namespace {
/// Rewrite rule that folds a binary magic-method call whose two constant
/// operands have mixed int/float types. The int operand is promoted to
/// double, Func is applied, and the call is replaced by a constant of IR
/// type |out| holding the C++ value type Out.
template <typename Func, typename Out> class IntFloatBinaryRule : public RewriteRule {
private:
  Func f;            // folding functor, invoked as f(double, double)
  std::string magic; // magic method name to match (e.g. "__add__")
  types::Type *out;  // IR type of the replacement constant

public:
  IntFloatBinaryRule(Func f, std::string magic, types::Type *out)
      : f(std::move(f)), magic(std::move(magic)), out(out) {}
  virtual ~IntFloatBinaryRule() noexcept = default;

  /// Folds intConst.magic(floatConst) and floatConst.magic(intConst);
  /// all other calls are left untouched.
  void visit(CallInstr *v) override {
    if (!util::isCallOf(v, magic, 2, /*output=*/nullptr, /*method=*/true))
      return;

    auto *leftConst = cast<Const>(v->front());
    auto *rightConst = cast<Const>(v->back());

    if (!leftConst || !rightConst)
      return;

    auto *M = v->getModule();
    if (isA<FloatConst>(leftConst) && isA<IntConst>(rightConst)) {
      auto left = cast<FloatConst>(leftConst)->getVal();
      auto right = cast<IntConst>(rightConst)->getVal();
      return setResult(M->template N<TemplatedConst<Out>>(v->getSrcInfo(),
                                                          f(left, (double)right), out));
    } else if (isA<IntConst>(leftConst) && isA<FloatConst>(rightConst)) {
      auto left = cast<IntConst>(leftConst)->getVal();
      auto right = cast<FloatConst>(rightConst)->getVal();
      return setResult(M->template N<TemplatedConst<Out>>(v->getSrcInfo(),
                                                          f((double)left, right), out));
    }
  }
};
/// Binary rule like DoubleConstantBinaryRule, but it refuses to fold when
/// the right-hand constant is the value-initialized zero of ConstantType.
/// Used for division and modulo so constant folding never evaluates a
/// divide-by-zero at compile time.
template <typename ConstantType, typename Func, typename OutputType = ConstantType>
class DoubleConstantBinaryRuleExcludeRHSZero
    : public DoubleConstantBinaryRule<ConstantType, Func, OutputType> {
public:
  DoubleConstantBinaryRuleExcludeRHSZero(Func f, std::string magic,
                                         types::Type *inputType,
                                         types::Type *resultType)
      : DoubleConstantBinaryRule<ConstantType, Func, OutputType>(f, magic, inputType,
                                                                 resultType) {}
  virtual ~DoubleConstantBinaryRuleExcludeRHSZero() noexcept = default;

  void visit(CallInstr *v) override {
    // Bail out early when the RHS is the constant zero; otherwise defer to
    // the base rule's normal folding.
    if (v->numArgs() == 2) {
      auto *rightConst = cast<TemplatedConst<ConstantType>>(v->back());
      if (rightConst && rightConst->getVal() == ConstantType())
        return;
    }
    DoubleConstantBinaryRule<ConstantType, Func, OutputType>::visit(v);
  }
};
// Returns a unary "identity" functor that deep-clones its operand into
// module m. Used by no-op folds (e.g. x * 1 -> x) so the replacement value
// is an independent copy of the surviving operand.
auto id_val(Module *m) {
  return [=](Value *v) -> Value * {
    util::CloneVisitor cv(m);
    return cv.clone(v);
  };
}
// Integer exponentiation by squaring, mirroring runtime `int.__pow__`
// semantics for folding: any negative exponent yields 0 (truncated integer
// result), and overflow wraps as ordinary int64_t multiplication.
int64_t int_pow(int64_t base, int64_t exp) {
  if (exp < 0)
    return 0;
  int64_t acc = 1;
  for (; exp != 0; exp >>= 1) {
    if (exp & 1)
      acc *= base;
    // Only square when more bits remain, matching the early-break form and
    // avoiding one extra (potentially overflowing) multiply.
    if (exp >> 1)
      base *= base;
  }
  return acc;
}
template <typename From, typename To> To convert(From x) { return To(x); }
// Factory: rule folding an int binary op where one operand is a known
// constant (e.g. x*0 -> 0, x+0 -> x); Args forwards the matched constant,
// the replacement, the magic name, and the commutativity kind.
template <typename... Args> auto intSingleRule(Module *m, Args &&...args) {
  return std::make_unique<SingleConstantCommutativeRule<int64_t>>(
      std::forward<Args>(args)..., m->getIntType());
}

// Factory: rule erasing an int unary magic that is a no-op (e.g. +x -> x).
auto intNoOp(Module *m, std::string magic) {
  return std::make_unique<NoOpRule>(std::move(magic), m->getIntType());
}

// Factory: rule erasing a doubly-applied int unary magic (e.g. -(-x) -> x,
// ~~x -> x).
auto intDoubleApplyNoOp(Module *m, std::string magic) {
  return std::make_unique<DoubleApplicationNoOpRule>(std::move(magic), m->getIntType());
}
// Factory: fold int OP int -> int when both operands are constants.
template <typename Func> auto intToIntBinary(Module *m, Func f, std::string magic) {
  return std::make_unique<DoubleConstantBinaryRule<int64_t, Func, int64_t>>(
      std::move(f), std::move(magic), m->getIntType(), m->getIntType());
}

// Factory: like intToIntBinary, but never folds when the RHS constant is 0
// (used for // and % to avoid compile-time division by zero).
template <typename Func>
auto intToIntBinaryNoZeroRHS(Module *m, Func f, std::string magic) {
  return std::make_unique<
      DoubleConstantBinaryRuleExcludeRHSZero<int64_t, Func, int64_t>>(
      std::move(f), std::move(magic), m->getIntType(), m->getIntType());
}

// Factory: fold int OP int -> bool (comparisons) for constant operands.
template <typename Func> auto intToBoolBinary(Module *m, Func f, std::string magic) {
  return std::make_unique<DoubleConstantBinaryRule<int64_t, Func, bool>>(
      std::move(f), std::move(magic), m->getIntType(), m->getBoolType());
}

// Factory: fold bool OP bool -> bool for constant operands.
template <typename Func> auto boolToBoolBinary(Module *m, Func f, std::string magic) {
  return std::make_unique<DoubleConstantBinaryRule<bool, Func, bool>>(
      std::move(f), std::move(magic), m->getBoolType(), m->getBoolType());
}

// Factory: fold float OP float -> float for constant operands.
template <typename Func> auto floatToFloatBinary(Module *m, Func f, std::string magic) {
  return std::make_unique<DoubleConstantBinaryRule<double, Func, double>>(
      std::move(f), std::move(magic), m->getFloatType(), m->getFloatType());
}

// Factory: fold float OP float -> bool (comparisons) for constant operands.
template <typename Func> auto floatToBoolBinary(Module *m, Func f, std::string magic) {
  return std::make_unique<DoubleConstantBinaryRule<double, Func, bool>>(
      std::move(f), std::move(magic), m->getFloatType(), m->getBoolType());
}

// Factory: fold mixed int/float OP -> float (int operand promoted).
template <typename Func>
auto intFloatToFloatBinary(Module *m, Func f, std::string magic) {
  return std::make_unique<IntFloatBinaryRule<Func, double>>(
      std::move(f), std::move(magic), m->getFloatType());
}

// Factory: fold mixed int/float comparison -> bool (int operand promoted).
template <typename Func>
auto intFloatToBoolBinary(Module *m, Func f, std::string magic) {
  return std::make_unique<IntFloatBinaryRule<Func, bool>>(
      std::move(f), std::move(magic), m->getBoolType());
}
// Factory: fold a unary int magic (e.g. -c, ~c) on a constant operand.
template <typename Func> auto intToIntUnary(Module *m, Func f, std::string magic) {
  return std::make_unique<SingleConstantUnaryRule<int64_t, Func>>(
      std::move(f), std::move(magic), m->getIntType(), m->getIntType());
}

// Factory: fold a unary float magic on a constant operand.
template <typename Func> auto floatToFloatUnary(Module *m, Func f, std::string magic) {
  return std::make_unique<SingleConstantUnaryRule<double, Func>>(
      std::move(f), std::move(magic), m->getFloatType(), m->getFloatType());
}

// Factory: fold a unary bool magic on a constant operand.
template <typename Func> auto boolToBoolUnary(Module *m, Func f, std::string magic) {
  return std::make_unique<SingleConstantUnaryRule<bool, Func>>(
      std::move(f), std::move(magic), m->getBoolType(), m->getBoolType());
}

// Factory: fold a same-type conversion (e.g. int(intConst)) by cloning the
// operand unchanged.
auto identityConvert(Module *m, std::string magic, types::Type *type) {
  return std::make_unique<UnaryRule<decltype(id_val(m))>>(id_val(m), std::move(magic),
                                                          type);
}

// Factory: fold a constant conversion between distinct types using the
// convert<From, To> cast helper (e.g. float(intConst)).
template <typename From, typename To>
auto typeConvert(Module *m, std::string magic, types::Type *fromType,
                 types::Type *toType) {
  return std::make_unique<
      SingleConstantUnaryRule<From, std::function<decltype(convert<From, To>)>>>(
      convert<From, To>, std::move(magic), fromType, toType);
}
} // namespace
// Unique pass-manager key for this pass.
const std::string FoldingPass::KEY = "core-folding-const-fold";

/// Registers all standard folding rules, clears any rules/results from a
/// previous run, then walks the module rewriting call instructions.
void FoldingPass::run(Module *m) {
  registerStandardRules(m);
  Rewriter::reset();
  OperatorPass::run(m);
}

/// Per-instruction hook: try every registered rule on the call.
void FoldingPass::handle(CallInstr *v) { rewrite(v); }
/// Registers the full catalog of constant-folding rules for int, float and
/// bool magic methods: single-constant simplifications (x*0, x+0, ...),
/// double-constant arithmetic/comparison folds, unary folds, and constant
/// type conversions. Rule names are stable identifiers used by the
/// rewriter registry.
void FoldingPass::registerStandardRules(Module *m) {
  // binary, single constant, int->int
  using Kind = SingleConstantCommutativeRule<int64_t>::Kind;
  registerRule("int-multiply-by-zero",
               intSingleRule(m, 0, 0, Module::MUL_MAGIC_NAME, Kind::COMMUTATIVE));
  registerRule(
      "int-multiply-by-one",
      intSingleRule(m, 1, id_val(m), Module::MUL_MAGIC_NAME, Kind::COMMUTATIVE));
  registerRule("int-subtract-zero",
               intSingleRule(m, 0, id_val(m), Module::SUB_MAGIC_NAME, Kind::RIGHT));
  registerRule("int-add-zero", intSingleRule(m, 0, id_val(m), Module::ADD_MAGIC_NAME,
                                             Kind::COMMUTATIVE));
  registerRule(
      "int-floor-div-by-one",
      intSingleRule(m, 1, id_val(m), Module::FLOOR_DIV_MAGIC_NAME, Kind::RIGHT));
  registerRule("int-zero-floor-div",
               intSingleRule(m, 0, 0, Module::FLOOR_DIV_MAGIC_NAME, Kind::LEFT));
  registerRule("int-pos", intNoOp(m, Module::POS_MAGIC_NAME));
  registerRule("int-double-neg", intDoubleApplyNoOp(m, Module::NEG_MAGIC_NAME));
  registerRule("int-double-inv", intDoubleApplyNoOp(m, Module::INVERT_MAGIC_NAME));

  // binary, double constant, int->int
  registerRule("int-constant-addition",
               intToIntBinary(m, BINOP(+), Module::ADD_MAGIC_NAME));
  registerRule("int-constant-subtraction",
               intToIntBinary(m, BINOP(-), Module::SUB_MAGIC_NAME));
  registerRule("int-constant-floor-div",
               intToIntBinaryNoZeroRHS(m, BINOP(/), Module::FLOOR_DIV_MAGIC_NAME));
  registerRule("int-constant-mul", intToIntBinary(m, BINOP(*), Module::MUL_MAGIC_NAME));
  registerRule("int-constant-lshift",
               intToIntBinary(m, BINOP(<<), Module::LSHIFT_MAGIC_NAME));
  registerRule("int-constant-rshift",
               intToIntBinary(m, BINOP(>>), Module::RSHIFT_MAGIC_NAME));
  registerRule("int-constant-pow", intToIntBinary(m, int_pow, Module::POW_MAGIC_NAME));
  registerRule("int-constant-xor", intToIntBinary(m, BINOP(^), Module::XOR_MAGIC_NAME));
  registerRule("int-constant-or", intToIntBinary(m, BINOP(|), Module::OR_MAGIC_NAME));
  registerRule("int-constant-and", intToIntBinary(m, BINOP(&), Module::AND_MAGIC_NAME));
  registerRule("int-constant-mod",
               intToIntBinaryNoZeroRHS(m, BINOP(%), Module::MOD_MAGIC_NAME));

  // binary, double constant, int->bool
  registerRule("int-constant-eq", intToBoolBinary(m, BINOP(==), Module::EQ_MAGIC_NAME));
  registerRule("int-constant-ne", intToBoolBinary(m, BINOP(!=), Module::NE_MAGIC_NAME));
  registerRule("int-constant-gt", intToBoolBinary(m, BINOP(>), Module::GT_MAGIC_NAME));
  registerRule("int-constant-ge", intToBoolBinary(m, BINOP(>=), Module::GE_MAGIC_NAME));
  registerRule("int-constant-lt", intToBoolBinary(m, BINOP(<), Module::LT_MAGIC_NAME));
  registerRule("int-constant-le", intToBoolBinary(m, BINOP(<=), Module::LE_MAGIC_NAME));

  // binary, double constant, bool->bool
  registerRule("bool-constant-xor",
               boolToBoolBinary(m, BINOP(^), Module::XOR_MAGIC_NAME));
  registerRule("bool-constant-or",
               boolToBoolBinary(m, BINOP(|), Module::OR_MAGIC_NAME));
  registerRule("bool-constant-and",
               boolToBoolBinary(m, BINOP(&), Module::AND_MAGIC_NAME));

  // unary, single constant, int->int
  registerRule("int-constant-pos", intToIntUnary(m, UNOP(+), Module::POS_MAGIC_NAME));
  registerRule("int-constant-neg", intToIntUnary(m, UNOP(-), Module::NEG_MAGIC_NAME));
  registerRule("int-constant-inv",
               intToIntUnary(m, UNOP(~), Module::INVERT_MAGIC_NAME));

  // unary, single constant, float->float
  registerRule("float-constant-pos",
               floatToFloatUnary(m, UNOP(+), Module::POS_MAGIC_NAME));
  registerRule("float-constant-neg",
               floatToFloatUnary(m, UNOP(-), Module::NEG_MAGIC_NAME));

  // unary, single constant, bool->bool
  registerRule("bool-constant-inv",
               boolToBoolUnary(m, UNOP(!), Module::INVERT_MAGIC_NAME));

  // binary, double constant, float->float
  registerRule("float-constant-addition",
               floatToFloatBinary(m, BINOP(+), Module::ADD_MAGIC_NAME));
  registerRule("float-constant-subtraction",
               floatToFloatBinary(m, BINOP(-), Module::SUB_MAGIC_NAME));
  // NOTE: despite the rule name, this folds true division ('/'), as the
  // TRUE_DIV magic name shows; the name is kept for registry stability.
  registerRule("float-constant-floor-div",
               floatToFloatBinary(m, BINOP(/), Module::TRUE_DIV_MAGIC_NAME));
  registerRule("float-constant-mul",
               floatToFloatBinary(m, BINOP(*), Module::MUL_MAGIC_NAME));
  registerRule(
      "float-constant-pow",
      floatToFloatBinary(
          m, [](auto a, auto b) { return std::pow(a, b); }, Module::POW_MAGIC_NAME));

  // binary, double constant, float->bool
  registerRule("float-constant-eq",
               floatToBoolBinary(m, BINOP(==), Module::EQ_MAGIC_NAME));
  registerRule("float-constant-ne",
               floatToBoolBinary(m, BINOP(!=), Module::NE_MAGIC_NAME));
  registerRule("float-constant-gt",
               floatToBoolBinary(m, BINOP(>), Module::GT_MAGIC_NAME));
  registerRule("float-constant-ge",
               floatToBoolBinary(m, BINOP(>=), Module::GE_MAGIC_NAME));
  registerRule("float-constant-lt",
               floatToBoolBinary(m, BINOP(<), Module::LT_MAGIC_NAME));
  registerRule("float-constant-le",
               floatToBoolBinary(m, BINOP(<=), Module::LE_MAGIC_NAME));

  // binary, double constant, int,float->float
  registerRule("int-float-constant-addition",
               intFloatToFloatBinary(m, BINOP(+), Module::ADD_MAGIC_NAME));
  registerRule("int-float-constant-subtraction",
               intFloatToFloatBinary(m, BINOP(-), Module::SUB_MAGIC_NAME));
  // NOTE: also true division ('/'), not floor division; see above.
  registerRule("int-float-constant-floor-div",
               intFloatToFloatBinary(m, BINOP(/), Module::TRUE_DIV_MAGIC_NAME));
  registerRule("int-float-constant-mul",
               intFloatToFloatBinary(m, BINOP(*), Module::MUL_MAGIC_NAME));

  // binary, double constant, int,float->bool
  registerRule("int-float-constant-eq",
               intFloatToBoolBinary(m, BINOP(==), Module::EQ_MAGIC_NAME));
  registerRule("int-float-constant-ne",
               intFloatToBoolBinary(m, BINOP(!=), Module::NE_MAGIC_NAME));
  registerRule("int-float-constant-gt",
               intFloatToBoolBinary(m, BINOP(>), Module::GT_MAGIC_NAME));
  registerRule("int-float-constant-ge",
               intFloatToBoolBinary(m, BINOP(>=), Module::GE_MAGIC_NAME));
  registerRule("int-float-constant-lt",
               intFloatToBoolBinary(m, BINOP(<), Module::LT_MAGIC_NAME));
  registerRule("int-float-constant-le",
               intFloatToBoolBinary(m, BINOP(<=), Module::LE_MAGIC_NAME));

  // type conversions, identity
  registerRule("int-constant-int",
               identityConvert(m, Module::INT_MAGIC_NAME, m->getIntType()));
  registerRule("float-constant-float",
               identityConvert(m, Module::FLOAT_MAGIC_NAME, m->getFloatType()));
  registerRule("bool-constant-bool",
               identityConvert(m, Module::BOOL_MAGIC_NAME, m->getBoolType()));

  // type conversions, distinct
  registerRule("float-constant-int",
               typeConvert<double, int64_t>(m, Module::INT_MAGIC_NAME,
                                            m->getFloatType(), m->getIntType()));
  registerRule("bool-constant-int",
               typeConvert<bool, int64_t>(m, Module::INT_MAGIC_NAME, m->getBoolType(),
                                          m->getIntType()));
  registerRule("int-constant-float",
               typeConvert<int64_t, double>(m, Module::FLOAT_MAGIC_NAME,
                                            m->getIntType(), m->getFloatType()));
  registerRule("bool-constant-float",
               typeConvert<bool, double>(m, Module::FLOAT_MAGIC_NAME, m->getBoolType(),
                                         m->getFloatType()));
  registerRule("int-constant-bool",
               typeConvert<int64_t, bool>(m, Module::BOOL_MAGIC_NAME, m->getIntType(),
                                          m->getBoolType()));
  registerRule("float-constant-bool",
               typeConvert<double, bool>(m, Module::BOOL_MAGIC_NAME, m->getFloatType(),
                                         m->getBoolType()));
}
} // namespace folding
} // namespace transform
} // namespace ir
} // namespace seq
#undef BINOP
#undef UNOP
| 6,718 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <string>
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "records/ApplicationSubmissionContext.h"
using std::string;
using namespace libyarn;
using namespace testing;
// Test fixture: each test gets a fresh, default-constructed
// ApplicationSubmissionContext to exercise its setters/getters.
class TestApplicationSubmissionContext: public ::testing::Test {
protected:
ApplicationSubmissionContext applicationContext;
};
// Setter/getter round trip for the application id. ApplicationId lacks
// operator==, so this only verifies the calls complete without error.
TEST_F(TestApplicationSubmissionContext, TestGetApplicationId)
{
  ApplicationId id;
  applicationContext.setApplicationId(id);
  ApplicationId retId = applicationContext.getApplicationId();
  SUCCEED();
}
// Round trip of the application-name string through the context.
TEST_F(TestApplicationSubmissionContext, TestGetApplicationName)
{
  string name("name");
  applicationContext.setApplicationName(name);
  EXPECT_EQ(name, applicationContext.getApplicationName());
}

// Round trip of the queue string through the context.
TEST_F(TestApplicationSubmissionContext, TestGetQueue)
{
  string queue("queue");
  applicationContext.setQueue(queue);
  EXPECT_EQ(queue, applicationContext.getQueue());
}
// Setter/getter round trip for Priority; Priority lacks operator==, so no
// value comparison is possible here.
TEST_F(TestApplicationSubmissionContext, TestGetPriority)
{
  Priority priority;
  applicationContext.setPriority(priority);
  Priority retPriority = applicationContext.getPriority();
  SUCCEED();
}

// Setter/getter round trip for the AM container launch context;
// ContainerLaunchContext lacks operator==.
TEST_F(TestApplicationSubmissionContext, TestGetAMContainerSpec)
{
  ContainerLaunchContext context;
  applicationContext.setAMContainerSpec(context);
  ContainerLaunchContext retContext = applicationContext.getAMContainerSpec();
  SUCCEED();
}
// Round trip of the cancel-tokens-when-complete flag.
TEST_F(TestApplicationSubmissionContext, TestGetCancelTokensWhenComplete)
{
  bool flag = true;
  applicationContext.setCancelTokensWhenComplete(flag);
  EXPECT_EQ(flag, applicationContext.getCancelTokensWhenComplete());
}

// Round trip of the unmanaged-AM flag.
TEST_F(TestApplicationSubmissionContext, TestGetUnmanagedAM)
{
  bool flag = true;
  applicationContext.setUnmanagedAM(flag);
  EXPECT_EQ(flag, applicationContext.getUnmanagedAM());
}

// Round trip of the max-app-attempts count (negative sentinel included).
TEST_F(TestApplicationSubmissionContext, TestGetMaxAppAttempts)
{
  int32_t max = -1;
  applicationContext.setMaxAppAttempts(max);
  EXPECT_EQ(max, applicationContext.getMaxAppAttempts());
}
// Setter/getter round trip for Resource; Resource lacks operator==.
TEST_F(TestApplicationSubmissionContext, TestGetResource)
{
  Resource resource;
  applicationContext.setResource(resource);
  Resource retResource = applicationContext.getResource();
  SUCCEED();
}

// Round trip of the application-type string through the context.
TEST_F(TestApplicationSubmissionContext, TestGetApplicationType)
{
  string type("type");
  applicationContext.setApplicationType(type);
  EXPECT_EQ(type, applicationContext.getApplicationType());
}
# Source repository: flying-sheep/goatools
"""Test the loading of the optional GO term fields."""
# https://owlcollab.github.io/oboformat/doc/GO.format.obo-1_4.html
__copyright__ = "Copyright (C) 2010-2018, <NAME>, <NAME>, All rights reserved."
__author__ = "<NAME>"
import os
import sys
import re
import timeit
import collections as cx
from goatools.godag.prttime import GoDagTimed
from goatools.godag.prttime import prt_hms
class OptionalAttrs(object):
    """Holds data for GO relationship test."""

    # Repository root: two directories above this test file
    repo = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../..")
    cmpfld = re.compile(r'^(\S+)\s*:\s*(\S.*\S)\s*$') # Field line pattern
    # Synonym scopes permitted by the OBO 1.4 format
    exp_scopes = set(['EXACT', 'BROAD', 'NARROW', 'RELATED'])
    # Cross-references must look like "DB:ID"
    exp_xrefpat = re.compile(r'^\S+:\S+$')
    # Required attributes are always loaded.
    # ('is_obsolete' was listed twice; the duplicate literal was removed —
    # set contents are unchanged.)
    exp_req = set(['name', 'item_id', 'is_obsolete', 'namespace', 'alt_id', 'is_a'])
    # Generated attributes
    exp_gen = set(['level', 'depth', 'parents', 'children', '_parents'])
    exp_relationships = set(['part_of',
                             'regulates', 'negatively_regulates', 'positively_regulates'])
    # Fields stored as plain strings vs. sets of values
    attrs_scalar = set(['item_id', 'namespace', 'name', 'def', 'comment'])
    attrs_set = set(['xref', 'subset', 'alt_id'])
    def __init__(self, fin_obo, opt_field=None, keep_alt_ids=False):
        """Load the GO DAG plus the raw per-term field dicts used as expected values.

        fin_obo: obo filename relative to the repository root.
        opt_field: optional attribute to load; None loads all optional fields.
        keep_alt_ids: keep alternate GO IDs as separate DAG entries.
        """
        self.opt = opt_field  # None causes all fields to read to exp dict
        self.obo = os.path.join(self.repo, fin_obo)
        self.go2obj = GoDagTimed(self.obo, opt_field, keep_alt_ids).go2obj
        self.dcts = self._init_go2dct()  # go2dct typdefdct flds
        # Keep raw field dicts only for GO IDs actually present in the DAG
        self.go2dct = {go:d for go, d in self.dcts['go2dct'].items() if go in self.go2obj}
        self.num_tot = len(self.go2obj)
        # Sanity-check required fields and the parent/child graph up front;
        # _set_exp_children must run before _chk_children fills/compares them.
        self._chk_required()
        self._chk_parents()
        self._set_exp_children()
        self._chk_children()
def chk_get_goterms_upper(self):
"""Check that GOTerm's 'get_upper' returns parents and relationships."""
tic = timeit.default_timer()
for goterm in self.go2obj.values():
goids_act = set(o.item_id for o in goterm.get_goterms_upper())
goids_exp = self._get_goterms_upper(goterm.item_id)
assert goids_act == goids_exp
prt_hms(tic, "get_goterms_upper")
def chk_get_goterms_lower(self):
"""Check that GOTerm's 'get_lower' returns parents and relationships."""
tic = timeit.default_timer()
for goterm in self.go2obj.values():
goids_act = set(o.item_id for o in goterm.get_goterms_lower())
goids_exp = self._get_goterms_lower(goterm.item_id)
assert goids_act == goids_exp, "{GO} EXP({E}) ACT({A})".format(
GO=goterm.item_id, E=goids_exp, A=goids_act)
prt_hms(tic, "get_goterms_lower")
def _get_goterms_upper(self, goid):
"""Get expected GO IDs returned by GOTerm's 'get_goterms_upper'."""
goids_exp = set()
dct = self.go2dct[goid]
if 'is_a' in dct:
goids_exp.update(dct['is_a'])
if 'relationship' in dct:
for rel_go in dct['relationship']:
goids_exp.add(rel_go.split()[1])
return goids_exp
def _get_goterms_lower(self, goid):
"""Get expected GO IDs returned by GOTerm's 'get_goterms_lower'."""
goids_exp = set()
dct = self.go2dct[goid]
if 'is_a_rev' in dct:
goids_exp.update(dct['is_a_rev'])
if 'relationship_rev' in dct:
for rel_gos in dct['relationship_rev'].values():
goids_exp.update(rel_gos)
return goids_exp
    def chk_relationships_rev(self, reltype='part_of', prt=None):
        """Check reciprocal relationships. Print all GO pairs in one type of relationship."""
        # Spacer that aligns printed forward entries under the reltype label
        spc = " "*len(reltype)
        # Map: related-to record -> set of records pointing at it via reltype
        rec2revs = cx.defaultdict(set)
        # Pass 1: collect the forward edges from each record's 'relationship'
        for rec in sorted(self.go2obj.values(), key=lambda o: o.namespace):
            reldct = rec.relationship
            if reltype in reldct:
                if prt is not None:
                    prt.write("{SPC} {GO}\n".format(SPC=spc, GO=str(rec)))
                for related_to in reldct[reltype]:
                    rec2revs[related_to].add(rec)
                    if prt is not None:
                        prt.write("{RELTYPE} {GO}\n".format(RELTYPE=reltype, GO=str(related_to)))
                if prt is not None:
                    prt.write("\n")
        # Pass 2: every record's 'relationship_rev' must contain exactly the
        # records collected above as pointing at it.
        for rec, exp_revs in sorted(rec2revs.items(), key=lambda t: t[0].namespace):
            if prt is not None:
                prt.write("  {SPC} {GO}\n".format(SPC=spc, GO=str(rec)))
            assert rec.relationship_rev[reltype] == exp_revs
            for related_from in rec.relationship_rev[reltype]:
                if prt is not None:
                    prt.write("rev {RELTYPE} {GO}\n".format(RELTYPE=reltype, GO=str(related_from)))
            if prt is not None:
                prt.write("\n")
def chk_str(self, attr):
"""Check that expected scalar value matches actual string value."""
for goid, rec in self.go2obj.items():
# A string data member must always be present, even if the value is ""
act_str = getattr(rec, attr)
exp_dct = self.go2dct[goid]
# Expected string equals actual string?
if attr in exp_dct:
exp_str = next(iter(exp_dct[attr]))
assert exp_str == act_str, "{} EXP({}) ACT({})".format(
goid, exp_str, act_str)
# If there is no expected string, is actual string ""?
else:
assert act_str == ""
    def prt_summary(self, prt=sys.stdout):
        """Print counts/percentages of fields seen on GO terms and Typedef records."""
        sep = "\n-----------------------------------------------------------\n"
        # All field names observed while parsing the obo file
        flds_seen = self.dcts['flds']
        fld_cnts_go = self._get_cnts_gte1(self.go2dct.values())
        prt.write("{SEP}GO TERM REQUIRED FIELDS:\n".format(SEP=sep))
        self._prt_summary(prt, fld_cnts_go, self.exp_req, self.go2dct.values())
        flds_seen = flds_seen.difference(self.exp_req)
        prt.write("{SEP}GO TERM OPTIONAL FIELDS:\n".format(SEP=sep))
        self._prt_summary(prt, fld_cnts_go, flds_seen, self.go2dct.values())
        # Fields not on any GO term must belong to Typedef records
        flds_seen = flds_seen.difference(fld_cnts_go.keys())
        prt.write("{SEP}Typedef FIELDS:\n".format(SEP=sep))
        fld_cnts_typedef = self._get_cnts_gte1(self.dcts['typedefdct'].values())
        self._prt_summary(prt, fld_cnts_typedef, flds_seen, self.dcts['typedefdct'])
        flds_seen = flds_seen.difference(fld_cnts_typedef.keys())
        # After GO-term and Typedef accounting, only obsolescence pointers remain
        assert flds_seen == set(['consider', 'replaced_by']), "UNEXPECTED FIELDS({})".format(
            flds_seen)
    def _prt_summary(self, prt, fld_cnts, prt_flds, dcts):
        """Print occurrence counts and per-record maxima for the selected fields.

        fld_cnts: Counter of how many records contain each field.
        prt_flds: fields to report, or None for all.
        dcts: the record dicts the counts were taken from (len -> total).
        """
        prt.write("\n    These fields appear at least once\n")
        # Ex: 28,951 of 44,948 (64%) GO IDs has field(synonym)
        for relname, cnt in fld_cnts.most_common():
            if prt_flds is None or relname in prt_flds:
                self._prt_perc(cnt, relname, len(dcts), prt)
        prt.write("\n    Maximum number of fields:\n")
        for fld, maxqty in sorted(self._get_cnts_max(dcts).items(), key=lambda t: t[1]):
            if prt_flds is None or fld in prt_flds:
                prt.write("        {MAX:3} {MRK} {FLD}\n".format(
                    MAX=maxqty, MRK=self._get_fldmrk(fld), FLD=fld))
def _chk_parents(self):
"""Check parents."""
for goobj in self.go2obj.values():
exp_dct = self.go2dct[goobj.item_id]
if 'is_a' in exp_dct:
# pylint: disable=protected-access
exp_parents = exp_dct['is_a']
act_parents = goobj._parents
assert exp_parents == act_parents
else:
assert not goobj.parents
def _chk_children(self):
"""Check children."""
for goobj in self.go2obj.values():
exp_dct = self.go2dct[goobj.item_id]
if '_children' in exp_dct:
exp_children = exp_dct['_children']
act_children = set(o.item_id for o in goobj.children)
assert exp_children == act_children
else:
assert not goobj.children
def _set_exp_children(self):
"""Fill expected child GO IDs."""
# Initialize empty sets for child GO IDs
for exp_dct in self.go2dct.values():
exp_dct['_children'] = set()
# Loop thru all GO IDs
for goid_child, exp_dct in self.go2dct.items():
if 'is_a' in exp_dct:
# Add current GO ID to all of it's parents' set of children
for goid_parent in exp_dct['is_a']:
self.go2dct[goid_parent]['_children'].add(goid_child)
    def _chk_required(self):
        """Check the required attributes."""
        for goid, goobj in self.go2obj.items():
            godct = self.go2dct[goid]
            # item_id must equal the record's GO ID; namespace/name are stored
            # as single-element sets in the raw dict, hence next(iter(...)).
            assert goobj.item_id == godct['GO']
            assert goobj.namespace == next(iter(godct['namespace'])), godct
            assert goobj.name == next(iter(godct['name']))
            self._chk_is_obsolete(goobj, godct)
            self._chk_alt_ids(goobj, godct)
@staticmethod
def _chk_alt_ids(goobj, godct):
"""Check 'alt_ids' required attribute."""
if 'alt_id' in godct:
assert godct['alt_id'] == goobj.alt_ids
else:
assert not goobj.alt_ids
@staticmethod
def _chk_is_obsolete(goobj, godct):
"""Check 'is_obsolete' required attribute."""
act_obso = getattr(goobj, 'is_obsolete', None)
if act_obso:
assert 'is_obsolete' in godct, "EXP({})\nACT({})".format(
godct, getattr(goobj, 'is_obsolete', None))
else:
assert 'is_obsolete' not in godct, "EXP({})\nACT({})".format(
godct, getattr(goobj, 'is_obsolete', None))
    def chk_no_optattrs(self):
        """Check that only the optional attributes requested are the attributes implemented."""
        # name is_obsolete namespace item_id alt_ids
        # level namespace depth parents children _parents
        exp_flds = self.exp_req.union(self.exp_gen)
        # 'id' and 'alt_ids' are instance attributes always present even when
        # no optional fields were requested
        obj1_exp0 = set(['id', 'alt_ids'])
        for goobj in self.go2obj.values():
            # Instance attributes beyond required+generated must be exactly
            # the two always-present aliases above
            attrs = set(vars(goobj).keys()).difference(exp_flds)
            assert attrs == obj1_exp0, attrs
def chk_xref(self, prt=None):
"""Check xrefs."""
# Get GO IDs which are expected to have xrefs
goids = set(go for go, d in self.go2dct.items() if 'xref' in d)
for goid in goids:
goobj = self.go2obj[goid]
xrefs = getattr(goobj, 'xref', None)
assert xrefs is not None, "{GO} MISSING XREF".format(GO=goid)
# Iterate through list of xref data stored in named tuples
for dbxref in xrefs:
if prt is not None:
prt.write("{GO} {DBXREF}\n".format(GO=goid, DBXREF=dbxref))
assert self.exp_xrefpat.match(dbxref), "INVALID XREF FORMAT({X})".format(
X=dbxref)
    def chk_synonyms(self, prt=None):
        """Check synonyms; return list of (goid, ntsyn) with badly formatted dbxrefs.

           Example synonym and its storage in a namedtuple:
           synonym: "The other white meat" EXACT MARKETING_SLOGAN [MEAT:00324, BACONBASE:03021]
             text:     "The other white meat"
             scope:    EXACT
             typename: MARKETING_SLOGAN
             dbxrefs:  set(["MEAT:00324", "BACONBASE:03021"])
        """
        # Get GO IDs which are expected to have synonyms
        badnts = []
        for goid, dct_exp in self.go2dct.items():
            goobj = self.go2obj[goid]
            if 'synonym' in dct_exp:
                ntsyns = getattr(goobj, 'synonym', None)
                assert ntsyns is not None, "{GO} MISSING SYNONYM".format(GO=goid)
                # Iterate through list of synonym data stored in named tuples
                for ntsyn in ntsyns:
                    if prt is not None:
                        prt.write("{GO} {NT}\n".format(GO=goid, NT=ntsyn))
                    # Text must be non-empty; scope must be a valid OBO scope
                    assert ntsyn.text, "SYNONYM CANNOT BE EMPTY"
                    assert ntsyn.scope in self.exp_scopes, "INVALID SYNONYM SCOPE"
                    # Malformed dbxrefs are collected and warned about, not fatal
                    for dbxref in ntsyn.dbxrefs:
                        if not self.exp_xrefpat.match(dbxref):
                            badnts.append((goid, ntsyn))
                            print("**WARNING: INVALID FORMAT: DBXREF({D}) ON {GO}".format(
                                D=dbxref, GO=goid))
            else:
                assert goobj.synonym == []
        return badnts
def _get_fldmrk(self, fld):
"""Get a mark for each field indicating if it is required or optional"""
#pylint: disable=too-many-return-statements
if fld in self.exp_req:
return 'REQ'
if fld == 'def':
return 'str'
if fld in self.attrs_scalar:
return 'str'
if fld in self.attrs_set:
return 'set'
if fld == 'relationship':
return 'rel'
if fld == 'synonym':
return 'syn'
if fld == 'xref':
return 'xrf'
raise RuntimeError("UNEXPECTED FIELD({})".format(fld))
@staticmethod
def _prt_perc(num_rel, name, num_tot, prt=sys.stdout):
"""Print percentage of GO IDs that have a specific relationship."""
prt.write(" {N:6,} of {M:,} ({P:3.0f}%) GO IDs has field({A})\n".format(
N=num_rel, M=num_tot, P=float(num_rel)/num_tot*100, A=name))
def _get_cnts_max(self, dcts):
"""Get the maximum count of times a specific relationship was seen on a GO."""
fld2qtys = cx.defaultdict(set)
flds = self.dcts['flds']
for recdct in dcts:
for opt in flds:
if opt in recdct:
fld2qtys[opt].add(len(recdct[opt]))
return {f:max(qtys) for f, qtys in fld2qtys.items()}
def _get_cnts_gte1(self, record_dicts):
"""Get counts of if a specific relationship was seen on a GO."""
ctr = cx.Counter()
flds = self.dcts['flds']
for recdct in record_dicts:
for opt in flds:
if opt in recdct:
ctr[opt] += 1
return ctr
def chk_set(self, opt):
"""Check that actual set contents match expected set contents."""
errpat = "SET EXP({EXP}) ACT({ACT}) {GO}\n{DESC}:\nEXP:\n{Es}\n\nACT:\n{As}"
for goid, dct in self.go2dct.items():
act_set = getattr(self.go2obj[goid], opt, None)
if opt in dct:
exp_set = dct[opt]
assert exp_set == act_set, errpat.format(
EXP=len(exp_set), ACT=len(act_set), GO=goid,
DESC=str(self.go2obj[goid].name),
Es="\n".join(sorted(exp_set)),
As="\n".join(sorted(act_set)))
else:
assert act_set == set(), "EXPECTED EMPTY SET FOR {O}: ACT({A})\n".format(
O=opt, A=act_set)
def chk_relationships(self):
"""Expected relationship GO IDs should match actual relationship GO IDs."""
for goid, dct in self.go2dct.items():
act_rel2recs = getattr(self.go2obj[goid], 'relationship', None)
if 'relationship' in dct:
rel2gos = self._mk_exp_relatinship_sets(dct['relationship'])
# Check if expected relationships and actual relationships are the same
assert set(act_rel2recs.keys()) == set(rel2gos.keys()), "EXP({}) != ACT({})".format(
set(act_rel2recs.keys()), set(rel2gos.keys()))
for rel, exp_goids in rel2gos.items():
# Expected relationships store GO IDs.
# Actual relationships store GO Terms.
act_goids = set(o.item_id for o in act_rel2recs[rel])
assert exp_goids == act_goids, "EXP({}) ACT({}) {}:\nEXP({})\nACT({})".format(
len(exp_goids), len(act_goids), goid, exp_goids, act_goids)
else:
assert act_rel2recs == {}, act_rel2recs
def _mk_exp_relatinship_sets(self, relationship_str_set):
"""Transform a set of relationship strings into a dict of sets containing GO IDs."""
rel2gos = cx.defaultdict(set)
for rel_str in relationship_str_set:
rel, goid = rel_str.split()
assert rel in self.exp_relationships
assert goid[:3] == "GO:" and goid[3:].isdigit()
rel2gos[rel].add(goid)
return rel2gos
@staticmethod
def add_is_a_rev(go2dct):
"""If there 'is_a' exists, add 'is_a_rev'."""
for go_src, dct in go2dct.items():
if 'is_a' in dct:
for go_parent in dct['is_a']:
if 'is_a_rev' not in go2dct[go_parent]:
go2dct[go_parent]['is_a_rev'] = set()
go2dct[go_parent]['is_a_rev'].add(go_src)
@staticmethod
def add_relationship_rev(go2dct):
"""If there is a relationship, add 'relationship_rev'."""
for go_src, dct in go2dct.items():
if 'relationship' in dct:
for rel in dct['relationship']:
reltype, go_dst = rel.split()
# print("RRRRRRRRR", go_src, reltype, go_dst)
if 'relationship_rev' not in go2dct[go_dst]:
go2dct[go_dst]['relationship_rev'] = {}
if reltype not in go2dct[go_dst]['relationship_rev']:
go2dct[go_dst]['relationship_rev'][reltype] = set()
go2dct[go_dst]['relationship_rev'][reltype].add(go_src)
    # pylint: disable=too-many-branches
    def _init_go2dct(self):
        """Create EXPECTED RESULTS stored in a dict of GO fields.

        Parses self.obo line-by-line into a reference representation that
        tests compare against the library's own parse:
          go2dct:     {GO id -> {field -> set of values}} for [Term] records
          typedefdct: {typedef id -> {field -> value}}    for [Typedef] records
          flds:       all field names seen in any [Term] record
        """
        go2dct = {}
        # pylint: disable=unsubscriptable-object
        typedefdct = {}
        flds = set()
        with open(self.obo) as ifstrm:
            rec = {}  # current [Term] record being accumulated
            rec_typedef = None  # current [Typedef] record; None outside one
            for line in ifstrm:
                line = line.rstrip()
                # End of GO record (records are separated by blank lines)
                if not line:
                    if rec: # and option is None or option in rec:
                        # 'Definition' is specified in obo as 'def' and in Python by 'defn'
                        if 'def' in rec:
                            rec['defn'] = rec['def']
                        go2dct[rec['GO']] = rec
                        rec = {}
                    if rec_typedef is not None:
                        # Example rec_typedef:
                        #     {'xref': 'RO:0002212',
                        #      'name': 'negatively regulates',
                        #      'namespace': 'external',
                        #      'transitive_over': 'part_of',
                        #      'is_a': 'regulates',
                        #      'id': 'negatively_regulates'}
                        typedefdct[rec_typedef['item_id']] = rec_typedef
                        rec_typedef = None
                elif line[:9] == "[Typedef]":
                    rec_typedef = {}
                else:
                    # cmpfld splits "field: value" lines into (field, value)
                    mtch = self.cmpfld.match(line)
                    if mtch:
                        fld = mtch.group(1)
                        val = mtch.group(2)
                        # Beginning of GO record
                        if fld == "id":
                            assert not rec, "NOW({}) WAS({})".format(line, rec)
                            rec = {'GO':val, 'item_id':val}
                            flds.add('item_id')
                        # Middle of GO record: every field maps to a SET of values
                        elif rec:
                            flds.add(fld)
                            if fld not in rec:
                                rec[fld] = set()
                            # Strip comment if it exists (" ! " introduces a comment)
                            loc = val.find(' ! ')
                            if loc != -1:
                                val = val[:loc]
                            # Add value
                            rec[fld].add(val)
                        # Typedef records store one scalar value per field
                        if rec_typedef is not None:
                            if fld == 'id':
                                fld = 'item_id'
                            rec_typedef[fld] = val
        # A record not followed by a blank line (e.g. at EOF) still needs 'defn'
        for dct in go2dct.values():
            if 'def' in dct:
                dct['defn'] = dct['def']
        self.add_relationship_rev(go2dct)
        self.add_is_a_rev(go2dct)
        return {'go2dct':go2dct, 'typedefdct':typedefdct, 'flds':flds}
# Copyright (C) 2010-2018, <NAME>, <NAME>, All rights reserved.
| 11,107 |
#include <cmath>
#include "ENUCoords.h"
// Euclidean (3D) distance between this point and enu, in the local ENU frame.
double ENUCoords::distanceTo(ENUCoords enu){
	double dE = E - enu.E;
	double dN = N - enu.N;
	double dU = U - enu.U;
	return sqrt(dE*dE + dN*dN + dU*dU);
}
// ---------------------------------------------------------------
// ENU Cartesian to ECEF coordinates
// ---------------------------------------------------------------
// Rotate the local ENU vector into the ECEF frame using the base station's
// geodetic longitude/latitude, then translate by the base ECEF position.
ECEFCoords ENUCoords::toECEFCoords(ECEFCoords base){
	GeoCoords base_geo = base.toGeoCoords();
	double lon = Utils::deg2rad(base_geo.longitude);
	double lat = Utils::deg2rad(base_geo.latitude);
	double sl = sin(lon);
	double cl = cos(lon);
	double sp = sin(lat);
	double cp = cos(lat);
	double e = this->E;
	double n = this->N;
	double u = this->U;
	ECEFCoords xyz;
	// Standard ENU -> ECEF rotation (transpose of the ECEF -> ENU matrix).
	xyz.X = -e*sl - n*cl*sp + u*cl*cp + base.X;
	xyz.Y =  e*cl - n*sl*sp + u*sl*cp + base.Y;
	xyz.Z =         n*cp    + u*sp    + base.Z;
	return xyz;
}
// Vertical angle (radians) from this point toward p:
// atan2 of the height difference over the horizontal distance.
double ENUCoords::elevationTo(ENUCoords p){
	double dE = p.E - E;
	double dN = p.N - N;
	double dU = p.U - U;
	return atan2(dU, sqrt(dE*dE + dN*dN));
}
// Bearing (radians) from this point toward p, measured from the N axis toward E.
double ENUCoords::azimuthTo(ENUCoords p){
	return atan2(p.E - E, p.N - N);
}
// 3D scalar product of the two ENU vectors.
double ENUCoords::dot(ENUCoords p){
	return E*p.E + N*p.N + U*p.U;
}
double ENUCoords::norm2D(){
return sqrt(this->E*this->E + this->N*this->N);
}
double ENUCoords::norm3D(){
return sqrt(this->dot(*this));
}
| 808 |
{
"name": "iex",
"version": "1.0.0",
"dependencies": {
"socket.io": "1.7.3"
},
"description": "Data Downloader using the public IEX data feed as a data source",
"devDependencies": {
"babel-cli": "*",
"babel-core": "*",
"babel-eslint": "*",
"babel-plugin-transform-flow-strip-types": "*",
"babel-preset-es2015-node5": "*",
"babel-preset-stage-3": "*",
"babel-register": "*",
"eslint": ">=2.0.0",
"eslint-plugin-flowtype": "*",
"flow-bin": "*"
},
"main": "iex.js",
"scripts": {
"flow": "flow; test $? -eq 0 -o $? -eq 2",
"run": "babel-node iex.js",
"transpile": "rm ../../dist/iex_dd -rf && babel --out-dir ../../dist/iex_dd iex.js && babel --out-dir ../../dist/iex_dd/src ./src && cp ./node_modules ../../dist/iex_dd -r",
"test": "echo \"Error: no test specified\" && exit 1",
"start": "sh run.sh"
},
"author": "<NAME> <<EMAIL>>",
"license": "MIT"
}
| 440 |
// extern/audaspace/src/fx/ConvolverSound.cpp
/*******************************************************************************
* Copyright 2015-2016 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
#include "fx/ConvolverSound.h"
#include "fx/ConvolverReader.h"
#include "Exception.h"
#include <cstring>
AUD_NAMESPACE_BEGIN
// Convenience constructor: delegates to the full constructor with a freshly
// created FFTPlan(0.0) — presumably selecting a default plan size; TODO
// confirm against FFTPlan's documentation.
ConvolverSound::ConvolverSound(std::shared_ptr<ISound> sound, std::shared_ptr<ImpulseResponse> impulseResponse, std::shared_ptr<ThreadPool> threadPool) :
	ConvolverSound(sound, impulseResponse, threadPool, std::make_shared<FFTPlan>(0.0))
{
}
// Full constructor: stores the wrapped sound, the impulse response to
// convolve with, the thread pool used for processing, and the FFT plan.
ConvolverSound::ConvolverSound(std::shared_ptr<ISound> sound, std::shared_ptr<ImpulseResponse> impulseResponse, std::shared_ptr<ThreadPool> threadPool, std::shared_ptr<FFTPlan> plan) :
	m_sound(sound), m_impulseResponse(impulseResponse), m_threadPool(threadPool), m_plan(plan)
{
}
// Creates a reader that convolves the wrapped sound's reader with the
// current impulse response, using the shared thread pool and FFT plan.
std::shared_ptr<IReader> ConvolverSound::createReader()
{
	return std::make_shared<ConvolverReader>(m_sound->createReader(), m_impulseResponse, m_threadPool, m_plan);
}
// Returns the impulse response currently applied by this sound.
std::shared_ptr<ImpulseResponse> ConvolverSound::getImpulseResponse()
{
	return m_impulseResponse;
}
// Replaces the impulse response. Note: readers already created by
// createReader() keep the impulse response they were constructed with;
// only subsequently created readers see the new one.
void ConvolverSound::setImpulseResponse(std::shared_ptr<ImpulseResponse> impulseResponse)
{
	m_impulseResponse = impulseResponse;
}
AUD_NAMESPACE_END
| 572 |
// Source: deeplearning4j
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
package org.nd4j.linalg.indexing.conditions;
import org.nd4j.linalg.factory.Nd4j;
public class Conditions {

    private Conditions() {}

    /**
     * Numeric indices of the condition ops as understood by the native backend.
     * {@code AGGREGATE} covers indirect conditions (and/or/not) that have no
     * native index of their own.
     */
    public enum ConditionMode {
        EPSILON_EQUALS(0),
        EPSILON_NOT_EQUALS(1),
        LESS_THAN(2),
        GREATER_THAN(3),
        LESS_THAN_OR_EQUAL(4),
        GREATER_THAN_OR_EQUAL(5),
        ABS_LESS_THAN(6),
        ABS_GREATER_THAN(7),
        IS_INFINITE(8),
        IS_NAN(9),
        ABS_EQUALS(10),
        NOT_EQUALS(11),
        ABS_GREATER_OR_EQUAL(12),
        ABS_LESS_THAN_OR_EQUAL(13),
        IS_FINITE(14),
        NOT_FINITE(15),
        AGGREGATE(-1); // this is an aggregate enum for or, and, not, and other indirect conditions that depend on others

        public final int index;

        ConditionMode(int index) {
            this.index = index;
        }

        /**
         * Resolve a raw native index back to its {@link ConditionMode}.
         *
         * @param index native condition index
         * @return the matching mode
         * @throws IllegalArgumentException if the index is unknown
         */
        public static ConditionMode fromNumber(int index) {
            switch(index) {
                case 0: return EPSILON_EQUALS;
                case 1: return EPSILON_NOT_EQUALS;
                case 2: return LESS_THAN;
                case 3: return GREATER_THAN;
                case 4: return LESS_THAN_OR_EQUAL;
                case 5: return GREATER_THAN_OR_EQUAL;
                case 6: return ABS_LESS_THAN;
                case 7: return ABS_GREATER_THAN;
                case 8: return IS_INFINITE;
                case 9: return IS_NAN;
                case 10: return ABS_EQUALS;
                case 11: return NOT_EQUALS;
                case 12: return ABS_GREATER_OR_EQUAL;
                case 13: return ABS_LESS_THAN_OR_EQUAL;
                case 14: return IS_FINITE;
                case 15: return NOT_FINITE;
                case -1: return AGGREGATE;
                default: throw new IllegalArgumentException("No condition number found for " + index);
            }
        }
    }

    /**
     * Condition that checks whether a value is infinite.
     *
     * @return the condition
     */
    public static Condition isInfinite() {
        return new IsInfinite();
    }

    /**
     * Condition that checks whether a value is NaN.
     *
     * @return the condition
     */
    public static Condition isNan() {
        return new IsNaN();
    }

    /**
     * Condition that checks whether a value is finite.
     *
     * @return the condition
     */
    public static Condition isFinite() {
        return new IsFinite();
    }

    /**
     * Condition that checks whether a value is NOT finite.
     *
     * @return the condition
     */
    public static Condition notFinite() {
        return new NotFinite();
    }

    /**
     * Condition that checks whether two values are not equal within epsilon.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...);
     * the comparison value is irrelevant there and defaults to 0.0.
     *
     * @return the condition
     */
    public static Condition epsNotEquals() {
        return epsNotEquals(0.0);
    }

    /**
     * Condition that checks whether a value is not equal to {@code value} within epsilon.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition epsNotEquals(Number value) {
        return new EpsilonNotEquals(value);
    }

    /**
     * Condition that checks whether two values are equal within epsilon.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...).
     *
     * @return the condition
     */
    public static Condition epsEquals() {
        return epsEquals(0.0);
    }

    /**
     * Condition that checks whether a value equals {@code value} within
     * {@link Nd4j#EPS_THRESHOLD}.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition epsEquals(Number value) {
        return epsEquals(value, Nd4j.EPS_THRESHOLD);
    }

    /**
     * Condition that checks whether a value equals {@code value} within {@code epsilon}.
     *
     * @param value   the comparison value
     * @param epsilon the tolerance
     * @return the condition
     */
    public static Condition epsEquals(Number value, Number epsilon) {
        return new EpsilonEquals(value, epsilon.doubleValue());
    }

    /**
     * Condition that checks whether two values are equal.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...).
     *
     * @return the condition
     */
    public static Condition equals() {
        return equals(0.0);
    }

    /**
     * Condition that checks whether a value equals {@code value}.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition equals(Number value) {
        return new EqualsCondition(value);
    }

    /**
     * Condition that checks whether two values are not equal.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...).
     *
     * @return the condition
     */
    public static Condition notEquals() {
        return notEquals(0.0);
    }

    /**
     * Condition that checks whether a value is not equal to {@code value}.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition notEquals(Number value) {
        return new NotEqualsCondition(value);
    }

    /**
     * Condition that checks whether value X is greater than value Y.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...).
     *
     * @return the condition
     */
    public static Condition greaterThan() {
        return greaterThan(0.0);
    }

    /**
     * Condition that checks whether a value is greater than {@code value}.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition greaterThan(Number value) {
        return new GreaterThan(value);
    }

    /**
     * Condition that checks whether value X is less than value Y.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...).
     *
     * @return the condition
     */
    public static Condition lessThan() {
        return lessThan(0.0);
    }

    /**
     * Condition that checks whether a value is less than {@code value}.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition lessThan(Number value) {
        return new LessThan(value);
    }

    /**
     * Condition that checks whether value X is less than or equal to value Y.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...).
     *
     * @return the condition
     */
    public static Condition lessThanOrEqual() {
        return lessThanOrEqual(0.0);
    }

    /**
     * Condition that checks whether a value is less than or equal to {@code value}.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition lessThanOrEqual(Number value) {
        return new LessThanOrEqual(value);
    }

    /**
     * Condition that checks whether value X is greater than or equal to value Y.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...).
     *
     * @return the condition
     */
    public static Condition greaterThanOrEqual() {
        return greaterThanOrEqual(0.0);
    }

    /**
     * Condition that checks whether a value is greater than or equal to {@code value}.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition greaterThanOrEqual(Number value) {
        return new GreaterThanOrEqual(value);
    }

    /**
     * Condition that checks whether |X| is greater than or equal to |Y|.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...).
     *
     * @return the condition
     */
    public static Condition absGreaterThanOrEqual() {
        return absGreaterThanOrEqual(0.0);
    }

    /**
     * Condition that checks whether the absolute value is greater than or equal to {@code value}.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition absGreaterThanOrEqual(Number value) {
        return new AbsValueGreaterOrEqualsThan(value);
    }

    /**
     * Condition that checks whether |X| is less than or equal to |Y|.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...).
     *
     * @return the condition
     */
    public static Condition absLessThanOrEqual() {
        return absLessThanOrEqual(0.0);
    }

    /**
     * Condition that checks whether the absolute value is less than or equal to {@code value}.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition absLessThanOrEqual(Number value) {
        return new AbsValueLessOrEqualsThan(value);
    }

    /**
     * Condition that checks whether |X| is greater than |Y|.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...).
     *
     * @return the condition
     */
    public static Condition absGreaterThan() {
        return absGreaterThan(0.0);
    }

    /**
     * Condition that checks whether the absolute value is greater than {@code value}.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition absGreaterThan(Number value) {
        return new AbsValueGreaterThan(value);
    }

    /**
     * Condition that checks whether |X| is less than |Y|.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...).
     *
     * @return the condition
     */
    public static Condition absLessThan() {
        return absLessThan(0.0);
    }

    /**
     * Condition that checks whether the absolute value of x equals 0.0.
     *
     * <p>PLEASE NOTE: intended only for pairwise methods, i.e. INDArray.match(...).
     *
     * @return the condition
     */
    public static Condition absEquals() {
        return absEquals(0.0);
    }

    /**
     * Condition that checks whether the absolute value of x equals {@code value}.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition absEquals(double value) {
        return new AbsoluteEquals(value);
    }

    /**
     * Condition that checks whether the absolute value is less than {@code value}.
     *
     * @param value the comparison value
     * @return the condition
     */
    public static Condition absLessThan(Number value) {
        return new AbsValueLessThan(value);
    }

    /**
     * Build a condition from its native index, using 0.0 as the threshold.
     *
     * @param mode native condition index, see {@link ConditionMode}
     * @return the condition
     */
    public static Condition fromInt(int mode) {
        return fromInt(mode, 0.0);
    }

    /**
     * Build a condition from its native index and threshold value.
     *
     * @param mode  native condition index, see {@link ConditionMode}
     * @param value threshold (ignored by value-less conditions); null is treated as 0.0
     * @return the condition
     * @throws IllegalArgumentException if the mode is unknown
     */
    public static Condition fromInt(int mode, Double value) {
        if (value == null) {
            value = 0.0;
        }
        switch (ConditionMode.fromNumber(mode)) {
            case IS_FINITE:
                return Conditions.isFinite();
            case IS_NAN:
                // BUGFIX: previously returned isInfinite() for the IS_NAN mode.
                return Conditions.isNan();
            case LESS_THAN:
                return Conditions.lessThan(value);
            case ABS_EQUALS:
                return Conditions.absEquals(value);
            case NOT_EQUALS:
                // BUGFIX: previously discarded the threshold value.
                return Conditions.notEquals(value);
            case NOT_FINITE:
                return Conditions.notFinite();
            case IS_INFINITE:
                return Conditions.isInfinite();
            case GREATER_THAN:
                return Conditions.greaterThan(value);
            case ABS_LESS_THAN:
                return Conditions.absLessThan(value);
            case EPSILON_EQUALS:
                return Conditions.epsEquals(value);
            case ABS_GREATER_THAN:
                return Conditions.absGreaterThan(value);
            case EPSILON_NOT_EQUALS:
                return Conditions.epsNotEquals(value);
            case LESS_THAN_OR_EQUAL:
                return Conditions.lessThanOrEqual(value);
            case ABS_GREATER_OR_EQUAL:
                // BUGFIX: previously returned the strict absGreaterThan(value).
                return Conditions.absGreaterThanOrEqual(value);
            case GREATER_THAN_OR_EQUAL:
                return Conditions.greaterThanOrEqual(value);
            case ABS_LESS_THAN_OR_EQUAL:
                return Conditions.absLessThanOrEqual(value);
            default:
                throw new IllegalArgumentException("Illegal value specified " + mode);
        }
    }
}
| 5,570 |
from os.path import join
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='', top_path=None):
    """Build configuration for scipy.odr (wrapper around Fortran ODRPACK)."""
    from numpy.distutils.misc_util import Configuration
    from scipy._build_utils.system_info import get_info
    from scipy._build_utils import (uses_blas64, blas_ilp64_pre_build_hook,
                                    combine_dict)

    config = Configuration('odr', parent_package, top_path)

    # Fortran sources of the bundled ODRPACK library.
    odrpack_src = [join('odrpack', fname)
                   for fname in ('d_odr.f', 'd_mprec.f', 'dlunoc.f', 'd_lpk.f')]

    # Select 64-bit-integer BLAS when requested; the pre-build hook adapts
    # the Fortran build accordingly.
    if uses_blas64():
        blas_info = get_info('blas_ilp64_opt')
        pre_build_hook = blas_ilp64_pre_build_hook(blas_info)
    else:
        blas_info = get_info('blas_opt')
        pre_build_hook = None

    config.add_library('odrpack', sources=odrpack_src,
                       _pre_build_hook=pre_build_hook)

    ext_cfg = combine_dict(blas_info, numpy_nodepr_api,
                           libraries=['odrpack'],
                           include_dirs=['.'])
    ext = config.add_extension('__odrpack',
                               sources=['__odrpack.c'],
                               depends=(['odrpack.h'] + odrpack_src),
                               **ext_cfg)
    ext._pre_build_hook = pre_build_hook

    config.add_data_dir('tests')
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone: python setup.py build
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 724 |
import FWCore.ParameterSet.Config as cms
# EDAnalyzer configuration: reads ECAL digis/rechits and produces per-crystal
# graphs around hits passing the amplitude threshold, honoring the channel,
# EB-id, and FED masks below.
ecalMipGraphs = cms.EDAnalyzer("EcalMipGraphs",
    # parameter for the amplitude threshold
    amplitudeThreshold = cms.untracked.double(0.5),
    EBDigiCollection = cms.InputTag("ecalEBunpacker","ebDigis"),
    EEDigiCollection = cms.InputTag("ecalEBunpacker","eeDigis"),
    EcalRecHitCollectionEB = cms.InputTag("ecalRecHit","EcalRecHitsEB"),
    EcalRecHitCollectionEE = cms.InputTag("ecalRecHit","EcalRecHitsEE"),
    headerProducer = cms.InputTag("ecalEBunpacker"),
    #EcalUncalibratedRecHitCollection = cms.InputTag("ecalUncalibHit","EcalUncalibRecHitsEB"),
    # masked EBids (-1 means no EB ids are masked)
    maskedEBs = cms.untracked.vstring('-1'),
    # masked FEDs (-1 means no FEDs are masked)
    maskedFEDs = cms.untracked.vint32(-1),
    # use hash index to mask channels
    # add a simple description of hashIndex (hhahhahhh...)
    maskedChannels = cms.untracked.vint32(),
    # parameter for fixed crystals mode (use hashedIndices)
    seedCrys = cms.untracked.vint32(),
    # parameter for size of the square matrix, i.e.,
    # should the seed be at the center of a 3x3 matrix, a 5x5, etc.
    # must be an odd number (default is 3)
    side = cms.untracked.int32(3),
    minimumTimingAmplitude = cms.untracked.double(0.100)
)
| 483 |
/*
* This file is part of Cubic Chunks Mod, licensed under the MIT License (MIT).
*
* Copyright (c) 2015-2019 OpenCubicChunks
* Copyright (c) 2015-2019 contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package io.github.opencubicchunks.cubicchunks.core.world.column;
import com.google.common.collect.Lists;
import io.github.opencubicchunks.cubicchunks.core.CubicChunksConfig;
import io.github.opencubicchunks.cubicchunks.core.util.AddressTools;
import io.github.opencubicchunks.cubicchunks.core.world.cube.Cube;
import mcp.MethodsReturnNonnullByDefault;
import net.minecraft.util.math.BlockPos;
import net.minecraft.world.EnumSkyBlock;
import net.minecraft.world.chunk.storage.ExtendedBlockStorage;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.annotation.ParametersAreNonnullByDefault;
/**
* Stores cubes for columns
*/
@ParametersAreNonnullByDefault
@MethodsReturnNonnullByDefault
public class CubeMap implements Iterable<Cube> {

    /** Cubes sorted by Y coordinate; kept ordered by {@link #binarySearch(int)}. */
    @Nonnull private final List<Cube> cubes = new ArrayList<>();
    /** Cached array of block storages that need ticking; rebuilt lazily. */
    @Nonnull private ExtendedBlockStorage[] toBlockTick = new ExtendedBlockStorage[0];

    /**
     * Removes the cube at {@code cubeY}
     *
     * @param cubeY cube y position
     *
     * @return the removed cube if it existed, otherwise <code>null</code>
     */
    @Nullable public Cube remove(int cubeY) {
        int index = binarySearch(cubeY);
        return index < cubes.size() && cubes.get(index).getY() == cubeY ? cubes.remove(index) : null;
    }

    /**
     * Adds a cube, keeping the list sorted by Y.
     *
     * @param cube the cube to add
     *
     * @throws IllegalArgumentException if a cube already exists at that Y
     */
    public void put(Cube cube) {
        int searchIndex = binarySearch(cube.getY());
        if (this.contains(cube.getY(), searchIndex)) {
            throw new IllegalArgumentException("Cube at " + cube.getY() + " already exists!");
        }
        cubes.add(searchIndex, cube);
    }

    /**
     * Iterate over all cubes between {@code startY} and {@code endY} in this storage in order. If
     * {@code startY < endY}, order is bottom to top, otherwise order is top to bottom.
     *
     * @param startY initial cube y position
     * @param endY last cube y position
     *
     * @return an iterator over the cubes
     */
    public Iterable<Cube> cubes(int startY, int endY) {
        boolean reverse = false;
        if (startY > endY) {
            int i = startY;
            startY = endY;
            endY = i;
            reverse = true;
        }
        int bottom = binarySearch(startY);
        int top = binarySearch(endY + 1); // subList()'s second arg is exclusive so we need to add 1
        if (bottom < cubes.size() && top <= cubes.size()) {
            return reverse ? Lists.reverse(cubes.subList(bottom, top)) : cubes.subList(bottom, top);
        } else {
            return Collections.emptyList();
        }
    }

    /**
     * Check if the target cube is stored here
     *
     * @param cubeY the y coordinate of the cube
     * @param searchIndex the index to search at (got form {@link #binarySearch(int)})
     *
     * @return <code>true</code> if the cube is contained here, <code>false</code> otherwise
     */
    private boolean contains(int cubeY, int searchIndex) {
        return searchIndex < cubes.size() && cubes.get(searchIndex).getY() == cubeY;
    }

    /**
     * Iterate over all cubes in this storage
     *
     * @return the iterator
     */
    @Override public Iterator<Cube> iterator() {
        return cubes.iterator();
    }

    /**
     * Retrieve a collection of all cubes within this storage. The collection is non-modifiable
     *
     * @return the collection
     */
    public Collection<Cube> all() {
        return cubes;
    }

    /**
     * Check if this storage is empty
     *
     * @return <code>true</code> if there are no cubes in this storage, <code>false</code> otherwise
     */
    public boolean isEmpty() {
        return cubes.isEmpty();
    }

    /**
     * @return An array of EBSs from cubes that need ticking; the cached array is
     *         rebuilt only when the set of tickable storages has changed.
     */
    public ExtendedBlockStorage[] getStoragesToTick() {
        if (!isToTickValid()) {
            // First pass: count tickable storages so the array can be sized exactly.
            int count = 0;
            for (Cube cube : cubes) {
                if (cube.getStorage() != null && cube.getTickets().shouldTick()) {
                    count++;
                }
            }
            toBlockTick = new ExtendedBlockStorage[count];
            // Second pass: fill the array in cube order.
            count = 0;
            for (Cube cube : cubes) {
                if (cube.getStorage() != null && cube.getTickets().shouldTick()) {
                    toBlockTick[count++] = cube.getStorage();
                }
            }
        }
        return toBlockTick;
    }

    /** @return true if the cached {@link #toBlockTick} array still matches the tickable storages. */
    private boolean isToTickValid() {
        int index = 0;
        for (Cube cube : cubes) {
            if (cube.getStorage() != null && cube.getTickets().shouldTick()) {
                if (index >= toBlockTick.length) {
                    return false;
                }
                if (toBlockTick[index++] != cube.getStorage()) {
                    return false;
                }
            }
        }
        return index == toBlockTick.length; // did we check everything there was in toBlockTick?
    }

    /**
     * Binary search for the index of the specified cube. If the cube is not present, returns the index at which it
     * should be inserted.
     *
     * @param cubeY cube y position
     *
     * @return the target index
     */
    private int binarySearch(int cubeY) {
        int start = 0;
        int end = cubes.size() - 1;
        int mid;
        while (start <= end) {
            mid = start + end >>> 1;
            int at = cubes.get(mid).getY();
            if (at < cubeY) { // we are below the target;
                start = mid + 1;
            } else if (at > cubeY) {
                end = mid - 1; // we are above the target
            } else {
                return mid; // found target!
            }
        }
        return start; // not found :(
    }

    /** Index of the cube to relight next; wraps around the cube list. */
    private int relightCubeIdx = 0;
    /** Linear block counter in [0, 4096); advanced once per full pass over the cubes. */
    private int relightCubeBlockIdx = (int) (Math.random() * 4096);

    /**
     * Schedules a configurable number of sky/block light checks per tick, cycling
     * over all cubes of this column and over all 4096 block positions per cube.
     */
    public void enqueueRelightChecks() {
        if (cubes.isEmpty()) {
            return;
        }
        int count = CubicChunksConfig.relightChecksPerTickPerColumn;
        for (int i = 0; i < count; i++) {
            if (relightCubeIdx >= cubes.size()) {
                relightCubeIdx = 0;
                relightCubeBlockIdx++;
                if (relightCubeBlockIdx >= 4096) {
                    relightCubeBlockIdx = 0;
                }
            }
            // taking the bits in reverse order means we will still visit every possible position, but jumping around the cube
            // order of coords: 0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15
            int reversedBits = Integer.reverse(relightCubeBlockIdx) >>> (32 - 12);
            assert reversedBits < 4096 && reversedBits >= 0;
            final Cube cube = cubes.get(relightCubeIdx);
            // BUGFIX: use the bit-reversed index for the local coordinates. The
            // original computed reversedBits but then extracted x/y/z from the
            // linear counter, defeating the jump-around order described above.
            final int x = AddressTools.getLocalX(reversedBits);
            final int y = AddressTools.getLocalY(reversedBits);
            final int z = AddressTools.getLocalZ(reversedBits);
            final BlockPos min = cube.getCoords().getMinBlockPos();
            cube.getWorld().checkLight(min.add(x, y, z));
            relightCubeIdx++;
        }
    }
}
| 3,494 |
// (repository metadata marker removed)
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.runners.dataflow.worker;
import org.apache.beam.model.pipeline.v1.Endpoints.ApiServiceDescriptor;
import org.apache.beam.runners.fnexecution.control.FnApiControlClient;
import org.apache.beam.runners.fnexecution.data.GrpcDataService;
import org.apache.beam.runners.fnexecution.state.GrpcStateService;
import org.apache.beam.sdk.fn.server.GrpcFnServer;
import org.checkerframework.checker.nullness.qual.Nullable;
/** Registry used to manage all the connections (Control, Data, State) from SdkHarness. */
public interface SdkHarnessRegistry {

  /**
   * Registers the {@link FnApiControlClient} so that work can be allocated to the client.
   *
   * @param controlClient the control client to make available for work assignment
   */
  void registerWorkerClient(FnApiControlClient controlClient);

  /**
   * Unregisters the {@link FnApiControlClient} to stop allocating work to the client.
   *
   * @param controlClient the control client to withdraw from work assignment
   */
  void unregisterWorkerClient(FnApiControlClient controlClient);

  /** Returns true if all of the registered SDK harnesses are healthy. */
  boolean sdkHarnessesAreHealthy();

  /**
   * Finds an available worker and assigns work to it, or waits until a worker becomes available.
   *
   * @return the worker that was assigned the work
   */
  SdkWorkerHarness getAvailableWorkerAndAssignWork();

  /**
   * Marks the work previously assigned to {@code worker} as complete, returning the worker to the
   * available pool.
   *
   * @param worker the worker whose assigned work has finished
   */
  void completeWork(SdkWorkerHarness worker);

  /** Returns the descriptor of the State service, or null if no State service is running. */
  @Nullable
  ApiServiceDescriptor beamFnStateApiServiceDescriptor();

  /** Returns the descriptor of the Data service, or null if no Data service is running. */
  @Nullable
  ApiServiceDescriptor beamFnDataApiServiceDescriptor();

  /** Holds a control client together with its associated worker id and Data/State servers. */
  interface SdkWorkerHarness {

    // NOTE: "public" is implicit on interface members and therefore omitted below,
    // per standard Java style (JLS 9.4).

    @Nullable
    FnApiControlClient getControlClientHandler();

    @Nullable
    String getWorkerId();

    @Nullable
    GrpcFnServer<GrpcDataService> getGrpcDataFnServer();

    @Nullable
    GrpcFnServer<GrpcStateService> getGrpcStateFnServer();
  }
}
| 744 |
4,538 | /*
* Copyright (C) 2015-2018 Alibaba Group Holding Limited
*/
#include <stdlib.h>
const char *iotx_ca_crt = {
"-----BEGIN CERTIFICATE-----\r\n"
"MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG\r\n"
"A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv\r\n"
"b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw\r\n"
"MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i\r\n"
"YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT\r\n"
"aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ\r\n"
"jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp\r\n"
"xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp\r\n"
"1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG\r\n"
"snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ\r\n"
"U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8\r\n"
"9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E\r\n"
"BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B\r\n"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME\r\n"
"HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==\r\n"
"-----END CERTIFICATE-----"
};
| 950 |
341 | {
"name": "vue-qart",
"description": "the Vue 2.x component of QArt.js",
"version": "2.2.0",
"author": "<EMAIL>",
"repository": "https://github.com/superman66/vue-qart",
"main": "dist/vue-qart.js",
"scripts": {
"dev": "cross-env NODE_ENV=development webpack-dev-server --open --inline --hot",
"prepages": "cross-env NODE_ENV=production webpack --progress",
"pages": "node demo/gh-pages.js",
"build": "rm -rf dist/ && cross-env NODE_ENV=production webpack --config webpack.config.build.js --progress --hide-modules",
"release": "nrm use npm && npm publish"
},
"dependencies": {
"qartjs": "1.1.1"
},
"peerDependencies": {
"vue": "^2.5.0"
},
"files": [
"dist/"
],
"devDependencies": {
"babel-core": "^6.0.0",
"babel-loader": "^6.0.0",
"babel-preset-es2015": "^6.0.0",
"cross-env": "^3.0.0",
"css-loader": "^0.25.0",
"cz-conventional-changelog": "^2.1.0",
"file-loader": "^0.9.0",
"gh-pages": "^1.1.0",
"vue": "^2.5.0",
"vue-loader": "^10.0.0",
"vue-template-compiler": "^2.1.0",
"webpack": "^2.2.0",
"webpack-dev-server": "^2.2.0",
"html-webpack-plugin": "^2.30.1",
"webpack-merge": "^2.6.1"
},
"config": {
"commitizen": {
"path": "./node_modules/cz-conventional-changelog"
}
}
}
| 644 |
412 | /*******************************************************************\
Module: A special command line object for Bruce's C Compiler
Author: <NAME>
Date: July 2016
\*******************************************************************/
/// \file
/// A special command line object for Bruce's C Compiler.
/// Author: Michael Tautschnig. Date: July 2016.
#ifndef CPROVER_GOTO_CC_BCC_CMDLINE_H
#define CPROVER_GOTO_CC_BCC_CMDLINE_H
#include "goto_cc_cmdline.h"
class bcc_cmdlinet:public goto_cc_cmdlinet
{
public:
  // Parses a bcc-style (Bruce's C Compiler) argc/argv command line into
  // this object's option table.
  // NOTE(review): intended as an override of the base-class parse in
  // goto_cc_cmdlinet -- confirm the base declares the identical
  // parse(int, const char **) signature so this actually overrides it.
  // overload
  virtual bool parse(int, const char**);

  // Default constructor; no per-instance setup is required.
  bcc_cmdlinet()
  {
  }
};
#endif // CPROVER_GOTO_CC_BCC_CMDLINE_H
| 215 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.