focal_method | test_case |
---|---|
public static String toJson(MetadataUpdate metadataUpdate) {
return toJson(metadataUpdate, false);
} | @Test
public void testSetSnapshotRefTagToJsonAllFields() {
long snapshotId = 1L;
SnapshotRefType type = SnapshotRefType.TAG;
String refName = "hank";
Integer minSnapshotsToKeep = null;
Long maxSnapshotAgeMs = null;
Long maxRefAgeMs = 1L;
String expected =
"{\"action\":\"set-snapshot-ref\",\"ref-name\":\"hank\","
+ "\"snapshot-id\":1,\"type\":\"tag\",\"max-ref-age-ms\":1}";
MetadataUpdate update =
new MetadataUpdate.SetSnapshotRef(
refName, snapshotId, type, minSnapshotsToKeep, maxSnapshotAgeMs, maxRefAgeMs);
String actual = MetadataUpdateParser.toJson(update);
assertThat(actual)
.as("Set snapshot ref should serialize to the correct JSON value for tag with all fields")
.isEqualTo(expected);
} |
public MonitorBuilder address(String address) {
this.address = address;
return getThis();
} | @Test
void address() {
MonitorBuilder builder = MonitorBuilder.newBuilder();
builder.address("address");
Assertions.assertEquals("address", builder.build().getAddress());
} |
@Override
protected void close() {
// FIXME Kafka needs to add a timeout parameter here for us to properly obey the timeout
// passed in
try {
task.stop();
} catch (Throwable t) {
log.warn("Could not stop task", t);
}
taskStopped = true;
Utils.closeQuietly(consumer, "consumer");
Utils.closeQuietly(headerConverter, "header converter");
/*
Setting partition count explicitly to 0 to handle the case when the task fails,
which would cause its consumer to leave the group.
This would cause onPartitionsRevoked to be invoked in the rebalance listener, but not onPartitionsAssigned,
so the metrics for the task (which are still available for failed tasks until they are explicitly revoked
from the worker) would become inaccurate.
*/
sinkTaskMetricsGroup.recordPartitionCount(0);
} | @Test
public void testMetricsGroup() {
SinkTaskMetricsGroup group = new SinkTaskMetricsGroup(taskId, metrics);
SinkTaskMetricsGroup group1 = new SinkTaskMetricsGroup(taskId1, metrics);
for (int i = 0; i != 10; ++i) {
group.recordRead(1);
group.recordSend(2);
group.recordPut(3);
group.recordPartitionCount(4);
group.recordOffsetSequenceNumber(5);
}
Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
committedOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
group.recordCommittedOffsets(committedOffsets);
Map<TopicPartition, OffsetAndMetadata> consumedOffsets = new HashMap<>();
consumedOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 10));
group.recordConsumedOffsets(consumedOffsets);
for (int i = 0; i != 20; ++i) {
group1.recordRead(1);
group1.recordSend(2);
group1.recordPut(30);
group1.recordPartitionCount(40);
group1.recordOffsetSequenceNumber(50);
}
committedOffsets = new HashMap<>();
committedOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET + 2));
committedOffsets.put(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET + 3));
group1.recordCommittedOffsets(committedOffsets);
consumedOffsets = new HashMap<>();
consumedOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET + 20));
consumedOffsets.put(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET + 30));
group1.recordConsumedOffsets(consumedOffsets);
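// the rate assertions below are consistent with a 30-second metrics sample window (assumed): 10 reads / 30s ≈ 0.333, 20 reads / 30s ≈ 0.667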
assertEquals(0.333, metrics.currentMetricValueAsDouble(group.metricGroup(), "sink-record-read-rate"), 0.001d);
assertEquals(0.667, metrics.currentMetricValueAsDouble(group.metricGroup(), "sink-record-send-rate"), 0.001d);
assertEquals(9, metrics.currentMetricValueAsDouble(group.metricGroup(), "sink-record-active-count"), 0.001d);
assertEquals(4, metrics.currentMetricValueAsDouble(group.metricGroup(), "partition-count"), 0.001d);
assertEquals(5, metrics.currentMetricValueAsDouble(group.metricGroup(), "offset-commit-seq-no"), 0.001d);
assertEquals(3, metrics.currentMetricValueAsDouble(group.metricGroup(), "put-batch-max-time-ms"), 0.001d);
// Close the group
group.close();
for (MetricName metricName : group.metricGroup().metrics().metrics().keySet()) {
// Metrics for this group should no longer exist
assertFalse(group.metricGroup().groupId().includes(metricName));
}
// Sensors for this group should no longer exist
assertNull(group.metricGroup().metrics().getSensor("source-record-poll"));
assertNull(group.metricGroup().metrics().getSensor("source-record-write"));
assertNull(group.metricGroup().metrics().getSensor("poll-batch-time"));
assertEquals(0.667, metrics.currentMetricValueAsDouble(group1.metricGroup(), "sink-record-read-rate"), 0.001d);
assertEquals(1.333, metrics.currentMetricValueAsDouble(group1.metricGroup(), "sink-record-send-rate"), 0.001d);
assertEquals(45, metrics.currentMetricValueAsDouble(group1.metricGroup(), "sink-record-active-count"), 0.001d);
assertEquals(40, metrics.currentMetricValueAsDouble(group1.metricGroup(), "partition-count"), 0.001d);
assertEquals(50, metrics.currentMetricValueAsDouble(group1.metricGroup(), "offset-commit-seq-no"), 0.001d);
assertEquals(30, metrics.currentMetricValueAsDouble(group1.metricGroup(), "put-batch-max-time-ms"), 0.001d);
} |
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
return invoke(n, BigDecimal.ZERO);
} | @Test
void invokeOutRangeScale() {
FunctionTestUtil.assertResultError(ceilingFunction.invoke(BigDecimal.valueOf(1.5), BigDecimal.valueOf(6177)),
InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(ceilingFunction.invoke(BigDecimal.valueOf(1.5), BigDecimal.valueOf(-6122)),
InvalidParametersEvent.class);
} |
@Override
public ResourceId getCurrentDirectory() {
if (isDirectory()) {
return this;
}
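// Not a directory: strip everything after the last '/' (or the whole key if there is none) to get the parent directory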
return fromComponents(scheme, getBucket(), key.substring(0, key.lastIndexOf('/') + 1));
} | @Test
public void testGetCurrentDirectory() {
// Tests s3 paths.
assertEquals(
S3ResourceId.fromUri("s3://my_bucket/tmp dir/"),
S3ResourceId.fromUri("s3://my_bucket/tmp dir/").getCurrentDirectory());
// Tests path with unicode.
assertEquals(
S3ResourceId.fromUri("s3://my_bucket/输出 目录/"),
S3ResourceId.fromUri("s3://my_bucket/输出 目录/文件01.txt").getCurrentDirectory());
// Tests bucket with no ending '/'.
assertEquals(
S3ResourceId.fromUri("s3://my_bucket/"),
S3ResourceId.fromUri("s3://my_bucket").getCurrentDirectory());
assertEquals(
S3ResourceId.fromUri("s3://my_bucket/"),
S3ResourceId.fromUri("s3://my_bucket/not-directory").getCurrentDirectory());
} |
public void isEqualTo(@Nullable Object expected) {
standardIsEqualTo(expected);
} | @Test
public void isEqualToWithSameObject() {
Object a = new Object();
Object b = a;
assertThat(a).isEqualTo(b);
} |
public void setCliProperty(final String name, final Object value) {
try {
config = config.with(name, value);
} catch (final ConfigException e) {
terminal.writer().println(e.getMessage());
}
} | @Test
public void shouldThrowOnInvalidCliProperty() {
// When:
console.setCliProperty("FOO", "BAR");
// Then:
assertThat(terminal.getOutputString(),
containsString("Undefined property: FOO. Valid properties are"));
} |
@Bean
public RetryRegistry retryRegistry(RetryConfigurationProperties retryConfigurationProperties,
EventConsumerRegistry<RetryEvent> retryEventConsumerRegistry,
RegistryEventConsumer<Retry> retryRegistryEventConsumer,
@Qualifier("compositeRetryCustomizer") CompositeCustomizer<RetryConfigCustomizer> compositeRetryCustomizer) {
RetryRegistry retryRegistry = createRetryRegistry(retryConfigurationProperties,
retryRegistryEventConsumer, compositeRetryCustomizer);
registerEventConsumer(retryRegistry, retryEventConsumerRegistry,
retryConfigurationProperties);
initRetryRegistry(retryConfigurationProperties, compositeRetryCustomizer, retryRegistry);
return retryRegistry;
} | @Test
public void testRetryRegistry() {
InstanceProperties instanceProperties1 = new InstanceProperties();
instanceProperties1.setMaxAttempts(3);
InstanceProperties instanceProperties2 = new InstanceProperties();
instanceProperties2.setMaxAttempts(2);
RetryConfigurationProperties retryConfigurationProperties = new RetryConfigurationProperties();
retryConfigurationProperties.getInstances().put("backend1", instanceProperties1);
retryConfigurationProperties.getInstances().put("backend2", instanceProperties2);
retryConfigurationProperties.setRetryAspectOrder(200);
RetryConfiguration retryConfiguration = new RetryConfiguration();
DefaultEventConsumerRegistry<RetryEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
RetryRegistry retryRegistry = retryConfiguration
.retryRegistry(retryConfigurationProperties, eventConsumerRegistry,
new CompositeRegistryEventConsumer<>(emptyList()), compositeRetryCustomizerTest());
assertThat(retryConfigurationProperties.getRetryAspectOrder()).isEqualTo(200);
assertThat(retryRegistry.getAllRetries().size()).isEqualTo(2);
Retry retry1 = retryRegistry.retry("backend1");
assertThat(retry1).isNotNull();
assertThat(retry1.getRetryConfig().getMaxAttempts()).isEqualTo(3);
Retry retry2 = retryRegistry.retry("backend2");
assertThat(retry2).isNotNull();
assertThat(retry2.getRetryConfig().getMaxAttempts()).isEqualTo(2);
assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(2);
} |
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request) throws
RemotingCommandException {
final RemotingCommand response = RemotingCommand.createResponseCommand(null);
final EndTransactionRequestHeader requestHeader =
(EndTransactionRequestHeader) request.decodeCommandCustomHeader(EndTransactionRequestHeader.class);
LOGGER.debug("Transaction request:{}", requestHeader);
if (BrokerRole.SLAVE == brokerController.getMessageStoreConfig().getBrokerRole()) {
response.setCode(ResponseCode.SLAVE_NOT_AVAILABLE);
LOGGER.warn("Message store is slave mode, so end transaction is forbidden. ");
return response;
}
if (requestHeader.getFromTransactionCheck()) {
switch (requestHeader.getCommitOrRollback()) {
case MessageSysFlag.TRANSACTION_NOT_TYPE: {
LOGGER.warn("Check producer[{}] transaction state, but it's pending status."
+ "RequestHeader: {} Remark: {}",
RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
requestHeader.toString(),
request.getRemark());
return null;
}
case MessageSysFlag.TRANSACTION_COMMIT_TYPE: {
LOGGER.warn("Check producer[{}] transaction state, the producer commit the message."
+ "RequestHeader: {} Remark: {}",
RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
requestHeader.toString(),
request.getRemark());
break;
}
case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE: {
LOGGER.warn("Check producer[{}] transaction state, the producer rollback the message."
+ "RequestHeader: {} Remark: {}",
RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
requestHeader.toString(),
request.getRemark());
break;
}
default:
return null;
}
} else {
switch (requestHeader.getCommitOrRollback()) {
case MessageSysFlag.TRANSACTION_NOT_TYPE: {
LOGGER.warn("The producer[{}] end transaction in sending message, and it's pending status."
+ "RequestHeader: {} Remark: {}",
RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
requestHeader.toString(),
request.getRemark());
return null;
}
case MessageSysFlag.TRANSACTION_COMMIT_TYPE: {
break;
}
case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE: {
LOGGER.warn("The producer[{}] end transaction in sending message, rollback the message."
+ "RequestHeader: {} Remark: {}",
RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
requestHeader.toString(),
request.getRemark());
break;
}
default:
return null;
}
}
OperationResult result = new OperationResult();
if (MessageSysFlag.TRANSACTION_COMMIT_TYPE == requestHeader.getCommitOrRollback()) {
result = this.brokerController.getTransactionalMessageService().commitMessage(requestHeader);
if (result.getResponseCode() == ResponseCode.SUCCESS) {
if (rejectCommitOrRollback(requestHeader, result.getPrepareMessage())) {
response.setCode(ResponseCode.ILLEGAL_OPERATION);
LOGGER.warn("Message commit fail [producer end]. currentTimeMillis - bornTime > checkImmunityTime, msgId={},commitLogOffset={}, wait check",
requestHeader.getMsgId(), requestHeader.getCommitLogOffset());
return response;
}
RemotingCommand res = checkPrepareMessage(result.getPrepareMessage(), requestHeader);
if (res.getCode() == ResponseCode.SUCCESS) {
MessageExtBrokerInner msgInner = endMessageTransaction(result.getPrepareMessage());
msgInner.setSysFlag(MessageSysFlag.resetTransactionValue(msgInner.getSysFlag(), requestHeader.getCommitOrRollback()));
msgInner.setQueueOffset(requestHeader.getTranStateTableOffset());
msgInner.setPreparedTransactionOffset(requestHeader.getCommitLogOffset());
msgInner.setStoreTimestamp(result.getPrepareMessage().getStoreTimestamp());
MessageAccessor.clearProperty(msgInner, MessageConst.PROPERTY_TRANSACTION_PREPARED);
RemotingCommand sendResult = sendFinalMessage(msgInner);
if (sendResult.getCode() == ResponseCode.SUCCESS) {
this.brokerController.getTransactionalMessageService().deletePrepareMessage(result.getPrepareMessage());
// successfully committed, so decrement the total number of half-messages by 1
this.brokerController.getTransactionalMessageService().getTransactionMetrics().addAndGet(msgInner.getTopic(), -1);
BrokerMetricsManager.commitMessagesTotal.add(1, BrokerMetricsManager.newAttributesBuilder()
.put(LABEL_TOPIC, msgInner.getTopic())
.build());
// record the commit latency, in seconds.
Long commitLatency = (System.currentTimeMillis() - result.getPrepareMessage().getBornTimestamp()) / 1000;
BrokerMetricsManager.transactionFinishLatency.record(commitLatency, BrokerMetricsManager.newAttributesBuilder()
.put(LABEL_TOPIC, msgInner.getTopic())
.build());
}
return sendResult;
}
return res;
}
} else if (MessageSysFlag.TRANSACTION_ROLLBACK_TYPE == requestHeader.getCommitOrRollback()) {
result = this.brokerController.getTransactionalMessageService().rollbackMessage(requestHeader);
if (result.getResponseCode() == ResponseCode.SUCCESS) {
if (rejectCommitOrRollback(requestHeader, result.getPrepareMessage())) {
response.setCode(ResponseCode.ILLEGAL_OPERATION);
LOGGER.warn("Message rollback fail [producer end]. currentTimeMillis - bornTime > checkImmunityTime, msgId={},commitLogOffset={}, wait check",
requestHeader.getMsgId(), requestHeader.getCommitLogOffset());
return response;
}
RemotingCommand res = checkPrepareMessage(result.getPrepareMessage(), requestHeader);
if (res.getCode() == ResponseCode.SUCCESS) {
this.brokerController.getTransactionalMessageService().deletePrepareMessage(result.getPrepareMessage());
// rolled back, so decrement the total number of half-messages by 1
this.brokerController.getTransactionalMessageService().getTransactionMetrics().addAndGet(result.getPrepareMessage().getProperty(MessageConst.PROPERTY_REAL_TOPIC), -1);
BrokerMetricsManager.rollBackMessagesTotal.add(1, BrokerMetricsManager.newAttributesBuilder()
.put(LABEL_TOPIC, result.getPrepareMessage().getProperty(MessageConst.PROPERTY_REAL_TOPIC))
.build());
}
return res;
}
}
response.setCode(result.getResponseCode());
response.setRemark(result.getResponseRemark());
return response;
} | @Test
public void testProcessRequest_RejectCommitMessage() throws RemotingCommandException {
when(transactionMsgService.commitMessage(any(EndTransactionRequestHeader.class))).thenReturn(createRejectResponse());
RemotingCommand request = createEndTransactionMsgCommand(MessageSysFlag.TRANSACTION_COMMIT_TYPE, false);
RemotingCommand response = endTransactionProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.ILLEGAL_OPERATION);
} |
@Override
public Optional<InetAddress> toInetAddress(String hostOrAddress) {
try {
if (InetAddresses.isInetAddress(hostOrAddress)) {
return Optional.of(InetAddresses.forString(hostOrAddress));
}
return Optional.of(InetAddress.getByName(hostOrAddress));
} catch (UnknownHostException e) {
LOG.trace("toInetAddress({}) failed", hostOrAddress, e);
return Optional.empty();
}
} | @Test
public void toInetAddress_returns_empty_on_invalid_IP_and_hostname() {
assertThat(underTest.toInetAddress(randomAlphabetic(32))).isEmpty();
} |
public void setDefault() {
fileName = "file";
extension = "xml";
stepNrInFilename = false;
doNotOpenNewFileInit = false;
dateInFilename = false;
timeInFilename = false;
addToResultFilenames = false;
zipped = false;
splitEvery = 0;
encoding = Const.XML_ENCODING;
nameSpace = "";
date_time_format = null;
SpecifyFormat = false;
omitNullValues = false;
mainElement = "Rows";
repeatElement = "Row";
int nrfields = 0;
allocate( nrfields );
} | @Test
public void testSetDefault() throws Exception {
XMLOutputMeta xmlOutputMeta = new XMLOutputMeta();
xmlOutputMeta.setDefault();
assertEquals( "file", xmlOutputMeta.getFileName() );
assertEquals( "xml", xmlOutputMeta.getExtension() );
assertFalse( xmlOutputMeta.isStepNrInFilename() );
assertFalse( xmlOutputMeta.isDoNotOpenNewFileInit() );
assertFalse( xmlOutputMeta.isDateInFilename() );
assertFalse( xmlOutputMeta.isTimeInFilename() );
assertFalse( xmlOutputMeta.isAddToResultFiles() );
assertFalse( xmlOutputMeta.isZipped() );
assertEquals( 0, xmlOutputMeta.getSplitEvery() );
assertEquals( Const.XML_ENCODING, xmlOutputMeta.getEncoding() );
assertEquals( "", xmlOutputMeta.getNameSpace() );
assertNull( xmlOutputMeta.getDateTimeFormat() );
assertFalse( xmlOutputMeta.isSpecifyFormat() );
assertFalse( xmlOutputMeta.isOmitNullValues() );
assertEquals( "Rows", xmlOutputMeta.getMainElement() );
assertEquals( "Row", xmlOutputMeta.getRepeatElement() );
} |
public static double[] rowMeans(double[][] matrix) {
double[] x = new double[matrix.length];
for (int i = 0; i < x.length; i++) {
x[i] = mean(matrix[i]);
}
return x;
} | @Test
public void testRowMeans() {
System.out.println("rowMeans");
double[][] A = {
{0.7220180, 0.07121225, 0.6881997},
{-0.2648886, -0.89044952, 0.3700456},
{-0.6391588, 0.44947578, 0.6240573}
};
double[] r = {0.4938100, -0.2617642, 0.1447914};
double[] result = MathEx.rowMeans(A);
for (int i = 0; i < r.length; i++) {
assertEquals(result[i], r[i], 1E-7);
}
} |
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
try {
final Ds3Client client = new SpectraClientBuilder().wrap(session.getClient(), session.getHost());
final Map<Path, TransferStatus> filtered = new LinkedHashMap<>(files);
for(Iterator<Map.Entry<Path, TransferStatus>> iter = filtered.entrySet().iterator(); iter.hasNext(); ) {
final Map.Entry<Path, TransferStatus> file = iter.next();
if(containerService.isContainer(file.getKey())) {
client.deleteBucketSpectraS3(
new DeleteBucketSpectraS3Request(containerService.getContainer(file.getKey()).getName()).withForce(true));
iter.remove();
}
else if(file.getKey().isDirectory()) {
client.deleteFolderRecursivelySpectraS3(
new DeleteFolderRecursivelySpectraS3Request(
containerService.getContainer(file.getKey()).getName(),
containerService.getKey(file.getKey())));
iter.remove();
}
}
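// Group the remaining files by bucket so object deletions can be batched into one request per container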
final Map<Path, List<Path>> containers = new HashMap<>();
for(Path file : filtered.keySet()) {
final Path bucket = containerService.getContainer(file);
if(containers.containsKey(bucket)) {
containers.get(bucket).add(file);
}
else {
containers.put(bucket, new ArrayList<>(Collections.singletonList(file)));
}
}
for(Map.Entry<Path, List<Path>> entry : containers.entrySet()) {
final List<String> keys = entry.getValue().stream().map(containerService::getKey).collect(Collectors.toList());
client.deleteObjects(new DeleteObjectsRequest(containerService.getContainer(entry.getKey()).getName(), keys));
}
}
catch(FailedRequestException e) {
throw new SpectraExceptionMappingService().map(e);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map(e);
}
} | @Test
public void testDeleteContainer() throws Exception {
final Path container = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.volume, Path.Type.directory));
container.attributes().setRegion("US");
new SpectraDirectoryFeature(session, new SpectraWriteFeature(session)).mkdir(container, new TransferStatus());
assertTrue(new SpectraFindFeature(session).find(container));
new SpectraDeleteFeature(session).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public <T> void resolve(T resolvable) {
ParamResolver resolver = this;
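// A resolvable that defines its own scope is layered over the current resolver before resolving its leaves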
if (ParamScope.class.isAssignableFrom(resolvable.getClass())) {
ParamScope newScope = (ParamScope) resolvable;
resolver = newScope.applyOver(resolver);
}
resolveStringLeaves(resolvable, resolver);
resolveNonStringLeaves(resolvable, resolver);
resolveNodes(resolvable, resolver);
} | @Test
public void shouldNotTryToResolveNonStringAttributes() {
// verifies that substitution does not fail when non-String config attributes are present and not annotated as opt-out
MailHost mailHost = new MailHost("host", 25, "loser", "passwd", true, false, "[email protected]", "[email protected]");
new ParamResolver(new ParamSubstitutionHandlerFactory(params(param("foo", "pavan"), param("bool", "tr"))), fieldCache).resolve(mailHost);
} |
public static <T extends PipelineOptions> T as(Class<T> klass) {
return new Builder().as(klass);
} | @Test
public void testDefaultMethodIgnoresDefaultImplementation() {
OptionsWithDefaultMethod optsWithDefault =
PipelineOptionsFactory.as(OptionsWithDefaultMethod.class);
assertThat(optsWithDefault.getValue(), nullValue());
optsWithDefault.setValue(12.25);
assertThat(optsWithDefault.getValue(), equalTo(12.25));
} |
Double calculateMedian(List<Double> durationEntries) {
if (durationEntries.isEmpty()) {
return 0.0;
}
Collections.sort(durationEntries);
int middle = durationEntries.size() / 2;
if (durationEntries.size() % 2 == 1) {
return durationEntries.get(middle);
} else {
double total = durationEntries.get(middle - 1) + durationEntries.get(middle);
return total / 2;
}
} | @Test
void calculateMedianOfOddNumberOfEntries() {
OutputStream out = new ByteArrayOutputStream();
UsageFormatter usageFormatter = new UsageFormatter(out);
Double result = usageFormatter
.calculateMedian(asList(1.0, 2.0, 3.0));
assertThat(result, is(closeTo(2.0, EPSILON)));
} |
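
The even branch of `calculateMedian` averages the two middle entries after sorting. As a quick illustration, a minimal standalone sketch of the same logic (a hypothetical `MedianSketch` class, not part of the actual `UsageFormatter`):

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class MedianSketch {
    // Same algorithm: sort, then take the middle element (odd count)
    // or the mean of the two middle elements (even count).
    static double median(List<Double> xs) {
        if (xs.isEmpty()) return 0.0;
        Collections.sort(xs);
        int mid = xs.size() / 2;
        return (xs.size() % 2 == 1)
                ? xs.get(mid)
                : (xs.get(mid - 1) + xs.get(mid)) / 2;
    }

    public static void main(String[] args) {
        System.out.println(median(new ArrayList<>(List.of(1.0, 2.0, 3.0))));      // 2.0 (odd)
        System.out.println(median(new ArrayList<>(List.of(1.0, 2.0, 3.0, 4.0)))); // 2.5 (even)
    }
}
```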
public Set<Path> getAllowedAuxiliaryPaths() {
return allowedAuxiliaryPaths;
} | @Test
public void testAllowedAuxiliaryPaths() throws ValidationException, RepositoryException {
HashMap<String, String> validProperties = new HashMap<>();
validProperties.put("allowed_auxiliary_paths", "/permitted-dir,/another-valid-dir");
PathConfiguration configuration = ConfigurationHelper.initPathConfig(new PathConfiguration(), validProperties);
assertEquals(2, configuration.getAllowedAuxiliaryPaths().size());
} |
public Boolean setSeckillEndFlag(long seckillId, String taskId) {
return stringRedisTemplate.opsForValue().setIfAbsent("goodskill:seckill:end:notice" + seckillId + ":" + taskId, "1");
} | @Test
void testSetSeckillEndFlag() {
ValueOperations valueOperations = mock(ValueOperations.class);
when(stringRedisTemplate.opsForValue()).thenReturn(valueOperations);
Boolean result = redisService.setSeckillEndFlag(0L, "1");
Assertions.assertEquals(Boolean.FALSE, result);
} |
@SuppressWarnings("unchecked")
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, CircuitBreaker circuitBreaker,
String methodName) throws Throwable {
CircuitBreakerOperator circuitBreakerOperator = CircuitBreakerOperator.of(circuitBreaker);
Object returnValue = proceedingJoinPoint.proceed();
return executeRxJava3Aspect(circuitBreakerOperator, returnValue, methodName);
} | @Test
public void testRxTypes() throws Throwable {
CircuitBreaker circuitBreaker = CircuitBreaker.ofDefaults("test");
when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
assertThat(rxJava3CircuitBreakerAspectExt
.handle(proceedingJoinPoint, circuitBreaker, "testMethod")).isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
assertThat(rxJava3CircuitBreakerAspectExt
.handle(proceedingJoinPoint, circuitBreaker, "testMethod")).isNotNull();
} |
@SuppressWarnings("OptionalGetWithoutIsPresent")
public StatementExecutorResponse execute(
final ConfiguredStatement<DescribeConnector> configuredStatement,
final SessionProperties sessionProperties,
final KsqlExecutionContext ksqlExecutionContext,
final ServiceContext serviceContext
) {
final String connectorName = configuredStatement
.getStatement()
.getConnectorName();
final ConnectResponse<ConnectorStateInfo> statusResponse = serviceContext
.getConnectClient()
.status(connectorName);
if (statusResponse.error().isPresent()) {
final String errorMsg = "Failed to query connector status: " + statusResponse.error().get();
throw new KsqlRestException(EndpointResponse.create()
.status(statusResponse.httpCode())
.entity(new KsqlErrorMessage(Errors.toErrorCode(statusResponse.httpCode()), errorMsg))
.build()
);
}
final ConnectResponse<ConnectorInfo> infoResponse = serviceContext
.getConnectClient()
.describe(connectorName);
if (infoResponse.error().isPresent()) {
final String errorMsg = "Failed to describe connector: " + infoResponse.error().get();
throw new KsqlRestException(EndpointResponse.create()
.status(infoResponse.httpCode())
.entity(new KsqlErrorMessage(Errors.toErrorCode(infoResponse.httpCode()), errorMsg))
.build()
);
}
final ConnectorStateInfo status = statusResponse.datum().get();
final ConnectorInfo info = infoResponse.datum().get();
final Optional<Connector> connector = connectorFactory.apply(info);
final List<KsqlWarning> warnings;
final List<String> topics;
if (connector.isPresent()) {
// Small optimization. If a connector's info is not found in the response, don't query for
// active topics with the given connectorName
final ConnectResponse<Map<String, Map<String, List<String>>>> topicsResponse = serviceContext
.getConnectClient()
.topics(connectorName);
// topics endpoint is relatively new (KAFKA-9422), so 404 here is expected behavior for older
// Connect versions. Rather than showing a scary warning to the user, we just log it to the
// server logs.
if (topicsResponse.error().isPresent()
&& topicsResponse.httpCode() == HttpStatus.SC_NOT_FOUND) {
topics = ImmutableList.of();
warnings = ImmutableList.of();
LOG.warn("Could not list related topics due to error: " + topicsResponse.error().get());
} else if (topicsResponse.error().isPresent()) {
topics = ImmutableList.of();
warnings = ImmutableList.of(
new KsqlWarning("Could not list related topics due to error: "
+ topicsResponse.error().get()));
} else {
topics = topicsResponse.datum()
.get()
.get(connectorName)
.getOrDefault(TOPICS_KEY, ImmutableList.of());
warnings = ImmutableList.of();
}
} else {
topics = ImmutableList.of();
warnings = ImmutableList.of();
}
final List<SourceDescription> sources;
if (connector.isPresent()) {
sources = ksqlExecutionContext
.getMetaStore()
.getAllDataSources()
.values()
.stream()
.filter(source -> topics.contains(source.getKafkaTopicName()))
.map(source -> SourceDescriptionFactory.create(
source,
false,
ImmutableList.of(),
ImmutableList.of(),
Optional.empty(),
ImmutableList.of(),
ImmutableList.of(),
ksqlExecutionContext.metricCollectors()
)
)
.collect(Collectors.toList());
} else {
sources = ImmutableList.of();
}
final ConnectorDescription description = new ConnectorDescription(
configuredStatement.getMaskedStatementText(),
info.config().get(CONNECTOR_CLASS_CONFIG),
status,
sources,
topics,
warnings
);
return StatementExecutorResponse.handled(Optional.of(description));
} | @Test
public void shouldNotWarnClientOnMissingTopicsEndpoint() {
// Given:
when(connectClient.topics(any())).thenReturn(ConnectResponse.failure("not found",
HttpStatus.SC_NOT_FOUND));
// When:
final Optional<KsqlEntity> entity = executor
.execute(describeStatement, mock(SessionProperties.class), engine, serviceContext)
.getEntity();
// Then:
verify(engine).getMetaStore();
verify(metaStore).getAllDataSources();
verify(connectClient).status("connector");
verify(connectClient).describe("connector");
verify(connectClient).topics("connector");
assertThat("Expected a response", entity.isPresent());
assertThat(entity.get(), instanceOf(ConnectorDescription.class));
final ConnectorDescription description = (ConnectorDescription) entity.get();
assertThat(description.getTopics(), empty());
assertThat(description.getWarnings(), empty());
} |
public DLQEntry pollEntry(long timeout) throws IOException, InterruptedException {
byte[] bytes = pollEntryBytes(timeout);
if (bytes == null) {
return null;
}
return DLQEntry.deserialize(bytes);
} | @Test
public void testWriteReadRandomEventSize() throws Exception {
Event event = new Event(Collections.emptyMap());
int maxEventSize = BLOCK_SIZE * 2; // 64kb
int eventCount = 1024; // max total = 1024 * 64kb = 64mb
long startTime = System.currentTimeMillis();
try(DeadLetterQueueWriter writeManager = DeadLetterQueueWriter
.newBuilder(dir, 10 * 1024 * 1024, defaultDlqSize, Duration.ofSeconds(1))
.build()) {
for (int i = 0; i < eventCount; i++) {
event.setField("message", generateMessageContent((int)(Math.random() * (maxEventSize))));
DLQEntry entry = new DLQEntry(event, "", "", String.valueOf(i), new Timestamp(startTime++));
writeManager.writeEntry(entry);
}
}
try (DeadLetterQueueReader readManager = new DeadLetterQueueReader(dir)) {
for (int i = 0; i < eventCount;i++) {
DLQEntry entry = readManager.pollEntry(100);
assertThat(entry.getReason(), is(String.valueOf(i)));
}
}
} |
@Override
public ActionResult apply(Agent agent, Map<String, String> input) {
log.debug("Fetching url {} for agent {}", input.get("url"), agent.getId());
String url = input.get("url");
if (url == null || url.isEmpty()) {
return errorResult("An error occurred while attempting to browse a site", "The url parameter is missing or has an empty value.");
}
try {
return browsePage(url, input.get("query"));
} catch (Exception e) {
log.warn("Browsing error for {}", url, e);
return errorResult(
String.format("An error occured while attempting to browse %s", url),
"Browsing failed, you should try another site.");
}
} | @Test
void testApplyWithBrowsingException() {
String url = "http://example.com";
String query = "What is this page about?";
Map<String, String> input = new HashMap<>();
input.put("url", url);
input.put("query", query);
when(browserContext.newPage()).thenReturn(page);
when(page.navigate(eq(url), any(Page.NavigateOptions.class))).thenThrow(new PlaywrightException("Navigation failed"));
ActionResult result = playwrightBrowserAction.apply(agent, input);
assertEquals(ActionResult.Status.FAILURE, result.getStatus());
assertEquals("An error occured while attempting to browse http://example.com", result.getSummary());
assertEquals("Browsing failed, you should try another site.", result.getError());
verify(page).navigate(eq(url), any(Page.NavigateOptions.class));
} |
@Override
protected void processOptions(LinkedList<String> args)
throws IOException {
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN,
OPTION_HIDENONPRINTABLE, OPTION_RECURSIVE, OPTION_REVERSE,
OPTION_MTIME, OPTION_SIZE, OPTION_ATIME, OPTION_ECPOLICY);
cf.parse(args);
pathOnly = cf.getOpt(OPTION_PATHONLY);
dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
setRecursive(cf.getOpt(OPTION_RECURSIVE) && dirRecurse);
humanReadable = cf.getOpt(OPTION_HUMAN);
hideNonPrintable = cf.getOpt(OPTION_HIDENONPRINTABLE);
orderReverse = cf.getOpt(OPTION_REVERSE);
orderTime = cf.getOpt(OPTION_MTIME);
orderSize = !orderTime && cf.getOpt(OPTION_SIZE);
useAtime = cf.getOpt(OPTION_ATIME);
displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
if (args.isEmpty()) args.add(Path.CUR_DIR);
initialiseOrderComparator();
} | @Test
public void processPathDirOrderMtimeYears() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
// set file mtimes in a different order from the file names
testfile01.setMtime(NOW.getTime() + Integer.MAX_VALUE);
testfile02.setMtime(NOW.getTime() + Integer.MIN_VALUE);
testfile03.setMtime(NOW.getTime() + 0);
testfile04.setMtime(NOW.getTime() + Integer.MAX_VALUE + Integer.MAX_VALUE);
testfile05.setMtime(NOW.getTime() + 0);
testfile06.setMtime(NOW.getTime() + Integer.MIN_VALUE + Integer.MIN_VALUE);
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-t");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
} |
public static boolean privateKeyMatchesPublicKey(PrivateKey privateKey, PublicKey publicKey) {
byte[] someRandomData = new byte[64];
new Random().nextBytes(someRandomData);
Signature signer = SignatureUtils.createSigner(privateKey);
Signature verifier = SignatureUtils.createVerifier(publicKey);
try {
signer.update(someRandomData);
verifier.update(someRandomData);
byte[] signature = signer.sign();
return verifier.verify(signature);
} catch (SignatureException e) {
throw new RuntimeException(e);
}
} | @Test
void verifies_matching_cert_and_key() {
KeyPair ecKeypairA = KeyUtils.generateKeypair(KeyAlgorithm.EC, 256);
KeyPair ecKeypairB = KeyUtils.generateKeypair(KeyAlgorithm.EC, 256);
KeyPair rsaKeypairA = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 1024);
KeyPair rsaKeypairB = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 1024);
assertTrue(X509CertificateUtils.privateKeyMatchesPublicKey(ecKeypairA.getPrivate(), ecKeypairA.getPublic()));
assertTrue(X509CertificateUtils.privateKeyMatchesPublicKey(rsaKeypairA.getPrivate(), rsaKeypairA.getPublic()));
assertFalse(X509CertificateUtils.privateKeyMatchesPublicKey(ecKeypairA.getPrivate(), ecKeypairB.getPublic()));
assertFalse(X509CertificateUtils.privateKeyMatchesPublicKey(rsaKeypairA.getPrivate(), rsaKeypairB.getPublic()));
} |
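
The method relies on a sign/verify round trip: a signature produced with the private key verifies only against the matching public key. Below is a minimal plain-JCA sketch of the same idea, assuming RSA keys and `SHA256withRSA` (the real `SignatureUtils.createSigner`/`createVerifier` presumably pick the algorithm from the key type):

```java
import java.security.*;

class KeyMatchSketch {
    static boolean matches(PrivateKey priv, PublicKey pub) throws GeneralSecurityException {
        byte[] challenge = new byte[64];
        new SecureRandom().nextBytes(challenge);        // random data to sign
        Signature signer = Signature.getInstance("SHA256withRSA");
        signer.initSign(priv);
        signer.update(challenge);
        byte[] sig = signer.sign();
        Signature verifier = Signature.getInstance("SHA256withRSA");
        verifier.initVerify(pub);
        verifier.update(challenge);
        return verifier.verify(sig);                    // true only for a matching key pair
    }

    public static void main(String[] args) throws Exception {
        KeyPair kp = KeyPairGenerator.getInstance("RSA").generateKeyPair();
        System.out.println(matches(kp.getPrivate(), kp.getPublic())); // true
    }
}
```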
@Override
@MethodNotAvailable
public void close() {
throw new MethodNotAvailableException();
} | @Test(expected = MethodNotAvailableException.class)
public void testClose() {
adapter.close();
} |
public Struct put(String fieldName, Object value) {
Field field = lookupField(fieldName);
return put(field, value);
} | @Test
public void testInvalidArrayFieldElements() {
assertThrows(DataException.class,
() -> new Struct(NESTED_SCHEMA).put("array", Collections.singletonList("should fail since elements should be int8s")));
} |
@Override
public void onClick(View v) {
AppCompatActivity activity = (AppCompatActivity) getActivity();
if (activity == null) return;
switch (v.getId()) {
case R.id.ask_for_contact_permissions_action -> enableContactsDictionary();
case R.id.disable_contacts_dictionary -> {
mSharedPrefs
.edit()
.putBoolean(getString(R.string.settings_key_use_contacts_dictionary), false)
.apply();
refreshWizardPager();
}
case R.id.open_permissions_wiki_action -> {
Intent browserIntent =
new Intent(
Intent.ACTION_VIEW,
Uri.parse(getResources().getString(R.string.permissions_wiki_site_url)));
try {
startActivity(browserIntent);
} catch (ActivityNotFoundException weirdException) {
// https://github.com/AnySoftKeyboard/AnySoftKeyboard/issues/516
// this means that there is nothing on the device
// that can handle Intent.ACTION_VIEW with "https" schema..
// silently swallowing it
Logger.w(
"WizardPermissionsFragment",
"Can not open '%' since there is nothing on the device that can handle" + " it.",
browserIntent.getData());
}
}
case R.id.ask_for_notification_permissions_action ->
AnyApplication.notifier(activity).askForNotificationPostPermission(this);
case R.id.skip_notification_permissions_action -> {
mNotificationSkipped = true;
refreshWizardPager();
}
default ->
throw new IllegalArgumentException(
"Failed to handle " + v.getId() + " in WizardPermissionsFragment");
}
} | @Test
@Config(sdk = Build.VERSION_CODES.TIRAMISU)
public void testHidesNotificationGroupIfNotGrantedButSkipped() {
var fragment = startFragment();
var group = fragment.getView().findViewById(R.id.notification_permission_group);
Assert.assertEquals(View.VISIBLE, group.getVisibility());
View skipLink = fragment.getView().findViewById(R.id.skip_notification_permissions_action);
var clickSkipHandler = Shadows.shadowOf(skipLink).getOnClickListener();
Assert.assertNotNull(clickSkipHandler);
clickSkipHandler.onClick(skipLink);
Assert.assertEquals(View.GONE, group.getVisibility());
} |
public static boolean isValidProjectKey(String keyCandidate) {
return VALID_PROJECT_KEY_REGEXP.matcher(keyCandidate).matches();
} | @Test
public void invalid_project_key() {
assertThat(ComponentKeys.isValidProjectKey("0123")).isFalse();
assertThat(ComponentKeys.isValidProjectKey("ab/12")).isFalse();
assertThat(ComponentKeys.isValidProjectKey("코드품질")).isFalse();
assertThat(ComponentKeys.isValidProjectKey("")).isFalse();
assertThat(ComponentKeys.isValidProjectKey(" ")).isFalse();
assertThat(ComponentKeys.isValidProjectKey("ab 12")).isFalse();
assertThat(ComponentKeys.isValidProjectKey(" ab")).isFalse();
assertThat(ComponentKeys.isValidProjectKey("ab ")).isFalse();
} |
@Override
public AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment, final AlterReplicaLogDirsOptions options) {
final Map<TopicPartitionReplica, KafkaFutureImpl<Void>> futures = new HashMap<>(replicaAssignment.size());
for (TopicPartitionReplica replica : replicaAssignment.keySet())
futures.put(replica, new KafkaFutureImpl<>());
Map<Integer, AlterReplicaLogDirsRequestData> replicaAssignmentByBroker = new HashMap<>();
for (Map.Entry<TopicPartitionReplica, String> entry: replicaAssignment.entrySet()) {
TopicPartitionReplica replica = entry.getKey();
String logDir = entry.getValue();
int brokerId = replica.brokerId();
AlterReplicaLogDirsRequestData value = replicaAssignmentByBroker.computeIfAbsent(brokerId,
key -> new AlterReplicaLogDirsRequestData());
AlterReplicaLogDir alterReplicaLogDir = value.dirs().find(logDir);
if (alterReplicaLogDir == null) {
alterReplicaLogDir = new AlterReplicaLogDir();
alterReplicaLogDir.setPath(logDir);
value.dirs().add(alterReplicaLogDir);
}
AlterReplicaLogDirTopic alterReplicaLogDirTopic = alterReplicaLogDir.topics().find(replica.topic());
if (alterReplicaLogDirTopic == null) {
alterReplicaLogDirTopic = new AlterReplicaLogDirTopic().setName(replica.topic());
alterReplicaLogDir.topics().add(alterReplicaLogDirTopic);
}
alterReplicaLogDirTopic.partitions().add(replica.partition());
}
final long now = time.milliseconds();
for (Map.Entry<Integer, AlterReplicaLogDirsRequestData> entry: replicaAssignmentByBroker.entrySet()) {
final int brokerId = entry.getKey();
final AlterReplicaLogDirsRequestData assignment = entry.getValue();
runnable.call(new Call("alterReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()),
new ConstantNodeIdProvider(brokerId)) {
@Override
public AlterReplicaLogDirsRequest.Builder createRequest(int timeoutMs) {
return new AlterReplicaLogDirsRequest.Builder(assignment);
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
AlterReplicaLogDirsResponse response = (AlterReplicaLogDirsResponse) abstractResponse;
for (AlterReplicaLogDirTopicResult topicResult: response.data().results()) {
for (AlterReplicaLogDirPartitionResult partitionResult: topicResult.partitions()) {
TopicPartitionReplica replica = new TopicPartitionReplica(
topicResult.topicName(), partitionResult.partitionIndex(), brokerId);
KafkaFutureImpl<Void> future = futures.get(replica);
if (future == null) {
log.warn("The partition {} in the response from broker {} is not in the request",
new TopicPartition(topicResult.topicName(), partitionResult.partitionIndex()),
brokerId);
} else if (partitionResult.errorCode() == Errors.NONE.code()) {
future.complete(null);
} else {
future.completeExceptionally(Errors.forCode(partitionResult.errorCode()).exception());
}
}
}
// The server should send back a response for every replica. But do a sanity check anyway.
completeUnrealizedFutures(
futures.entrySet().stream().filter(entry -> entry.getKey().brokerId() == brokerId),
replica -> "The response from broker " + brokerId +
" did not contain a result for replica " + replica);
}
@Override
void handleFailure(Throwable throwable) {
// Only completes the futures of brokerId
completeAllExceptionally(
futures.entrySet().stream()
.filter(entry -> entry.getKey().brokerId() == brokerId)
.map(Map.Entry::getValue),
throwable);
}
}, now);
}
return new AlterReplicaLogDirsResult(new HashMap<>(futures));
} | @Test
public void testAlterReplicaLogDirsLogDirNotFound() throws Exception {
try (AdminClientUnitTestEnv env = mockClientEnv()) {
createAlterLogDirsResponse(env, env.cluster().nodeById(0), Errors.NONE, 0);
createAlterLogDirsResponse(env, env.cluster().nodeById(1), Errors.LOG_DIR_NOT_FOUND, 0);
TopicPartitionReplica tpr0 = new TopicPartitionReplica("topic", 0, 0);
TopicPartitionReplica tpr1 = new TopicPartitionReplica("topic", 0, 1);
Map<TopicPartitionReplica, String> logDirs = new HashMap<>();
logDirs.put(tpr0, "/data0");
logDirs.put(tpr1, "/data1");
AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(logDirs);
assertNull(result.values().get(tpr0).get());
TestUtils.assertFutureError(result.values().get(tpr1), LogDirNotFoundException.class);
}
} |
@Override
public FileClient getFileClient(Long id) {
return clientCache.getUnchecked(id);
} | @Test
public void testGetFileClient() {
// mock 数据
FileConfigDO fileConfig = randomFileConfigDO().setMaster(false);
fileConfigMapper.insert(fileConfig);
// 准备参数
Long id = fileConfig.getId();
// mock 获得 Client
FileClient fileClient = new LocalFileClient(id, new LocalFileClientConfig());
when(fileClientFactory.getFileClient(eq(id))).thenReturn(fileClient);
// 调用,并断言
assertSame(fileClient, fileConfigService.getFileClient(id));
// 断言缓存
verify(fileClientFactory).createOrUpdateFileClient(eq(id), eq(fileConfig.getStorage()),
eq(fileConfig.getConfig()));
} |
public static Set<Member> selectTargetMembers(Collection<Member> members, Predicate<Member> filter) {
return members.stream().filter(filter).collect(Collectors.toSet());
} | @Test
void testSelectTargetMembers() {
Collection<Member> input = new HashSet<>();
input.add(originalMember);
Member member = buildMember();
member.setIp("2.2.2.2");
input.add(member);
Set<Member> actual = MemberUtil.selectTargetMembers(input, member1 -> member1.getIp().equals(IP));
assertEquals(1, actual.size());
} |
@ScalarOperator(ADD)
@SqlType(StandardTypes.SMALLINT)
public static long add(@SqlType(StandardTypes.SMALLINT) long left, @SqlType(StandardTypes.SMALLINT) long right)
{
try {
return Shorts.checkedCast(left + right);
}
catch (IllegalArgumentException e) {
throw new PrestoException(NUMERIC_VALUE_OUT_OF_RANGE, format("smallint addition overflow: %s + %s", left, right), e);
}
} | @Test
public void testAdd()
{
assertFunction("SMALLINT'37' + SMALLINT'37'", SMALLINT, (short) (37 + 37));
assertFunction("SMALLINT'37' + SMALLINT'17'", SMALLINT, (short) (37 + 17));
assertFunction("SMALLINT'17' + SMALLINT'37'", SMALLINT, (short) (17 + 37));
assertFunction("SMALLINT'17' + SMALLINT'17'", SMALLINT, (short) (17 + 17));
assertNumericOverflow(format("SMALLINT'%s' + SMALLINT'1'", Short.MAX_VALUE), "smallint addition overflow: 32767 + 1");
} |
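
The overflow guard above leans on Guava's `Shorts.checkedCast`, which throws `IllegalArgumentException` when the widened sum leaves the 16-bit range. A plain-JDK sketch of the equivalent range check (hypothetical `addExact` helper, shown only for illustration):

```java
class SmallintAddSketch {
    // Widen to int before adding, then range-check against short bounds.
    static short addExact(short a, short b) {
        int sum = a + b; // int addition cannot overflow for two shorts
        if (sum < Short.MIN_VALUE || sum > Short.MAX_VALUE) {
            throw new ArithmeticException("smallint addition overflow: " + a + " + " + b);
        }
        return (short) sum;
    }

    public static void main(String[] args) {
        System.out.println(addExact((short) 37, (short) 37)); // 74
        try {
            addExact(Short.MAX_VALUE, (short) 1);
        } catch (ArithmeticException e) {
            System.out.println(e.getMessage()); // smallint addition overflow: 32767 + 1
        }
    }
}
```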
public MapConfig setBackupCount(final int backupCount) {
this.backupCount = checkBackupCount(backupCount, asyncBackupCount);
return this;
} | @Test
public void setBackupCount_whenItsZero() {
MapConfig config = new MapConfig();
config.setBackupCount(0);
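// a backup count of zero is valid; the call simply must not throw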
} |
@Override
public void abortCheckpointOnBarrier(long checkpointId, CheckpointException cause)
throws IOException {
if (isCurrentSyncSavepoint(checkpointId)) {
throw new FlinkRuntimeException("Stop-with-savepoint failed.");
}
subtaskCheckpointCoordinator.abortCheckpointOnBarrier(checkpointId, cause, operatorChain);
} | @Test
void testSavepointSuspendedAborted() {
assertThatThrownBy(
() ->
testSyncSavepointWithEndInput(
(task, id) ->
task.abortCheckpointOnBarrier(
id,
new CheckpointException(
UNKNOWN_TASK_CHECKPOINT_NOTIFICATION_FAILURE)),
SavepointType.suspend(SavepointFormatType.CANONICAL),
false))
.isInstanceOf(FlinkRuntimeException.class)
.hasMessage("Stop-with-savepoint failed.");
} |
public List<RowMetaInterface> getRecommendedIndexes() {
List<RowMetaInterface> indexes = new ArrayList<RowMetaInterface>();
// First index : ID_BATCH if any is used.
if ( isBatchIdUsed() ) {
indexes.add( addFieldsToIndex( getKeyField() ) );
}
// The next index includes: ERRORS, STATUS, JOBNAME:
indexes.add( addFieldsToIndex( findField( ID.ERRORS ), findField( ID.STATUS ), findField( ID.JOBNAME ) ) );
// Index used for deleting rows during cleanup
indexes.add( addFieldsToIndex( findField( ID.JOBNAME ), findField( ID.LOGDATE ) ) );
return indexes;
} | @Test
public void getRecommendedIndexes() {
List<RowMetaInterface> indexes = jobLogTable.getRecommendedIndexes();
String[] expected = new String[]{ "JOBNAME", "LOGDATE" };
assertTrue( "No indicies present", indexes.size() > 0 );
boolean found = false;
for ( RowMetaInterface rowMeta : indexes ) {
if ( Arrays.equals( rowMeta.getFieldNames(), expected ) ) {
found = true;
break;
}
}
if ( !found ) {
fail( "Cound not find index with " + Arrays.toString( expected ) );
}
} |
public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) {
_windowRollingLock.lock();
try {
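// Clamp the requested [from, to] range to the windows the aggregator currently retains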
long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex);
long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1);
if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) {
return new MetricSampleCompleteness<>(generation(), _windowMs);
}
maybeUpdateAggregatorState();
return _aggregatorState.completeness(fromWindowIndex,
toWindowIndex,
interpretAggregationOptions(options),
generation());
} finally {
_windowRollingLock.unlock();
}
} | @Test
public void testAggregationOption4() {
MetricSampleAggregator<String, IntegerEntity> aggregator = prepareCompletenessTestEnv();
// Change the option to have 0.5 as minValidEntityGroupRatio. This will exclude window index 3, 4, 20.
AggregationOptions<String, IntegerEntity> options =
new AggregationOptions<>(0.0, 0.0, NUM_WINDOWS, 5,
new HashSet<>(Arrays.asList(ENTITY1, ENTITY2, ENTITY3)),
AggregationOptions.Granularity.ENTITY, true);
MetricSampleCompleteness<String, IntegerEntity> completeness = aggregator.completeness(-1, Long.MAX_VALUE, options);
assertEquals(17, completeness.validWindowIndices().size());
assertEquals(2, completeness.validEntities().size());
assertTrue(completeness.validEntities().contains(ENTITY1));
assertTrue(completeness.validEntities().contains(ENTITY3));
assertTrue(completeness.validEntityGroups().contains(ENTITY3.group()));
assertCompletenessByWindowIndex(completeness);
} |
public void addLongPollingClient(HttpServletRequest req, HttpServletResponse rsp, Map<String, String> clientMd5Map,
int probeRequestSize) {
String noHangUpFlag = req.getHeader(LongPollingService.LONG_POLLING_NO_HANG_UP_HEADER);
long start = System.currentTimeMillis();
List<String> changedGroups = MD5Util.compareMd5(req, rsp, clientMd5Map);
if (changedGroups.size() > 0) {
generateResponse(req, rsp, changedGroups);
LogUtil.CLIENT_LOG.info("{}|{}|{}|{}|{}|{}|{}", System.currentTimeMillis() - start, "instant",
RequestUtil.getRemoteIp(req), "polling", clientMd5Map.size(), probeRequestSize,
changedGroups.size());
return;
} else if (noHangUpFlag != null && noHangUpFlag.equalsIgnoreCase(TRUE_STR)) {
LogUtil.CLIENT_LOG.info("{}|{}|{}|{}|{}|{}|{}", System.currentTimeMillis() - start, "nohangup",
RequestUtil.getRemoteIp(req), "polling", clientMd5Map.size(), probeRequestSize,
changedGroups.size());
return;
}
// Must be called by http thread, or send response.
final AsyncContext asyncContext = req.startAsync();
// AsyncContext.setTimeout() is unreliable here, so the timeout is managed manually
asyncContext.setTimeout(0L);
String ip = RequestUtil.getRemoteIp(req);
ConnectionCheckResponse connectionCheckResponse = checkLimit(req);
if (!connectionCheckResponse.isSuccess()) {
RpcScheduledExecutor.CONTROL_SCHEDULER.schedule(
() -> generate503Response(asyncContext, rsp, connectionCheckResponse.getMessage()),
1000L + new Random().nextInt(2000), TimeUnit.MILLISECONDS);
return;
}
String appName = req.getHeader(RequestUtil.CLIENT_APPNAME_HEADER);
String tag = req.getHeader("Vipserver-Tag");
int delayTime = SwitchService.getSwitchInteger(SwitchService.FIXED_DELAY_TIME, 500);
int minLongPoolingTimeout = SwitchService.getSwitchInteger("MIN_LONG_POOLING_TIMEOUT", 10000);
// Add delay time for load balancing; the response is returned 500 ms early to avoid client-side timeouts.
String requestLongPollingTimeOut = req.getHeader(LongPollingService.LONG_POLLING_HEADER);
long timeout = Math.max(minLongPoolingTimeout, Long.parseLong(requestLongPollingTimeOut) - delayTime);
ConfigExecutor.executeLongPolling(
new ClientLongPolling(asyncContext, clientMd5Map, ip, probeRequestSize, timeout, appName, tag));
} | @Test
void testRejectByConnectionLimit() throws Exception {
//mock connection limit check failing
ConnectionCheckResponse connectionCheckResponse = new ConnectionCheckResponse();
connectionCheckResponse.setSuccess(false);
Mockito.when(connectionControlManager.check(any())).thenReturn(connectionCheckResponse);
HttpServletResponse httpServletResponse = Mockito.mock(HttpServletResponse.class);
PrintWriter printWriter = Mockito.mock(PrintWriter.class);
Mockito.when(httpServletResponse.getWriter()).thenReturn(printWriter);
HttpServletRequest httpServletRequest = Mockito.mock(HttpServletRequest.class);
Mockito.when(httpServletRequest.getHeader(eq(LongPollingService.LONG_POLLING_NO_HANG_UP_HEADER))).thenReturn(null);
String clientIp = "192.168.0.1";
Mockito.when(httpServletRequest.getHeader(eq("X-Forwarded-For"))).thenReturn(clientIp);
Mockito.when(httpServletRequest.startAsync()).thenReturn(Mockito.mock(AsyncContext.class));
int propSize = 3;
Map<String, String> clientMd5Map = new HashMap<>();
longPollingService.addLongPollingClient(httpServletRequest, httpServletResponse, clientMd5Map, propSize);
Thread.sleep(3000L);
//expect a 503 response to be written after the scheduled rejection delay
Mockito.verify(httpServletResponse, times(1)).setStatus(eq(503));
} |
public static int getXpForLevel(int level)
{
if (level < 1 || level > MAX_VIRT_LEVEL)
{
throw new IllegalArgumentException(level + " is not a valid level");
}
// XP_FOR_LEVEL[0] is XP for level 1
return XP_FOR_LEVEL[level - 1];
} | @Test(expected = IllegalArgumentException.class)
public void testGetXpForLowLevel()
{
int xp = Experience.getXpForLevel(0);
} |
public static Map<String, String> parseParameters(String rawParameters) {
if (StringUtils.isBlank(rawParameters)) {
return Collections.emptyMap();
}
Matcher matcher = PARAMETERS_PATTERN.matcher(rawParameters);
if (!matcher.matches()) {
return Collections.emptyMap();
}
String pairs = matcher.group(1);
String[] pairArr = pairs.split("\\s*,\\s*");
Map<String, String> parameters = new HashMap<>();
for (String pair : pairArr) {
Matcher pairMatcher = PAIR_PARAMETERS_PATTERN.matcher(pair);
if (pairMatcher.matches()) {
parameters.put(pairMatcher.group(1), pairMatcher.group(2));
}
}
return parameters;
} | @Test
void testParseParameters() {
String legalStr = "[{key1:value1},{key2:value2}]";
Map<String, String> legalMap = StringUtils.parseParameters(legalStr);
assertEquals(2, legalMap.size());
assertEquals("value2", legalMap.get("key2"));
String str = StringUtils.encodeParameters(legalMap);
assertEqualsWithoutSpaces(legalStr, str);
String legalSpaceStr = "[{key1: value1}, {key2 :value2}]";
Map<String, String> legalSpaceMap = StringUtils.parseParameters(legalSpaceStr);
assertEquals(2, legalSpaceMap.size());
assertEquals("value2", legalSpaceMap.get("key2"));
str = StringUtils.encodeParameters(legalSpaceMap);
assertEqualsWithoutSpaces(legalSpaceStr, str);
String legalSpecialStr = "[{key-1: value*.1}, {key.2 :value*.-_2}]";
Map<String, String> legalSpecialMap = StringUtils.parseParameters(legalSpecialStr);
assertEquals(2, legalSpecialMap.size());
assertEquals("value*.1", legalSpecialMap.get("key-1"));
assertEquals("value*.-_2", legalSpecialMap.get("key.2"));
str = StringUtils.encodeParameters(legalSpecialMap);
assertEqualsWithoutSpaces(legalSpecialStr, str);
String illegalStr = "[{key=value},{aa:bb}]";
Map<String, String> illegalMap = StringUtils.parseParameters(illegalStr);
assertEquals(0, illegalMap.size());
str = StringUtils.encodeParameters(illegalMap);
assertEquals(null, str);
String emptyMapStr = "[]";
Map<String, String> emptyMap = StringUtils.parseParameters(emptyMapStr);
assertEquals(0, emptyMap.size());
} |
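
The parser works in two regex passes: an outer pattern validates the `[{k1:v1},{k2:v2}]` wrapper and captures its body, and a per-pair pattern extracts each key/value. A self-contained sketch with assumed patterns (the real `PARAMETERS_PATTERN` and `PAIR_PARAMETERS_PATTERN` in Dubbo's `StringUtils` may differ):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class ParamParseSketch {
    // Outer pattern validates the whole "[{...},{...}]" wrapper; group(1) is the pair list.
    static final Pattern OUTER =
            Pattern.compile("^\\[((\\s*\\{\\s*[\\w.\\-]+\\s*:\\s*.+?\\s*\\}\\s*,?)+)\\s*\\]$");
    // Pair pattern extracts one "{key:value}" entry.
    static final Pattern PAIR = Pattern.compile("^\\{\\s*([\\w.\\-]+)\\s*:\\s*(.+?)\\s*\\}$");

    static Map<String, String> parse(String raw) {
        Map<String, String> out = new HashMap<>();
        if (raw == null) {
            return out;
        }
        Matcher m = OUTER.matcher(raw);
        if (!m.matches()) {
            return out; // malformed wrapper (e.g. "{key=value}") -> empty map
        }
        for (String pair : m.group(1).split("\\s*,\\s*")) {
            Matcher pm = PAIR.matcher(pair.trim());
            if (pm.matches()) {
                out.put(pm.group(1), pm.group(2));
            }
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(parse("[{key1:value1},{key2:value2}]")); // {key1=value1, key2=value2}
        System.out.println(parse("[{key=value},{aa:bb}]"));         // {} (fails the outer match)
    }
}
```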
@Override
public Set<Link> getEgressLinks(ConnectPoint connectPoint) {
checkNotNull(connectPoint, CONNECT_POINT_NULL);
return manager.getVirtualLinks(this.networkId())
.stream()
.filter(link -> (connectPoint.equals(link.dst())))
.collect(Collectors.toSet());
} | @Test(expected = NullPointerException.class)
public void testGetEgressLinksByNullId() {
manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
LinkService linkService = manager.get(virtualNetwork.id(), LinkService.class);
// test the getEgressLinks() method with a null connect point.
linkService.getEgressLinks(null);
} |
public ZkService chooseService() {
if (zkService != null) {
return zkService;
}
synchronized (this) {
if (zkService == null) {
final String version = lbConfig.getZkServerVersion();
if (version.startsWith(VERSION_34_PREFIX)) {
zkService = PluginServiceManager.getPluginService(ZkService34.class);
}
}
}
if (zkService == null) {
throw new IllegalArgumentException(String.format(Locale.ENGLISH,
"Can not get target zookeeper client version(%s) service", lbConfig.getZkServerVersion()));
}
return zkService;
} | @Test(expected = IllegalArgumentException.class)
public void chooseServiceWithNoVersion() {
lbConfig.setZkServerVersion("9.9.9");
final ZkServiceManager zkServiceManager = new ZkServiceManager();
zkServiceManager.chooseService();
} |
public static Object decodeToJson(ExecutionContext ctx, List<Byte> bytesList) throws IOException {
return TbJson.parse(ctx, bytesToString(bytesList));
} | @Test
public void parseStringDecodeToJson() throws IOException {
String expectedStr = "{\"hello\": \"world\"}";
ExecutionHashMap<String, Object> expectedJson = new ExecutionHashMap<>(1, ctx);
expectedJson.put("hello", "world");
Object actualJson = TbUtils.decodeToJson(ctx, expectedStr);
Assertions.assertEquals(expectedJson, actualJson);
} |
@Override
public CodegenTableDO getCodegenTable(Long id) {
return codegenTableMapper.selectById(id);
} | @Test
public void testGetCodegenTable() {
// mock 数据
CodegenTableDO tableDO = randomPojo(CodegenTableDO.class, o -> o.setScene(CodegenSceneEnum.ADMIN.getScene()));
codegenTableMapper.insert(tableDO);
// 准备参数
Long id = tableDO.getId();
// 调用
CodegenTableDO result = codegenService.getCodegenTable(id);
// 断言
assertPojoEquals(tableDO, result);
} |
@Override
public int getPartitionId() {
return partitionId;
} | @Test
public void testGetPartitionId() {
assertEquals(1, batchEventData.getPartitionId());
assertEquals(1, batchEventDataSameAttribute.getPartitionId());
assertEquals(1, batchEventDataOtherSource.getPartitionId());
assertEquals(2, batchEventDataOtherPartitionId.getPartitionId());
assertEquals(1, batchEventDataOtherEvent.getPartitionId());
assertEquals(1, batchEventDataNoEvent.getPartitionId());
} |
public AnnouncerHostPrefixGenerator(String hostName)
{
if (hostName == null)
{
_hostName = null;
}
else
{
// Use only the machine name as the prefix, rather than the entire FQDN, to keep the name short
int machineNameEndIndex = hostName.indexOf('.');
_hostName = machineNameEndIndex > 0 ? hostName.substring(0, machineNameEndIndex) : hostName;
}
} | @Test(dataProvider = "prefixGeneratorDataProvider")
public void testAnnouncerHostPrefixGenerator(String hostName, String expectedPrefix)
{
AnnouncerHostPrefixGenerator prefixGenerator = new AnnouncerHostPrefixGenerator(hostName);
String actualPrefix = prefixGenerator.generatePrefix();
Assert.assertEquals(actualPrefix, expectedPrefix);
} |
public static Tags fromString(String tagsString) {
if (tagsString == null || tagsString.isBlank()) return empty();
return new Tags(Set.of(tagsString.trim().split(" +")));
} | @Test
public void testDeserialization() {
assertEquals(new Tags(Set.of("tag1", "tag2")), Tags.fromString(" tag1 tag2 "));
} |
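A standalone check of the same split rule, assuming nothing beyond java.util (note that " +" matches runs of spaces only, not tabs or newlines):

import java.util.Set;

public class TagSplitSketch {
    public static void main(String[] args) {
        // trim() drops leading/trailing spaces; split(" +") collapses interior runs
        Set<String> tags = Set.of(" tag1   tag2 ".trim().split(" +"));
        System.out.println(tags); // [tag1, tag2] (iteration order unspecified)
    }
}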
@Override
public Iterator<String> iterator() {
return Arrays.asList(getPathComponents()).iterator();
} | @Test
public void testIterator() {
List<String> queuePathCollection = ImmutableList.copyOf(TEST_QUEUE_PATH.iterator());
List<String> queuePathWithEmptyPartCollection = ImmutableList.copyOf(
QUEUE_PATH_WITH_EMPTY_PART.iterator());
List<String> rootPathCollection = ImmutableList.copyOf(ROOT_PATH.iterator());
Assert.assertEquals(4, queuePathCollection.size());
Assert.assertEquals(CapacitySchedulerConfiguration.ROOT, queuePathCollection.get(0));
Assert.assertEquals("level_3", queuePathCollection.get(3));
Assert.assertEquals(3, queuePathWithEmptyPartCollection.size());
Assert.assertEquals(CapacitySchedulerConfiguration.ROOT,
queuePathWithEmptyPartCollection.get(0));
Assert.assertEquals("level_2", queuePathWithEmptyPartCollection.get(2));
Assert.assertEquals(1, rootPathCollection.size());
Assert.assertEquals(CapacitySchedulerConfiguration.ROOT, rootPathCollection.get(0));
} |
public static String getSIntA( int... intA ) {
    //
    if ( intA == null || intA.length == 0 ) return "?";
    //
    StringBuilder info = new StringBuilder();
    //
    for ( int i = 0; i < intA.length; i++ ) {
        //
        if ( info.length() > 0 ) info.append( ", " );
        info.append( BTools.getSInt( intA[ i ] ) );
    }
    //
    return info.toString();
} | @Test
public void testgetSIntA() throws Exception {
//
assertEquals( "?", BTools.getSIntA( null ) );
assertEquals( "?", BTools.getSIntA( ) );
assertEquals( "0", BTools.getSIntA( 0 ) );
assertEquals( "5, 6, 7", BTools.getSIntA( 5, 6, 7 ) );
int[] intA = { 2, 3, 4, 5, 6 };
assertEquals( "2, 3, 4, 5, 6", BTools.getSIntA( intA ) );
//
} |
public Resource getIncrementAllocation() {
Long memory = null;
Integer vCores = null;
Map<String, Long> others = new HashMap<>();
ResourceInformation[] resourceTypes = ResourceUtils.getResourceTypesArray();
for (int i=0; i < resourceTypes.length; ++i) {
String name = resourceTypes[i].getName();
String propertyKey = getAllocationIncrementPropKey(name);
String propValue = get(propertyKey);
if (propValue != null) {
Matcher matcher = RESOURCE_REQUEST_VALUE_PATTERN.matcher(propValue);
if (matcher.matches()) {
long value = Long.parseLong(matcher.group(1));
String unit = matcher.group(2);
long valueInDefaultUnits = getValueInDefaultUnits(value, unit, name);
others.put(name, valueInDefaultUnits);
} else {
throw new IllegalArgumentException("Property " + propertyKey +
" is not in \"value [unit]\" format: " + propValue);
}
}
}
if (others.containsKey(ResourceInformation.MEMORY_MB.getName())) {
memory = others.get(ResourceInformation.MEMORY_MB.getName());
if (get(RM_SCHEDULER_INCREMENT_ALLOCATION_MB) != null) {
String overridingKey = getAllocationIncrementPropKey(
ResourceInformation.MEMORY_MB.getName());
LOG.warn("Configuration " + overridingKey + "=" + get(overridingKey) +
" is overriding the " + RM_SCHEDULER_INCREMENT_ALLOCATION_MB +
"=" + get(RM_SCHEDULER_INCREMENT_ALLOCATION_MB) + " property");
}
others.remove(ResourceInformation.MEMORY_MB.getName());
} else {
memory = getLong(
RM_SCHEDULER_INCREMENT_ALLOCATION_MB,
DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB);
}
if (others.containsKey(ResourceInformation.VCORES.getName())) {
vCores = others.get(ResourceInformation.VCORES.getName()).intValue();
if (get(RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES) != null) {
String overridingKey = getAllocationIncrementPropKey(
ResourceInformation.VCORES.getName());
LOG.warn("Configuration " + overridingKey + "=" + get(overridingKey) +
" is overriding the " + RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES +
"=" + get(RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES) + " property");
}
others.remove(ResourceInformation.VCORES.getName());
} else {
vCores = getInt(
RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES,
DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES);
}
return Resource.newInstance(memory, vCores, others);
} | @Test
public void testAllocationIncrementCustomResource() {
try {
initResourceTypes();
Configuration conf = new Configuration();
conf.set(YarnConfiguration.RESOURCE_TYPES + ".a-custom-resource" +
FairSchedulerConfiguration.INCREMENT_ALLOCATION, "10");
FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
Resource increment = fsc.getIncrementAllocation();
DominantResourceCalculator calculator =
new DominantResourceCalculator();
Resource min = Resources.createResource(0L, 0);
Resource max = Resource.newInstance(Long.MAX_VALUE,
Integer.MAX_VALUE, Collections.singletonMap(A_CUSTOM_RESOURCE,
Long.MAX_VALUE / UnitsConversionUtil.convert("k", "", 1L)));
assertEquals(customResourceInformation(10000L, ""),
calculator.normalize(customResource(9999L, ""), min, max, increment)
.getResourceInformation(A_CUSTOM_RESOURCE));
assertEquals(customResourceInformation(10000L, ""),
calculator.normalize(customResource(10000L, ""), min, max, increment)
.getResourceInformation(A_CUSTOM_RESOURCE));
assertEquals(customResourceInformation(20000L, ""),
calculator.normalize(customResource(19999L, ""), min, max, increment)
.getResourceInformation(A_CUSTOM_RESOURCE));
assertEquals(customResourceInformation(10L, "k"),
calculator.normalize(customResource(9L, "k"), min, max, increment)
.getResourceInformation(A_CUSTOM_RESOURCE));
assertEquals(customResourceInformation(10L, "k"),
calculator.normalize(customResource(10L, "k"), min, max, increment)
.getResourceInformation(A_CUSTOM_RESOURCE));
assertEquals(customResourceInformation(20L, "k"),
calculator.normalize(customResource(11L, "k"), min, max, increment)
.getResourceInformation(A_CUSTOM_RESOURCE));
} finally {
ResourceUtils.resetResourceTypes(new Configuration());
}
} |
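The increment properties are parsed with a "value [unit]" pattern; the actual RESOURCE_REQUEST_VALUE_PATTERN is not shown above, so the regex below is an assumed equivalent shape used only to illustrate the two capture groups:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class IncrementValueSketch {
    // assumption: digits, an optional single space, an optional alphabetic unit
    private static final Pattern VALUE_WITH_UNIT = Pattern.compile("^([0-9]+) ?([a-zA-Z]*)$");

    public static void main(String[] args) {
        Matcher m = VALUE_WITH_UNIT.matcher("10 k");
        if (m.matches()) {
            long value = Long.parseLong(m.group(1)); // 10
            String unit = m.group(2);                // "k"
            System.out.println(value + " [" + unit + "]");
        }
    }
}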
public ReferenceBuilder<T> client(String client) {
this.client = client;
return getThis();
} | @Test
void client() {
ReferenceBuilder builder = new ReferenceBuilder();
builder.client("client");
Assertions.assertEquals("client", builder.build().getClient());
} |
@Override
public List<Class<? extends Event>> subscribeTypes() {
return new LinkedList<>(interestedEvents.keySet());
} | @Test
void testSubscribeTypes() {
List<Class<? extends Event>> actual = combinedTraceSubscriber.subscribeTypes();
assertEquals(10, actual.size());
assertTrue(actual.contains(RegisterInstanceTraceEvent.class));
assertTrue(actual.contains(DeregisterInstanceTraceEvent.class));
assertTrue(actual.contains(UpdateInstanceTraceEvent.class));
assertTrue(actual.contains(RegisterServiceTraceEvent.class));
assertTrue(actual.contains(DeregisterServiceTraceEvent.class));
assertTrue(actual.contains(SubscribeServiceTraceEvent.class));
assertTrue(actual.contains(UnsubscribeServiceTraceEvent.class));
assertTrue(actual.contains(UpdateServiceTraceEvent.class));
assertTrue(actual.contains(PushServiceTraceEvent.class));
assertTrue(actual.contains(HealthStateChangeTraceEvent.class));
} |
@Override
public void run() {
try { // make sure we call afterRun() even on crashes
// and operate countdown latches, else we may hang the parallel runner
if (steps == null) {
beforeRun();
}
if (skipped) {
return;
}
int count = steps.size();
int index = 0;
while ((index = nextStepIndex()) < count) {
currentStep = steps.get(index);
execute(currentStep);
if (currentStepResult != null) { // can be null if debug step-back or hook skip
result.addStepResult(currentStepResult);
}
}
} catch (Exception e) {
if (currentStepResult != null) {
result.addStepResult(currentStepResult);
}
logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
} finally {
if (!skipped) {
afterRun();
if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
featureRuntime.suite.abort();
}
}
if (caller.isNone()) {
logAppender.close(); // reclaim memory
}
}
} | @Test
void testArrayOnLhs() {
run(
"match [] == '#[]'"
);
} |
public final void isLessThan(int other) {
isLessThan((double) other);
} | @Test
public void isLessThan_int() {
expectFailureWhenTestingThat(2.0).isLessThan(2);
assertThat(2.0).isLessThan(3);
} |
public static <T extends PipelineOptions> T as(Class<T> klass) {
return new Builder().as(klass);
} | @Test
public void testGettersAnnotatedWithInconsistentDefaultValue() throws Exception {
// Initial construction is valid.
GetterWithDefault options = PipelineOptionsFactory.as(GetterWithDefault.class);
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage(
"Property [object] is marked with contradictory annotations. Found ["
+ "[Default.Integer(value=1) on org.apache.beam.sdk.options.PipelineOptionsFactoryTest"
+ "$GetterWithDefault#getObject()], "
+ "[Default.Integer(value=0) on org.apache.beam.sdk.options.PipelineOptionsFactoryTest"
+ "$GetterWithInconsistentDefaultValue#getObject()]].");
// When we attempt to convert, we should error at this moment.
options.as(GetterWithInconsistentDefaultValue.class);
} |
public static Font createFont() {
return new Font(null);
} | @Test
public void createFontTest(){
final Font font = FontUtil.createFont();
assertNotNull(font);
} |
@Override
public boolean dropNamespace(Namespace namespace) {
if (!isValidateNamespace(namespace)) {
return false;
}
try {
clients.run(client -> {
client.dropDatabase(namespace.level(0),
false /* deleteData */,
false /* ignoreUnknownDb */,
false /* cascade */);
return null;
});
LOG.info("Dropped namespace: {}", namespace);
return true;
} catch (InvalidOperationException e) {
throw new NamespaceNotEmptyException(e, "Namespace %s is not empty. One or more tables exist.", namespace);
} catch (NoSuchObjectException e) {
return false;
} catch (TException e) {
throw new RuntimeException("Failed to drop namespace " + namespace + " in Hive Metastore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(
"Interrupted in call to drop dropDatabase(name) " + namespace + " in Hive Metastore", e);
}
} | @Test
public void dropNamespace() {
Namespace namespace = Namespace.of("dbname_drop");
TableIdentifier identifier = TableIdentifier.of(namespace, "table");
Schema schema = getTestSchema();
catalog.createNamespace(namespace, meta);
catalog.createTable(identifier, schema);
Map<String, String> nameMata = catalog.loadNamespaceMetadata(namespace);
assertThat(nameMata).containsEntry("owner", "apache");
assertThat(nameMata).containsEntry("group", "iceberg");
assertThatThrownBy(() -> catalog.dropNamespace(namespace))
.isInstanceOf(NamespaceNotEmptyException.class)
.hasMessage("Namespace dbname_drop is not empty. One or more tables exist.");
assertThat(catalog.dropTable(identifier, true)).isTrue();
assertThat(catalog.dropNamespace(namespace))
    .as("Should drop namespace once its tables have been dropped")
    .isTrue();
assertThat(catalog.dropNamespace(Namespace.of("db.ns1")))
.as("Should fail to drop when namespace doesn't exist")
.isFalse();
assertThatThrownBy(() -> catalog.loadNamespaceMetadata(namespace))
.isInstanceOf(NoSuchNamespaceException.class)
.hasMessage("Namespace does not exist: dbname_drop");
} |
private void syncFromAddressUrl() throws Exception {
RestResult<String> result = restTemplate
.get(addressServerUrl, Header.EMPTY, Query.EMPTY, genericType.getType());
if (result.ok()) {
isAddressServerHealth = true;
Reader reader = new StringReader(result.getData());
try {
afterLookup(MemberUtil.readServerConf(EnvUtil.analyzeClusterConf(reader)));
} catch (Throwable e) {
Loggers.CLUSTER.error("[serverlist] exception for analyzeClusterConf, error : {}",
ExceptionUtil.getAllExceptionMsg(e));
}
addressServerFailCount = 0;
} else {
addressServerFailCount++;
if (addressServerFailCount >= maxFailCount) {
isAddressServerHealth = false;
}
Loggers.CLUSTER.error("[serverlist] failed to get serverlist, error code {}", result.getCode());
}
} | @Test
void testSyncFromAddressUrl() throws Exception {
RestResult<String> result = restTemplate.get(addressServerUrl, Header.EMPTY, Query.EMPTY, genericType.getType());
assertEquals("1.1.1.1:8848", result.getData());
} |
@Override
public int indexOf(int fromIndex, int toIndex, byte value) {
if (fromIndex <= toIndex) {
return ByteBufUtil.firstIndexOf(this, fromIndex, toIndex, value);
}
return ByteBufUtil.lastIndexOf(this, fromIndex, toIndex, value);
} | @Test
public void testIndexOf() {
buffer.clear();
// Ensure the buffer is completely zero'ed.
buffer.setZero(0, buffer.capacity());
buffer.writeByte((byte) 1);
buffer.writeByte((byte) 2);
buffer.writeByte((byte) 3);
buffer.writeByte((byte) 2);
buffer.writeByte((byte) 1);
assertEquals(-1, buffer.indexOf(1, 4, (byte) 1));
assertEquals(-1, buffer.indexOf(4, 1, (byte) 1));
assertEquals(1, buffer.indexOf(1, 4, (byte) 2));
assertEquals(3, buffer.indexOf(4, 1, (byte) 2));
try {
buffer.indexOf(0, buffer.capacity() + 1, (byte) 0);
fail();
} catch (IndexOutOfBoundsException expected) {
// expected
}
try {
buffer.indexOf(buffer.capacity(), -1, (byte) 0);
fail();
} catch (IndexOutOfBoundsException expected) {
// expected
}
assertEquals(4, buffer.indexOf(buffer.capacity() + 1, 0, (byte) 1));
assertEquals(0, buffer.indexOf(-1, buffer.capacity(), (byte) 1));
} |
public static <S> S loadFirst(final Class<S> clazz) {
final ServiceLoader<S> loader = loadAll(clazz);
final Iterator<S> iterator = loader.iterator();
if (!iterator.hasNext()) {
throw new IllegalStateException(String.format(
"No implementation defined in /META-INF/services/%s, please check whether the file exists and has the right implementation class!",
clazz.getName()));
}
return iterator.next();
} | @Test
public void testLoadFirstNoDefined() {
assertThrows(IllegalStateException.class, () -> SpiLoadFactory.loadFirst(List.class));
} |
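Under the hood this is the JDK ServiceLoader; a minimal sketch of the same first-provider-wins lookup (provider registration in META-INF/services is assumed):

import java.util.Iterator;
import java.util.ServiceLoader;

public class LoadFirstSketch {
    static <S> S loadFirstOrThrow(Class<S> clazz) {
        Iterator<S> it = ServiceLoader.load(clazz).iterator();
        if (!it.hasNext()) {
            throw new IllegalStateException("No provider registered for " + clazz.getName());
        }
        return it.next(); // the first provider listed in META-INF/services/<clazz>
    }
}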
protected DurationSerializer() {
super(Duration.class);
} | @Test
void testDurationSerializer() throws IOException {
Duration duration = Duration.ofNanos(323567890098765L);
JsonGenerator jsonGenerator = mock(JsonGenerator.class);
durationSerializer.serialize(duration, jsonGenerator, null);
ArgumentCaptor<String> argumentCaptor = ArgumentCaptor.forClass(String.class);
verify(jsonGenerator).writeNumber(argumentCaptor.capture());
String captorValue = argumentCaptor.getValue();
assertThat(captorValue).isEqualTo("323567.890098765");
} |
protected boolean setActive(String appName) {
try {
File active = appFile(appName, "active");
createParentDirs(active);
return active.createNewFile() && updateTime(appName);
} catch (IOException e) {
log.warn("Unable to mark app {} as active", appName, e);
throw new ApplicationException("Unable to mark app as active", e);
}
} | @Test(expected = ApplicationException.class)
@Ignore("No longer needed")
public void setBadActive() throws IOException {
aar.setActive("org.foo.BAD");
} |
public <T extends Output<T>> void save(Path csvPath, Dataset<T> dataset, String responseName) throws IOException {
save(csvPath, dataset, Collections.singleton(responseName));
} | @Test
public void testSave() throws IOException {
URL path = CSVSaverTest.class.getResource("/org/tribuo/data/csv/test.csv");
Set<String> responses = Collections.singleton("RESPONSE");
//
// Load the csv
CSVLoader<MockOutput> loader = new CSVLoader<>(new MockOutputFactory());
DataSource<MockOutput> source = loader.loadDataSource(path, responses);
Dataset<MockOutput> before = new MutableDataset<>(source);
//
// Save the dataset
CSVSaver saver = new CSVSaver();
Path tmp = Files.createTempFile("foo","csv");
tmp.toFile().deleteOnExit();
saver.save(tmp, before, "RESPONSE");
//
// Reload and check that before & after are equivalent.
MutableDataset<MockOutput> after = loader.load(tmp, responses);
assertEquals(before.getData(), after.getData());
assertEquals(before.getOutputIDInfo().size(), after.getOutputIDInfo().size());
assertEquals(before.getFeatureIDMap().size(), after.getFeatureIDMap().size());
for (int i = 0; i < before.size(); i++) {
Example<MockOutput> b = before.getExample(i);
Example<MockOutput> a = after.getExample(i);
assertEquals(a,b);
}
} |
@Override
public ExecuteContext onThrow(ExecuteContext context) {
ThreadLocalUtils.removeRequestTag();
return context;
} | @Test
public void testOnThrow() {
ThreadLocalUtils.addRequestTag(Collections.singletonMap("bar", Collections.singletonList("foo")));
Assert.assertNotNull(ThreadLocalUtils.getRequestTag());
// Test the onThrow method to verify if thread variables are released
interceptor.onThrow(context);
Assert.assertNull(ThreadLocalUtils.getRequestTag());
} |
@Override
public Decorator findById(String decoratorId) throws NotFoundException {
final Decorator result = coll.findOneById(decoratorId);
if (result == null) {
throw new NotFoundException("Decorator with id " + decoratorId + " not found.");
}
return result;
} | @Test
public void findByIdThrowsNotFoundExceptionForMissingDecorator() throws NotFoundException {
expectedException.expect(NotFoundException.class);
expectedException.expectMessage("Decorator with id 588bcafebabedeadbeef0001 not found.");
decoratorService.findById("588bcafebabedeadbeef0001");
} |
public void run(String[] args) {
if (!parseArguments(args)) {
showOptions();
return;
}
if (command == null) {
System.out.println("Error: Command is empty");
System.out.println();
showOptions();
return;
}
if (password == null) {
System.out.println("Error: Password is empty");
System.out.println();
showOptions();
return;
}
if (input == null) {
System.out.println("Error: Input is empty");
System.out.println();
showOptions();
return;
}
encryptor.setPassword(password);
if (algorithm != null) {
encryptor.setAlgorithm(algorithm);
}
if (randomSaltGeneratorAlgorithm != null) {
encryptor.setSaltGenerator(new RandomSaltGenerator(randomSaltGeneratorAlgorithm));
}
if (randomIvGeneratorAlgorithm != null) {
encryptor.setIvGenerator(new RandomIvGenerator(randomIvGeneratorAlgorithm));
}
if ("encrypt".equals(command)) {
System.out.println("Encrypted text: " + encryptor.encrypt(input));
} else {
System.out.println("Decrypted text: " + encryptor.decrypt(input));
}
} | @Test
public void testMissingInput() {
Main main = new Main();
assertDoesNotThrow(() -> main.run("-c encrypt -p secret".split(" ")));
} |
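A hedged usage sketch of a run that supplies all three required flags (the flag names come from the test above; the literal input value is illustrative only):

// equivalent to: java Main -c encrypt -p secret -i tiger
Main main = new Main();
main.run(new String[]{"-c", "encrypt", "-p", "secret", "-i", "tiger"});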
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
JsonNode jsonValue;
// This handles a tombstone message
if (value == null) {
return SchemaAndValue.NULL;
}
try {
jsonValue = deserializer.deserialize(topic, value);
} catch (SerializationException e) {
throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
}
if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)))
throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." +
" If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");
// The deserialized data should either be an envelope object containing the schema and the payload or the schema
// was stripped during serialization and we need to fill in an all-encompassing schema.
if (!config.schemasEnabled()) {
ObjectNode envelope = JSON_NODE_FACTORY.objectNode();
envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
jsonValue = envelope;
}
Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
return new SchemaAndValue(
schema,
convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config)
);
} | @Test
public void arrayToConnect() {
byte[] arrayJson = "{ \"schema\": { \"type\": \"array\", \"items\": { \"type\" : \"int32\" } }, \"payload\": [1, 2, 3] }".getBytes();
assertEquals(new SchemaAndValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList(1, 2, 3)), converter.toConnectData(TOPIC, arrayJson));
} |
public Duration cacheMaxTimeToLive() {
return cacheMaxTimeToLive;
} | @Test
void cacheMaxTimeToLive() {
assertThat(builder.build().cacheMaxTimeToLive()).isEqualTo(DEFAULT_CACHE_MAX_TIME_TO_LIVE);
Duration cacheMaxTimeToLive = Duration.ofSeconds(5);
builder.cacheMaxTimeToLive(cacheMaxTimeToLive);
assertThat(builder.build().cacheMaxTimeToLive()).isEqualTo(cacheMaxTimeToLive);
} |
@Override
public InterpreterResult interpret(final String st, final InterpreterContext context)
throws InterpreterException {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("st:\n{}", st);
}
final FormType form = getFormType();
RemoteInterpreterProcess interpreterProcess = null;
try {
interpreterProcess = getOrCreateInterpreterProcess();
} catch (IOException e) {
throw new InterpreterException(e);
}
if (!interpreterProcess.isRunning()) {
return new InterpreterResult(InterpreterResult.Code.ERROR,
"Interpreter process is not running\n" + interpreterProcess.getErrorMessage());
}
return interpreterProcess.callRemoteFunction(client -> {
RemoteInterpreterResult remoteResult = client.interpret(
sessionId, className, st, convert(context));
Map<String, Object> remoteConfig = (Map<String, Object>) GSON.fromJson(
remoteResult.getConfig(), new TypeToken<Map<String, Object>>() {
}.getType());
context.getConfig().clear();
if (remoteConfig != null) {
context.getConfig().putAll(remoteConfig);
}
GUI currentGUI = context.getGui();
GUI currentNoteGUI = context.getNoteGui();
if (form == FormType.NATIVE) {
GUI remoteGui = GUI.fromJson(remoteResult.getGui());
GUI remoteNoteGui = GUI.fromJson(remoteResult.getNoteGui());
currentGUI.clear();
currentGUI.setParams(remoteGui.getParams());
currentGUI.setForms(remoteGui.getForms());
currentNoteGUI.setParams(remoteNoteGui.getParams());
currentNoteGUI.setForms(remoteNoteGui.getForms());
} else if (form == FormType.SIMPLE) {
final Map<String, Input> currentForms = currentGUI.getForms();
final Map<String, Object> currentParams = currentGUI.getParams();
final GUI remoteGUI = GUI.fromJson(remoteResult.getGui());
final Map<String, Input> remoteForms = remoteGUI.getForms();
final Map<String, Object> remoteParams = remoteGUI.getParams();
currentForms.putAll(remoteForms);
currentParams.putAll(remoteParams);
}
return convert(remoteResult);
}
);
} | @Test
void testFailToLaunchInterpreterProcess_ErrorInRunner() {
try {
System.setProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_REMOTE_RUNNER.getVarName(),
zeppelinHome.getAbsolutePath() + "/zeppelin-zengine/src/test/resources/bin/interpreter_invalid.sh");
final Interpreter interpreter1 = interpreterSetting.getInterpreter("user1", note1Id, "sleep");
final InterpreterContext context1 = createDummyInterpreterContext();
// interpret() triggers launching the RemoteInterpreterProcess, which is expected to fail
// here because the configured runner script is invalid.
try {
interpreter1.interpret("1", context1);
fail("Should not be able to launch interpreter process");
} catch (InterpreterException e) {
assertTrue(ExceptionUtils.getStackTrace(e).contains("invalid_command:"));
}
} finally {
System.clearProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_REMOTE_RUNNER.getVarName());
}
} |
public static FunctionConfig validateUpdate(FunctionConfig existingConfig, FunctionConfig newConfig) {
FunctionConfig mergedConfig = existingConfig.toBuilder().build();
if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
throw new IllegalArgumentException("Tenants differ");
}
if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
throw new IllegalArgumentException("Namespaces differ");
}
if (!existingConfig.getName().equals(newConfig.getName())) {
throw new IllegalArgumentException("Function Names differ");
}
if (!StringUtils.isEmpty(newConfig.getClassName())) {
mergedConfig.setClassName(newConfig.getClassName());
}
if (!StringUtils.isEmpty(newConfig.getJar())) {
mergedConfig.setJar(newConfig.getJar());
}
if (newConfig.getInputSpecs() == null) {
newConfig.setInputSpecs(new HashMap<>());
}
if (mergedConfig.getInputSpecs() == null) {
mergedConfig.setInputSpecs(new HashMap<>());
}
if (newConfig.getInputs() != null) {
newConfig.getInputs().forEach((topicName -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder().isRegexPattern(false).build());
}));
}
if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
ConsumerConfig.builder()
.isRegexPattern(true)
.build());
}
if (newConfig.getCustomSerdeInputs() != null) {
newConfig.getCustomSerdeInputs().forEach((topicName, serdeClassName) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.serdeClassName(serdeClassName)
.isRegexPattern(false)
.build());
});
}
if (newConfig.getCustomSchemaInputs() != null) {
newConfig.getCustomSchemaInputs().forEach((topicName, schemaClassname) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.schemaType(schemaClassname)
.isRegexPattern(false)
.build());
});
}
if (!newConfig.getInputSpecs().isEmpty()) {
newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
if (!existingConfig.getInputSpecs().containsKey(topicName)) {
throw new IllegalArgumentException("Input Topics cannot be altered");
}
if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
throw new IllegalArgumentException(
"isRegexPattern for input topic " + topicName + " cannot be altered");
}
mergedConfig.getInputSpecs().put(topicName, consumerConfig);
});
}
if (!StringUtils.isEmpty(newConfig.getOutputSerdeClassName()) && !newConfig.getOutputSerdeClassName()
.equals(existingConfig.getOutputSerdeClassName())) {
throw new IllegalArgumentException("Output Serde mismatch");
}
if (!StringUtils.isEmpty(newConfig.getOutputSchemaType()) && !newConfig.getOutputSchemaType()
.equals(existingConfig.getOutputSchemaType())) {
throw new IllegalArgumentException("Output Schema mismatch");
}
if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
mergedConfig.setLogTopic(newConfig.getLogTopic());
}
if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
.equals(existingConfig.getProcessingGuarantees())) {
throw new IllegalArgumentException("Processing Guarantees cannot be altered");
}
if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
.equals(existingConfig.getRetainOrdering())) {
throw new IllegalArgumentException("Retain Ordering cannot be altered");
}
if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
.equals(existingConfig.getRetainKeyOrdering())) {
throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
}
if (!StringUtils.isEmpty(newConfig.getOutput())) {
mergedConfig.setOutput(newConfig.getOutput());
}
if (newConfig.getUserConfig() != null) {
mergedConfig.setUserConfig(newConfig.getUserConfig());
}
if (newConfig.getSecrets() != null) {
mergedConfig.setSecrets(newConfig.getSecrets());
}
if (newConfig.getRuntime() != null && !newConfig.getRuntime().equals(existingConfig.getRuntime())) {
throw new IllegalArgumentException("Runtime cannot be altered");
}
if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
throw new IllegalArgumentException("AutoAck cannot be altered");
}
if (newConfig.getMaxMessageRetries() != null) {
mergedConfig.setMaxMessageRetries(newConfig.getMaxMessageRetries());
}
if (!StringUtils.isEmpty(newConfig.getDeadLetterTopic())) {
mergedConfig.setDeadLetterTopic(newConfig.getDeadLetterTopic());
}
if (!StringUtils.isEmpty(newConfig.getSubName()) && !newConfig.getSubName()
.equals(existingConfig.getSubName())) {
throw new IllegalArgumentException("Subscription Name cannot be altered");
}
if (newConfig.getParallelism() != null) {
mergedConfig.setParallelism(newConfig.getParallelism());
}
if (newConfig.getResources() != null) {
mergedConfig
.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
}
if (newConfig.getWindowConfig() != null) {
mergedConfig.setWindowConfig(newConfig.getWindowConfig());
}
if (newConfig.getTimeoutMs() != null) {
mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
}
if (newConfig.getCleanupSubscription() != null) {
mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
}
if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
}
if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
}
if (newConfig.getProducerConfig() != null) {
mergedConfig.setProducerConfig(newConfig.getProducerConfig());
}
return mergedConfig;
} | @Test
public void testMergeRuntimeFlags() {
FunctionConfig functionConfig = createFunctionConfig();
FunctionConfig newFunctionConfig = createUpdatedFunctionConfig("runtimeFlags", "-Dfoo=bar2");
FunctionConfig mergedConfig = FunctionConfigUtils.validateUpdate(functionConfig, newFunctionConfig);
assertEquals(
mergedConfig.getRuntimeFlags(), "-Dfoo=bar2"
);
mergedConfig.setRuntimeFlags(functionConfig.getRuntimeFlags());
assertEquals(
new Gson().toJson(functionConfig),
new Gson().toJson(mergedConfig)
);
} |
@Override
public Integer addAndGetRevRank(double score, V object) {
return get(addAndGetRevRankAsync(score, object));
} | @Test
public void testAddAndGetRevRank() {
RScoredSortedSet<Integer> set = redisson.getScoredSortedSet("simple");
Integer res = set.addAndGetRevRank(0.3, 1);
assertThat(res).isEqualTo(0);
Integer res2 = set.addAndGetRevRank(0.4, 2);
assertThat(res2).isEqualTo(0);
Integer res3 = set.addAndGetRevRank(0.2, 3);
assertThat(res3).isEqualTo(2);
Assertions.assertTrue(set.contains(3));
} |
@Override
public String toString() { return toString(false); } | @Test
void testToString() {
assertEquals("HTTP 200/OK", new HttpResult().setContent("Foo").toString());
assertEquals("HTTP 200/OK\n\nFoo", new HttpResult().setContent("Foo").toString(true));
assertEquals("HTTP 200/OK", new HttpResult().toString(true));
assertEquals("HTTP 200/OK", new HttpResult().setContent("").toString(true));
} |
@SuppressWarnings("deprecation")
public static void setClasspath(Map<String, String> environment,
Configuration conf) throws IOException {
boolean userClassesTakesPrecedence =
conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false);
String classpathEnvVar =
conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, false)
? Environment.APP_CLASSPATH.name() : Environment.CLASSPATH.name();
MRApps.addToEnvironment(environment,
classpathEnvVar, crossPlatformifyMREnv(conf, Environment.PWD), conf);
if (!userClassesTakesPrecedence) {
MRApps.setMRFrameworkClasspath(environment, conf);
}
/*
* We use "*" for the name of the JOB_JAR instead of MRJobConfig.JOB_JAR for
* the case where the job jar is not necessarily named "job.jar". This can
* happen, for example, when the job is leveraging a resource from the YARN
* shared cache.
*/
MRApps.addToEnvironment(
environment,
classpathEnvVar,
MRJobConfig.JOB_JAR + Path.SEPARATOR + "*", conf);
MRApps.addToEnvironment(
environment,
classpathEnvVar,
MRJobConfig.JOB_JAR + Path.SEPARATOR + "classes" + Path.SEPARATOR, conf);
MRApps.addToEnvironment(
environment,
classpathEnvVar,
MRJobConfig.JOB_JAR + Path.SEPARATOR + "lib" + Path.SEPARATOR + "*", conf);
MRApps.addToEnvironment(
environment,
classpathEnvVar,
crossPlatformifyMREnv(conf, Environment.PWD) + Path.SEPARATOR + "*", conf);
// a * in the classpath will only find a .jar, so we need to filter out
// all .jars and add everything else
addToClasspathIfNotJar(JobContextImpl.getFileClassPaths(conf),
JobContextImpl.getCacheFiles(conf),
conf,
environment, classpathEnvVar);
addToClasspathIfNotJar(JobContextImpl.getArchiveClassPaths(conf),
JobContextImpl.getCacheArchives(conf),
conf,
environment, classpathEnvVar);
if (userClassesTakesPrecedence) {
MRApps.setMRFrameworkClasspath(environment, conf);
}
} | @Test
@Timeout(120000)
public void testSetClasspathWithJobClassloader() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
Map<String, String> env = new HashMap<String, String>();
MRApps.setClasspath(env, conf);
String cp = env.get("CLASSPATH");
String appCp = env.get("APP_CLASSPATH");
assertFalse(cp.contains("jar" + ApplicationConstants.CLASS_PATH_SEPARATOR + "job"),
"MAPREDUCE_JOB_CLASSLOADER true, but job.jar is in the" + " classpath!");
assertFalse(cp.contains("PWD"), "MAPREDUCE_JOB_CLASSLOADER true, but PWD is in the classpath!");
String expectedAppClasspath = StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,
Arrays.asList(ApplicationConstants.Environment.PWD.$$(), "job.jar/*", "job.jar/classes/",
"job.jar/lib/*", ApplicationConstants.Environment.PWD.$$() + "/*"));
assertEquals(expectedAppClasspath, appCp,
"MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not in the app" + " classpath!");
} |
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
boolean cancelled = original.cancel(mayInterruptIfRunning);
try {
return peel().cancel(mayInterruptIfRunning);
} catch (CancellationException e) {
// ignore; cancelled before scheduled-in
ignore();
}
return cancelled;
} | @Test
public void cancel_twice() {
ScheduledFuture<Future<Integer>> original = taskScheduler.schedule(new SimpleCallableTestTask(), 10, TimeUnit.SECONDS);
ScheduledFuture stripper = new DelegatingScheduledFutureStripper<Future<Integer>>(original);
stripper.cancel(true);
stripper.cancel(true);
} |
public void resetOffsetsTo(final Consumer<byte[], byte[]> client,
final Set<TopicPartition> inputTopicPartitions,
final Long offset) {
final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
final Map<TopicPartition, Long> topicPartitionsAndOffset = new HashMap<>(inputTopicPartitions.size());
for (final TopicPartition topicPartition : inputTopicPartitions) {
topicPartitionsAndOffset.put(topicPartition, offset);
}
final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
for (final TopicPartition topicPartition : inputTopicPartitions) {
client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
}
} | @Test
public void testResetToSpecificOffsetWhenAfterEndOffset() {
final Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(topicPartition, 3L);
consumer.updateEndOffsets(endOffsets);
final Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
beginningOffsets.put(topicPartition, 0L);
consumer.updateBeginningOffsets(beginningOffsets);
streamsResetter.resetOffsetsTo(consumer, inputTopicPartitions, 4L);
final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
assertEquals(2, records.count());
} |
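checkOffsetRange is not shown above, so the clamping rule below is an assumption inferred from the test: a requested offset past the end snaps back to the end offset (and one below the beginning would snap up):

public class OffsetClampSketch {
    static long clamp(long requested, long beginning, long end) {
        return Math.min(Math.max(requested, beginning), end);
    }

    public static void main(String[] args) {
        System.out.println(clamp(4L, 0L, 3L)); // 3 -- the case exercised by the test above
    }
}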
@Override
public Object pageListService(String namespaceId, String groupName, String serviceName, int pageNo, int pageSize,
String instancePattern, boolean ignoreEmptyService) throws NacosException {
ObjectNode result = JacksonUtils.createEmptyJsonNode();
List<ServiceView> serviceViews = new LinkedList<>();
Collection<Service> services = patternServices(namespaceId, groupName, serviceName);
if (ignoreEmptyService) {
services = services.stream().filter(each -> 0 != serviceStorage.getData(each).ipCount())
.collect(Collectors.toList());
}
result.put(FieldsConstants.COUNT, services.size());
services = doPage(services, pageNo - 1, pageSize);
for (Service each : services) {
ServiceMetadata serviceMetadata = metadataManager.getServiceMetadata(each).orElseGet(ServiceMetadata::new);
ServiceView serviceView = new ServiceView();
serviceView.setName(each.getName());
serviceView.setGroupName(each.getGroup());
serviceView.setClusterCount(serviceStorage.getClusters(each).size());
serviceView.setIpCount(serviceStorage.getData(each).ipCount());
serviceView.setHealthyInstanceCount(countHealthyInstance(serviceStorage.getData(each)));
serviceView.setTriggerFlag(isProtectThreshold(serviceView, serviceMetadata) ? "true" : "false");
serviceViews.add(serviceView);
}
result.set(FieldsConstants.SERVICE_LIST, JacksonUtils.transferToJsonNode(serviceViews));
return result;
} | @Test
void testPageListServiceNotSpecifiedName() throws NacosException {
ServiceInfo serviceInfo = new ServiceInfo();
serviceInfo.setHosts(Collections.singletonList(new Instance()));
Mockito.when(serviceStorage.getData(Mockito.any())).thenReturn(serviceInfo);
ServiceMetadata metadata = new ServiceMetadata();
metadata.setProtectThreshold(0.75F);
Mockito.when(metadataManager.getServiceMetadata(Mockito.any())).thenReturn(Optional.of(metadata));
ObjectNode obj = (ObjectNode) catalogServiceV2Impl.pageListService("A", "", "", 1, 10, null, false);
assertEquals(1, obj.get(FieldsConstants.COUNT).asInt());
} |
protected boolean shouldOverwrite( OverwritePrompter prompter, Props props, String message, String rememberMessage ) {
boolean askOverwrite = Props.isInitialized() ? props.askAboutReplacingDatabaseConnections() : false;
boolean overwrite = Props.isInitialized() ? props.replaceExistingDatabaseConnections() : true;
if ( askOverwrite ) {
if ( prompter != null ) {
overwrite = prompter.overwritePrompt( message, rememberMessage, Props.STRING_ASK_ABOUT_REPLACING_DATABASES );
}
}
return overwrite;
} | @Test
public void testShouldOverwrite() {
assertTrue( meta.shouldOverwrite( null, null, null, null ) );
Props.init( Props.TYPE_PROPERTIES_EMPTY );
assertTrue( meta.shouldOverwrite( null, Props.getInstance(), "message", "remember" ) );
Props.getInstance().setProperty( Props.STRING_ASK_ABOUT_REPLACING_DATABASES, "Y" );
OverwritePrompter prompter = mock( OverwritePrompter.class );
when( prompter.overwritePrompt( "message", "remember", Props.STRING_ASK_ABOUT_REPLACING_DATABASES ) )
.thenReturn( false );
assertFalse( meta.shouldOverwrite( prompter, Props.getInstance(), "message", "remember" ) );
} |
@Override
public boolean isDetected() {
// https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project
// JENKINS_URL is not enough to identify Jenkins. It can be easily used on a non-Jenkins job.
return isNotBlank(system.envVariable("JENKINS_URL")) && isNotBlank(system.envVariable("EXECUTOR_NUMBER"));
} | @Test
public void isDetected() {
setEnvVariable("JENKINS_URL", "http://foo");
setEnvVariable("EXECUTOR_NUMBER", "12");
assertThat(underTest.isDetected()).isTrue();
setEnvVariable("JENKINS_URL", null);
setEnvVariable("EXECUTOR_NUMBER", "12");
assertThat(underTest.isDetected()).isFalse();
setEnvVariable("JENKINS_URL", "http://foo");
setEnvVariable("EXECUTOR_NUMBER", null);
assertThat(underTest.isDetected()).isFalse();
} |
public static void validateValue(Schema schema, Object value) {
validateValue(null, schema, value);
} | @Test
public void testValidateValueMismatchInt8() {
assertThrows(DataException.class,
() -> ConnectSchema.validateValue(Schema.INT8_SCHEMA, 1));
} |
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
if (value == null) {
return null;
}
return cast(value.longValue(), precision, scale);
} | @Test
public void shouldCastStringRoundDown() {
// When:
final BigDecimal decimal = DecimalUtil.cast("1.12", 2, 1);
// Then:
assertThat(decimal, is(new BigDecimal("1.1")));
} |
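A plain-BigDecimal check of the rounding the test exercises; DecimalUtil's internal rounding mode is not visible here, but HALF_UP reproduces the tested 1.12 -> 1.1 case:

import java.math.BigDecimal;
import java.math.RoundingMode;

public class DecimalCastSketch {
    public static void main(String[] args) {
        BigDecimal d = new BigDecimal("1.12").setScale(1, RoundingMode.HALF_UP);
        System.out.println(d); // 1.1
    }
}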
public static HttpResponseStatus parseLine(CharSequence line) {
return (line instanceof AsciiString) ? parseLine((AsciiString) line) : parseLine(line.toString());
} | @Test
public void parseLineStringMalformedCodeWithPhrase() {
assertThrows(IllegalArgumentException.class, new Executable() {
@Override
public void execute() {
parseLine("200a foo");
}
});
} |
private static Timestamp fromLong(Long elapsedSinceEpoch, TimestampPrecise precise) throws IllegalArgumentException {
final long seconds;
final int nanos;
switch (precise) {
case Millis:
seconds = Math.floorDiv(elapsedSinceEpoch, (long) THOUSAND);
nanos = (int) Math.floorMod(elapsedSinceEpoch, (long) THOUSAND) * MILLION;
break;
case Micros:
seconds = Math.floorDiv(elapsedSinceEpoch, (long) MILLION);
nanos = (int) Math.floorMod(elapsedSinceEpoch, (long) MILLION) * THOUSAND;
break;
case Nanos:
seconds = Math.floorDiv(elapsedSinceEpoch, (long) BILLION);
nanos = (int) Math.floorMod(elapsedSinceEpoch, (long) BILLION);
break;
default:
throw new IllegalArgumentException("Unknown precision: " + precise);
}
if (seconds < SECONDS_LOWERLIMIT || seconds > SECONDS_UPPERLIMIT) {
throw new IllegalArgumentException("given seconds is out of range");
}
if (nanos < NANOSECONDS_LOWERLIMIT || nanos > NANOSECONDS_UPPERLIMIT) {
// NOTE: this branch should be unreachable, because any excess is
// carried into seconds by the floor methods above
throw new IllegalArgumentException("given nanos is out of range");
}
return Timestamp.newBuilder().setSeconds(seconds).setNanos(nanos).build();
} | @Test
void timestampMillisConversionSecondsLowerLimit() throws Exception {
assertThrows(IllegalArgumentException.class, () -> {
TimestampMillisConversion conversion = new TimestampMillisConversion();
long exceeded = (ProtoConversions.SECONDS_LOWERLIMIT - 1) * 1000;
conversion.fromLong(exceeded, TIMESTAMP_MILLIS_SCHEMA, LogicalTypes.timestampMillis());
});
} |
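A quick arithmetic check of why floorDiv/floorMod (rather than / and %) are used above: for pre-epoch instants they keep nanos non-negative by borrowing from seconds:

public class FloorMathSketch {
    public static void main(String[] args) {
        long millis = -1_500L; // 1.5 seconds before the epoch
        long seconds = Math.floorDiv(millis, 1_000L);                 // -2 (plain / gives -1)
        int nanos = (int) Math.floorMod(millis, 1_000L) * 1_000_000;  // 500_000_000 (plain % gives -500)
        System.out.println(seconds + "s + " + nanos + "ns"); // -2s + 500000000ns == -1.5s
    }
}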
public static <T> ValueOnlyWindowedValueCoder<T> getValueOnlyCoder(Coder<T> valueCoder) {
return ValueOnlyWindowedValueCoder.of(valueCoder);
} | @Test
public void testValueOnlyWindowedValueCoderIsSerializableWithWellKnownCoderType() {
CoderProperties.coderSerializable(WindowedValue.getValueOnlyCoder(GlobalWindow.Coder.INSTANCE));
} |
protected ProducerService getProducerService() {
return service;
} | @Test
void testStartDoesNotCreateNewProducerService() {
// setup
testObj.start();
ProducerService expectedService = testObj.getProducerService();
testObj.start();
// act
ProducerService result = testObj.getProducerService();
// assert
assertEquals(expectedService, result);
} |
LoadImbalance updateImbalance() {
clearWorkingImbalance();
updateNewWorkingImbalance();
updateNewFinalImbalance();
printDebugTable();
return imbalance;
} | @Test
public void testUpdateImbalance() throws Exception {
MigratablePipeline owner1Pipeline1 = mock(MigratablePipeline.class);
when(owner1Pipeline1.load()).thenReturn(0L)
.thenReturn(100L);
when(owner1Pipeline1.owner())
.thenReturn(owner1);
loadTracker.addPipeline(owner1Pipeline1);
MigratablePipeline owner2Pipeline1 = mock(MigratablePipeline.class);
when(owner2Pipeline1.load())
.thenReturn(0L)
.thenReturn(200L);
when(owner2Pipeline1.owner())
.thenReturn(owner2);
loadTracker.addPipeline(owner2Pipeline1);
MigratablePipeline owner2Pipeline3 = mock(MigratablePipeline.class);
when(owner2Pipeline3.load())
.thenReturn(0L)
.thenReturn(100L);
when(owner2Pipeline3.owner())
.thenReturn(owner2);
loadTracker.addPipeline(owner2Pipeline3);
LoadImbalance loadImbalance = loadTracker.updateImbalance();
assertEquals(0, loadImbalance.minimumLoad);
assertEquals(0, loadImbalance.maximumLoad);
loadTracker.updateImbalance();
assertEquals(100, loadImbalance.minimumLoad);
assertEquals(300, loadImbalance.maximumLoad);
assertEquals(owner1, loadImbalance.dstOwner);
assertEquals(owner2, loadImbalance.srcOwner);
} |
public static String replaceFirst(String input, String from, String to) {
if (from == null || to == null) {
return input;
}
int pos = input.indexOf(from);
if (pos != -1) {
int len = from.length();
return input.substring(0, pos) + to + input.substring(pos + len);
} else {
return input;
}
} | @Test
public void testReplaceFirst() {
assertEquals("jms:queue:bar", replaceFirst("jms:queue:bar", "foo", "bar"));
assertEquals("jms:queue:bar", replaceFirst("jms:queue:foo", "foo", "bar"));
assertEquals("jms:queue:bar?blah=123", replaceFirst("jms:queue:foo?blah=123", "foo", "bar"));
assertEquals("jms:queue:bar?blah=foo", replaceFirst("jms:queue:foo?blah=foo", "foo", "bar"));
} |
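The indexOf-based implementation above deliberately avoids regex semantics; String.replaceFirst interprets its arguments as pattern and replacement, as this small check shows:

public class ReplaceFirstSketch {
    public static void main(String[] args) {
        String input = "price is $5";
        // With the regex-based String.replaceFirst, a bare "$5" would not match at all
        // ('$' anchors at end of input); both arguments need escaping:
        System.out.println(input.replaceFirst("\\$5", "\\$6")); // price is $6
        // The literal, indexOf-based approach needs no escaping:
        int pos = input.indexOf("$5");
        System.out.println(input.substring(0, pos) + "$6" + input.substring(pos + 2)); // price is $6
    }
}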
public void handleSend(HttpServerResponse response, Span span) {
handleFinish(response, span);
} | @Test void handleSend_oneOfResponseError() {
brave.Span span = mock(brave.Span.class);
assertThatThrownBy(() -> handler.handleSend(null, span))
.isInstanceOf(NullPointerException.class)
.hasMessage("response == null");
} |
@Override
public void clear() {
throw MODIFICATION_ATTEMPT_ERROR;
} | @Test
void testClear() throws IOException {
long value = valueState.value();
assertThat(value).isEqualTo(42L);
assertThatThrownBy(() -> valueState.clear())
.isInstanceOf(UnsupportedOperationException.class);
} |
public String getAgentStatusReport(String pluginId, JobIdentifier jobIdentifier, String elasticAgentId, Map<String, String> cluster) {
LOGGER.debug("Processing get plugin status report for plugin: {} with job-identifier: {} with elastic-agent-id: {} and cluster: {}", pluginId, jobIdentifier, elasticAgentId, cluster);
final String agentStatusReportView = extension.getAgentStatusReport(pluginId, jobIdentifier, elasticAgentId, cluster);
LOGGER.debug("Done processing get plugin status report for plugin: {} with job-identifier: {} with elastic-agent-id: {} and cluster: {}", pluginId, jobIdentifier, elasticAgentId, cluster);
return agentStatusReportView;
} | @Test
public void shouldTalkToExtensionToGetAgentStatusReport() {
final JobIdentifier jobIdentifier = new JobIdentifier();
elasticAgentPluginRegistry.getAgentStatusReport(PLUGIN_ID, jobIdentifier, "some-id", null);
verify(elasticAgentExtension, times(1)).getAgentStatusReport(PLUGIN_ID, jobIdentifier, "some-id", null);
verifyNoMoreInteractions(elasticAgentExtension);
} |
@Override
public void checkVersion() {
// Does nothing. (Version is always compatible since it's in memory)
} | @Test
public void checkVersion() {
try {
confStore.checkVersion();
} catch (Exception e) {
fail("checkVersion threw exception");
}
} |
@Override
public Optional<ResultDecorator<EncryptRule>> newInstance(final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database,
final EncryptRule encryptRule, final ConfigurationProperties props, final SQLStatementContext sqlStatementContext) {
if (sqlStatementContext instanceof SelectStatementContext) {
return Optional.of(new EncryptDQLResultDecorator(database, encryptRule, (SelectStatementContext) sqlStatementContext));
}
if (sqlStatementContext.getSqlStatement() instanceof DALStatement) {
return Optional.of(new EncryptDALResultDecorator(globalRuleMetaData));
}
return Optional.empty();
} | @Test
void assertNewInstanceWithOtherStatement() {
EncryptResultDecoratorEngine engine = (EncryptResultDecoratorEngine) OrderedSPILoader.getServices(ResultProcessEngine.class, Collections.singleton(rule)).get(rule);
assertFalse(engine.newInstance(mock(RuleMetaData.class), database, rule, mock(ConfigurationProperties.class), mock(InsertStatementContext.class)).isPresent());
} |
public static boolean canDrop(FilterPredicate pred, List<ColumnChunkMetaData> columns) {
Objects.requireNonNull(pred, "pred cannot be null");
Objects.requireNonNull(columns, "columns cannot be null");
return pred.accept(new StatisticsFilter(columns));
} | @Test
public void testUdp() {
FilterPredicate pred = userDefined(intColumn, SevensAndEightsUdp.class);
FilterPredicate invPred = LogicalInverseRewriter.rewrite(not(userDefined(intColumn, SevensAndEightsUdp.class)));
FilterPredicate udpDropMissingColumn = userDefined(missingColumn2, DropNullUdp.class);
FilterPredicate invUdpDropMissingColumn =
LogicalInverseRewriter.rewrite(not(userDefined(missingColumn2, DropNullUdp.class)));
FilterPredicate udpKeepMissingColumn = userDefined(missingColumn2, SevensAndEightsUdp.class);
FilterPredicate invUdpKeepMissingColumn =
LogicalInverseRewriter.rewrite(not(userDefined(missingColumn2, SevensAndEightsUdp.class)));
FilterPredicate allPositivePred = userDefined(doubleColumn, AllPositiveUdp.class);
IntStatistics seven = new IntStatistics();
seven.setMinMax(7, 7);
IntStatistics eight = new IntStatistics();
eight.setMinMax(8, 8);
IntStatistics neither = new IntStatistics();
neither.setMinMax(1, 2);
assertTrue(canDrop(pred, Arrays.asList(getIntColumnMeta(seven, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertFalse(
canDrop(pred, Arrays.asList(getIntColumnMeta(eight, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertFalse(
canDrop(pred, Arrays.asList(getIntColumnMeta(neither, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertFalse(
canDrop(invPred, Arrays.asList(getIntColumnMeta(seven, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertTrue(
canDrop(invPred, Arrays.asList(getIntColumnMeta(eight, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertFalse(canDrop(
invPred, Arrays.asList(getIntColumnMeta(neither, 177L), getDoubleColumnMeta(doubleStats, 177L))));
// udpDropMissingColumn drops null column.
assertTrue(canDrop(
udpDropMissingColumn,
Arrays.asList(getIntColumnMeta(seven, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertTrue(canDrop(
udpDropMissingColumn,
Arrays.asList(getIntColumnMeta(eight, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertTrue(canDrop(
udpDropMissingColumn,
Arrays.asList(getIntColumnMeta(neither, 177L), getDoubleColumnMeta(doubleStats, 177L))));
// invUdpDropMissingColumn (i.e., not(udpDropMissingColumn)) keeps null column.
assertFalse(canDrop(
invUdpDropMissingColumn,
Arrays.asList(getIntColumnMeta(seven, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertFalse(canDrop(
invUdpDropMissingColumn,
Arrays.asList(getIntColumnMeta(eight, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertFalse(canDrop(
invUdpDropMissingColumn,
Arrays.asList(getIntColumnMeta(neither, 177L), getDoubleColumnMeta(doubleStats, 177L))));
// udpKeepMissingColumn keeps null column.
assertFalse(canDrop(
udpKeepMissingColumn,
Arrays.asList(getIntColumnMeta(seven, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertFalse(canDrop(
udpKeepMissingColumn,
Arrays.asList(getIntColumnMeta(eight, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertFalse(canDrop(
udpKeepMissingColumn,
Arrays.asList(getIntColumnMeta(neither, 177L), getDoubleColumnMeta(doubleStats, 177L))));
// invUdpKeepMissingColumn (i.e., not(udpKeepMissingColumn)) drops null column.
assertTrue(canDrop(
invUdpKeepMissingColumn,
Arrays.asList(getIntColumnMeta(seven, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertTrue(canDrop(
invUdpKeepMissingColumn,
Arrays.asList(getIntColumnMeta(eight, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertTrue(canDrop(
invUdpKeepMissingColumn,
Arrays.asList(getIntColumnMeta(neither, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertFalse(canDrop(allPositivePred, missingMinMaxColumnMetas));
} |
public void registerEndpoint(final Class<?> pojo) {
ShenyuServerEndpoint annotation = AnnotatedElementUtils.findMergedAnnotation(pojo, ShenyuServerEndpoint.class);
if (annotation == null) {
throw new ShenyuException("Class missing annotation ShenyuServerEndpoint! class name: " + pojo.getName());
}
String path = annotation.value();
Class<? extends ServerEndpointConfig.Configurator> configuratorClazz = annotation.configurator();
ServerEndpointConfig.Configurator configurator = null;
if (!configuratorClazz.equals(ServerEndpointConfig.Configurator.class)) {
try {
configurator = annotation.configurator().getConstructor().newInstance();
} catch (ReflectiveOperationException ex) {
LOG.error("ShenyuServerEndpoint configurator init fail! Class name: {}, configurator name: {}", pojo.getName(), annotation.configurator().getName());
throw new ShenyuException(ex);
}
}
ServerEndpointConfig sec = ServerEndpointConfig.Builder.create(pojo, path)
.decoders(Arrays.asList(annotation.decoders()))
.encoders(Arrays.asList(annotation.encoders()))
.subprotocols(Arrays.asList(annotation.subprotocols()))
.configurator(configurator).build();
this.registerEndpoint(sec);
} | @Test
public void registerEndpointTest() throws DeploymentException {
exporter.registerEndpoint(pojoWithAnnotation.getClass());
verify(serverContainer).addEndpoint(any(ServerEndpointConfig.class));
} |
public static void main(String[] args) throws InterruptedException {
var taskSet = new TaskSet();
var taskHandler = new TaskHandler();
var workCenter = new WorkCenter();
workCenter.createWorkers(4, taskSet, taskHandler);
execute(workCenter, taskSet);
} | @Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
StartupProgressView prog = startupProgress.createView();
MetricsRecordBuilder builder = collector.addRecord(
STARTUP_PROGRESS_METRICS_INFO);
builder.addCounter(info("ElapsedTime", "overall elapsed time"),
prog.getElapsedTime());
builder.addGauge(info("PercentComplete", "overall percent complete"),
prog.getPercentComplete());
for (Phase phase: prog.getPhases()) {
addCounter(builder, phase, "Count", " count", prog.getCount(phase));
addCounter(builder, phase, "ElapsedTime", " elapsed time",
prog.getElapsedTime(phase));
addCounter(builder, phase, "Total", " total", prog.getTotal(phase));
addGauge(builder, phase, "PercentComplete", " percent complete",
prog.getPercentComplete(phase));
}
} | @Test
public void testInitialState() {
MetricsRecordBuilder builder = getMetrics(metrics, true);
assertCounter("ElapsedTime", 0L, builder);
assertGauge("PercentComplete", 0.0f, builder);
assertCounter("LoadingFsImageCount", 0L, builder);
assertCounter("LoadingFsImageElapsedTime", 0L, builder);
assertCounter("LoadingFsImageTotal", 0L, builder);
assertGauge("LoadingFsImagePercentComplete", 0.0f, builder);
assertCounter("LoadingEditsCount", 0L, builder);
assertCounter("LoadingEditsElapsedTime", 0L, builder);
assertCounter("LoadingEditsTotal", 0L, builder);
assertGauge("LoadingEditsPercentComplete", 0.0f, builder);
assertCounter("SavingCheckpointCount", 0L, builder);
assertCounter("SavingCheckpointElapsedTime", 0L, builder);
assertCounter("SavingCheckpointTotal", 0L, builder);
assertGauge("SavingCheckpointPercentComplete", 0.0f, builder);
assertCounter("SafeModeCount", 0L, builder);
assertCounter("SafeModeElapsedTime", 0L, builder);
assertCounter("SafeModeTotal", 0L, builder);
assertGauge("SafeModePercentComplete", 0.0f, builder);
} |
public CompletableFuture<Void> deleteEntireBackup(final AuthenticatedBackupUser backupUser) {
checkBackupLevel(backupUser, BackupLevel.MESSAGES);
return backupsDb
// Try to swap out the backupDir for the user
.scheduleBackupDeletion(backupUser)
// If there was already a pending swap, try to delete the cdn objects directly
.exceptionallyCompose(ExceptionUtils.exceptionallyHandler(BackupsDb.PendingDeletionException.class, e ->
AsyncTimerUtil.record(SYNCHRONOUS_DELETE_TIMER, () ->
deletePrefix(backupUser.backupDir(), DELETION_CONCURRENCY))));
} | @Test
public void deleteEntireBackup() {
final AuthenticatedBackupUser original = backupUser(TestRandomUtil.nextBytes(16), BackupLevel.MEDIA);
testClock.pin(Instant.ofEpochSecond(10));
// Deleting should swap the backupDir for the user
backupManager.deleteEntireBackup(original).join();
verifyNoInteractions(remoteStorageManager);
final AuthenticatedBackupUser after = retrieveBackupUser(original.backupId(), BackupLevel.MEDIA);
assertThat(original.backupDir()).isNotEqualTo(after.backupDir());
assertThat(original.mediaDir()).isNotEqualTo(after.mediaDir());
// Trying again should do the deletion inline
when(remoteStorageManager.list(anyString(), any(), anyLong()))
.thenReturn(CompletableFuture.completedFuture(new RemoteStorageManager.ListResult(
Collections.emptyList(),
Optional.empty()
)));
backupManager.deleteEntireBackup(after).join();
verify(remoteStorageManager, times(1))
.list(eq(after.backupDir() + "/"), eq(Optional.empty()), anyLong());
// The original prefix to expire should be flagged as requiring expiration
final ExpiredBackup expiredBackup = backupManager
.getExpiredBackups(1, Schedulers.immediate(), Instant.ofEpochSecond(1L))
.collectList().block()
.getFirst();
assertThat(expiredBackup.hashedBackupId()).isEqualTo(hashedBackupId(original.backupId()));
assertThat(expiredBackup.prefixToDelete()).isEqualTo(original.backupDir());
assertThat(expiredBackup.expirationType()).isEqualTo(ExpiredBackup.ExpirationType.GARBAGE_COLLECTION);
} |
protected List<String> parse(final int response, final String[] reply) {
final List<String> result = new ArrayList<String>(reply.length);
for(final String line : reply) {
// Some servers include the status code for every line.
if(line.startsWith(String.valueOf(response))) {
try {
String stripped = line;
stripped = StringUtils.strip(StringUtils.removeStart(stripped, String.format("%d-", response)));
stripped = StringUtils.strip(StringUtils.removeStart(stripped, String.valueOf(response)));
result.add(stripped);
}
catch(IndexOutOfBoundsException e) {
log.error(String.format("Failed parsing line %s", line), e);
}
}
else {
result.add(StringUtils.strip(line));
}
}
return result;
} | @Test
public void testParse8006() throws Exception {
final List<String> lines = Arrays.asList(
"212-Status of /cgi-bin:",
" drwxr-xr-x 3 1564466 15000 4 Jan 19 19:56 .",
" drwxr-x--- 13 1564466 15000 44 Jun 13 18:36 ..",
" drwxr-xr-x 2 1564466 15000 2 May 25 2009 tmp",
" End of status",
"212 -rw-r--r-- 1 1564466 15000 9859 Jan 19 19:56 adoptees.php");
final FTPFileEntryParser parser = new UnixFTPEntryParser();
final List<String> list = new FTPStatListService(null, parser).parse(
212, lines.toArray(new String[lines.size()]));
assertEquals(6, list.size());
final Path parent = new Path("/cgi-bin", EnumSet.of(Path.Type.directory));
final AttributedList<Path> parsed = new FTPListResponseReader(parser, true).read(
parent, list
);
assertEquals(2, parsed.size());
assertTrue(parsed.contains(new Path(parent, "tmp", EnumSet.of(Path.Type.directory))));
assertTrue(parsed.contains(new Path(parent, "adoptees.php", EnumSet.of(Path.Type.file))));
} |
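The prefix handling is the subtle part: RFC 959 multiline replies mark continuation lines with "212-" and the terminal line with "212 ". A self-contained sketch of just that stripping step, assuming commons-lang3 on the classpath:

import org.apache.commons.lang3.StringUtils;

public class StatReplyStripDemo {

    public static void main(String[] args) {
        // Continuation line ("212-") and terminal line ("212 ") from the test above.
        System.out.println(strip("212-Status of /cgi-bin:", 212));
        System.out.println(strip("212 -rw-r--r-- 1 1564466 15000 9859 Jan 19 19:56 adoptees.php", 212));
    }

    static String strip(String line, int response) {
        // Remove "<code>-" first, then a bare "<code>", then trim whitespace.
        String stripped = StringUtils.removeStart(line, String.format("%d-", response));
        stripped = StringUtils.removeStart(stripped, String.valueOf(response));
        return StringUtils.strip(stripped);
    }
}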
public static Object[] toArray(Object arrayObj) {
if (arrayObj == null) {
return null;
}
if (!arrayObj.getClass().isArray()) {
throw new ClassCastException("'arrayObj' is not an array, can't cast to Object[]");
}
int length = Array.getLength(arrayObj);
Object[] array = new Object[length];
for (int i = 0; i < length; ++i) {
array[i] = Array.get(arrayObj, i);
}
return array;
} | @Test
public void testToArray() {
Assertions.assertNull(ArrayUtils.toArray(null));
Object obj = new String[]{"1", "2", "3"};
Object[] array = ArrayUtils.toArray(obj);
Assertions.assertArrayEquals(new String[]{"1", "2", "3"}, array);
Object obj1 = new String[]{};
Object[] array1 = ArrayUtils.toArray(obj1);
Assertions.assertArrayEquals(new String[]{}, array1);
} |
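One case the test above does not exercise: because the copy goes through java.lang.reflect.Array, the same approach also works for primitive arrays, with Array.get boxing each element. A small self-contained sketch:

import java.lang.reflect.Array;
import java.util.Arrays;

public class PrimitiveToArrayDemo {

    public static void main(String[] args) {
        int[] primitives = {1, 2, 3};
        int length = Array.getLength(primitives);
        Object[] boxed = new Object[length];
        for (int i = 0; i < length; ++i) {
            boxed[i] = Array.get(primitives, i); // boxes each int to an Integer
        }
        System.out.println(Arrays.toString(boxed)); // [1, 2, 3]
    }
}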
@Override
@Transactional(rollbackFor = Exception.class)
public void updateFileConfigMaster(Long id) {
// Validate that the config exists
validateFileConfigExists(id);
// Demote all other configs to non-master
fileConfigMapper.updateBatch(new FileConfigDO().setMaster(false));
// Update the target config to master
fileConfigMapper.updateById(new FileConfigDO().setId(id).setMaster(true));
// Clear the cache
clearCache(null, true);
} | @Test
public void testUpdateFileConfigMaster_success() {
// mock data
FileConfigDO dbFileConfig = randomFileConfigDO().setMaster(false);
fileConfigMapper.insert(dbFileConfig); // @Sql: insert an existing record first
FileConfigDO masterFileConfig = randomFileConfigDO().setMaster(true);
fileConfigMapper.insert(masterFileConfig); // @Sql: insert an existing record first
// Invoke the method under test
fileConfigService.updateFileConfigMaster(dbFileConfig.getId());
// Assert the data
assertTrue(fileConfigMapper.selectById(dbFileConfig.getId()).getMaster());
assertFalse(fileConfigMapper.selectById(masterFileConfig.getId()).getMaster());
// Verify the cache
assertNull(fileConfigService.getClientCache().getIfPresent(0L));
} |
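For illustration, the two updates the service effectively issues, written as a plain JDBC sketch. This is hedged: the real code goes through MyBatis-Plus, and the file_config table name and DataSource wiring are assumptions here.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import javax.sql.DataSource;

public class MasterFlagDemo {

    static void updateMaster(DataSource dataSource, long id) throws SQLException {
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(false); // mirror @Transactional: both updates commit together
            try (Statement demote = conn.createStatement()) {
                demote.executeUpdate("UPDATE file_config SET master = FALSE"); // demote everything
            }
            try (PreparedStatement promote =
                         conn.prepareStatement("UPDATE file_config SET master = TRUE WHERE id = ?")) {
                promote.setLong(1, id); // then promote the requested config
                promote.executeUpdate();
            }
            conn.commit();
        }
    }
}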
public boolean relaxedOffer(E e) {
return offer(e);
} | @Test(dataProvider = "empty")
public void relaxedOffer_whenEmpty(MpscGrowableArrayQueue<Integer> queue) {
assertThat(queue.relaxedOffer(1)).isTrue();
assertThat(queue).hasSize(1);
} |
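Since relaxedOffer simply delegates to offer here, callers get the strict semantics either way. A hedged usage sketch against the JCTools original of this queue (the (initialCapacity, maxCapacity) constructor is JCTools' signature and may differ in the copy under test):

import org.jctools.queues.MpscGrowableArrayQueue;

public class RelaxedOfferDemo {

    public static void main(String[] args) {
        MpscGrowableArrayQueue<Integer> queue = new MpscGrowableArrayQueue<>(4, 16);
        System.out.println(queue.relaxedOffer(42)); // true, identical to queue.offer(42)
        System.out.println(queue.size());           // 1
    }
}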
@Override
public Page<User> getUsers(int pageNo, int pageSize, String username) {
AuthPaginationHelper<User> helper = createPaginationHelper();
String sqlCountRows = "SELECT count(*) FROM users ";
String sqlFetchRows = "SELECT username,password FROM users ";
StringBuilder where = new StringBuilder(" WHERE 1 = 1 ");
List<String> params = new ArrayList<>();
if (StringUtils.isNotBlank(username)) {
where.append(" AND username = ? ");
params.add(username);
}
try {
Page<User> pageInfo = helper.fetchPage(sqlCountRows + where, sqlFetchRows + where, params.toArray(), pageNo,
pageSize, USER_ROW_MAPPER);
if (pageInfo == null) {
pageInfo = new Page<>();
pageInfo.setTotalCount(0);
pageInfo.setPageItems(new ArrayList<>());
}
return pageInfo;
} catch (CannotGetJdbcConnectionException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e.toString(), e);
throw e;
}
} | @Test
void testGetUsers() {
Page<User> users = externalUserPersistService.getUsers(1, 10, "nacos");
assertNotNull(users);
} |
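The "WHERE 1 = 1" plus parameter-list pattern above generalizes to any number of optional filters. A minimal hedged JDBC sketch (table and column names copied from the query above; the Connection is assumed to come from the caller):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

public class DynamicWhereDemo {

    static PreparedStatement buildQuery(Connection conn, String username) throws SQLException {
        StringBuilder sql = new StringBuilder("SELECT username, password FROM users WHERE 1 = 1");
        List<Object> params = new ArrayList<>();
        if (username != null && !username.isBlank()) {
            sql.append(" AND username = ?"); // placeholder keeps user input out of the SQL text
            params.add(username);
        }
        PreparedStatement ps = conn.prepareStatement(sql.toString());
        for (int i = 0; i < params.size(); i++) {
            ps.setObject(i + 1, params.get(i)); // JDBC parameters are 1-indexed
        }
        return ps;
    }
}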