focal_method | test_case |
---|---|
public static Object[] cast(Class<?> type, Object arrayObj) throws NullPointerException, IllegalArgumentException {
if (null == arrayObj) {
throw new NullPointerException("Argument [arrayObj] is null !");
}
if (false == arrayObj.getClass().isArray()) {
throw new IllegalArgumentException("Argument [arrayObj] is not array !");
}
if (null == type) {
return (Object[]) arrayObj;
}
final Class<?> componentType = type.isArray() ? type.getComponentType() : type;
final Object[] array = (Object[]) arrayObj;
final Object[] result = ArrayUtil.newArray(componentType, array.length);
System.arraycopy(array, 0, result, 0, array.length);
return result;
} | @Test
public void castTest() {
Object[] values = {"1", "2", "3"};
String[] cast = (String[]) ArrayUtil.cast(String.class, values);
assertEquals(values[0], cast[0]);
assertEquals(values[1], cast[1]);
assertEquals(values[2], cast[2]);
} |
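A note on the copy semantics above: cast allocates a fresh array of the requested component type and copies element by element, so an element that is not assignable to that type fails at copy time rather than at the caller's cast. A minimal sketch, assuming Hutool's cn.hutool.core.util.ArrayUtil:

import cn.hutool.core.util.ArrayUtil; // assuming the Hutool package location

public class CastSketch {
    public static void main(String[] args) {
        Object[] values = {"1", "2", "3"};
        // Succeeds: every element is a String, as in castTest above.
        String[] ok = (String[]) ArrayUtil.cast(String.class, values);
        System.out.println(ok.length); // 3
        // Fails: System.arraycopy refuses to store a String into an Integer[]
        // and throws ArrayStoreException before the caller ever casts.
        ArrayUtil.cast(Integer.class, values);
    }
}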
@SuppressWarnings("unchecked")
@Override
public synchronized ProxyInfo<T> getProxy() {
if (currentUsedHandler != null) {
return currentUsedHandler;
}
Map<String, ProxyInfo<T>> targetProxyInfos = new HashMap<>();
StringBuilder combinedInfo = new StringBuilder("[");
for (int i = 0; i < proxies.size(); i++) {
ProxyInfo<T> pInfo = super.getProxy();
incrementProxyIndex();
targetProxyInfos.put(pInfo.proxyInfo, pInfo);
combinedInfo.append(pInfo.proxyInfo).append(',');
}
combinedInfo.append(']');
T wrappedProxy = (T) Proxy.newProxyInstance(
RequestHedgingInvocationHandler.class.getClassLoader(),
new Class<?>[]{xface},
new RequestHedgingInvocationHandler(targetProxyInfos));
currentUsedHandler =
new ProxyInfo<T>(wrappedProxy, combinedInfo.toString());
return currentUsedHandler;
} | @Test
public void testRequestNNAfterOneSuccess() throws Exception {
final AtomicInteger goodCount = new AtomicInteger(0);
final AtomicInteger badCount = new AtomicInteger(0);
final ClientProtocol goodMock = mock(ClientProtocol.class);
when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
@Override
public long[] answer(InvocationOnMock invocation) throws Throwable {
goodCount.incrementAndGet();
Thread.sleep(1000);
return new long[]{1};
}
});
final ClientProtocol badMock = mock(ClientProtocol.class);
when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
@Override
public long[] answer(InvocationOnMock invocation) throws Throwable {
badCount.incrementAndGet();
throw new IOException("Bad mock !!");
}
});
RequestHedgingProxyProvider<ClientProtocol> provider =
new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
createFactory(badMock, goodMock));
ClientProtocol proxy = provider.getProxy().proxy;
proxy.getStats();
assertEquals(1, goodCount.get());
assertEquals(1, badCount.get());
// We will only use the successful proxy after a successful invocation.
proxy.getStats();
assertEquals(2, goodCount.get());
assertEquals(1, badCount.get());
} |
@Override
public BackgroundException map(final SSLException failure) {
final StringBuilder buffer = new StringBuilder();
for(Throwable cause : ExceptionUtils.getThrowableList(failure)) {
if(cause instanceof SocketException) {
// Connection has been shutdown: javax.net.ssl.SSLException: java.net.SocketException: Broken pipe
return new DefaultSocketExceptionMappingService().map((SocketException) cause);
}
}
final String message = failure.getMessage();
for(Alert alert : Alert.values()) {
if(StringUtils.containsIgnoreCase(message, alert.name())) {
this.append(buffer, alert.getDescription());
break;
}
}
if(failure instanceof SSLHandshakeException) {
if(ExceptionUtils.getRootCause(failure) instanceof CertificateException) {
log.warn(String.format("Ignore certificate failure %s and drop connection", failure.getMessage()));
// Server certificate not accepted
return new ConnectionCanceledException(failure);
}
if(ExceptionUtils.getRootCause(failure) instanceof EOFException) {
// SSL peer shut down incorrectly
return this.wrap(failure, buffer);
}
return new SSLNegotiateException(buffer.toString(), failure);
}
if(ExceptionUtils.getRootCause(failure) instanceof GeneralSecurityException) {
this.append(buffer, ExceptionUtils.getRootCause(failure).getMessage());
return new InteroperabilityException(buffer.toString(), failure);
}
this.append(buffer, message);
return new InteroperabilityException(buffer.toString(), failure);
} | @Test
public void testMap() {
final BackgroundException f = new SSLExceptionMappingService().map(new SSLException(
"Connection has been shutdown: javax.net.ssl.SSLException: java.net.SocketException: Broken pipe",
new SSLException("javax.net.ssl.SSLException: java.net.SocketException: Broken pipe",
new SocketException("Broken pipe"))));
assertEquals("Connection failed", f.getMessage());
assertEquals("Broken pipe. The connection attempt was rejected. The server may be down, or your network may not be properly configured.", f.getDetail());
} |
@Override
public void disconnectTaskManager(final ResourceID resourceId, final Exception cause) {
closeTaskManagerConnection(resourceId, cause)
.ifPresent(ResourceManager.this::stopWorkerIfSupported);
} | @Test
void testDisconnectTaskManager() throws Exception {
final ResourceID taskExecutorId = ResourceID.generate();
final CompletableFuture<Exception> disconnectFuture = new CompletableFuture<>();
final CompletableFuture<ResourceID> stopWorkerFuture = new CompletableFuture<>();
final TaskExecutorGateway taskExecutorGateway =
new TestingTaskExecutorGatewayBuilder()
.setDisconnectResourceManagerConsumer(disconnectFuture::complete)
.createTestingTaskExecutorGateway();
rpcService.registerGateway(taskExecutorGateway.getAddress(), taskExecutorGateway);
resourceManager =
new ResourceManagerBuilder()
.withStopWorkerConsumer(stopWorkerFuture::complete)
.withSlotManager(createSlotManager())
.buildAndStart();
registerTaskExecutor(resourceManager, taskExecutorId, taskExecutorGateway.getAddress());
resourceManager.disconnectTaskManager(taskExecutorId, new FlinkException("Test exception"));
assertThatFuture(disconnectFuture).eventuallySucceeds().isInstanceOf(FlinkException.class);
assertThatFuture(stopWorkerFuture).eventuallySucceeds().isEqualTo(taskExecutorId);
} |
@Override
public List<BlockWorkerInfo> getPreferredWorkers(WorkerClusterView workerClusterView,
String fileId, int count) throws ResourceExhaustedException {
if (workerClusterView.size() < count) {
throw new ResourceExhaustedException(String.format(
"Not enough workers in the cluster %d workers in the cluster but %d required",
workerClusterView.size(), count));
}
Set<WorkerIdentity> workerIdentities = workerClusterView.workerIds();
mHashProvider.refresh(workerIdentities);
List<WorkerIdentity> workers = mHashProvider.getMultiple(fileId, count);
if (workers.size() != count) {
throw new ResourceExhaustedException(String.format(
"Found %d workers from the hash ring but %d required", workers.size(), count));
}
ImmutableList.Builder<BlockWorkerInfo> builder = ImmutableList.builder();
for (WorkerIdentity worker : workers) {
Optional<WorkerInfo> optionalWorkerInfo = workerClusterView.getWorkerById(worker);
final WorkerInfo workerInfo;
if (optionalWorkerInfo.isPresent()) {
workerInfo = optionalWorkerInfo.get();
} else {
// the worker returned by the policy does not exist in the cluster view
// supplied by the client.
// this can happen when the membership changes and some callers fail to update
// to the latest worker cluster view.
// in this case, just skip this worker
LOG.debug("Inconsistency between caller's view of cluster and that of "
+ "the consistent hash policy's: worker {} selected by policy does not exist in "
+ "caller's view {}. Skipping this worker.",
worker, workerClusterView);
continue;
}
BlockWorkerInfo blockWorkerInfo = new BlockWorkerInfo(
worker, workerInfo.getAddress(), workerInfo.getCapacityBytes(),
workerInfo.getUsedBytes(), workerInfo.getState() == WorkerState.LIVE
);
builder.add(blockWorkerInfo);
}
List<BlockWorkerInfo> infos = builder.build();
return infos;
} | @Test
public void getMultipleWorkers() throws Exception {
WorkerLocationPolicy policy = WorkerLocationPolicy.Factory.create(mConf);
assertTrue(policy instanceof ConsistentHashPolicy);
// Prepare a worker list
WorkerClusterView workers = new WorkerClusterView(Arrays.asList(
new WorkerInfo()
.setIdentity(WorkerIdentityTestUtils.ofLegacyId(1))
.setAddress(new WorkerNetAddress()
.setHost("master1").setRpcPort(29998).setDataPort(29999).setWebPort(30000))
.setCapacityBytes(1024)
.setUsedBytes(0),
new WorkerInfo()
.setIdentity(WorkerIdentityTestUtils.ofLegacyId(2))
.setAddress(new WorkerNetAddress()
.setHost("master2").setRpcPort(29998).setDataPort(29999).setWebPort(30000))
.setCapacityBytes(1024)
.setUsedBytes(0)));
List<BlockWorkerInfo> assignedWorkers = policy.getPreferredWorkers(workers, "hdfs://a/b/c", 2);
assertEquals(2, assignedWorkers.size());
assertTrue(assignedWorkers.stream().allMatch(w -> contains(workers, w)));
// The order of the workers should be consistent
assertEquals(assignedWorkers.get(0).getNetAddress().getHost(), "master2");
assertEquals(assignedWorkers.get(1).getNetAddress().getHost(), "master1");
assertThrows(ResourceExhaustedException.class, () -> {
// Getting 2 out of 1 worker will result in an error
policy.getPreferredWorkers(
new WorkerClusterView(Arrays.asList(
new WorkerInfo()
.setIdentity(WorkerIdentityTestUtils.ofLegacyId(1))
.setAddress(new WorkerNetAddress()
.setHost("master1").setRpcPort(29998).setDataPort(29999).setWebPort(30000))
.setCapacityBytes(1024)
.setUsedBytes(0))),
"hdfs://a/b/c", 2);
});
} |
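The policy above delegates the actual selection to a consistent-hash ring (mHashProvider.getMultiple). A minimal, self-contained sketch of that idea, not Alluxio's implementation, with a single virtual node per worker where real rings hash many:

import java.util.*;

class ConsistentHashSketch {
    private final TreeMap<Integer, String> ring = new TreeMap<>();

    void addWorker(String worker) {
        ring.put(worker.hashCode(), worker); // real rings hash many virtual nodes per worker
    }

    // Pick the next `count` distinct workers clockwise from the key's position.
    List<String> getMultiple(String key, int count) {
        if (count > new HashSet<>(ring.values()).size()) {
            throw new IllegalArgumentException("not enough workers");
        }
        List<String> picked = new ArrayList<>();
        Iterator<String> it = ring.tailMap(key.hashCode()).values().iterator();
        while (picked.size() < count) {
            if (!it.hasNext()) {
                it = ring.values().iterator(); // wrap around the ring
            }
            String worker = it.next();
            if (!picked.contains(worker)) {
                picked.add(worker);
            }
        }
        return picked;
    }
}

Because a key's position on the ring is stable, repeated calls return the same workers in the same order, which is what the test's order-consistency assertions rely on.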
@VisibleForTesting
static String truncateLongClasspath(ImmutableList<String> imageEntrypoint) {
List<String> truncated = new ArrayList<>();
UnmodifiableIterator<String> iterator = imageEntrypoint.iterator();
while (iterator.hasNext()) {
String element = iterator.next();
truncated.add(element);
if (element.equals("-cp") || element.equals("-classpath")) {
String classpath = iterator.next();
if (classpath.length() > 200) {
truncated.add(classpath.substring(0, 200) + "<... classpath truncated ...>");
} else {
truncated.add(classpath);
}
}
}
return truncated.toString();
} | @Test
public void testTruncateLongClasspath_shortClasspath() {
ImmutableList<String> entrypoint =
ImmutableList.of(
"java", "-Dmy-property=value", "-cp", "/app/classes:/app/libs/*", "com.example.Main");
Assert.assertEquals(
"[java, -Dmy-property=value, -cp, /app/classes:/app/libs/*, com.example.Main]",
BuildImageStep.truncateLongClasspath(entrypoint));
} |
@Override
public <K, V> void forward(final K key, final V value) {
throw new StreamsException(EXPLANATION);
} | @Test
public void shouldThrowOnForward() {
assertThrows(StreamsException.class, () -> context.forward("key", "value"));
} |
public static Labels fromString(String stringLabels) throws IllegalArgumentException {
Map<String, String> labels = new HashMap<>();
try {
if (stringLabels != null && !stringLabels.isEmpty()) {
String[] labelsArray = stringLabels.split(",");
for (String label : labelsArray) {
String[] fields = label.split("=");
labels.put(fields[0].trim(), fields[1].trim());
}
}
} catch (Exception e) {
throw new IllegalArgumentException("Failed to parse labels from string " + stringLabels, e);
}
return new Labels(labels);
} | @Test
public void testParseInvalidLabels3() {
assertThrows(IllegalArgumentException.class, () -> {
String invalidLabels = "key2";
Labels.fromString(invalidLabels);
});
} |
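Why the test input "key2" trips the IllegalArgumentException: split("=") on a token with no '=' returns a one-element array, so fields[1] throws ArrayIndexOutOfBoundsException, which the catch block rewraps. The failing step in isolation:

public class LabelsFailureSketch {
    public static void main(String[] args) {
        String[] fields = "key2".split("=");  // ["key2"], nothing to split on
        String value = fields[1].trim();      // ArrayIndexOutOfBoundsException,
                                              // rewrapped by fromString as IllegalArgumentException
    }
}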
@Description("Scrub runtime information such as plan estimates and variable names from the query plan, leaving the structure of the plan intact")
@ScalarFunction("json_presto_query_plan_scrub")
@SqlType(StandardTypes.JSON)
@SqlNullable
public static Slice jsonScrubPlan(@SqlType(StandardTypes.JSON) Slice jsonPlan)
{
Map<PlanFragmentId, Map<String, JsonRenderedNode>> jsonRenderedNodeMap = parseJsonPlanFragments(jsonPlan);
if (jsonRenderedNodeMap == null) {
return null;
}
jsonRenderedNodeMap.forEach((key, planMap) ->
{
planMap.put("plan", scrubJsonPlan(planMap.get("plan")));
});
JsonCodec<Map<PlanFragmentId, Map<String, JsonRenderer.JsonRenderedNode>>> planMapCodec = constructJsonPlanMapCodec();
return utf8Slice(planMapCodec.toJson(jsonRenderedNodeMap));
} | @Test
public void testJsonScrubPlan()
{
assertFunction("json_presto_query_plan_scrub(json '" + TestJsonPrestoQueryPlanFunctionUtils.joinPlan.replaceAll("'", "''") + "')", JSON,
TestJsonPrestoQueryPlanFunctionUtils.scrubbedJoinPlan);
} |
@Override
public Map<MetricName, Metric> getMetrics() {
final Map<MetricName, Metric> gauges = new HashMap<>();
gauges.put(MetricName.build("name"), (Gauge<String>) runtime::getName);
gauges.put(MetricName.build("vendor"), (Gauge<String>) () -> String.format(Locale.US,
"%s %s %s (%s)",
runtime.getVmVendor(),
runtime.getVmName(),
runtime.getVmVersion(),
runtime.getSpecVersion()));
gauges.put(MetricName.build("uptime"), (Gauge<Long>) runtime::getUptime);
return Collections.unmodifiableMap(gauges);
} | @Test
public void hasASetOfGauges() throws Exception {
assertThat(gauges.getMetrics().keySet())
.containsOnly(
MetricName.build("vendor"),
MetricName.build("name"),
MetricName.build("uptime"));
} |
@Operation(summary = "Get collect metadata process errors by connection metadata process result id")
@GetMapping(value = "collect_metadata/errors/{result_id}", produces = "application/json")
@ResponseBody
public Page<SamlMetadataProcessError> findBySamlMetadataProcessResultId(@RequestParam(name = "page") int pageIndex,
@RequestParam(name = "size") int pageSize, @PathVariable("result_id") Long resultId) {
return metadataRetrieverService.getSamlMetadataById(resultId, pageIndex, pageSize);
} | @Test
public void findBySamlMetadataProcessResultId() {
when(metadataRetrieverServiceMock.getSamlMetadataById(anyLong(), anyInt(), anyInt())).thenReturn(MetadataProcessHelper.getPageSamlMetadataProcessError());
Page<SamlMetadataProcessError> result = controllerMock.findBySamlMetadataProcessResultId(1, 10, 1L);
verify(metadataRetrieverServiceMock, times(1)).getSamlMetadataById(anyLong(), anyInt(), anyInt());
assertNotNull(result);
} |
public static AfterProcessingTimeStateMachine pastFirstElementInPane() {
return new AfterProcessingTimeStateMachine(IDENTITY);
} | @Test
public void testAfterProcessingTimeWithMergingWindow() throws Exception {
SimpleTriggerStateMachineTester<IntervalWindow> tester =
TriggerStateMachineTester.forTrigger(
AfterProcessingTimeStateMachine.pastFirstElementInPane()
.plusDelayOf(Duration.millis(5)),
Sessions.withGapDuration(Duration.millis(10)));
tester.advanceProcessingTime(new Instant(10));
tester.injectElements(1); // in [1, 11), timer for 15
IntervalWindow firstWindow = new IntervalWindow(new Instant(1), new Instant(11));
assertFalse(tester.shouldFire(firstWindow));
tester.advanceProcessingTime(new Instant(12));
tester.injectElements(3); // in [3, 13), timer for 17
IntervalWindow secondWindow = new IntervalWindow(new Instant(3), new Instant(13));
assertFalse(tester.shouldFire(secondWindow));
tester.mergeWindows();
IntervalWindow mergedWindow = new IntervalWindow(new Instant(1), new Instant(13));
tester.advanceProcessingTime(new Instant(16));
assertTrue(tester.shouldFire(mergedWindow));
} |
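A worked timeline for the merge above: the element at processing time 10 arms a timer at 10 + 5 = 15 for window [1, 11), and the element at time 12 arms one at 17 for [3, 13). After the sessions merge into [1, 13), advancing to 16 crosses the earlier timer (15) but not the later one (17), and shouldFire is true, so the merged window evidently fires on the earliest of the merged timers.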
public void retrieveDocuments() throws DocumentRetrieverException {
boolean first = true;
String route = params.cluster.isEmpty() ? params.route : resolveClusterRoute(params.cluster);
MessageBusParams messageBusParams = createMessageBusParams(params.configId, params.timeout, route);
documentAccess = documentAccessFactory.createDocumentAccess(messageBusParams);
session = documentAccess.createSyncSession(new SyncParameters.Builder().build());
int trace = params.traceLevel;
if (trace > 0) {
session.setTraceLevel(trace);
}
Iterator<String> iter = params.documentIds;
if (params.jsonOutput && !params.printIdsOnly) {
System.out.println('[');
}
while (iter.hasNext()) {
if (params.jsonOutput && !params.printIdsOnly) {
if (!first) {
System.out.println(',');
} else {
first = false;
}
}
String docid = iter.next();
Message msg = createDocumentRequest(docid);
Reply reply = session.syncSend(msg);
printReply(reply);
}
if (params.jsonOutput && !params.printIdsOnly) {
System.out.println(']');
}
} | @Test
void testClusterLookup() throws DocumentRetrieverException {
final String cluster = "storage",
expectedRoute = "[Content:cluster=storage]";
ClientParameters params = createParameters()
.setCluster(cluster)
.build();
ClusterList clusterList = new ClusterList(List.of(new ClusterDef(cluster)));
DocumentRetriever documentRetriever = createDocumentRetriever(params, clusterList);
documentRetriever.retrieveDocuments();
verify(mockedFactory).createDocumentAccess(argThat(o -> o.getRoute().equals(expectedRoute)));
} |
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
if(containerService.isContainer(folder)) {
final S3BucketCreateService service = new S3BucketCreateService(session);
service.create(folder, StringUtils.isBlank(status.getRegion()) ?
new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getDefault().getIdentifier() : status.getRegion());
return folder;
}
else {
final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType());
type.add(Path.Type.placeholder);
return new S3TouchFeature(session, acl).withWriter(writer).touch(folder
.withType(type), status
// Add placeholder object
.withMime(MIMETYPE)
.withChecksum(writer.checksum(folder, status).compute(new NullInputStream(0L), status)));
}
} | @Test
@Ignore
public void testCreateBucket() throws Exception {
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
final S3DirectoryFeature feature = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl);
for(Location.Name region : session.getHost().getProtocol().getRegions()) {
switch(region.getIdentifier()) {
case "me-south-1":
case "ap-east-1":
// Not enabled for account
break;
default:
final Path test = new Path(new DefaultHomeFinderService(session).find(), new AsciiRandomStringService(30).random(), EnumSet.of(Path.Type.directory, Path.Type.volume));
assertTrue(feature.isSupported(test.getParent(), test.getName()));
test.attributes().setRegion(region.getIdentifier());
feature.mkdir(test, new TransferStatus().withRegion(region.getIdentifier()));
assertTrue(new S3FindFeature(session, acl).find(test));
assertEquals(region.getIdentifier(), new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getLocation(test).getIdentifier());
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
}
} |
@Override
public String toString() {
return format();
} | @Test
public void formatTest2() {
BetweenFormatter formatter = new BetweenFormatter(584, Level.SECOND, 1);
assertEquals(formatter.toString(), "0秒");
} |
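For context on the expected value: the first constructor argument is a duration in milliseconds, and 584 ms is zero whole seconds at Level.SECOND precision with one output level, hence the rendered "0秒" ("0 seconds").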
@Override
public String getName() {
return name;
} | @Test
public void testConstructor_withName() {
config = new NearCacheConfig("foobar");
assertEquals("foobar", config.getName());
} |
protected boolean isClusterVersionUnknownOrGreaterThan(Version version) {
Version clusterVersion = getNodeEngine().getClusterService().getClusterVersion();
return clusterVersion.isUnknownOrGreaterThan(version);
} | @Test
public void testClusterVersion_isUnknownGreaterThan_previousVersion() {
assertTrue(object.isClusterVersionUnknownOrGreaterThan(VersionsTest.getPreviousClusterVersion()));
} |
@VisibleForTesting
static Map<Stack, List<String>> deduplicateThreadStacks(
Map<Thread, StackTraceElement[]> allStacks) {
Map<Stack, List<String>> stacks = new HashMap<>();
for (Map.Entry<Thread, StackTraceElement[]> entry : allStacks.entrySet()) {
Thread thread = entry.getKey();
if (thread != Thread.currentThread()) {
Stack stack = new Stack(entry.getValue(), thread.getState());
List<String> threads = stacks.get(stack);
if (threads == null) {
threads = new ArrayList<>();
stacks.put(stack, threads);
}
threads.add(thread.toString());
}
}
return stacks;
} | @Test
public void testDeduping() throws Exception {
Map<Thread, StackTraceElement[]> stacks =
ImmutableMap.of(
new Thread("Thread1"),
new StackTraceElement[] {new StackTraceElement("Class", "Method1", "File", 11)},
new Thread("Thread2"),
new StackTraceElement[] {new StackTraceElement("Class", "Method1", "File", 11)},
new Thread("Thread3"),
new StackTraceElement[] {new StackTraceElement("Class", "Method2", "File", 17)});
Map<Stack, List<String>> deduped = ThreadzServlet.deduplicateThreadStacks(stacks);
assertEquals(2, deduped.size());
assertThat(
deduped,
Matchers.hasEntry(
new Stack(
new StackTraceElement[] {new StackTraceElement("Class", "Method1", "File", 11)},
Thread.State.NEW),
Arrays.asList("Thread[Thread1,5,main]", "Thread[Thread2,5,main]")));
assertThat(
deduped,
Matchers.hasEntry(
new Stack(
new StackTraceElement[] {new StackTraceElement("Class", "Method2", "File", 17)},
Thread.State.NEW),
Arrays.asList("Thread[Thread3,5,main]")));
} |
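The get-or-create block in the loop is the pre-Java-8 spelling of stacks.computeIfAbsent(stack, s -> new ArrayList<>()).add(thread.toString()); the behavior is identical, with deduplication keyed on Stack's equality over the trace elements and thread state.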
public void insertChanges(IssueChangeMapper mapper, DefaultIssue issue, UuidFactory uuidFactory) {
for (DefaultIssueComment comment : issue.defaultIssueComments()) {
if (comment.isNew()) {
IssueChangeDto changeDto = IssueChangeDto.of(comment, issue.projectUuid());
changeDto.setUuid(uuidFactory.create());
changeDto.setProjectUuid(issue.projectUuid());
mapper.insert(changeDto);
}
}
FieldDiffs diffs = issue.currentChange();
if (issue.isCopied()) {
for (FieldDiffs d : issue.changes()) {
IssueChangeDto changeDto = IssueChangeDto.of(issue.key(), d, issue.projectUuid());
changeDto.setUuid(uuidFactory.create());
changeDto.setProjectUuid(issue.projectUuid());
mapper.insert(changeDto);
}
} else if ((!issue.isNew() || issue.getAnticipatedTransitionUuid().isPresent()) && diffs != null) {
IssueChangeDto changeDto = IssueChangeDto.of(issue.key(), diffs, issue.projectUuid());
changeDto.setUuid(uuidFactory.create());
changeDto.setProjectUuid(issue.projectUuid());
mapper.insert(changeDto);
}
} | @Test
public void when_newIssueWithAnticipatedTransitionInserted_twoChangelogCreated() {
when(uuidFactory.create()).thenReturn("uuid");
String issueKey = "ABCDE";
String commentText = "comment for new issue";
DefaultIssueComment comment = DefaultIssueComment.create(issueKey, "user_uuid", commentText);
comment.setKey("FGHIJ");
Date date = DateUtils.parseDateTime("2013-05-18T12:00:00+0000");
DefaultIssue issue = new DefaultIssue()
.setKey(issueKey)
.setType(RuleType.BUG)
.setNew(true)
.setRuleKey(RuleKey.of("keyRepo", "r:2145"))
.setProjectUuid("projectUuid")
.setComponentUuid("fileUuid")
.setLine(5000)
.setEffort(Duration.create(10L))
.setResolution("wontfix")
.setStatus("CLOSED")
.setSeverity("BLOCKER")
.addComment(comment)
.setCreationDate(date)
.setUpdateDate(date)
.setCloseDate(date)
.setCurrentChange(new FieldDiffs())
.setAnticipatedTransitionUuid("anticipatedTransitionUuid");
IssueChangeDto mockCreated = mock(IssueChangeDto.class);
IssueChangeDto mockAnticipatedTransition = mock(IssueChangeDto.class);
try (MockedStatic<IssueChangeDto> issueChangeDtoMockedStatic = mockStatic(IssueChangeDto.class)) {
issueChangeDtoMockedStatic.when(() -> IssueChangeDto.of(any(DefaultIssueComment.class), anyString()))
.thenReturn(mockCreated);
issueChangeDtoMockedStatic.when(() -> IssueChangeDto.of(anyString(), any(FieldDiffs.class), anyString()))
.thenReturn(mockAnticipatedTransition);
underTest.insertChanges(issueChangeMapper, issue, uuidFactory);
}
verify(issueChangeMapper, times(2)).insert(any(IssueChangeDto.class));
verify(issueChangeMapper).insert(mockCreated);
verify(issueChangeMapper).insert(mockAnticipatedTransition);
} |
Settings.Builder getSettings() {
return settings;
} | @Test
@UseDataProvider("indexWithAndWithoutRelations")
public void in_standalone_searchReplicas_is_not_overridable(Index index) {
settings.setProperty(SEARCH_REPLICAS.getKey(), "5");
NewIndex newIndex = new SimplestNewIndex(IndexType.main(index, "foo"), defaultSettingsConfiguration);
assertThat(newIndex.getSettings().get("index.number_of_replicas")).isEqualTo("0");
} |
public FEELFnResult<Object> invoke(@ParameterName("list") List list) {
if ( list == null || list.isEmpty() ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null or empty"));
} else {
try {
return FEELFnResult.ofResult(Collections.min(list, new InterceptNotComparableComparator()));
} catch (ClassCastException e) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains items that are not comparable"));
}
}
} | @Test
void invokeArrayOfStrings() {
FunctionTestUtil.assertResult(minFunction.invoke(new Object[]{"a"}), "a");
FunctionTestUtil.assertResult(minFunction.invoke(new Object[]{"a", "b", "c"}), "a");
FunctionTestUtil.assertResult(minFunction.invoke(new Object[]{"b", "a", "c"}), "a");
FunctionTestUtil.assertResult(minFunction.invoke(new Object[]{"b", "c", "a"}), "a");
} |
@Override
public ByteBuf unwrap() {
return buffer;
} | @Test
public void testUnwrap() {
ByteBuf buf = buffer(1);
assertSame(buf, unmodifiableBuffer(buf).unwrap());
} |
@Override
public <T> CompletableFuture<T> submit(Callable<T> callable) {
final CompletableFuture<T> promise = new CompletableFuture<>();
try {
CompletableFuture.supplyAsync(ContextPropagator.decorateSupplier(config.getContextPropagator(),() -> {
try {
publishBulkheadEvent(() -> new BulkheadOnCallPermittedEvent(name));
return callable.call();
} catch (CompletionException e) {
throw e;
} catch (Exception e){
throw new CompletionException(e);
}
}), executorService).whenComplete((result, throwable) -> {
publishBulkheadEvent(() -> new BulkheadOnCallFinishedEvent(name));
if (throwable != null) {
promise.completeExceptionally(throwable);
} else {
promise.complete(result);
}
});
} catch (RejectedExecutionException rejected) {
publishBulkheadEvent(() -> new BulkheadOnCallRejectedEvent(name));
throw BulkheadFullException.createBulkheadFullException(this);
}
return promise;
} | @Test
public void testRunnableThreadLocalContextPropagator() {
TestThreadLocalContextHolder.put("ValueShouldCrossThreadBoundary");
AtomicReference<String> reference = new AtomicReference<>();
fixedThreadPoolBulkhead
.submit(() -> reference.set((String) TestThreadLocalContextHolder.get().orElse(null)));
waitAtMost(5, TimeUnit.SECONDS).until(matches(() ->
assertThat(reference).hasValue("ValueShouldCrossThreadBoundary")));
} |
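The ContextPropagator.decorateSupplier call above is what lets the test's ThreadLocal value survive the hop onto the pool thread. A minimal sketch of the underlying idea, not Resilience4j's API: capture on the submitting thread, restore on the worker.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.function.Supplier;

public class PropagationSketch {
    static final ThreadLocal<String> CTX = new ThreadLocal<>();

    // Capture the caller's value now; re-install it when the supplier runs elsewhere.
    static <T> Supplier<T> decorate(Supplier<T> inner) {
        String captured = CTX.get();
        return () -> {
            CTX.set(captured);
            try {
                return inner.get();
            } finally {
                CTX.remove(); // avoid leaking the value into the pool thread
            }
        };
    }

    public static void main(String[] args) throws Exception {
        CTX.set("ValueShouldCrossThreadBoundary");
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Supplier<String> decorated = decorate(CTX::get); // capture happens here, on main
        Future<String> result = pool.submit(decorated::get);
        System.out.println(result.get()); // prints the captured value
        pool.shutdown();
    }
}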
public BundleProcessor getProcessor(
BeamFnApi.ProcessBundleDescriptor descriptor,
List<RemoteInputDestination> remoteInputDestinations) {
checkState(
!descriptor.hasStateApiServiceDescriptor(),
"The %s cannot support a %s containing a state %s.",
BundleProcessor.class.getSimpleName(),
BeamFnApi.ProcessBundleDescriptor.class.getSimpleName(),
Endpoints.ApiServiceDescriptor.class.getSimpleName());
return getProcessor(descriptor, remoteInputDestinations, NoOpStateDelegator.INSTANCE);
} | @Test
public void testNewBundleNoDataDoesNotCrash() throws Exception {
CompletableFuture<InstructionResponse> processBundleResponseFuture = new CompletableFuture<>();
when(fnApiControlClient.handle(any(BeamFnApi.InstructionRequest.class)))
.thenReturn(processBundleResponseFuture);
FullWindowedValueCoder<String> coder =
FullWindowedValueCoder.of(StringUtf8Coder.of(), Coder.INSTANCE);
BundleProcessor processor =
sdkHarnessClient.getProcessor(
descriptor,
Collections.singletonList(
RemoteInputDestination.of(
(FullWindowedValueCoder) coder, SDK_GRPC_READ_TRANSFORM)));
when(dataService.createOutboundAggregator(any(), anyBoolean()))
.thenReturn(mock(BeamFnDataOutboundAggregator.class));
try (RemoteBundle activeBundle =
processor.newBundle(Collections.emptyMap(), BundleProgressHandler.ignored())) {
// Correlating the ProcessBundleRequest and ProcessBundleResponse is owned by the underlying
// FnApiControlClient. The SdkHarnessClient owns just wrapping the request and unwrapping
// the response.
//
// Currently there are no fields so there's nothing to check. This test is formulated
// to match the pattern it should have if/when the response is meaningful.
BeamFnApi.ProcessBundleResponse response = ProcessBundleResponse.getDefaultInstance();
processBundleResponseFuture.complete(
BeamFnApi.InstructionResponse.newBuilder().setProcessBundle(response).build());
}
} |
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
} | @Test
public void incompleteForgeInstallation6() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/logs/incomplete_forge_installation6.txt")),
CrashReportAnalyzer.Rule.INCOMPLETE_FORGE_INSTALLATION);
} |
@Override
public int getDegree(int vertex) {
if (digraph) {
return getIndegree(vertex) + getOutdegree(vertex);
} else {
return getOutdegree(vertex);
}
} | @Test
public void testGetDegree() {
System.out.println("getDegree");
assertEquals(0, g1.getDegree(1));
assertEquals(2, g2.getDegree(1));
g2.addEdge(1, 1);
assertEquals(4, g2.getDegree(1));
assertEquals(4, g3.getDegree(1));
assertEquals(4, g3.getDegree(2));
assertEquals(4, g3.getDegree(3));
assertEquals(2, g4.getDegree(4));
assertEquals(0, g5.getDegree(1));
assertEquals(1, g6.getDegree(1));
g6.addEdge(1, 1);
assertEquals(2, g6.getDegree(1));
assertEquals(2, g7.getDegree(1));
assertEquals(2, g7.getDegree(2));
assertEquals(2, g7.getDegree(3));
assertEquals(2, g8.getDegree(4));
} |
@Override
@Deprecated
public <VR> KStream<K, VR> flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, Iterable<VR>> valueTransformerSupplier,
final String... stateStoreNames) {
Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null");
return doFlatTransformValues(
toValueTransformerWithKeySupplier(valueTransformerSupplier),
NamedInternal.empty(),
stateStoreNames);
} | @Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullStoreNamesOnFlatTransformValuesWithFlatValueWithKeySupplier() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.flatTransformValues(
flatValueTransformerWithKeySupplier,
(String[]) null));
assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array"));
} |
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
if (listClass != null || inner != null) {
log.error("Could not configure ListDeserializer as some parameters were already set -- listClass: {}, inner: {}", listClass, inner);
throw new ConfigException("List deserializer was already initialized using a non-default constructor");
}
configureListClass(configs, isKey);
configureInnerSerde(configs, isKey);
} | @Test
public void testListValueDeserializerNoArgConstructorsShouldThrowKafkaExceptionDueInvalidInnerClass() {
props.put(CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_TYPE_CLASS, ArrayList.class);
props.put(CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS, new FakeObject());
final KafkaException exception = assertThrows(
KafkaException.class,
() -> listDeserializer.configure(props, false)
);
assertEquals("Could not determine the inner serde class instance using "
+ "\"" + CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS + "\" property.", exception.getMessage());
} |
@Override
public long connectionDelay(Node node, long now) {
return connectionStates.connectionDelay(node.idString(), now);
} | @Test
public void testConnectionDelayConnected() {
awaitReady(client, node);
long now = time.milliseconds();
long delay = client.connectionDelay(node, now);
assertEquals(Long.MAX_VALUE, delay);
} |
@POST
@Path("{nodeId}/reset")
@ApiOperation("Reset a removed node to rejoin the cluster")
@AuditEvent(type = DATANODE_RESET)
@RequiresPermissions(RestPermissions.DATANODE_RESET)
public DataNodeDto resetNode(@ApiParam(name = "nodeId", required = true) @PathParam("nodeId") String nodeId) {
try {
return dataNodeCommandService.resetNode(nodeId);
} catch (NodeNotFoundException e) {
throw new NotFoundException("Node " + nodeId + " not found");
}
} | @Test
public void verifyResetServiceCalled() throws NodeNotFoundException {
classUnderTest.resetNode(NODEID);
verify(dataNodeCommandService).resetNode(NODEID);
} |
public void schedule(BeanContainer container, String id, String cron, String interval, String zoneId, String className, String methodName, List<JobParameter> parameterList) {
JobScheduler scheduler = container.beanInstance(JobScheduler.class);
String jobId = getId(id);
String optionalCronExpression = getCronExpression(cron);
String optionalInterval = getInterval(interval);
if (StringUtils.isNullOrEmpty(cron) && StringUtils.isNullOrEmpty(optionalInterval))
throw new IllegalArgumentException("Either cron or interval attribute is required.");
if (StringUtils.isNotNullOrEmpty(cron) && StringUtils.isNotNullOrEmpty(optionalInterval))
throw new IllegalArgumentException("Both cron and interval attribute provided. Only one is allowed.");
if (Recurring.RECURRING_JOB_DISABLED.equals(optionalCronExpression) || Recurring.RECURRING_JOB_DISABLED.equals(optionalInterval)) {
if (isNullOrEmpty(jobId)) {
LOGGER.warn("You are trying to disable a recurring job using placeholders but did not define an id.");
} else {
scheduler.deleteRecurringJob(jobId);
}
} else {
JobDetails jobDetails = new JobDetails(className, null, methodName, parameterList);
jobDetails.setCacheable(true);
if (isNotNullOrEmpty(optionalCronExpression)) {
scheduler.scheduleRecurrently(id, jobDetails, CronExpression.create(optionalCronExpression), getZoneId(zoneId));
} else {
scheduler.scheduleRecurrently(id, jobDetails, new Interval(optionalInterval), getZoneId(zoneId));
}
}
} | @Test
void scheduleSchedulesCronJobWithJobRunr() {
final String id = "my-job-id";
final JobDetails jobDetails = jobDetails().build();
final String cron = "*/15 * * * *";
final String interval = null;
final String zoneId = null;
jobRunrRecurringJobRecorder.schedule(beanContainer, id, cron, interval, zoneId, jobDetails.getClassName(), jobDetails.getMethodName(), jobDetails.getJobParameters());
verify(jobScheduler).scheduleRecurrently(eq(id), jobDetailsArgumentCaptor.capture(), eq(CronExpression.create("*/15 * * * *")), eq(ZoneId.systemDefault()));
assertThat(jobDetailsArgumentCaptor.getValue())
.hasClassName(jobDetails.getClassName())
.hasMethodName(jobDetails.getMethodName())
.hasArgs(jobDetails.getJobParameterValues());
} |
public static String replaceInternalStorage(
RunContext runContext,
@Nullable String command,
boolean replaceWithRelativePath
) throws IOException {
if (command == null) {
return "";
}
return INTERNAL_STORAGE_PATTERN
.matcher(command)
.replaceAll(throwFunction(matchResult -> {
String localFile = saveOnLocalStorage(runContext, matchResult.group()).replace("\\", "/");
if (!replaceWithRelativePath) {
return localFile;
}
return runContext.workingDir().path().relativize(Path.of(localFile)).toString();
}));
} | @Test
void uploadInputFiles() throws IOException {
var runContext = runContextFactory.of();
Path path = Path.of("/tmp/unittest/file.txt");
if (!path.toFile().exists()) {
Files.createFile(path);
}
List<File> filesToDelete = new ArrayList<>();
String internalStorageUri = "kestra://some/file.txt";
try {
String wdir = "/my/wd";
var commands = ScriptService.replaceInternalStorage(
runContext,
Map.of("workingDir", wdir),
List.of(
"my command with an internal storage file: " + internalStorageUri,
"my command with some additional var usage: {{ workingDir }}"
),
false
);
assertThat(commands, not(empty()));
assertThat(commands.getFirst(), not(is("my command with an internal storage file: " + internalStorageUri)));
Matcher matcher = COMMAND_PATTERN_CAPTURE_LOCAL_PATH.matcher(commands.getFirst());
assertThat(matcher.matches(), is(true));
File file = Path.of(matcher.group(1)).toFile();
assertThat(file.exists(), is(true));
filesToDelete.add(file);
assertThat(commands.get(1), is("my command with some additional var usage: " + wdir));
commands = ScriptService.replaceInternalStorage(runContext, Collections.emptyMap(), List.of("my command with an internal storage file: " + internalStorageUri), true);
matcher = COMMAND_PATTERN_CAPTURE_LOCAL_PATH.matcher(commands.getFirst());
assertThat(matcher.matches(), is(true));
file = runContext.workingDir().resolve(Path.of(matcher.group(1))).toFile();
assertThat(file.exists(), is(true));
filesToDelete.add(file);
} catch (IllegalVariableEvaluationException e) {
throw new RuntimeException(e);
} finally {
filesToDelete.forEach(File::delete);
path.toFile().delete();
}
} |
void runOnce() {
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
return;
}
} catch (AuthenticationException e) {
// This is already logged as error, but propagated here to perform any clean ups.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
} | @Test
public void testExpiryOfFirstBatchShouldCauseEpochBumpIfFutureBatchesFail() throws Exception {
final long producerId = 343434L;
TransactionManager transactionManager = createTransactionManager();
setupWithTransactionState(transactionManager);
prepareAndReceiveInitProducerId(producerId, Errors.NONE);
assertTrue(transactionManager.hasProducerId());
assertEquals(0, transactionManager.sequenceNumber(tp0));
// Send first ProduceRequest
Future<RecordMetadata> request1 = appendToAccumulator(tp0);
sender.runOnce(); // send request
time.sleep(1000L);
Future<RecordMetadata> request2 = appendToAccumulator(tp0);
sender.runOnce(); // send request
assertEquals(2, client.inFlightRequestCount());
sendIdempotentProducerResponse(0, tp0, Errors.NOT_LEADER_OR_FOLLOWER, -1);
sender.runOnce(); // receive first response
Node node = metadata.fetch().nodes().get(0);
time.sleep(1000L);
client.disconnect(node.idString());
client.backoff(node, 10);
sender.runOnce(); // now expire the first batch.
assertFutureFailure(request1, TimeoutException.class);
assertTrue(transactionManager.hasUnresolvedSequence(tp0));
// let's enqueue another batch, which should not be dequeued until the unresolved state is clear.
appendToAccumulator(tp0);
time.sleep(20);
assertFalse(request2.isDone());
sender.runOnce(); // send second request
sendIdempotentProducerResponse(1, tp0, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, 1);
sender.runOnce(); // receive second response, the third request shouldn't be sent since we are in an unresolved state.
Deque<ProducerBatch> batches = accumulator.getDeque(tp0);
// The epoch should be bumped and the second request should be requeued
assertEquals(2, batches.size());
sender.runOnce();
assertEquals((short) 1, transactionManager.producerIdAndEpoch().epoch);
assertEquals(1, transactionManager.sequenceNumber(tp0));
assertFalse(transactionManager.hasUnresolvedSequence(tp0));
} |
public void pullMessage(final PullRequest pullRequest) {
final ProcessQueue processQueue = pullRequest.getProcessQueue();
if (processQueue.isDropped()) {
log.info("the pull request[{}] is dropped.", pullRequest.toString());
return;
}
pullRequest.getProcessQueue().setLastPullTimestamp(System.currentTimeMillis());
try {
this.makeSureStateOK();
} catch (MQClientException e) {
log.warn("pullMessage exception, consumer state not ok", e);
this.executePullRequestLater(pullRequest, pullTimeDelayMillsWhenException);
return;
}
if (this.isPause()) {
log.warn("consumer was paused, execute pull request later. instanceName={}, group={}", this.defaultMQPushConsumer.getInstanceName(), this.defaultMQPushConsumer.getConsumerGroup());
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_SUSPEND);
return;
}
long cachedMessageCount = processQueue.getMsgCount().get();
long cachedMessageSizeInMiB = processQueue.getMsgSize().get() / (1024 * 1024);
if (cachedMessageCount > this.defaultMQPushConsumer.getPullThresholdForQueue()) {
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_CACHE_FLOW_CONTROL);
if ((queueFlowControlTimes++ % 1000) == 0) {
log.warn(
"the cached message count exceeds the threshold {}, so do flow control, minOffset={}, maxOffset={}, count={}, size={} MiB, pullRequest={}, flowControlTimes={}",
this.defaultMQPushConsumer.getPullThresholdForQueue(), processQueue.getMsgTreeMap().firstKey(), processQueue.getMsgTreeMap().lastKey(), cachedMessageCount, cachedMessageSizeInMiB, pullRequest, queueFlowControlTimes);
}
return;
}
if (cachedMessageSizeInMiB > this.defaultMQPushConsumer.getPullThresholdSizeForQueue()) {
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_CACHE_FLOW_CONTROL);
if ((queueFlowControlTimes++ % 1000) == 0) {
log.warn(
"the cached message size exceeds the threshold {} MiB, so do flow control, minOffset={}, maxOffset={}, count={}, size={} MiB, pullRequest={}, flowControlTimes={}",
this.defaultMQPushConsumer.getPullThresholdSizeForQueue(), processQueue.getMsgTreeMap().firstKey(), processQueue.getMsgTreeMap().lastKey(), cachedMessageCount, cachedMessageSizeInMiB, pullRequest, queueFlowControlTimes);
}
return;
}
if (!this.consumeOrderly) {
if (processQueue.getMaxSpan() > this.defaultMQPushConsumer.getConsumeConcurrentlyMaxSpan()) {
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_CACHE_FLOW_CONTROL);
if ((queueMaxSpanFlowControlTimes++ % 1000) == 0) {
log.warn(
"the queue's messages, span too long, so do flow control, minOffset={}, maxOffset={}, maxSpan={}, pullRequest={}, flowControlTimes={}",
processQueue.getMsgTreeMap().firstKey(), processQueue.getMsgTreeMap().lastKey(), processQueue.getMaxSpan(),
pullRequest, queueMaxSpanFlowControlTimes);
}
return;
}
} else {
if (processQueue.isLocked()) {
if (!pullRequest.isPreviouslyLocked()) {
long offset = -1L;
try {
offset = this.rebalanceImpl.computePullFromWhereWithException(pullRequest.getMessageQueue());
if (offset < 0) {
throw new MQClientException(ResponseCode.SYSTEM_ERROR, "Unexpected offset " + offset);
}
} catch (Exception e) {
this.executePullRequestLater(pullRequest, pullTimeDelayMillsWhenException);
log.error("Failed to compute pull offset, pullResult: {}", pullRequest, e);
return;
}
boolean brokerBusy = offset < pullRequest.getNextOffset();
log.info("the first time to pull message, so fix offset from broker. pullRequest: {} NewOffset: {} brokerBusy: {}",
pullRequest, offset, brokerBusy);
if (brokerBusy) {
log.info("[NOTIFYME]the first time to pull message, but pull request offset larger than broker consume offset. pullRequest: {} NewOffset: {}",
pullRequest, offset);
}
pullRequest.setPreviouslyLocked(true);
pullRequest.setNextOffset(offset);
}
} else {
this.executePullRequestLater(pullRequest, pullTimeDelayMillsWhenException);
log.info("pull message later because not locked in broker, {}", pullRequest);
return;
}
}
final MessageQueue messageQueue = pullRequest.getMessageQueue();
final SubscriptionData subscriptionData = this.rebalanceImpl.getSubscriptionInner().get(messageQueue.getTopic());
if (null == subscriptionData) {
this.executePullRequestLater(pullRequest, pullTimeDelayMillsWhenException);
log.warn("find the consumer's subscription failed, {}", pullRequest);
return;
}
final long beginTimestamp = System.currentTimeMillis();
PullCallback pullCallback = new PullCallback() {
@Override
public void onSuccess(PullResult pullResult) {
if (pullResult != null) {
pullResult = DefaultMQPushConsumerImpl.this.pullAPIWrapper.processPullResult(pullRequest.getMessageQueue(), pullResult,
subscriptionData);
switch (pullResult.getPullStatus()) {
case FOUND:
long prevRequestOffset = pullRequest.getNextOffset();
pullRequest.setNextOffset(pullResult.getNextBeginOffset());
long pullRT = System.currentTimeMillis() - beginTimestamp;
DefaultMQPushConsumerImpl.this.getConsumerStatsManager().incPullRT(pullRequest.getConsumerGroup(),
pullRequest.getMessageQueue().getTopic(), pullRT);
long firstMsgOffset = Long.MAX_VALUE;
if (pullResult.getMsgFoundList() == null || pullResult.getMsgFoundList().isEmpty()) {
DefaultMQPushConsumerImpl.this.executePullRequestImmediately(pullRequest);
} else {
firstMsgOffset = pullResult.getMsgFoundList().get(0).getQueueOffset();
DefaultMQPushConsumerImpl.this.getConsumerStatsManager().incPullTPS(pullRequest.getConsumerGroup(),
pullRequest.getMessageQueue().getTopic(), pullResult.getMsgFoundList().size());
boolean dispatchToConsume = processQueue.putMessage(pullResult.getMsgFoundList());
DefaultMQPushConsumerImpl.this.consumeMessageService.submitConsumeRequest(
pullResult.getMsgFoundList(),
processQueue,
pullRequest.getMessageQueue(),
dispatchToConsume);
if (DefaultMQPushConsumerImpl.this.defaultMQPushConsumer.getPullInterval() > 0) {
DefaultMQPushConsumerImpl.this.executePullRequestLater(pullRequest,
DefaultMQPushConsumerImpl.this.defaultMQPushConsumer.getPullInterval());
} else {
DefaultMQPushConsumerImpl.this.executePullRequestImmediately(pullRequest);
}
}
if (pullResult.getNextBeginOffset() < prevRequestOffset
|| firstMsgOffset < prevRequestOffset) {
log.warn(
"[BUG] pull message result maybe data wrong, nextBeginOffset: {} firstMsgOffset: {} prevRequestOffset: {}",
pullResult.getNextBeginOffset(),
firstMsgOffset,
prevRequestOffset);
}
break;
case NO_NEW_MSG:
case NO_MATCHED_MSG:
pullRequest.setNextOffset(pullResult.getNextBeginOffset());
DefaultMQPushConsumerImpl.this.correctTagsOffset(pullRequest);
DefaultMQPushConsumerImpl.this.executePullRequestImmediately(pullRequest);
break;
case OFFSET_ILLEGAL:
log.warn("the pull request offset illegal, {} {}",
pullRequest.toString(), pullResult.toString());
pullRequest.setNextOffset(pullResult.getNextBeginOffset());
pullRequest.getProcessQueue().setDropped(true);
DefaultMQPushConsumerImpl.this.executeTask(new Runnable() {
@Override
public void run() {
try {
DefaultMQPushConsumerImpl.this.offsetStore.updateAndFreezeOffset(pullRequest.getMessageQueue(),
pullRequest.getNextOffset());
DefaultMQPushConsumerImpl.this.offsetStore.persist(pullRequest.getMessageQueue());
// removeProcessQueue will also remove offset to cancel the frozen status.
DefaultMQPushConsumerImpl.this.rebalanceImpl.removeProcessQueue(pullRequest.getMessageQueue());
DefaultMQPushConsumerImpl.this.rebalanceImpl.getmQClientFactory().rebalanceImmediately();
log.warn("fix the pull request offset, {}", pullRequest);
} catch (Throwable e) {
log.error("executeTaskLater Exception", e);
}
}
});
break;
default:
break;
}
}
}
@Override
public void onException(Throwable e) {
if (!pullRequest.getMessageQueue().getTopic().startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
if (e instanceof MQBrokerException && ((MQBrokerException) e).getResponseCode() == ResponseCode.SUBSCRIPTION_NOT_LATEST) {
log.warn("the subscription is not latest, group={}, messageQueue={}", groupName(), messageQueue);
} else {
log.warn("execute the pull request exception, group={}, messageQueue={}", groupName(), messageQueue, e);
}
}
if (e instanceof MQBrokerException && ((MQBrokerException) e).getResponseCode() == ResponseCode.FLOW_CONTROL) {
DefaultMQPushConsumerImpl.this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_BROKER_FLOW_CONTROL);
} else {
DefaultMQPushConsumerImpl.this.executePullRequestLater(pullRequest, pullTimeDelayMillsWhenException);
}
}
};
boolean commitOffsetEnable = false;
long commitOffsetValue = 0L;
if (MessageModel.CLUSTERING == this.defaultMQPushConsumer.getMessageModel()) {
commitOffsetValue = this.offsetStore.readOffset(pullRequest.getMessageQueue(), ReadOffsetType.READ_FROM_MEMORY);
if (commitOffsetValue > 0) {
commitOffsetEnable = true;
}
}
String subExpression = null;
boolean classFilter = false;
SubscriptionData sd = this.rebalanceImpl.getSubscriptionInner().get(pullRequest.getMessageQueue().getTopic());
if (sd != null) {
if (this.defaultMQPushConsumer.isPostSubscriptionWhenPull() && !sd.isClassFilterMode()) {
subExpression = sd.getSubString();
}
classFilter = sd.isClassFilterMode();
}
int sysFlag = PullSysFlag.buildSysFlag(
commitOffsetEnable, // commitOffset
true, // suspend
subExpression != null, // subscription
classFilter // class filter
);
try {
this.pullAPIWrapper.pullKernelImpl(
pullRequest.getMessageQueue(),
subExpression,
subscriptionData.getExpressionType(),
subscriptionData.getSubVersion(),
pullRequest.getNextOffset(),
this.defaultMQPushConsumer.getPullBatchSize(),
this.defaultMQPushConsumer.getPullBatchSizeInBytes(),
sysFlag,
commitOffsetValue,
BROKER_SUSPEND_MAX_TIME_MILLIS,
CONSUMER_TIMEOUT_MILLIS_WHEN_SUSPEND,
CommunicationMode.ASYNC,
pullCallback
);
} catch (Exception e) {
log.error("pullKernelImpl exception", e);
this.executePullRequestLater(pullRequest, pullTimeDelayMillsWhenException);
}
} | @Test
public void testPullMessageWithStateNotOk() {
when(pullRequest.getProcessQueue()).thenReturn(processQueue);
defaultMQPushConsumerImpl.pullMessage(pullRequest);
} |
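One detail in the flow-control checks above: cachedMessageSizeInMiB uses integer division (getMsgSize().get() / (1024 * 1024)), so any cached backlog under one MiB reads as 0 and the size threshold can only trip once at least a whole MiB is buffered.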
@Override
public int getGlyphId(int characterCode)
{
Integer glyphId = characterCodeToGlyphId.get(characterCode);
return glyphId == null ? 0 : glyphId;
} | @Test
void testVerticalSubstitution() throws IOException
{
File ipaFont = new File("target/fonts/ipag00303", "ipag.ttf");
TrueTypeFont ttf = new TTFParser().parse(new RandomAccessReadBufferedFile(ipaFont));
CmapLookup unicodeCmapLookup1 = ttf.getUnicodeCmapLookup();
int hgid1 = unicodeCmapLookup1.getGlyphId('γ');
int hgid2 = unicodeCmapLookup1.getGlyphId('γ');
ttf.enableVerticalSubstitutions();
CmapLookup unicodeCmapLookup2 = ttf.getUnicodeCmapLookup();
int vgid1 = unicodeCmapLookup2.getGlyphId('γ');
int vgid2 = unicodeCmapLookup2.getGlyphId('γ');
System.out.println(hgid1 + " " + hgid2);
System.out.println(vgid1 + " " + vgid2);
Assertions.assertEquals(441, hgid1);
Assertions.assertEquals(442, hgid2);
Assertions.assertEquals(7392, vgid1);
Assertions.assertEquals(7393, vgid2);
} |
@Override
public void trackAppInstall(JSONObject properties, boolean disableCallback) {
} | @Test
public void trackAppInstall() {
mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
@Override
public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
Assert.fail();
return false;
}
});
mSensorsAPI.trackAppInstall();
} |
public static void main(String[] args) {
// simple troll
LOGGER.info("A simple looking troll approaches.");
var troll = new SimpleTroll();
troll.attack();
troll.fleeBattle();
LOGGER.info("Simple troll power: {}.\n", troll.getAttackPower());
// change the behavior of the simple troll by adding a decorator
LOGGER.info("A troll with huge club surprises you.");
var clubbedTroll = new ClubbedTroll(troll);
clubbedTroll.attack();
clubbedTroll.fleeBattle();
LOGGER.info("Clubbed troll power: {}.\n", clubbedTroll.getAttackPower());
} | @Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
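The entry above is the java-design-patterns decorator demo. A compressed sketch of the pattern it exercises, with hypothetical names: the decorator implements the same troll interface and delegates, layering extra attack power on top (the +10 here is illustrative).

// Hypothetical minimal decorator, mirroring the structure of SimpleTroll/ClubbedTroll.
interface TrollSketch {
    int getAttackPower();
}

class SimpleTrollSketch implements TrollSketch {
    @Override
    public int getAttackPower() {
        return 10;
    }
}

class ClubbedTrollSketch implements TrollSketch {
    private final TrollSketch decorated;

    ClubbedTrollSketch(TrollSketch decorated) {
        this.decorated = decorated;
    }

    @Override
    public int getAttackPower() {
        return decorated.getAttackPower() + 10; // delegate, then add the club's bonus
    }
}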
public static void processElectLeadersResult(ElectLeadersResult result, Set<TopicPartition> deletedTopicPartitions) {
if (result == null) {
return;
}
try {
Map<TopicPartition, Optional<Throwable>> partitions = result.partitions().get();
Set<TopicPartition> noElectionNeeded = new HashSet<>();
Set<TopicPartition> preferredLeaderUnavailable = new HashSet<>();
for (Map.Entry<TopicPartition, Optional<Throwable>> entry : partitions.entrySet()) {
TopicPartition tp = entry.getKey();
if (entry.getValue().isEmpty()) {
LOG.debug("Leader election for {} has succeeded.", tp);
} else {
if (Errors.ELECTION_NOT_NEEDED.exception().getClass() == entry.getValue().get().getClass()) {
// The leader is already the preferred leader.
noElectionNeeded.add(tp);
} else if (Errors.UNKNOWN_TOPIC_OR_PARTITION.exception().getClass() == entry.getValue().get().getClass()
|| Errors.INVALID_TOPIC_EXCEPTION.exception().getClass() == entry.getValue().get().getClass()) {
// Topic (1) has been deleted -- i.e. since partition does not exist, it is assumed to be deleted or (2) is being deleted.
deletedTopicPartitions.add(tp);
} else if (Errors.PREFERRED_LEADER_NOT_AVAILABLE.exception().getClass() == entry.getValue().get().getClass()) {
// We tried to elect the preferred leader but it is offline.
preferredLeaderUnavailable.add(tp);
} else {
// Based on the KafkaAdminClient code, looks like there is no handling / retry in response to a Errors.NOT_CONTROLLER -- e.g. if
// the controller changes during a request, the leader election will be dropped with an error response. In such cases, a
// followup execution would be needed (i.e. see Executor#maybeReexecuteLeadershipTasks(Set<TopicPartition>)).
LOG.warn("Failed to elect preferred leader for {}.", tp, entry.getValue().get());
}
}
}
// Log relevant information for debugging purposes.
if (!noElectionNeeded.isEmpty()) {
LOG.debug("Leader election not needed for {}.", noElectionNeeded);
}
if (!preferredLeaderUnavailable.isEmpty()) {
LOG.debug("The preferred leader was not available for {}.", preferredLeaderUnavailable);
}
if (!deletedTopicPartitions.isEmpty()) {
LOG.debug("Corresponding topics have been deleted before leader election {}.", deletedTopicPartitions);
}
} catch (ExecutionException ee) {
if (Errors.REQUEST_TIMED_OUT.exception().getClass() == ee.getCause().getClass()) {
throw new IllegalStateException(String.format("electLeaders request timed out. Check for Kafka broker- or controller-side issues and"
+ " consider increasing the configured timeout (see %s).",
ExecutorConfig.ADMIN_CLIENT_REQUEST_TIMEOUT_MS_CONFIG), ee.getCause());
} else if (Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass() == ee.getCause().getClass()) {
throw new IllegalStateException("Cruise Control is not authorized to trigger leader election", ee.getCause());
} else {
// Not expected to happen.
throw new IllegalStateException("An unknown execution exception is encountered in response to electLeaders.", ee.getCause());
}
} catch (InterruptedException e) {
LOG.warn("Interrupted during the process of ElectLeadersResult.", e);
}
} | @Test
public void testProcessElectLeadersResult() throws Exception {
// Case 1: Handle null result input. Expect no side effect.
ExecutionUtils.processElectLeadersResult(null, Collections.emptySet());
KafkaFutureImpl<Map<TopicPartition, Optional<Throwable>>> partitions = EasyMock.mock(KafkaFutureImpl.class);
Constructor<ElectLeadersResult> constructor = null;
try {
constructor = ElectLeadersResult.class.getDeclaredConstructor(KafkaFuture.class);
} catch (NoSuchMethodException e) {
LOG.debug("Unable to find Kafka 3.0+ constructor for ElectLeaderResult class", e);
}
if (constructor == null) {
try {
constructor = ElectLeadersResult.class.getDeclaredConstructor(KafkaFutureImpl.class);
} catch (NoSuchMethodException e) {
LOG.debug("Unable to find Kafka 3.0- constructor for ElectLeaderResult class", e);
}
}
if (constructor == null) {
throw new NoSuchElementException("Unable to find viable constructor for the ElectionLeadersResult class");
}
constructor.setAccessible(true);
ElectLeadersResult result;
// Case 2: Handle both partitions are successful
// Case 3: Handle one partition successful, the other has ElectionNotNeededException
// Case 4: Handle one partition successful, the other has PreferredLeaderNotAvailableException
// Case 5: Handle one partition successful, the other has NotControllerException
for (Map<TopicPartition, Optional<Throwable>> entry : Set.of(SUCCESSFUL_PARTITIONS, ONE_WITH_ENN, ONE_WITH_PLNA, ONE_WITH_NC)) {
result = constructor.newInstance(partitions);
EasyMock.expect(partitions.get()).andReturn(entry).once();
EasyMock.replay(partitions);
ExecutionUtils.processElectLeadersResult(result, Collections.emptySet());
EasyMock.verify(partitions);
EasyMock.reset(partitions);
}
// Case 6: Handle one partition successful, the other has UnknownTopicOrPartitionException
// Case 7: Handle one partition successful, the other has InvalidTopicException
for (Map<TopicPartition, Optional<Throwable>> entry : Set.of(ONE_WITH_UTOP, ONE_WITH_IT)) {
result = constructor.newInstance(partitions);
EasyMock.expect(partitions.get()).andReturn(entry).once();
EasyMock.replay(partitions);
Set<TopicPartition> deletedTopicPartitions = new HashSet<>();
ExecutionUtils.processElectLeadersResult(result, deletedTopicPartitions);
Assert.assertEquals(1, deletedTopicPartitions.size());
Assert.assertEquals(P1, deletedTopicPartitions.iterator().next());
EasyMock.verify(partitions);
EasyMock.reset(partitions);
}
// Case 8: Handle execution timeout exception. Expect no side effect.
// Case 9: Handle execution ClusterAuthorization exception. Expect no side effect.
// Case 10: Handle unexpected execution exception (i.e. ControllerMovedException). Expect no side effect.
for (Throwable entry : Set.of(new org.apache.kafka.common.errors.TimeoutException(),
new org.apache.kafka.common.errors.ClusterAuthorizationException(""),
new org.apache.kafka.common.errors.ControllerMovedException(""))) {
result = constructor.newInstance(partitions);
EasyMock.expect(partitions.get()).andThrow(new ExecutionException(entry)).once();
EasyMock.replay(partitions);
ElectLeadersResult exceptionResult = result;
Exception thrownException = assertThrows(IllegalStateException.class,
() -> ExecutionUtils.processElectLeadersResult(exceptionResult, Collections.emptySet()));
Assert.assertEquals(entry, thrownException.getCause());
EasyMock.verify(partitions);
EasyMock.reset(partitions);
}
// Case 11: Handle future wait interrupted exception
result = constructor.newInstance(partitions);
EasyMock.expect(partitions.get()).andThrow(new InterruptedException()).once();
EasyMock.replay(partitions);
ExecutionUtils.processElectLeadersResult(result, Collections.emptySet());
EasyMock.verify(partitions);
EasyMock.reset(partitions);
} |
public String managementAddress() {
return get(MANAGEMENT_ADDRESS, null);
} | @Test
public void testSetManagementAddress() {
SW_BDC.managementAddress(MANAGEMENT_ADDRESS_NEW);
assertEquals("Incorrect managementAddress", MANAGEMENT_ADDRESS_NEW, SW_BDC.managementAddress());
} |
@Override
public Mono<SaveResult> save(Publisher<DictionaryItemEntity> entityPublisher) {
return super.save(this.fillOrdinal(entityPublisher))
.doOnSuccess(r -> eventPublisher.publishEvent(ClearDictionaryCacheEvent.of()));
} | @Test
public void testAutoOrdinal() {
// Automatically fill in the ordinal
DictionaryItemEntity itemEntity = createItem("test-auto");
itemEntity.setOrdinal(null);
DictionaryItemEntity itemEntity2 = createItem("test-auto");
itemEntity2.setOrdinal(null);
defaultDictionaryItemService
.save(Flux.just(itemEntity, itemEntity2))
.then()
.as(StepVerifier::create)
.expectComplete()
.verify();
defaultDictionaryItemService
.query(QueryParamEntity.of("value","test-auto").noPaging())
.doOnNext(System.out::println)
.count()
.as(StepVerifier::create)
.expectNext(2L)
.verifyComplete();
} |
public static ConnectedComponents findComponentsRecursive(Graph graph, EdgeTransitionFilter edgeTransitionFilter, boolean excludeSingleEdgeComponents) {
return new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, excludeSingleEdgeComponents).findComponentsRecursive();
} | @Test
public void linearSingle() {
// 0 - 1
g.edge(0, 1).setDistance(1).set(speedEnc, 10, 10);
ConnectedComponents result = EdgeBasedTarjanSCC.findComponentsRecursive(g, fwdAccessFilter, false);
assertEquals(2, result.getEdgeKeys());
assertEquals(1, result.getTotalComponents());
assertEquals(1, result.getComponents().size());
assertTrue(result.getSingleEdgeComponents().isEmpty());
assertEquals(result.getComponents().get(0), result.getBiggestComponent());
assertEquals(IntArrayList.from(1, 0), result.getComponents().get(0));
} |
@Override
public boolean removeAll(Collection<?> c) {
return get(removeAllAsync(c));
} | @Test
public void testRemoveAll() {
RSetCache<Integer> set = redisson.getSetCache("set");
set.add(1);
set.add(2, 10, TimeUnit.SECONDS);
set.add(3);
assertThat(set.removeAll(Arrays.asList(1, 3))).isTrue();
assertThat(set.removeAll(Arrays.asList(1, 3))).isFalse();
assertThat(set).containsOnly(2);
set.destroy();
} |
Mono<Post> getById(UUID id) {
return client.get()
.uri(uriBuilder -> uriBuilder.path("/posts/{id}").build(id))
.accept(MediaType.APPLICATION_JSON)
.exchangeToMono(response -> {
if (response.statusCode().equals(HttpStatus.OK)) {
return response.bodyToMono(Post.class);
}
return response.createError();
});
} | @SneakyThrows
@Test
public void testGetPostById() {
var id = UUID.randomUUID();
var data = new Post(id, "title1", "content1", Status.DRAFT, LocalDateTime.now());
stubFor(get("/posts/" + id)
.willReturn(
aResponse()
.withHeader("Content-Type", "application/json")
.withResponseBody(Body.fromJsonBytes(Json.toByteArray(data)))
)
);
postClient.getById(id)
.as(StepVerifier::create)
.consumeNextWith(
post -> {
assertThat(post.id()).isEqualTo(id);
assertThat(post.title()).isEqualTo(data.title());
assertThat(post.content()).isEqualTo(data.content());
assertThat(post.status()).isEqualTo(data.status());
assertThat(post.createdAt()).isEqualTo(data.createdAt());
}
)
.verifyComplete();
verify(getRequestedFor(urlEqualTo("/posts/" + id))
.withHeader("Accept", equalTo("application/json"))
);
} |
void addHits(CounterRequest counterRequest) {
if (counterRequest.getHits() > 0) {
// clone to be thread-safe here
final CounterRequest newRequest = counterRequest.clone();
final CounterRequest request = getCounterRequestInternal(newRequest.getName());
synchronized (request) {
request.addHits(newRequest);
}
}
} | @Test
public void testAddHits() {
final CounterRequest counterRequest = createCounterRequest();
counter.addHits(counterRequest);
final List<CounterRequest> before = counter.getOrderedRequests();
counter.addHits(counterRequest);
counter.addHits(new CounterRequest("test", counter.getName()));
final List<CounterRequest> after = counter.getOrderedRequests();
after.get(0).removeHits(counterRequest);
// compare the CounterRequest contents via their toString output
assertEquals("requests", before.toString(), after.toString());
// reset counterRequest.getHits() to 0
counterRequest.removeHits(counterRequest);
// test adding hits with counterRequest at 0 hit(s)
counter.addHits(counterRequest);
} |
public static String getLocalhostStr() {
InetAddress localhost = getLocalhost();
if (null != localhost) {
return localhost.getHostAddress();
}
return null;
} | @Test
@Disabled
public void getLocalhostStrTest() {
final String localhost = NetUtil.getLocalhostStr();
assertNotNull(localhost);
} |
@Operation(summary = "unauthorizedUser", description = "UNAUTHORIZED_USER_NOTES")
@Parameters({
@Parameter(name = "alertgroupId", description = "ALERT_GROUP_ID", required = true, schema = @Schema(implementation = String.class))
})
@GetMapping(value = "/unauth-user")
@ResponseStatus(HttpStatus.OK)
@ApiException(UNAUTHORIZED_USER_ERROR)
public Result unauthorizedUser(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("alertgroupId") Integer alertgroupId) {
Map<String, Object> result = usersService.unauthorizedUser(loginUser, alertgroupId);
return returnDataList(result);
} | @Disabled
@Test
public void testUnauthorizedUser() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("alertgroupId", "1");
MvcResult mvcResult = mockMvc.perform(get("/users/unauth-user")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
} |
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof DependencyVersion)) {
return false;
}
if (this == obj) {
return true;
}
final DependencyVersion other = (DependencyVersion) obj;
final int minVersionMatchLength = Math.min(this.versionParts.size(), other.versionParts.size());
final int maxVersionMatchLength = Math.max(this.versionParts.size(), other.versionParts.size());
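// a bare major version (e.g. "2") is too ambiguous to equal a version with three or more parts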
if (minVersionMatchLength == 1 && maxVersionMatchLength >= 3) {
return false;
}
//TODO steal better version of code from compareTo
for (int i = 0; i < minVersionMatchLength; i++) {
final String thisPart = this.versionParts.get(i);
final String otherPart = other.versionParts.get(i);
if (!thisPart.equals(otherPart)) {
return false;
}
}
if (this.versionParts.size() > minVersionMatchLength) {
for (int i = minVersionMatchLength; i < this.versionParts.size(); i++) {
if (!"0".equals(this.versionParts.get(i))) {
return false;
}
}
}
if (other.versionParts.size() > minVersionMatchLength) {
for (int i = minVersionMatchLength; i < other.versionParts.size(); i++) {
if (!"0".equals(other.versionParts.get(i))) {
return false;
}
}
}
/*
* if (this.versionParts != other.versionParts && (this.versionParts == null || !this.versionParts.equals(other.versionParts))) {
* return false;
* }
*/
return true;
} | @Test
public void testEquals() {
DependencyVersion obj = new DependencyVersion("1.2.3.r1");
DependencyVersion instance = new DependencyVersion("1.2.3");
boolean expResult = false;
boolean result = instance.equals(obj);
assertEquals(expResult, result);
obj = new DependencyVersion("1.2.3");
expResult = true;
result = instance.equals(obj);
assertEquals(expResult, result);
instance = new DependencyVersion("2.0.0");
obj = new DependencyVersion("2");
expResult = false;
result = instance.equals(obj);
assertEquals(expResult, result);
obj = new DependencyVersion("2.0");
expResult = true;
result = instance.equals(obj);
assertEquals(expResult, result);
} |
public final void fillContext(String dataId, String group) {
this.dataId = dataId;
this.group = group;
} | @Test
void testFillContext() {
assertEquals(0, receivedMap.size());
MockShardListener listener = new MockShardListener();
listener.receiveConfigInfo(CONFIG_CONTENT);
assertEquals(2, receivedMap.size());
assertNull(receivedMap.get("group"));
assertNull(receivedMap.get("dataId"));
listener.fillContext("aaa", "ggg");
listener.receiveConfigInfo(CONFIG_CONTENT);
assertEquals(2, receivedMap.size());
assertEquals("ggg", receivedMap.get("group"));
assertEquals("aaa", receivedMap.get("dataId"));
} |
@Override
protected void decode(ChannelHandlerContext context, ByteBuf in, List<Object> out) throws Exception {
if (in.readableBytes() < SslUtils.SSL_RECORD_HEADER_LENGTH) {
return;
}
if (SslHandler.isEncrypted(in)) {
handleSsl(context);
} else {
handleNonSsl(context);
}
} | @Test
public void handlerReplaced() throws Exception {
final ChannelHandler nonSslHandler = Mockito.mock(ChannelHandler.class);
OptionalSslHandler handler = new OptionalSslHandler(sslContext) {
@Override
protected ChannelHandler newNonSslHandler(ChannelHandlerContext context) {
return nonSslHandler;
}
@Override
protected String newNonSslHandlerName() {
return HANDLER_NAME;
}
};
final ByteBuf payload = Unpooled.copiedBuffer("plaintext".getBytes());
try {
handler.decode(context, payload, null);
verify(pipeline).replace(handler, HANDLER_NAME, nonSslHandler);
} finally {
payload.release();
}
} |
public Optional<Number> evaluate(final List<KiePMMLDefineFunction> defineFunctions,
final List<KiePMMLDerivedField> derivedFields,
final List<KiePMMLOutputField> outputFields,
final Map<String, Object> inputData,
final PMMLRuntimeContext context,
final Number initialScore,
final REASONCODE_ALGORITHM reasoncodeAlgorithm,
final boolean useReasonCodes,
final Number baselineScore) {
Number accumulator = null;
for (KiePMMLCharacteristic characteristic : characteristics) {
final KiePMMLCharacteristic.ReasonCodeValue evaluation = characteristic.evaluate(defineFunctions,
derivedFields,
outputFields, inputData);
if (evaluation != null) {
final Number evaluationScore = evaluation.getScore();
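// seed the accumulator on the first match, falling back to 0 when no initial score is given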
if (accumulator == null) {
accumulator = initialScore != null ? initialScore : 0;
}
accumulator = addNumbers(accumulator, evaluationScore);
if (useReasonCodes && evaluation.getReasonCode() != null) {
populateReasonCodes(evaluation, characteristic, reasoncodeAlgorithm, context.getOutputFieldsMap()
, baselineScore);
}
}
}
return Optional.ofNullable(accumulator);
} | @Test
void evaluate() {
Double initialScore = 25.23;
KiePMMLCharacteristics kiePMMLCharacteristics = new KiePMMLCharacteristics("NAME", Collections.emptyList(),
getKiePMMLCharacteristicList());
PMMLRuntimeContextTest pmmlContextTest = new PMMLRuntimeContextTest();
Optional<Number> retrieved = kiePMMLCharacteristics.evaluate(Collections.emptyList(), Collections.emptyList()
, Collections.emptyList(), Collections.emptyMap(),
pmmlContextTest,
initialScore,
REASONCODE_ALGORITHM.POINTS_BELOW,
true,
0);
assertThat(retrieved).isNotNull();
assertThat(retrieved).isPresent();
Double EVALUATION_20 = baselineScore - value2;
Double EVALUATION_11 = baselineScore - value1;
Double expected = initialScore + value2 + value1 + 1;
assertThat(retrieved.get()).isEqualTo(expected);
final Map<String, Object> outputFieldsMap = pmmlContextTest.getOutputFieldsMap();
assertThat(outputFieldsMap).hasSize(2);
assertThat(outputFieldsMap).containsKey("REASON_CODE_20");
assertThat(outputFieldsMap.get("REASON_CODE_20")).isEqualTo(EVALUATION_20);
assertThat(outputFieldsMap).containsKey("REASON_CODE_11");
assertThat(outputFieldsMap.get("REASON_CODE_11")).isEqualTo(EVALUATION_11);
} |
public Optional<YamlRuleConfiguration> swapToYamlRuleConfiguration(final Collection<RepositoryTuple> repositoryTuples, final Class<? extends YamlRuleConfiguration> toBeSwappedType) {
RepositoryTupleEntity tupleEntity = toBeSwappedType.getAnnotation(RepositoryTupleEntity.class);
if (null == tupleEntity) {
return Optional.empty();
}
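// leaf tuples swap directly; node tuples are rebuilt from the target type's annotated fields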
return tupleEntity.leaf()
? swapToYamlRuleConfiguration(repositoryTuples, toBeSwappedType, tupleEntity)
: swapToYamlRuleConfiguration(repositoryTuples, toBeSwappedType, getFields(toBeSwappedType));
} | @Test
void assertSwapToYamlRuleConfigurationWithEmptyNodeYamlRuleConfiguration() {
Optional<YamlRuleConfiguration> actual = new RepositoryTupleSwapperEngine().swapToYamlRuleConfiguration(
Collections.singleton(new RepositoryTuple("/metadata/foo_db/rules/node/string_value/versions/0", "")), NodeYamlRuleConfiguration.class);
assertTrue(actual.isPresent());
NodeYamlRuleConfiguration actualYamlConfig = (NodeYamlRuleConfiguration) actual.get();
assertThat(actualYamlConfig.getStringValue(), is(""));
} |
@ConstantFunction(name = "int_divide", argTypes = {SMALLINT, SMALLINT}, returnType = SMALLINT)
public static ConstantOperator intDivideSmallInt(ConstantOperator first, ConstantOperator second) {
return ConstantOperator.createSmallInt((short) (first.getSmallint() / second.getSmallint()));
} | @Test
public void intDivideSmallInt() {
assertEquals(1, ScalarOperatorFunctions.intDivideSmallInt(O_SI_10, O_SI_10).getSmallint());
} |
public static Timestamp previous(Timestamp timestamp) {
if (timestamp.equals(Timestamp.MIN_VALUE)) {
return timestamp;
}
final int nanos = timestamp.getNanos();
final long seconds = timestamp.getSeconds();
if (nanos - 1 >= 0) {
return Timestamp.ofTimeSecondsAndNanos(seconds, nanos - 1);
} else {
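// nanos would underflow: borrow one second and wrap to the maximum nano value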
return Timestamp.ofTimeSecondsAndNanos(seconds - 1, NANOS_PER_SECOND - 1);
}
} | @Test
public void testPreviousDecrementsNanosWhenPossible() {
assertEquals(
Timestamp.ofTimeSecondsAndNanos(10L, 0),
TimestampUtils.previous(Timestamp.ofTimeSecondsAndNanos(10L, 1)));
} |
@Override
public IcebergEnumeratorState snapshotState(long checkpointId) {
return new IcebergEnumeratorState(
enumeratorPosition.get(), assigner.state(), enumerationHistory.snapshot());
} | @Test
public void testThrottlingDiscovery() throws Exception {
// create 10 splits
List<IcebergSourceSplit> splits =
SplitHelpers.createSplitsFromTransientHadoopTable(temporaryFolder, 10, 1);
TestingSplitEnumeratorContext<IcebergSourceSplit> enumeratorContext =
new TestingSplitEnumeratorContext<>(4);
ScanContext scanContext =
ScanContext.builder()
.streaming(true)
.startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_EARLIEST_SNAPSHOT)
// discover one snapshot at a time
.maxPlanningSnapshotCount(1)
.build();
ManualContinuousSplitPlanner splitPlanner = new ManualContinuousSplitPlanner(scanContext, 0);
ContinuousIcebergEnumerator enumerator =
createEnumerator(enumeratorContext, scanContext, splitPlanner);
// register reader-2, and let it request a split
enumeratorContext.registerReader(2, "localhost");
enumerator.addReader(2);
enumerator.handleSourceEvent(2, new SplitRequestEvent());
// add splits[0] to the planner for next discovery
splitPlanner.addSplits(Arrays.asList(splits.get(0)));
enumeratorContext.triggerAllActions();
// because discovered split was assigned to reader, pending splits should be empty
assertThat(enumerator.snapshotState(1).pendingSplits()).isEmpty();
// split assignment to reader-2 should contain splits[0, 1)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 1));
// add the remaining 9 splits (one for every snapshot)
// run discovery cycles while reader-2 still processing the splits[0]
for (int i = 1; i < 10; ++i) {
splitPlanner.addSplits(Arrays.asList(splits.get(i)));
enumeratorContext.triggerAllActions();
}
// can only discover up to 3 snapshots/splits
assertThat(enumerator.snapshotState(2).pendingSplits()).hasSize(3);
// split assignment to reader-2 should be splits[0, 1)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 1));
// now reader-2 finished splits[0]
enumerator.handleSourceEvent(2, new SplitRequestEvent(Arrays.asList(splits.get(0).splitId())));
enumeratorContext.triggerAllActions();
// still have 3 pending splits. After assigned splits[1] to reader-2, one more split was
// discovered and added.
assertThat(enumerator.snapshotState(3).pendingSplits()).hasSize(3);
// split assignment to reader-2 should be splits[0, 2)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 2));
// run 3 more split discovery cycles
for (int i = 0; i < 3; ++i) {
enumeratorContext.triggerAllActions();
}
// no more splits are discovered due to throttling
assertThat(enumerator.snapshotState(4).pendingSplits()).hasSize(3);
// split assignment to reader-2 should still be splits[0, 2)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 2));
// now reader-2 finished splits[1]
enumerator.handleSourceEvent(2, new SplitRequestEvent(Arrays.asList(splits.get(1).splitId())));
enumeratorContext.triggerAllActions();
// still have 3 pending splits. After assigned new splits[2] to reader-2, one more split was
// discovered and added.
assertThat(enumerator.snapshotState(5).pendingSplits()).hasSize(3);
// split assignment to reader-2 should be splits[0, 3)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 3));
} |
@Override
protected boolean hasPluginInfo() {
return metadataStore().getPluginInfo(getPluginId()) != null;
} | @Test
public void shouldReturnFalseIfPluginInfoIsNotDefined() {
final ArtifactStore artifactStore = new ArtifactStore("id", "plugin_id");
assertFalse(artifactStore.hasPluginInfo());
} |
public static String generateReleaseKey(Namespace namespace) {
return generate(namespace.getAppId(), namespace.getClusterName(), namespace.getNamespaceName());
} | @Test
public void testGenerateReleaseKey() throws Exception {
String someAppId = "someAppId";
String someCluster = "someCluster";
String someNamespace = "someNamespace";
String anotherAppId = "anotherAppId";
Namespace namespace = MockBeanFactory.mockNamespace(someAppId, someCluster, someNamespace);
Namespace anotherNamespace = MockBeanFactory.mockNamespace(anotherAppId, someCluster, someNamespace);
int generateTimes = 50000;
Set<String> releaseKeys = Sets.newConcurrentHashSet();
ExecutorService executorService = Executors.newFixedThreadPool(2);
CountDownLatch latch = new CountDownLatch(1);
executorService.submit(generateReleaseKeysTask(namespace, releaseKeys, generateTimes, latch));
executorService.submit(generateReleaseKeysTask(anotherNamespace, releaseKeys, generateTimes, latch));
latch.countDown();
executorService.shutdown();
executorService.awaitTermination(10, TimeUnit.SECONDS);
//make sure keys are unique
assertEquals(generateTimes * 2, releaseKeys.size());
} |
void fetchPluginSettingsMetaData(GoPluginDescriptor pluginDescriptor) {
String pluginId = pluginDescriptor.id();
List<ExtensionSettingsInfo> allMetadata = findSettingsAndViewOfAllExtensionsIn(pluginId);
List<ExtensionSettingsInfo> validMetadata = allSettingsAndViewPairsWhichAreValid(allMetadata);
if (validMetadata.isEmpty()) {
LOGGER.warn("Failed to fetch plugin settings metadata for plugin {}. Maybe the plugin does not implement plugin settings and view?", pluginId);
LOGGER.warn("Plugin: {} - Metadata load info: {}", pluginId, allMetadata);
LOGGER.warn("Not all plugins are required to implement the request above. This error may be safe to ignore.");
return;
}
if (validMetadata.size() > 1) {
throw new RuntimeException(String.format("Plugin with ID: %s has more than one extension which supports plugin settings. " +
"Only one extension should support it and respond to %s and %s.", pluginId, REQUEST_PLUGIN_SETTINGS_CONFIGURATION, REQUEST_PLUGIN_SETTINGS_VIEW));
}
ExtensionSettingsInfo extensionSettingsInfo = validMetadata.get(0);
metadataStore.addMetadataFor(pluginId, extensionSettingsInfo.extensionName, extensionSettingsInfo.configuration, extensionSettingsInfo.viewTemplate);
} | @Test
public void shouldFailWhenAPluginWithMultipleExtensionsHasMoreThanOneExtensionRespondingWithSettings() {
PluginSettingsConfiguration configuration = new PluginSettingsConfiguration();
configuration.add(new PluginSettingsProperty("k1").with(Property.REQUIRED, true).with(Property.SECURE, false));
String pluginId = "plugin-id";
GoPluginDescriptor pluginDescriptor = GoPluginDescriptor.builder().id(pluginId).build();
setupSettingsResponses(notificationExtension, pluginId, configuration, "view");
setupSettingsResponses(packageRepositoryExtension, pluginId, configuration, "view");
assertThatThrownBy(() -> metadataLoader.fetchPluginSettingsMetaData(pluginDescriptor))
.isInstanceOf(RuntimeException.class)
.hasMessageContaining("Plugin with ID: plugin-id has more than one extension which supports plugin settings");
assertThat(PluginSettingsMetadataStore.getInstance().hasPlugin(pluginDescriptor.id())).isFalse();
} |
public static <T> String join(final String... elements) {
return join(elements, EMPTY_STRING);
} | @Test
public void testStringJoinWithJavaImpl() {
assertNull(StringUtils.join(",", null));
assertEquals("", String.join(",", Collections.singletonList("")));
assertEquals(",", String.join(",", Arrays.asList("", "")));
assertEquals("a,", String.join(",", Arrays.asList("a", "")));
} |
public static Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodWithEmptyCollectionArguments(
final MethodCallExpr methodExpression,
final MvelCompilerContext mvelCompilerContext,
final Optional<TypedExpression> scope,
List<TypedExpression> arguments,
List<Integer> emptyCollectionArgumentsIndexes) {
Objects.requireNonNull(methodExpression, "MethodExpression parameter cannot be null as the method searches methods based on this expression!");
Objects.requireNonNull(mvelCompilerContext, "MvelCompilerContext parameter cannot be null!");
Objects.requireNonNull(arguments, "Arguments parameter cannot be null! Use an empty list instance if needed instead.");
Objects.requireNonNull(emptyCollectionArgumentsIndexes, "EmptyListArgumentIndexes parameter cannot be null! Use an empty list instance if needed instead.");
if (emptyCollectionArgumentsIndexes.size() > arguments.size()) {
throw new IllegalArgumentException("There cannot be more empty collection arguments than all arguments! emptyCollectionArgumentsIndexes parameter has more items than arguments parameter. "
+ "(" + emptyCollectionArgumentsIndexes.size() + " > " + arguments.size() + ")");
} else {
final List<TypedExpression> coercedArgumentsTypesList = new ArrayList<>(arguments);
Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodResult =
MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
if (resolveMethodResult.a.isPresent()) {
return resolveMethodResult;
} else {
// Rather work only with the argumentsType and when a method is resolved, flip the arguments list based on it.
// This needs to go through all possible combinations.
final int indexesListSize = emptyCollectionArgumentsIndexes.size();
for (int numberOfProcessedIndexes = 0; numberOfProcessedIndexes < indexesListSize; numberOfProcessedIndexes++) {
for (int indexOfEmptyListIndex = numberOfProcessedIndexes; indexOfEmptyListIndex < indexesListSize; indexOfEmptyListIndex++) {
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
resolveMethodResult =
MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
if (resolveMethodResult.a.isPresent()) {
modifyArgumentsBasedOnCoercedCollectionArguments(arguments, coercedArgumentsTypesList);
return resolveMethodResult;
}
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
}
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(numberOfProcessedIndexes));
}
// No method found, return empty.
return new Pair<>(Optional.empty(), scope);
}
}
} | @Test
public void resolveMethodWithEmptyCollectionArgumentsEmptyCollectionIndexesAreNull() {
Assertions.assertThatThrownBy(
() -> MethodResolutionUtils.resolveMethodWithEmptyCollectionArguments(
new MethodCallExpr(),
new MvelCompilerContext(null),
Optional.empty(),
Collections.emptyList(),
null))
.isInstanceOf(NullPointerException.class);
} |
void registerSelfToCluster(String groupId, PeerId selfIp, Configuration conf) {
while (!isShutdown) {
try {
List<PeerId> peerIds = cliService.getPeers(groupId, conf);
if (peerIds.contains(selfIp)) {
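// already a member of the raft group, nothing to do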
return;
}
Status status = cliService.addPeer(groupId, conf, selfIp);
if (status.isOk()) {
return;
}
Loggers.RAFT.warn("Failed to join the cluster, retry...");
} catch (Exception e) {
Loggers.RAFT.error("Failed to join the cluster, retry...", e);
}
ThreadUtils.sleep(1_000L);
}
} | @Test
void testRegisterSelfToCluster() {
PeerId selfPeerId = new PeerId("4.4.4.4", 8080);
server.registerSelfToCluster(groupId, selfPeerId, conf);
verify(cliServiceMock).addPeer(groupId, conf, selfPeerId);
} |
public ConsumerFilterData get(final String topic, final String consumerGroup) {
if (!this.filterDataByTopic.containsKey(topic)) {
return null;
}
if (this.filterDataByTopic.get(topic).getGroupFilterData().isEmpty()) {
return null;
}
return this.filterDataByTopic.get(topic).getGroupFilterData().get(consumerGroup);
} | @Test
public void testPersist() {
ConsumerFilterManager filterManager = gen(10, 10);
try {
filterManager.persist();
ConsumerFilterData filterData = filterManager.get("topic9", "CID_9");
assertThat(filterData).isNotNull();
assertThat(filterData.isDead()).isFalse();
ConsumerFilterManager loadFilter = new ConsumerFilterManager();
assertThat(loadFilter.load()).isTrue();
filterData = loadFilter.get("topic9", "CID_9");
assertThat(filterData).isNotNull();
assertThat(filterData.isDead()).isTrue();
assertThat(filterData.getCompiledExpression()).isNotNull();
} finally {
UtilAll.deleteFile(new File("./unit_test"));
}
} |
public static LockTime unset() {
return LockTime.ofBlockHeight(0);
} | @Test
public void unset() {
LockTime unset = LockTime.unset();
assertTrue(unset instanceof HeightLock);
assertEquals(0, unset.rawValue());
} |
@Override
public String execute(CommandContext commandContext, String[] args) {
if (args.length > 0) {
return "Unsupported parameter " + Arrays.toString(args) + " for pwd.";
}
String service =
commandContext.getRemote().attr(ChangeTelnet.SERVICE_KEY).get();
StringBuilder buf = new StringBuilder();
if (StringUtils.isEmpty(service)) {
buf.append('/');
} else {
buf.append(service);
}
return buf.toString();
} | @Test
void testService() throws RemotingException {
defaultAttributeMap
.attr(ChangeTelnet.SERVICE_KEY)
.set("org.apache.dubbo.rpc.protocol.dubbo.support.DemoService");
String result = pwdTelnet.execute(mockCommandContext, new String[0]);
assertEquals("org.apache.dubbo.rpc.protocol.dubbo.support.DemoService", result);
} |
@Override
public boolean add(final Long value) {
return add(value.longValue());
} | @Test
public void testLongArrayConstructor() {
long[] items = {42L, 43L};
long missingValue = -1L;
final LongHashSet hashSet = new LongHashSet(items, missingValue);
set.add(42L);
set.add(43L);
assertEquals(set, hashSet);
} |
public OpenAPI read(Class<?> cls) {
return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
} | @Test
public void testSiblingsOnResourceRequestBody() {
Reader reader = new Reader(new SwaggerConfiguration().openAPI(new OpenAPI()).openAPI31(true));
OpenAPI openAPI = reader.read(SiblingsResourceRequestBody.class);
String yaml = "openapi: 3.1.0\n" +
"paths:\n" +
" /test/bodyimpl:\n" +
" get:\n" +
" operationId: getBodyImpl\n" +
" requestBody:\n" +
" description: aaa\n" +
" content:\n" +
" application/json:\n" +
" schema:\n" +
" $ref: '#/components/schemas/PetSimple'\n" +
" description: resource pet\n" +
" writeOnly: true\n" +
" responses:\n" +
" default:\n" +
" description: default response\n" +
" content:\n" +
" '*/*': {}\n" +
" /test/bodyimplparam:\n" +
" get:\n" +
" operationId: getBodyImplParam\n" +
" requestBody:\n" +
" content:\n" +
" '*/*':\n" +
" schema:\n" +
" $ref: '#/components/schemas/PetSimple'\n" +
" description: resource pet\n" +
" writeOnly: true\n" +
" responses:\n" +
" default:\n" +
" description: default response\n" +
" content:\n" +
" '*/*': {}\n" +
"components:\n" +
" schemas:\n" +
" PetSimple:\n" +
" description: Pet\n";
SerializationMatchers.assertEqualsToYaml31(openAPI, yaml);
} |
@Override
public void subscribe(final Collection<String> topics) {
acquireAndEnsureOpen();
try {
maybeThrowInvalidGroupIdException();
if (topics == null)
throw new IllegalArgumentException("Topic collection to subscribe to cannot be null");
if (topics.isEmpty()) {
// treat subscribing to empty topic list as the same as unsubscribing
unsubscribe();
} else {
for (String topic : topics) {
if (isBlank(topic))
throw new IllegalArgumentException("Topic collection to subscribe to cannot contain null or empty topic");
}
log.info("Subscribed to topic(s): {}", String.join(", ", topics));
if (subscriptions.subscribeToShareGroup(new HashSet<>(topics)))
metadata.requestUpdateForNewTopics();
// Trigger subscribe event to effectively join the group if not already part of it,
// or just send the new subscription to the broker.
applicationEventHandler.add(new ShareSubscriptionChangeEvent());
}
} finally {
release();
}
} | @Test
public void testSubscriptionOnEmptyTopic() {
consumer = newConsumer();
String emptyTopic = " ";
assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(singletonList(emptyTopic)));
} |
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
try {
final EueApiClient client = new EueApiClient(session);
// Move to trash first as precondition of delete
this.delete(super.trash(files, prompt, callback));
for(Path f : files.keySet()) {
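// invalidate cached resource ids for the deleted paths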
fileid.cache(f, null);
}
}
catch(ApiException e) {
for(Path f : files.keySet()) {
throw new EueExceptionMappingService().map("Cannot delete {0}", e, f);
}
}
} | @Test
public void testDeleteMultipleFiles() throws Exception {
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
final Path folder = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory));
final Path file1 = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final Path file2 = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new EueDirectoryFeature(session, fileid).mkdir(folder, new TransferStatus());
createFile(fileid, file1, RandomUtils.nextBytes(511));
createFile(fileid, file2, RandomUtils.nextBytes(214));
assertTrue(new EueFindFeature(session, fileid).find(file1));
assertTrue(new EueFindFeature(session, fileid).find(file2));
new EueDeleteFeature(session, fileid).delete(Arrays.asList(file1, file2), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse((new EueFindFeature(session, fileid).find(file1, new DisabledListProgressListener())));
assertFalse((new EueFindFeature(session, fileid).find(file2, new DisabledListProgressListener())));
assertTrue(new EueFindFeature(session, fileid).find(folder, new DisabledListProgressListener()));
new EueDeleteFeature(session, fileid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse((new EueFindFeature(session, fileid).find(folder, new DisabledListProgressListener())));
} |
@Override
public byte[] contents() {
return blob.getContent();
} | @Test
public void testContents() {
byte[] contents = new byte[] {0, 1, 2};
when(blob.getContent()).thenReturn(contents);
assertThat(artifact.contents()).isEqualTo(contents);
} |
public void flush() {
ByteBuffer bf = localBuffer.get();
bf.position(0);
bf.limit(wheelLength);
mappedByteBuffer.position(0);
mappedByteBuffer.limit(wheelLength);
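// write back only the bytes that differ before forcing the mapped buffer to disk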
for (int i = 0; i < wheelLength; i++) {
if (bf.get(i) != mappedByteBuffer.get(i)) {
mappedByteBuffer.put(i, bf.get(i));
}
}
this.mappedByteBuffer.force();
} | @Test(expected = RuntimeException.class)
public void testRecoveryFixedTTL() throws Exception {
timerWheel.flush();
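// re-opening the wheel file with a different slot count is expected to fail recovery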
TimerWheel tmpWheel = new TimerWheel(baseDir, slotsTotal + 1, precisionMs);
} |
public static GradientTreeBoost fit(Formula formula, DataFrame data) {
return fit(formula, data, new Properties());
} | @Test
public void testPenDigits() {
System.out.println("Pen Digits");
MathEx.setSeed(19650218); // to get repeatable results.
ClassificationValidations<GradientTreeBoost> result = CrossValidation.classification(10, PenDigits.formula, PenDigits.data,
(f, x) -> GradientTreeBoost.fit(f, x, 100, 20, 6, 5, 0.05, 0.7));
System.out.println(result);
assertEquals(0.9809, result.avg.accuracy, 1E-4);
} |
static AnnotatedClusterState generatedStateFrom(final Params params) {
final ContentCluster cluster = params.cluster;
final ClusterState workingState = ClusterState.emptyState();
final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>();
for (final NodeInfo nodeInfo : cluster.getNodeInfos()) {
final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params, nodeStateReasons);
workingState.setNodeState(nodeInfo.getNode(), nodeState);
}
takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, params);
final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params);
if (reasonToBeDown.isPresent()) {
workingState.setClusterState(State.DOWN);
}
workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params));
return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons);
} | @Test
void crash_count_exceeding_limit_marks_node_as_down() {
final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp();
final ClusterStateGenerator.Params params = fixture.generatorParams().maxPrematureCrashes(10);
final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 3));
nodeInfo.setPrematureCrashCount(11);
final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
assertThat(state.toString(), equalTo("distributor:5 storage:5 .3.s:d"));
} |
@VisibleForTesting
static boolean isUriValid(String uri) {
Matcher matcher = URI_PATTERN.matcher(uri);
return matcher.matches();
} | @Test
public void testValidUri() {
assertThat(
HttpCacheServerHandler.isUriValid(
"http://some-path.co.uk:8080/ac/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isTrue();
assertThat(
HttpCacheServerHandler.isUriValid(
"http://127.12.12.0:8080/ac/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isTrue();
assertThat(
HttpCacheServerHandler.isUriValid(
"http://localhost:8080/ac/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isTrue();
assertThat(
HttpCacheServerHandler.isUriValid(
"https://localhost:8080/ac/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isTrue();
assertThat(
HttpCacheServerHandler.isUriValid(
"localhost:8080/ac/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isTrue();
assertThat(
HttpCacheServerHandler.isUriValid(
"ac/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isTrue();
assertThat(
HttpCacheServerHandler.isUriValid(
"http://some-path.co.uk:8080/cas/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isTrue();
assertThat(
HttpCacheServerHandler.isUriValid(
"http://127.12.12.0:8080/cas/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isTrue();
assertThat(
HttpCacheServerHandler.isUriValid(
"http://localhost:8080/cas/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isTrue();
assertThat(
HttpCacheServerHandler.isUriValid(
"https://localhost:8080/cas/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isTrue();
assertThat(
HttpCacheServerHandler.isUriValid(
"localhost:8080/cas/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isTrue();
assertThat(
HttpCacheServerHandler.isUriValid(
"cas/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
.isTrue();
} |
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
if (schema == null && value == null) {
return null;
}
JsonNode jsonValue = config.schemasEnabled() ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value);
try {
return serializer.serialize(topic, jsonValue);
} catch (SerializationException e) {
throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
}
} | @Test
public void doubleToJson() {
JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.FLOAT64_SCHEMA, 12.34));
validateEnvelope(converted);
assertEquals(parse("{ \"type\": \"double\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
assertEquals(12.34, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).doubleValue(), 0.001);
} |
public static boolean isPrimitiveClassName(@Nullable String name) {
if (name == null)
return false;
for (Type prim : PRIMITIVES)
if (prim.getClassName().equals(name))
return true;
return false;
} | @Test
void testIsPrimitiveClassName() {
assertTrue(Types.isPrimitiveClassName("void"));
assertTrue(Types.isPrimitiveClassName("boolean"));
assertTrue(Types.isPrimitiveClassName("byte"));
assertTrue(Types.isPrimitiveClassName("char"));
assertTrue(Types.isPrimitiveClassName("short"));
assertTrue(Types.isPrimitiveClassName("int"));
assertTrue(Types.isPrimitiveClassName("float"));
assertTrue(Types.isPrimitiveClassName("double"));
assertTrue(Types.isPrimitiveClassName("long"));
//
assertFalse(Types.isPrimitiveClassName("Z"));
assertFalse(Types.isPrimitiveClassName("[I"));
assertFalse(Types.isPrimitiveClassName("VOID"));
assertFalse(Types.isPrimitiveClassName(""));
assertFalse(Types.isPrimitiveClassName(null));
} |
@Override
public boolean supportsSetMaxRows() {
return false;
} | @Test
public void testSupportsSetMaxRows() throws Exception {
assertFalse( dbMeta.supportsSetMaxRows() );
} |
public static boolean validateCSConfiguration(
final Configuration oldConfParam, final Configuration newConf,
final RMContext rmContext) throws IOException {
// ensure that the oldConf is deep copied
Configuration oldConf = new Configuration(oldConfParam);
QueueMetrics.setConfigurationValidation(oldConf, true);
QueueMetrics.setConfigurationValidation(newConf, true);
CapacityScheduler liveScheduler = (CapacityScheduler) rmContext.getScheduler();
CapacityScheduler newCs = new CapacityScheduler();
try {
//TODO: extract all the validation steps and replace reinitialize with
//the specific validation steps
newCs.setConf(oldConf);
newCs.setRMContext(rmContext);
newCs.init(oldConf);
newCs.addNodes(liveScheduler.getAllNodes());
newCs.reinitialize(newConf, rmContext, true);
return true;
} finally {
newCs.stop();
}
} | @Test
public void testValidateCSConfigStopANonLeafQueueInvalid() {
Configuration oldConfig = CapacitySchedulerConfigGeneratorForTest
.createBasicCSConfiguration();
Configuration newConfig = new Configuration(oldConfig);
newConfig
.set("yarn.scheduler.capacity.root.state", "STOPPED");
RMContext rmContext = prepareRMContext();
try {
CapacitySchedulerConfigValidator
.validateCSConfiguration(oldConfig, newConfig, rmContext);
fail("There are child queues in running state");
} catch (IOException e) {
Assert.assertTrue(e.getCause().getMessage()
.contains("The parent queue:root cannot be STOPPED"));
}
} |
@Override
public boolean edgeExists(String source, String target) {
checkId(source);
checkId(target);
NodeDraftImpl sourceNode = getNode(source);
NodeDraftImpl targetNode = getNode(target);
if (sourceNode != null && targetNode != null) {
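// treat the lookup as undirected when the default is undirected or only undirected edges exist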
boolean undirected = edgeDefault.equals(EdgeDirectionDefault.UNDIRECTED) ||
(undirectedEdgesCount > 0 && directedEdgesCount == 0);
long edgeId = getLongId(sourceNode, targetNode, !undirected);
for (Long2ObjectMap l : edgeTypeSets) {
if (l != null) {
if (l.containsKey(edgeId)) {
return true;
}
}
}
}
return false;
} | @Test
public void testEdgeExistsUndirected() {
ImportContainerImpl importContainer = new ImportContainerImpl();
generateTinyUndirectedGraph(importContainer);
Assert.assertTrue(importContainer.edgeExists("1"));
Assert.assertTrue(importContainer.edgeExists("1", "2"));
Assert.assertTrue(importContainer.edgeExists("2", "1"));
} |
void print(List<Line> lines, AnsiEscapes... border) {
int maxLength = lines.stream().map(Line::length).max(comparingInt(a -> a))
.orElseThrow(NoSuchElementException::new);
StringBuilder out = new StringBuilder();
Format borderFormat = monochrome ? monochrome() : color(border);
out.append(borderFormat.text("β" + times('β', maxLength + 2) + "β")).append("\n");
for (Line line : lines) {
int rightPad = maxLength - line.length();
out.append(borderFormat.text("β"))
.append(' ');
for (Span span : line.spans) {
Format format = monochrome ? monochrome() : color(span.escapes);
out.append(format.text(span.text));
}
out.append(times(' ', rightPad))
.append(' ')
.append(borderFormat.text("β"))
.append("\n");
}
out.append(borderFormat.text("β" + times('β', maxLength + 2) + "β")).append("\n");
this.out.print(out);
} | @Test
void printsAnsiBanner() throws UnsupportedEncodingException {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
Banner banner = new Banner(new PrintStream(bytes, false, StandardCharsets.UTF_8.name()), false);
banner.print(asList(
new Banner.Line("Bla"),
new Banner.Line(
new Banner.Span("Bla "),
new Banner.Span("Bla", AnsiEscapes.BLUE),
new Banner.Span(" "),
new Banner.Span("Bla", AnsiEscapes.RED)),
new Banner.Line("Bla Bla")), AnsiEscapes.CYAN);
assertThat(bytes, bytes(equalTo("" +
"\u001B[36mβββββββββββββββ\u001B[0m\n" +
"\u001B[36mβ\u001B[0m Bla \u001B[36mβ\u001B[0m\n" +
"\u001B[36mβ\u001B[0m Bla \u001B[34mBla\u001B[0m \u001B[31mBla\u001B[0m \u001B[36mβ\u001B[0m\n" +
"\u001B[36mβ\u001B[0m Bla Bla \u001B[36mβ\u001B[0m\n" +
"\u001B[36mβββββββββββββββ\u001B[0m\n")));
} |
@Override
public <I, K, V> Map<K, V> mapToPair(List<I> data, SerializablePairFunction<I, K, V> func, Integer parallelism) {
return data.stream().map(throwingMapToPairWrapper(func)).collect(
Collectors.toMap(Pair::getLeft, Pair::getRight, (oldVal, newVal) -> newVal)
);
} | @Test
public void testMapToPair() {
List<String> mapList = Arrays.asList("hudi_flink", "hudi_spark", "hudi_java");
Map<String, String> resultMap = context.mapToPair(mapList, x -> {
String[] splits = x.split("_");
return new ImmutablePair<>(splits[0], splits[1]);
}, 2);
Assertions.assertNotNull(resultMap.get("hudi"));
} |
public double getWidth() {
return this.right - this.left;
} | @Test
public void getWidthTest() {
Rectangle rectangle = create(1, 2, 3, 4);
Assert.assertEquals(2, rectangle.getWidth(), 0);
} |
public void startAsync() {
try {
udfLoader.load();
ProcessingLogServerUtils.maybeCreateProcessingLogTopic(
serviceContext.getTopicClient(),
processingLogConfig,
ksqlConfig);
if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) {
log.warn("processing log auto-create is enabled, but this is not supported "
+ "for headless mode.");
}
rocksDBConfigSetterHandler.accept(ksqlConfig);
processesQueryFile(readQueriesFile(queriesFile));
showWelcomeMessage();
final Properties properties = new Properties();
ksqlConfig.originals().forEach((key, value) -> {
if (nonNull(value)) {
properties.put(key, value.toString());
}
});
versionChecker.start(KsqlModuleType.SERVER, properties);
} catch (final Exception e) {
log.error("Failed to start KSQL Server with query file: " + queriesFile, e);
throw e;
}
} | @Test(expected = RuntimeException.class)
public void shouldThrowIfExecuteThrows() {
// Given:
when(ksqlEngine.execute(any(), any(ConfiguredStatement.class)))
.thenThrow(new RuntimeException("Boom!"));
// When:
standaloneExecutor.startAsync();
} |
public static java.util.regex.Pattern compilePattern(String expression) {
return compilePattern(expression, 0);
} | @Test
void testCompilePatternOK() {
Pattern pattern = JMeterUtils.compilePattern("some.*");
assertTrue(pattern.matcher("something").matches());
} |
@Override
public void createFunction(SqlInvokedFunction function, boolean replace)
{
checkCatalog(function);
checkFunctionLanguageSupported(function);
checkArgument(!function.hasVersion(), "function '%s' is already versioned", function);
QualifiedObjectName functionName = function.getFunctionId().getFunctionName();
checkFieldLength("Catalog name", functionName.getCatalogName(), MAX_CATALOG_NAME_LENGTH);
checkFieldLength("Schema name", functionName.getSchemaName(), MAX_SCHEMA_NAME_LENGTH);
if (!functionNamespaceDao.functionNamespaceExists(functionName.getCatalogName(), functionName.getSchemaName())) {
throw new PrestoException(NOT_FOUND, format("Function namespace not found: %s", functionName.getCatalogSchemaName()));
}
checkFieldLength("Function name", functionName.getObjectName(), MAX_FUNCTION_NAME_LENGTH);
if (function.getParameters().size() > MAX_PARAMETER_COUNT) {
throw new PrestoException(GENERIC_USER_ERROR, format("Function has more than %s parameters: %s", MAX_PARAMETER_COUNT, function.getParameters().size()));
}
for (Parameter parameter : function.getParameters()) {
checkFieldLength("Parameter name", parameter.getName(), MAX_PARAMETER_NAME_LENGTH);
}
checkFieldLength(
"Parameter type list",
function.getFunctionId().getArgumentTypes().stream()
.map(TypeSignature::toString)
.collect(joining(",")),
MAX_PARAMETER_TYPES_LENGTH);
checkFieldLength("Return type", function.getSignature().getReturnType().toString(), MAX_RETURN_TYPE_LENGTH);
jdbi.useTransaction(handle -> {
FunctionNamespaceDao transactionDao = handle.attach(functionNamespaceDaoClass);
Optional<SqlInvokedFunctionRecord> latestVersion = transactionDao.getLatestRecordForUpdate(hash(function.getFunctionId()), function.getFunctionId());
if (!replace && latestVersion.isPresent() && !latestVersion.get().isDeleted()) {
throw new PrestoException(ALREADY_EXISTS, "Function already exists: " + function.getFunctionId());
}
if (!latestVersion.isPresent() || !latestVersion.get().getFunction().hasSameDefinitionAs(function)) {
long newVersion = latestVersion.map(SqlInvokedFunctionRecord::getFunction).map(MySqlFunctionNamespaceManager::getLongVersion).orElse(0L) + 1;
insertSqlInvokedFunction(transactionDao, function, newVersion);
}
else if (latestVersion.get().isDeleted()) {
SqlInvokedFunction latest = latestVersion.get().getFunction();
checkState(latest.hasVersion(), "Function version missing: %s", latest.getFunctionId());
transactionDao.setDeletionStatus(hash(latest.getFunctionId()), latest.getFunctionId(), getLongVersion(latest), false);
}
});
refreshFunctionsCache(functionName);
} | @Test
public void testCreateFunctionFailedDuplicate()
{
createFunction(FUNCTION_POWER_TOWER_DOUBLE, true);
assertPrestoException(() -> createFunction(FUNCTION_POWER_TOWER_DOUBLE, false), ALREADY_EXISTS, ".*Function already exists: unittest\\.memory\\.power_tower\\(double\\)");
assertPrestoException(() -> createFunction(FUNCTION_POWER_TOWER_DOUBLE_UPDATED, false), ALREADY_EXISTS, ".*Function already exists: unittest\\.memory\\.power_tower\\(double\\)");
} |
public static Config getConfig(
Configuration configuration, @Nullable HostAndPort externalAddress) {
return getConfig(
configuration,
externalAddress,
null,
PekkoUtils.getForkJoinExecutorConfig(
ActorSystemBootstrapTools.getForkJoinExecutorConfiguration(configuration)));
} | @Test
void getConfigHandlesIPv6Address() {
final String ipv6AddressString = "2001:db8:10:11:12:ff00:42:8329";
final Config config =
PekkoUtils.getConfig(new Configuration(), new HostAndPort(ipv6AddressString, 1234));
assertThat(config.getString("pekko.remote.classic.netty.tcp.hostname"))
.isEqualTo(NetUtils.unresolvedHostToNormalizedString(ipv6AddressString));
} |
@Override
public long getEndToEndDuration() {
return Math.max(0, failureTimestamp - triggerTimestamp);
} | @Test
void testEndToEndDuration() {
long duration = 123912931293L;
long triggerTimestamp = 10123;
long failureTimestamp = triggerTimestamp + duration;
Map<JobVertexID, TaskStateStats> taskStats = new HashMap<>();
JobVertexID jobVertexId = new JobVertexID();
taskStats.put(jobVertexId, new TaskStateStats(jobVertexId, 1));
FailedCheckpointStats failed =
new FailedCheckpointStats(
0,
triggerTimestamp,
CheckpointProperties.forCheckpoint(
CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION),
1,
taskStats,
0,
0,
0,
0,
0,
false,
failureTimestamp,
null,
null);
assertThat(failed.getEndToEndDuration()).isEqualTo(duration);
} |
@Override
public MaskTableRuleConfiguration swapRuleItemConfiguration(final AlterRuleItemEvent event, final String yamlContent) {
return new YamlMaskTableRuleConfigurationSwapper().swapToObject(YamlEngine.unmarshal(yamlContent, YamlMaskTableRuleConfiguration.class));
} | @Test
void assertSwapRuleItemConfiguration() {
assertThat(new MaskTableChangedProcessor().swapRuleItemConfiguration(mock(AlterRuleItemEvent.class), "name: test_table").getName(), is("test_table"));
} |
@VisibleForTesting
void stripXFFHeaders(HttpRequest req) {
HttpHeaders headers = req.headers();
for (AsciiString headerName : HEADERS_TO_STRIP) {
headers.remove(headerName);
}
} | @Test
void strip_match() {
StripUntrustedProxyHeadersHandler stripHandler = getHandler(AllowWhen.MUTUAL_SSL_AUTH);
headers.add("x-forwarded-for", "abcd");
stripHandler.stripXFFHeaders(msg);
assertFalse(headers.contains("x-forwarded-for"));
} |
@Cacheable("digid-app")
public boolean digidAppSwitchEnabled(){
return isEnabled("Koppeling met DigiD app");
} | @Test
void checkSwitchDisabled() {
var switchObject = createSwitch(appSwitchName, "Description of switch A", SwitchStatus.INACTIVE, 1, ZonedDateTime.now());
when(switchRepository.findByName(appSwitchName)).thenReturn(Optional.of(switchObject));
assertFalse(service.digidAppSwitchEnabled());
} |
public Marshaller createMarshaller(Class<?> clazz) throws JAXBException {
Marshaller marshaller = getContext(clazz).createMarshaller();
setMarshallerProperties(marshaller);
if (marshallerEventHandler != null) {
marshaller.setEventHandler(marshallerEventHandler);
}
marshaller.setSchema(marshallerSchema);
return marshaller;
} | @Test
void buildsMarshallerWithFormattedOutputProperty() throws Exception {
JAXBContextFactory factory =
new JAXBContextFactory.Builder().withMarshallerFormattedOutput(true).build();
Marshaller marshaller = factory.createMarshaller(Object.class);
assertThat((Boolean) marshaller.getProperty(Marshaller.JAXB_FORMATTED_OUTPUT)).isTrue();
} |
@Override
public CompletableFuture<Void> prepareSnapshot(
ChannelStateWriter channelStateWriter, long checkpointId) throws CheckpointException {
for (Map.Entry<
InputChannelInfo,
SpillingAdaptiveSpanningRecordDeserializer<
DeserializationDelegate<StreamElement>>>
e : recordDeserializers.entrySet()) {
try {
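// persist any partially deserialized record bytes so they can be replayed on restore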
channelStateWriter.addInputData(
checkpointId,
e.getKey(),
ChannelStateWriter.SEQUENCE_NUMBER_UNKNOWN,
e.getValue().getUnconsumedBuffer());
} catch (IOException ioException) {
throw new CheckpointException(CheckpointFailureReason.IO_EXCEPTION, ioException);
}
}
return checkpointedInputGate.getAllBarriersReceivedFuture(checkpointId);
} | @Test
void testSnapshotAfterEndOfPartition() throws Exception {
int numInputChannels = 1;
int channelId = 0;
int checkpointId = 0;
VerifyRecordsDataOutput<Long> output = new VerifyRecordsDataOutput<>();
LongSerializer inSerializer = LongSerializer.INSTANCE;
StreamTestSingleInputGate<Long> inputGate =
new StreamTestSingleInputGate<>(numInputChannels, 0, inSerializer, 1024);
StreamTaskInput<Long> input =
new StreamTaskNetworkInput<>(
new CheckpointedInputGate(
inputGate.getInputGate(),
SingleCheckpointBarrierHandler
.createUnalignedCheckpointBarrierHandler(
TestSubtaskCheckpointCoordinator.INSTANCE,
"test",
new DummyCheckpointInvokable(),
SystemClock.getInstance(),
false,
inputGate.getInputGate()),
new SyncMailboxExecutor()),
inSerializer,
ioManager,
new StatusWatermarkValve(numInputChannels),
0,
() -> false);
inputGate.sendEvent(
new CheckpointBarrier(
checkpointId,
0L,
CheckpointOptions.forCheckpointWithDefaultLocation().toUnaligned()),
channelId);
inputGate.sendElement(new StreamRecord<>(42L), channelId);
assertHasNextElement(input, output);
assertHasNextElement(input, output);
assertThat(output.getNumberOfEmittedRecords()).isOne();
// send EndOfPartitionEvent and ensure that deserializer has been released
inputGate.sendEvent(EndOfPartitionEvent.INSTANCE, channelId);
input.emitNext(output);
// now snapshot all inflight buffers
CompletableFuture<Void> completableFuture =
input.prepareSnapshot(ChannelStateWriter.NO_OP, checkpointId);
completableFuture.join();
} |
@Override
public boolean cleanUnusedTopic(String cluster) throws RemotingConnectException, RemotingSendRequestException,
RemotingTimeoutException, MQClientException, InterruptedException {
return defaultMQAdminExtImpl.cleanUnusedTopic(cluster);
} | @Test
public void testCleanUnusedTopic() throws InterruptedException, RemotingTimeoutException, MQClientException, RemotingSendRequestException, RemotingConnectException {
boolean result = defaultMQAdminExt.cleanUnusedTopic("default-cluster");
assertThat(result).isFalse();
} |
public static String[] splitString( String string, String separator ) {
/*
* Example: "a;b;c;d" --> new String[] { a, b, c, d }
*/
// System.out.println("splitString ["+path+"] using ["+separator+"]");
List<String> list = new ArrayList<>();
if ( string == null || string.length() == 0 ) {
return new String[] {};
}
int sepLen = separator.length();
int from = 0;
int end = string.length() - sepLen + 1;
for ( int i = from; i < end; i += sepLen ) {
if ( string.substring( i, i + sepLen ).equalsIgnoreCase( separator ) ) {
// OK, we found a separator, the string to add to the list
// is [from, i[
list.add( nullToEmpty( string.substring( from, i ) ) );
from = i + sepLen;
}
}
// Wait, if the string didn't end with a separator, we still have information at the end of the string...
// In our example that would be "d"...
if ( from + sepLen <= string.length() ) {
list.add( nullToEmpty( string.substring( from, string.length() ) ) );
}
return list.toArray( new String[list.size()] );
} | @Test
public void testSplitStringWithDelimiterAndEmptyEnclosureMultiCharRemoveEnclosure() {
String mask = "Hello%s world";
String[] chunks = {"Hello", " world"};
String stringToSplit = String.format( mask, DELIMITER2 );
String [] result = Const.splitString( stringToSplit, DELIMITER2, "", true );
assertSplit( result, chunks );
} |
@Override
public HandlerStatus onRead() throws Exception {
src.flip();
try {
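// drain every complete packet currently available in the buffer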
while (src.hasRemaining()) {
Packet packet = packetReader.readFrom(src);
if (packet == null) {
break;
}
onPacketComplete(packet);
}
return CLEAN;
} finally {
compactOrClear(src);
}
} | @Test
public void whenNormalPacket() throws Exception {
ByteBuffer src = ByteBuffer.allocate(1000);
Packet packet = new Packet(serializationService.toBytes("foobar"));
new PacketIOHelper().writeTo(packet, src);
decoder.src(src);
decoder.onRead();
assertEquals(1, dispatcher.packets.size());
Packet found = dispatcher.packets.get(0);
assertEquals(packet, found);
assertEquals(1, normalPacketCounter.get());
assertEquals(0, priorityPacketCounter.get());
} |
public int getEffectiveLayoutVersion() {
return getEffectiveLayoutVersion(isRollingUpgrade(),
fsImage.getStorage().getLayoutVersion(),
NameNodeLayoutVersion.MINIMUM_COMPATIBLE_LAYOUT_VERSION,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
} | @Test
public void testGetEffectiveLayoutVersion() {
assertEquals(-63,
FSNamesystem.getEffectiveLayoutVersion(true, -60, -61, -63));
assertEquals(-61,
FSNamesystem.getEffectiveLayoutVersion(true, -61, -61, -63));
assertEquals(-62,
FSNamesystem.getEffectiveLayoutVersion(true, -62, -61, -63));
assertEquals(-63,
FSNamesystem.getEffectiveLayoutVersion(true, -63, -61, -63));
assertEquals(-63,
FSNamesystem.getEffectiveLayoutVersion(false, -60, -61, -63));
assertEquals(-63,
FSNamesystem.getEffectiveLayoutVersion(false, -61, -61, -63));
assertEquals(-63,
FSNamesystem.getEffectiveLayoutVersion(false, -62, -61, -63));
assertEquals(-63,
FSNamesystem.getEffectiveLayoutVersion(false, -63, -61, -63));
} |
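Reading the expectations: HDFS layout versions are negative and decrease as the format evolves, so -63 is newer than -60. A sketch of the selection rule reconstructed from the test values above (an inference that reproduces all eight assertions, not the verbatim FSNamesystem source):

    static int getEffectiveLayoutVersion(boolean isRollingUpgrade, int storageLV,
        int minCompatLV, int currentLV) {
      if (isRollingUpgrade && storageLV <= minCompatLV) {
        // The on-disk layout (-61..-63 here) is still within the compatible range,
        // so keep writing it until the rolling upgrade is finalized.
        return storageLV;
      }
      // Not upgrading, or the stored layout (-60) predates the compatible range:
      // write the current layout version.
      return currentLV;
    }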
public static DwrfTableEncryptionProperties forTable(String encryptTable, String encryptionAlgorithm, String encryptionProvider)
{
return new DwrfTableEncryptionProperties(Optional.of(encryptTable), Optional.empty(), encryptionAlgorithm, encryptionProvider);
} | @Test
public void testEncryptTable()
{
DwrfTableEncryptionProperties properties = forTable("abcd", "test_algo", "test_prov");
assertEquals(properties.toHiveProperties(), ImmutableMap.of(
ENCRYPT_TABLE_KEY, "abcd",
DWRF_ENCRYPTION_PROVIDER_KEY, "test_prov",
DWRF_ENCRYPTION_ALGORITHM_KEY, "test_algo"));
assertEquals(properties.toTableProperties(), ImmutableMap.of(
ENCRYPT_TABLE, "abcd",
DWRF_ENCRYPTION_PROVIDER, "test_prov",
DWRF_ENCRYPTION_ALGORITHM, "test_algo"));
} |
public TreeCache start() throws Exception {
Preconditions.checkState(treeState.compareAndSet(TreeState.LATENT, TreeState.STARTED), "already started");
if (createParentNodes) {
client.createContainers(root.path);
}
client.getConnectionStateListenable().addListener(connectionStateListener);
if (client.getZookeeperClient().isConnected()) {
root.wasCreated();
}
return this;
} | @Test
public void testKilledSession() throws Exception {
client.create().forPath("/test");
cache = newTreeCacheWithListeners(client, "/test");
cache.start();
assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test");
assertEvent(TreeCacheEvent.Type.INITIALIZED);
client.create().forPath("/test/foo", "foo".getBytes());
assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test/foo");
client.create().withMode(CreateMode.EPHEMERAL).forPath("/test/me", "data".getBytes());
assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test/me");
client.getZookeeperClient().getZooKeeper().getTestable().injectSessionExpiration();
assertEvent(TreeCacheEvent.Type.INITIALIZED, null, null, true);
assertEvent(TreeCacheEvent.Type.NODE_REMOVED, "/test/me", "data".getBytes(), true);
assertNoMoreEvents();
} |
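A minimal standalone TreeCache setup for context (the connection string and retry policy are illustrative):

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.CuratorFrameworkFactory;
    import org.apache.curator.framework.recipes.cache.TreeCache;
    import org.apache.curator.retry.RetryOneTime;

    CuratorFramework client =
        CuratorFrameworkFactory.newClient("localhost:2181", new RetryOneTime(1));
    client.start();
    TreeCache cache = TreeCache.newBuilder(client, "/test").build();
    // Listener receives NODE_ADDED / NODE_UPDATED / NODE_REMOVED / INITIALIZED events.
    cache.getListenable().addListener(
        (c, event) -> System.out.println(event.getType() + " " + event.getData()));
    cache.start(); // replays the existing tree as NODE_ADDED events, then INITIALIZED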
@Udf(description = "Converts an INT value in degrees to a value in radians")
public Double radians(
@UdfParameter(
value = "value",
description = "The value in degrees to convert to radians."
) final Integer value
) {
return radians(value == null ? null : value.doubleValue());
} | @Test
public void shouldHandlePositive() {
assertThat(udf.radians(180.0), closeTo(Math.PI, 0.000000000000001));
assertThat(udf.radians(360.0), closeTo(2 * Math.PI, 0.000000000000001));
assertThat(udf.radians(70.73163980890013), closeTo(1.2345, 0.000000000000001));
assertThat(udf.radians(114), closeTo(1.9896753472735358, 0.000000000000001));
assertThat(udf.radians(114L), closeTo(1.9896753472735358, 0.000000000000001));
} |
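The UDF implements the plain degrees-to-radians conversion, radians = degrees * PI / 180; the asserted values can be reproduced directly:

    double a = Math.toRadians(180.0);                     // 3.141592653589793 (pi)
    double b = 114 * Math.PI / 180.0;                     // 1.9896753472735358, as asserted above
    double c = 70.73163980890013 * Math.PI / 180.0;       // ~1.2345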
public static TypeParameterMatcher find(
final Object object, final Class<?> parametrizedSuperclass, final String typeParamName) {
final Map<Class<?>, Map<String, TypeParameterMatcher>> findCache =
InternalThreadLocalMap.get().typeParameterMatcherFindCache();
final Class<?> thisClass = object.getClass();
Map<String, TypeParameterMatcher> map = findCache.get(thisClass);
if (map == null) {
map = new HashMap<String, TypeParameterMatcher>();
findCache.put(thisClass, map);
}
TypeParameterMatcher matcher = map.get(typeParamName);
if (matcher == null) {
matcher = get(find0(object, parametrizedSuperclass, typeParamName));
map.put(typeParamName, matcher);
}
return matcher;
} | @Test
public void testUnsolvedParameter() throws Exception {
assertThrows(IllegalStateException.class, new Executable() {
@Override
public void execute() {
TypeParameterMatcher.find(new TypeQ(), TypeX.class, "B");
}
});
} |
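The fixtures TypeX and TypeQ are not shown; a hypothetical pair with the same shape makes the resolved/unresolved distinction concrete:

    abstract class TypeX<A, B> { }                  // two type parameters
    class TypeQ<B> extends TypeX<String, B> { }     // binds A, leaves B open

    // "A" resolves to String along the hierarchy, so a matcher can be built:
    TypeParameterMatcher m = TypeParameterMatcher.find(new TypeQ<Integer>(), TypeX.class, "A");
    m.match("hello");   // true
    m.match(42);        // false
    // "B" is still a free variable at the instance's class (erased at runtime),
    // hence the IllegalStateException asserted in the test above.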
public void load() {
Set<CoreExtension> coreExtensions = serviceLoaderWrapper.load(getClass().getClassLoader());
ensureNoDuplicateName(coreExtensions);
coreExtensionRepository.setLoadedCoreExtensions(coreExtensions);
if (!coreExtensions.isEmpty()) {
LOG.info("Loaded core extensions: {}", coreExtensions.stream().map(CoreExtension::getName).collect(Collectors.joining(", ")));
}
} | @Test
public void load_has_no_effect_if_there_is_no_ServiceLoader_for_CoreExtension_class() {
when(serviceLoaderWrapper.load(any())).thenReturn(Collections.emptySet());
underTest.load();
verify(serviceLoaderWrapper).load(CoreExtensionsLoader.class.getClassLoader());
verify(coreExtensionRepository).setLoadedCoreExtensions(Collections.emptySet());
verifyNoMoreInteractions(serviceLoaderWrapper, coreExtensionRepository);
} |
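ServiceLoaderWrapper is presumably a thin, mockable seam over java.util.ServiceLoader; a plausible sketch (an assumption, not the actual SonarQube source):

    import java.util.HashSet;
    import java.util.ServiceLoader;
    import java.util.Set;

    class ServiceLoaderWrapper {
      Set<CoreExtension> load(ClassLoader classLoader) {
        Set<CoreExtension> extensions = new HashSet<>();
        // ServiceLoader discovers implementations listed under
        // META-INF/services/ (e.g. .../org.sonar.core.extension.CoreExtension)
        ServiceLoader.load(CoreExtension.class, classLoader)
            .forEach(extensions::add);
        return extensions;
      }
    }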
public void openFolder( boolean write ) throws KettleException {
openFolder( null, true, write );
} | @Test
public void openFolderTest() throws KettleException, MessagingException {
conn.openFolder( "a/b", false, false );
Folder folder = conn.getFolder();
Assert.assertEquals( "Folder B is opened", "B", folder.getFullName() );
} |