focal_method | test_case |
---|---|
@Override
public String toString() {
return "ResourceConfig{" +
"url=" + url +
", id='" + id + '\'' +
", resourceType=" + resourceType +
'}';
} | @Test
public void when_attachDuplicateFileWithPath_then_throwsException() throws Exception {
// Given
String resourceId = "resource";
String path1 = createFile("path/to/" + resourceId).toString();
String path2 = createFile("path/to/another/" + resourceId).toString();
config.attachFile(path1);
// When
assertThrows(IllegalArgumentException.class, () -> config.attachFile(path2));
} |
@Override
public int serializePartitionKV(DataOutputStream out, int partitionId,
SizedWritable<?> key, SizedWritable<?> value)
throws IOException {
if (key.length == SizedWritable.INVALID_LENGTH ||
value.length == SizedWritable.INVALID_LENGTH) {
updateLength(key, value);
}
final int keyLength = key.length;
final int valueLength = value.length;
int bytesWritten = KV_HEAD_LENGTH + keyLength + valueLength;
if (partitionId != -1) {
bytesWritten += Constants.SIZEOF_PARTITION_LENGTH;
}
if (out.hasUnFlushedData() && out.shortOfSpace(bytesWritten)) {
out.flush();
}
if (partitionId != -1) {
out.writeInt(partitionId);
}
out.writeInt(keyLength);
out.writeInt(valueLength);
keySerializer.serialize(key.v, out);
valueSerializer.serialize(value.v, out);
return bytesWritten;
} | @Test
public void testSerializePartitionKV() throws IOException {
final DataOutputStream dataOut = Mockito.mock(DataOutputStream.class);
Mockito.when(dataOut.hasUnFlushedData()).thenReturn(true);
Mockito.when(
dataOut
.shortOfSpace(key.length + value.length +
Constants.SIZEOF_KV_LENGTH + Constants.SIZEOF_PARTITION_LENGTH))
.thenReturn(true);
final int written = serializer.serializePartitionKV(dataOut, 100, key, value);
// flush once, write 4 int, and 2 byte array
Mockito.verify(dataOut, Mockito.times(1)).flush();
Mockito.verify(dataOut, Mockito.times(5)).writeInt(anyInt());
Mockito.verify(dataOut, Mockito.times(2)).write(any(byte[].class),
anyInt(), anyInt());
Assert.assertEquals(written, key.length + value.length + Constants.SIZEOF_KV_LENGTH
+ Constants.SIZEOF_PARTITION_LENGTH);
} |
public <T> T parse(String input, Class<T> cls) {
return readFlow(input, cls, type(cls));
} | @Test
void duplicateKey() {
ConstraintViolationException exception = assertThrows(
ConstraintViolationException.class,
() -> this.parse("flows/invalids/duplicate-key.yaml")
);
assertThat(exception.getConstraintViolations().size(), is(1));
assertThat(new ArrayList<>(exception.getConstraintViolations()).getFirst().getMessage(), containsString("Duplicate field 'variables.tf'"));
} |
public Map<TaskId, Long> getTaskOffsetSums() {
final Map<TaskId, Long> taskOffsetSums = new HashMap<>();
// Not all tasks will create directories, and there may be directories for tasks we don't currently own,
// so we consider all tasks that are either owned or on disk. This includes stateless tasks, which should
// just have an empty changelogOffsets map.
final Map<TaskId, Task> tasks = allTasks();
final Set<TaskId> lockedTaskDirectoriesOfNonOwnedTasksAndClosedAndCreatedTasks =
union(HashSet::new, lockedTaskDirectories, tasks.keySet());
for (final Task task : tasks.values()) {
if (task.state() != State.CREATED && task.state() != State.CLOSED) {
final Map<TopicPartition, Long> changelogOffsets = task.changelogOffsets();
if (changelogOffsets.isEmpty()) {
log.debug("Skipping to encode apparently stateless (or non-logged) offset sum for task {}",
task.id());
} else {
taskOffsetSums.put(task.id(), sumOfChangelogOffsets(task.id(), changelogOffsets));
}
lockedTaskDirectoriesOfNonOwnedTasksAndClosedAndCreatedTasks.remove(task.id());
}
}
for (final TaskId id : lockedTaskDirectoriesOfNonOwnedTasksAndClosedAndCreatedTasks) {
final File checkpointFile = stateDirectory.checkpointFileFor(id);
try {
if (checkpointFile.exists()) {
taskOffsetSums.put(id, sumOfChangelogOffsets(id, new OffsetCheckpoint(checkpointFile).read()));
}
} catch (final IOException e) {
log.warn(String.format("Exception caught while trying to read checkpoint for task %s:", id), e);
}
}
return taskOffsetSums;
} | @Test
public void shouldComputeOffsetSumForRunningStatefulTaskAndRestoringTaskWithStateUpdater() {
final StreamTask runningStatefulTask = statefulTask(taskId00, taskId00ChangelogPartitions)
.inState(State.RUNNING).build();
final StreamTask restoringStatefulTask = statefulTask(taskId01, taskId01ChangelogPartitions)
.inState(State.RESTORING).build();
final StandbyTask restoringStandbyTask = standbyTask(taskId02, taskId02ChangelogPartitions)
.inState(State.RUNNING).build();
final long changelogOffsetOfRunningTask = 42L;
final long changelogOffsetOfRestoringStatefulTask = 24L;
final long changelogOffsetOfRestoringStandbyTask = 84L;
when(runningStatefulTask.changelogOffsets())
.thenReturn(mkMap(mkEntry(t1p0changelog, changelogOffsetOfRunningTask)));
when(restoringStatefulTask.changelogOffsets())
.thenReturn(mkMap(mkEntry(t1p1changelog, changelogOffsetOfRestoringStatefulTask)));
when(restoringStandbyTask.changelogOffsets())
.thenReturn(mkMap(mkEntry(t1p2changelog, changelogOffsetOfRestoringStandbyTask)));
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true);
when(tasks.allTasksPerId()).thenReturn(mkMap(mkEntry(taskId00, runningStatefulTask)));
when(stateUpdater.getTasks()).thenReturn(mkSet(restoringStandbyTask, restoringStatefulTask));
assertThat(
taskManager.getTaskOffsetSums(),
is(mkMap(
mkEntry(taskId00, changelogOffsetOfRunningTask),
mkEntry(taskId01, changelogOffsetOfRestoringStatefulTask),
mkEntry(taskId02, changelogOffsetOfRestoringStandbyTask)
))
);
} |
List<AlternativeInfo> calcAlternatives(final int s, final int t) {
// First, do a regular bidirectional route search
checkAlreadyRun();
init(s, 0, t, 0);
runAlgo();
final Path bestPath = extractPath();
if (!bestPath.isFound()) {
return Collections.emptyList();
}
alternatives.add(new AlternativeInfo(bestPath, 0));
final ArrayList<PotentialAlternativeInfo> potentialAlternativeInfos = new ArrayList<>();
bestWeightMapFrom.forEach((IntObjectPredicate<SPTEntry>) (v, fromSPTEntry) -> {
SPTEntry toSPTEntry = bestWeightMapTo.get(v);
if (toSPTEntry == null)
return true;
if (fromSPTEntry.getWeightOfVisitedPath() + toSPTEntry.getWeightOfVisitedPath() > bestPath.getWeight() * maxWeightFactor)
return true;
// This gives us a path s -> v -> t, but since we are using contraction hierarchies,
// s -> v and v -> t need not be shortest paths. In fact, they can sometimes be pretty strange.
// We still use this preliminary path to filter for shared path length with other alternatives,
// so we don't have to work so much.
Path preliminaryRoute = createPathExtractor().extract(fromSPTEntry, toSPTEntry, fromSPTEntry.getWeightOfVisitedPath() + toSPTEntry.getWeightOfVisitedPath());
double preliminaryShare = calculateShare(preliminaryRoute);
if (preliminaryShare > maxShareFactor) {
return true;
}
PotentialAlternativeInfo potentialAlternativeInfo = new PotentialAlternativeInfo();
potentialAlternativeInfo.v = v;
potentialAlternativeInfo.weight = 2 * (fromSPTEntry.getWeightOfVisitedPath() + toSPTEntry.getWeightOfVisitedPath()) + preliminaryShare;
potentialAlternativeInfos.add(potentialAlternativeInfo);
return true;
});
potentialAlternativeInfos.sort(Comparator.comparingDouble(o -> o.weight));
for (PotentialAlternativeInfo potentialAlternativeInfo : potentialAlternativeInfos) {
int v = potentialAlternativeInfo.v;
// Okay, now we want the s -> v -> t shortest via-path, so we route s -> v and v -> t
// and glue them together.
DijkstraBidirectionCH svRouter = new DijkstraBidirectionCH(graph);
svRouter.setPathExtractorSupplier(this::createPathExtractor);
final Path svPath = svRouter.calcPath(s, v);
extraVisitedNodes += svRouter.getVisitedNodes();
DijkstraBidirectionCH vtRouter = new DijkstraBidirectionCH(graph);
vtRouter.setPathExtractorSupplier(this::createPathExtractor);
final Path vtPath = vtRouter.calcPath(v, t);
Path path = concat(graph.getBaseGraph(), svPath, vtPath);
extraVisitedNodes += vtRouter.getVisitedNodes();
double sharedDistanceWithShortest = sharedDistanceWithShortest(path);
double detourLength = path.getDistance() - sharedDistanceWithShortest;
double directLength = bestPath.getDistance() - sharedDistanceWithShortest;
if (detourLength > directLength * maxWeightFactor) {
continue;
}
double share = calculateShare(path);
if (share > maxShareFactor) {
continue;
}
// This is the final test we need: Discard paths that are not "locally shortest" around v.
// So move a couple of nodes to the left and right from v on our path,
// route, and check if v is on the shortest path.
final IntIndexedContainer svNodes = svPath.calcNodes();
int vIndex = svNodes.size() - 1;
if (!tTest(path, vIndex))
continue;
alternatives.add(new AlternativeInfo(path, share));
if (alternatives.size() >= maxPaths)
break;
}
return alternatives;
} | @Test
public void testCalcAlternatives() {
BaseGraph g = createTestGraph(em);
PMap hints = new PMap();
hints.putObject("alternative_route.max_weight_factor", 2.3);
hints.putObject("alternative_route.local_optimality_factor", 0.5);
hints.putObject("alternative_route.max_paths", 4);
RoutingCHGraph routingCHGraph = prepareCH(g);
AlternativeRouteCH altDijkstra = new AlternativeRouteCH(routingCHGraph, hints);
List<AlternativeRouteCH.AlternativeInfo> pathInfos = altDijkstra.calcAlternatives(5, 10);
assertEquals(3, pathInfos.size());
// 4 -> 11 -> 12 is shorter than 4 -> 10 -> 12 (11 is an admissible via node), BUT
// 4 -> 11 -> 12 -> 10 is too long compared to 4 -> 10
} |
public CreateTableCommand createTableCommand(
final KsqlStructuredDataOutputNode outputNode,
final Optional<RefinementInfo> emitStrategy
) {
Optional<WindowInfo> windowInfo =
outputNode.getKsqlTopic().getKeyFormat().getWindowInfo();
if (windowInfo.isPresent() && emitStrategy.isPresent()) {
final WindowInfo info = windowInfo.get();
windowInfo = Optional.of(WindowInfo.of(
info.getType(),
info.getSize(),
Optional.of(emitStrategy.get().getOutputRefinement())
));
}
return new CreateTableCommand(
outputNode.getSinkName().get(),
outputNode.getSchema(),
outputNode.getTimestampColumn(),
outputNode.getKsqlTopic().getKafkaTopicName(),
Formats.from(outputNode.getKsqlTopic()),
windowInfo,
Optional.of(outputNode.getOrReplace()),
Optional.of(false)
);
} | @Test
public void shouldNotThrowWhenThereAreElementsInCreateTable() {
// Given:
final CreateTable statement =
new CreateTable(SOME_NAME, TABLE_ELEMENTS_1_VALUE,
false, true, withProperties, false);
// When:
createSourceFactory.createTableCommand(statement, ksqlConfig);
// Then: no exception thrown
} |
@Override
public void startScheduling() {
final Set<SchedulingPipelinedRegion> sourceRegions =
IterableUtils.toStream(schedulingTopology.getAllPipelinedRegions())
.filter(this::isSourceRegion)
.collect(Collectors.toSet());
maybeScheduleRegions(sourceRegions);
} | @Test
void testSchedulingRegionWithInnerNonPipelinedEdge() throws Exception {
final JobVertex v1 = createJobVertex("v1", 1);
final JobVertex v2 = createJobVertex("v2", 1);
final JobVertex v3 = createJobVertex("v3", 1);
final JobVertex v4 = createJobVertex("v4", 1);
final JobVertex v5 = createJobVertex("v5", 1);
v2.connectNewDataSetAsInput(
v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
v3.connectNewDataSetAsInput(
v2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
v4.connectNewDataSetAsInput(
v2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
v5.connectNewDataSetAsInput(
v2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
v3.connectNewDataSetAsInput(
v1, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_FULL);
v4.connectNewDataSetAsInput(
v1, DistributionPattern.POINTWISE, ResultPartitionType.HYBRID_SELECTIVE);
v4.connectNewDataSetAsInput(
v1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3, v4, v5));
final JobGraph jobGraph =
JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
final ExecutionGraph executionGraph =
TestingDefaultExecutionGraphBuilder.newBuilder()
.setJobGraph(jobGraph)
.build(EXECUTOR_RESOURCE.getExecutor());
final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();
startScheduling(schedulingTopology);
assertThat(testingSchedulerOperation.getScheduledVertices()).hasSize(1);
List<ExecutionVertexID> executionVertexIds =
testingSchedulerOperation.getScheduledVertices().get(0);
assertThat(executionVertexIds).hasSize(5);
} |
public TopicList getHasUnitSubUnUnitTopicList(final boolean containRetry, final long timeoutMillis)
throws RemotingException, MQClientException, InterruptedException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_HAS_UNIT_SUB_UNUNIT_TOPIC_LIST, null);
RemotingCommand response = this.remotingClient.invokeSync(null, request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
byte[] body = response.getBody();
if (body != null) {
TopicList topicList = TopicList.decode(response.getBody(), TopicList.class);
if (!containRetry) {
Iterator<String> it = topicList.getTopicList().iterator();
while (it.hasNext()) {
String topic = it.next();
if (topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
it.remove();
}
}
}
return topicList;
}
}
default:
break;
}
throw new MQClientException(response.getCode(), response.getRemark());
} | @Test
public void assertGetHasUnitSubUnUnitTopicList() throws RemotingException, InterruptedException, MQClientException {
mockInvokeSync();
TopicList responseBody = new TopicList();
responseBody.getTopicList().add(defaultTopic);
setResponseBody(responseBody);
TopicList actual = mqClientAPI.getHasUnitSubUnUnitTopicList(false, defaultTimeout);
assertNotNull(actual);
assertEquals(1, actual.getTopicList().size());
} |
public void heartbeat(InstanceHeartbeatRequest heartbeatRequest) {
heartbeatExecutor.scheduleWithFixedDelay(() -> {
try {
// If the health check passes, the heartbeat will be reported.
// If it does not pass, the heartbeat will not be reported.
Map<String, String> headers = new HashMap<>(1);
headers.put(HttpHeaders.USER_AGENT, "polaris");
if (!OkHttpUtil.checkUrl(heartbeatRequest.getHost(), heartbeatRequest.getPort(),
polarisDiscoveryProperties.getHealthCheckUrl(), headers)) {
LOGGER.error("backend service health check failed. health check endpoint = {}",
polarisDiscoveryProperties.getHealthCheckUrl());
return;
}
polarisSDKContextManager.getProviderAPI().heartbeat(heartbeatRequest);
LOGGER.trace("Polaris heartbeat is sent");
}
catch (PolarisException e) {
LOGGER.error("polaris heartbeat error with code [{}]", e.getCode(), e);
}
catch (Exception e) {
LOGGER.error("polaris heartbeat runtime error", e);
}
}, polarisDiscoveryProperties.getHeartbeatInterval(), polarisDiscoveryProperties.getHeartbeatInterval(), SECONDS);
} | @Test
void testHeartbeat() {
this.contextRunner2.run(context -> {
PolarisServiceRegistry registry = context.getBean(PolarisServiceRegistry.class);
PolarisRegistration registration = Mockito.mock(PolarisRegistration.class);
when(registration.getHost()).thenReturn("127.0.0.1");
when(registration.getPort()).thenReturn(8080);
when(registration.getServiceId()).thenReturn(SERVICE_PROVIDER);
assertThatCode(() -> registry.register(registration)).doesNotThrowAnyException();
Thread.sleep(6000);
assertThatCode(() -> registry.deregister(registration)).doesNotThrowAnyException();
});
} |
public static void hookPendingIntentGetForegroundService(PendingIntent pendingIntent, Context context, int requestCode, Intent intent, int flags) {
hookPendingIntent(intent, pendingIntent);
} | @Test
public void hookPendingIntentGetForegroundService() {
PushAutoTrackHelper.hookPendingIntentGetForegroundService(MockDataTest.mockPendingIntent(), mApplication, 100, null, 100);
} |
public static Getter newFieldGetter(Object object, Getter parent, Field field, String modifier) throws Exception {
return newGetter(object, parent, modifier, field.getType(), field::get,
(t, et) -> new FieldGetter(parent, field, modifier, t, et));
} | @Test
public void newFieldGetter_whenExtractingFromNonEmpty_Collection_bothNullFirst_FieldAndParentIsNonEmptyMultiResult_nullValueFirst_thenInferReturnType()
throws Exception {
OuterObject object = new OuterObject("name", null, new InnerObject("inner", null, 0, 1, 2, 3));
Getter parentGetter = GetterFactory.newFieldGetter(object, null, innersCollectionField, "[any]");
Getter innerObjectNameGetter
= GetterFactory.newFieldGetter(object, parentGetter, innerAttributesCollectionField, "[any]");
Class<?> returnType = innerObjectNameGetter.getReturnType();
assertEquals(Integer.class, returnType);
} |
public synchronized @Nullable WorkItemServiceState reportUpdate(
@Nullable DynamicSplitResult dynamicSplitResult, Duration requestedLeaseDuration)
throws Exception {
checkState(worker != null, "setWorker should be called before reportUpdate");
checkState(!finalStateSent, "cannot reportUpdates after sending a final state");
checkArgument(requestedLeaseDuration != null, "requestLeaseDuration must be non-null");
if (wasAskedToAbort) {
LOG.info("Service already asked to abort work item, not reporting ignored progress.");
return null;
}
WorkItemStatus status = createStatusUpdate(false);
status.setRequestedLeaseDuration(TimeUtil.toCloudDuration(requestedLeaseDuration));
populateProgress(status);
populateSplitResult(status, dynamicSplitResult);
return execute(status);
} | @Test
public void reportUpdate() throws Exception {
when(worker.extractMetricUpdates()).thenReturn(Collections.emptyList());
statusClient.setWorker(worker, executionContext);
statusClient.reportUpdate(null, LEASE_DURATION);
verify(workUnitClient).reportWorkItemStatus(statusCaptor.capture());
WorkItemStatus workStatus = statusCaptor.getValue();
assertThat(workStatus.getCompleted(), equalTo(false));
} |
public static Optional<KsqlAuthorizationValidator> create(
final KsqlConfig ksqlConfig,
final ServiceContext serviceContext,
final Optional<KsqlAuthorizationProvider> externalAuthorizationProvider
) {
final Optional<KsqlAccessValidator> accessValidator = getAccessValidator(
ksqlConfig,
serviceContext,
externalAuthorizationProvider
);
return accessValidator.map(v ->
new KsqlAuthorizationValidatorImpl(cacheIfEnabled(ksqlConfig, v)));
} | @Test
public void shouldReturnEmptyValidatorIfKafkaBrokerVersionTooLowButAuthorizerClassConfigIsSet() {
// Given:
givenSingleNode();
givenAuthorizerClass("a-class");
when(adminClient.describeCluster(any())).thenThrow(new UnsupportedVersionException("too old"));
// When:
final Optional<KsqlAuthorizationValidator> validator = KsqlAuthorizationValidatorFactory.create(
ksqlConfig,
serviceContext,
Optional.empty()
);
// Then
assertThat(validator, is(Optional.empty()));
} |
public static Field[] getFields(Class<?> beanClass) throws SecurityException {
Assert.notNull(beanClass);
return FIELDS_CACHE.computeIfAbsent(beanClass, () -> getFieldsDirectly(beanClass, true));
} | @Test
public void getFieldsTest() {
// Fields from the parent class can also be retrieved
final Field[] fields = ReflectUtil.getFields(TestSubClass.class);
assertEquals(4, fields.length);
} |
@Override
public ResultSet getProcedureColumns(final String catalog, final String schemaPattern, final String procedureNamePattern, final String columnNamePattern) throws SQLException {
return createDatabaseMetaDataResultSet(getDatabaseMetaData().getProcedureColumns(getActualCatalog(catalog), getActualSchema(schemaPattern), procedureNamePattern, columnNamePattern));
} | @Test
void assertGetProcedureColumns() throws SQLException {
when(databaseMetaData.getProcedureColumns("test", null, null, null)).thenReturn(resultSet);
assertThat(shardingSphereDatabaseMetaData.getProcedureColumns("test", null, null, null), instanceOf(DatabaseMetaDataResultSet.class));
} |
Record deserialize(Object data) {
return (Record) fieldDeserializer.value(data);
} | @Test
public void testNullDeserialize() {
Deserializer deserializer = new Deserializer.Builder()
.schema(HiveIcebergTestUtils.FULL_SCHEMA)
.writerInspector((StructObjectInspector) IcebergObjectInspector.create(HiveIcebergTestUtils.FULL_SCHEMA))
.sourceInspector(HiveIcebergTestUtils.FULL_SCHEMA_OBJECT_INSPECTOR)
.build();
Record expected = HiveIcebergTestUtils.getNullTestRecord();
Object[] nulls = new Object[HiveIcebergTestUtils.FULL_SCHEMA.columns().size()];
Arrays.fill(nulls, null);
Record actual = deserializer.deserialize(nulls);
Assert.assertEquals(expected, actual);
// Check null record as well
Assert.assertNull(deserializer.deserialize(null));
} |
public Result runIndexOrPartitionScanQueryOnOwnedPartitions(Query query) {
Result result = runIndexOrPartitionScanQueryOnOwnedPartitions(query, true);
assert result != null;
return result;
} | @Test
public void verifyFullScanFailureWhileMigratingInFlight() {
EqualPredicate predicate = new EqualPredicate("this", value) {
@Override
protected boolean applyForSingleAttributeValue(Comparable attributeValue) {
// start a new migration while executing a full scan
mapService.beforeMigration(new PartitionMigrationEvent(MigrationEndpoint.SOURCE, partitionId, 0, 1,
UUID.randomUUID()));
return super.applyForSingleAttributeValue(attributeValue);
}
};
Query query = Query.of()
.mapName(map.getName())
.predicate(predicate)
.iterationType(IterationType.ENTRY)
.partitionIdSet(SetUtil.allPartitionIds(instance.getPartitionService().getPartitions().size()))
.build();
QueryResult result = (QueryResult) queryRunner.runIndexOrPartitionScanQueryOnOwnedPartitions(query);
assertNull(result.getPartitionIds());
} |
public void parse(InputStream inputStream, ContentHandler contentHandler, Metadata metadata,
ParseContext parseContext) throws IOException, SAXException, TikaException {
if (!initialized) {
initialize(parseContext);
}
if (!available) {
return;
}
Reader reader =
MediaType.TEXT_PLAIN.toString().equals(metadata.get(Metadata.CONTENT_TYPE)) ?
new InputStreamReader(inputStream, StandardCharsets.UTF_8) :
secondaryParser.parse(inputStream);
String text = IOUtils.toString(reader);
IOUtils.closeQuietly(reader);
for (NERecogniser ner : nerChain) {
Map<String, Set<String>> names = ner.recognise(text);
if (names != null) {
for (Map.Entry<String, Set<String>> entry : names.entrySet()) {
if (entry.getValue() != null) {
String mdKey = MD_KEY_PREFIX + entry.getKey();
for (String name : entry.getValue()) {
metadata.add(mdKey, name);
}
}
}
}
}
XHTMLContentHandler xhtml = new XHTMLContentHandler(contentHandler, metadata);
extractOutput(text.trim(), xhtml);
} | @Test
public void testParse() throws Exception {
//test config is added to resources directory
try (InputStream is = getResourceAsStream(CONFIG_FILE)) {
TikaConfig config = new TikaConfig(is);
Tika tika = new Tika(config);
String text = "I am student at University of Southern California (USC)," +
" located in Los Angeles . USC's football team is called by name Trojans." +
" Mr. John McKay was a head coach of the team from 1960 - 1975";
Metadata md = new Metadata();
tika.parse(new ByteArrayInputStream(text.getBytes(StandardCharsets.UTF_8)), md);
HashSet<String> set = new HashSet<>(
Arrays.asList(md.getValues(TikaCoreProperties.TIKA_PARSED_BY)));
assumeTrue(set.contains(NamedEntityParser.class.getName()));
set.clear();
set.addAll(Arrays.asList(md.getValues("NER_PERSON")));
assumeTrue(set.contains("John McKay"));
set.clear();
set.addAll(Arrays.asList(md.getValues("NER_LOCATION")));
assumeTrue(set.contains("Los Angeles"));
set.clear();
set.addAll(Arrays.asList(md.getValues("NER_ORGANIZATION")));
assumeTrue(set.contains("University of Southern California"));
set.clear();
set.addAll(Arrays.asList(md.getValues("NER_DATE")));
assumeTrue(set.contains("1960 - 1975"));
}
} |
@Override
public void subscribe(String serviceName, EventListener listener) throws NacosException {
subscribe(serviceName, new ArrayList<>(), listener);
} | @Test
void testSubscribe2() throws NacosException {
//given
String serviceName = "service1";
String groupName = "group1";
EventListener listener = event -> {
};
//when
client.subscribe(serviceName, groupName, listener);
NamingSelectorWrapper wrapper = new NamingSelectorWrapper(serviceName, groupName, Constants.NULL,
NamingSelectorFactory.newClusterSelector(Collections.emptyList()), listener);
//then
verify(changeNotifier, times(1)).registerListener(groupName, serviceName, wrapper);
verify(proxy, times(1)).subscribe(serviceName, groupName, "");
} |
@Override
public String getName() {
return "Python Package Analyzer";
} | @Test
public void testAnalyzeSourceMetadata() throws AnalysisException {
boolean found = false;
final Dependency result = new Dependency(BaseTest.getResourceAsFile(
this, "python/eggtest/__init__.py"));
analyzer.analyze(result, null);
assertTrue("Expected vendor evidence to contain \"example\".",
result.getEvidence(EvidenceType.VENDOR).toString().contains("example"));
for (final Evidence e : result.getEvidence(EvidenceType.VERSION)) {
if ("0.0.1".equals(e.getValue())) {
found = true;
break;
}
}
assertTrue("Version 0.0.1 not found in EggTest dependency.", found);
assertEquals("0.0.1",result.getVersion());
assertEquals("eggtest",result.getName());
assertEquals("eggtest:0.0.1",result.getDisplayFileName());
assertEquals(PythonPackageAnalyzer.DEPENDENCY_ECOSYSTEM,result.getEcosystem());
} |
@Override
public FileEntity upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
if(status.getLength() > 0) {
return new BrickUploadFeature(session, writer).upload(file, local, throttle, listener, status, callback);
}
else {
new BrickTouchFeature(session).touch(file, status);
return null;
}
} | @Test
public void testUpload() throws Exception {
final BrickThresholdUploadFeature feature = new BrickThresholdUploadFeature(session);
final Path root = new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume));
final String name = new AlphanumericRandomStringService().random();
final Path test = new Path(root, name, EnumSet.of(Path.Type.file));
final Local local = new Local(System.getProperty("java.io.tmpdir"), name);
final int length = 254;
final byte[] content = RandomUtils.nextBytes(length);
IOUtils.write(content, local.getOutputStream(false));
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
final BytecountStreamListener count = new BytecountStreamListener();
feature.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
count, status, new DisabledLoginCallback());
assertEquals(content.length, count.getSent());
assertTrue(status.isComplete());
assertTrue(new BrickFindFeature(session).find(test));
final PathAttributes attributes = new BrickAttributesFinderFeature(session).find(test);
assertEquals(content.length, attributes.getSize());
final byte[] compare = new byte[length];
IOUtils.readFully(new BrickReadFeature(session).read(test, new TransferStatus().withLength(length), new DisabledConnectionCallback()), compare);
assertArrayEquals(content, compare);
new BrickDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
local.delete();
} |
@SuppressWarnings("unchecked")
protected Class<? extends OptionalParameter> determineTypeClass(Tag tag)
throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
// we have to use reflection because the type field is private
Field f = tag.getClass().getDeclaredField("type");
f.setAccessible(true);
return (Class<? extends OptionalParameter>) f.get(tag);
} | @Test
public void determineTypeClass() throws Exception {
assertSame(OptionalParameter.Source_subaddress.class, command.determineTypeClass(Tag.SOURCE_SUBADDRESS));
assertSame(OptionalParameter.Additional_status_info_text.class,
command.determineTypeClass(Tag.ADDITIONAL_STATUS_INFO_TEXT));
assertSame(OptionalParameter.Dest_addr_subunit.class, command.determineTypeClass(Tag.DEST_ADDR_SUBUNIT));
assertSame(OptionalParameter.Dest_telematics_id.class, command.determineTypeClass(Tag.DEST_TELEMATICS_ID));
assertSame(OptionalParameter.Qos_time_to_live.class, command.determineTypeClass(Tag.QOS_TIME_TO_LIVE));
assertSame(OptionalParameter.Alert_on_message_delivery.class,
command.determineTypeClass(Tag.ALERT_ON_MESSAGE_DELIVERY));
} |
public static void stringNotEmptyAndThenExecute(String source, Runnable runnable) {
if (StringUtils.isNotEmpty(source)) {
try {
runnable.run();
} catch (Exception e) {
LogUtils.NAMING_LOGGER.error("string not empty and then execute cause an exception.", e);
}
}
} | @Test
void testStringNotEmptyAndThenExecuteFail() {
String word = "";
Runnable task = Mockito.mock(Runnable.class);
TemplateUtils.stringNotEmptyAndThenExecute(word, task);
Mockito.verify(task, Mockito.times(0)).run();
} |
@CanIgnoreReturnValue
public GsonBuilder setStrictness(Strictness strictness) {
this.strictness = Objects.requireNonNull(strictness);
return this;
} | @Test
public void testSetStrictness() throws IOException {
final Strictness STRICTNESS = Strictness.STRICT;
GsonBuilder builder = new GsonBuilder();
builder.setStrictness(STRICTNESS);
Gson gson = builder.create();
assertThat(gson.newJsonReader(new StringReader("{}")).getStrictness()).isEqualTo(STRICTNESS);
assertThat(gson.newJsonWriter(new StringWriter()).getStrictness()).isEqualTo(STRICTNESS);
} |
@Operation(summary = "start Id check with the wid checker app", tags = { SwaggerConfig.WIDCHECKER_RAISE_TO_SUB }, operationId = "id_check_with_wid_checker_start",
parameters = {@Parameter(ref = "API-V"), @Parameter(ref = "OS-T"), @Parameter(ref = "APP-V"), @Parameter(ref = "OS-V"), @Parameter(ref = "REL-T")})
@GetMapping(value = "wid_checker/session", produces = "application/json")
@ResponseBody
public AppResponse startIdCheckWithWidchecker() throws FlowNotDefinedException, NoSuchAlgorithmException, FlowStateNotDefinedException, IOException, SharedServiceClientException {
return service.startFlow(WidCheckerIdCheckFlow.NAME, Action.START_ID_CHECK_WITH_WID_CHECKER, null);
} | @Test
void validateIfCorrectProcessesAreCalledStartIdCheckWithWidchecker() throws FlowNotDefinedException, NoSuchAlgorithmException, IOException, FlowStateNotDefinedException, SharedServiceClientException {
AppSessionRequest request = new AppSessionRequest();
activationController.startIdCheckWithWidchecker();
verify(flowService, times(1)).startFlow(WidCheckerIdCheckFlow.NAME, Action.START_ID_CHECK_WITH_WID_CHECKER, null);
} |
public static int equalsConstantTime(byte[] bytes1, int startPos1, byte[] bytes2, int startPos2, int length) {
return !hasUnsafe() || !unalignedAccess() ?
ConstantTimeUtils.equalsConstantTime(bytes1, startPos1, bytes2, startPos2, length) :
PlatformDependent0.equalsConstantTime(bytes1, startPos1, bytes2, startPos2, length);
} | @Test
public void testEqualsConsistentTime() {
testEquals(new EqualityChecker() {
@Override
public boolean equals(byte[] bytes1, int startPos1, byte[] bytes2, int startPos2, int length) {
return PlatformDependent.equalsConstantTime(bytes1, startPos1, bytes2, startPos2, length) != 0;
}
});
} |
@Override
public ResultSet executeQuery(String sql)
throws SQLException {
validateState();
try {
if (!DriverUtils.queryContainsLimitStatement(sql)) {
sql += " " + LIMIT_STATEMENT + " " + _maxRows;
}
String enabledSql = DriverUtils.enableQueryOptions(sql, _connection.getQueryOptions());
ResultSetGroup resultSetGroup = _session.execute(enabledSql);
if (resultSetGroup.getResultSetCount() == 0) {
_resultSet = PinotResultSet.empty();
return _resultSet;
}
_resultSet = new PinotResultSet(resultSetGroup.getResultSet(0));
return _resultSet;
} catch (PinotClientException e) {
throw new SQLException(String.format("Failed to execute query : %s", sql), e);
}
} | @Test
public void testSetOptionAsFloat()
throws Exception {
Properties props = new Properties();
props.put(QueryOptionKey.USE_MULTISTAGE_ENGINE, "2.5");
PinotConnection pinotConnection =
new PinotConnection(props, "dummy", _dummyPinotClientTransport, "dummy", _dummyPinotControllerTransport);
Statement statement = pinotConnection.createStatement();
Assert.assertNotNull(statement);
statement.executeQuery(BASIC_TEST_QUERY);
String expectedSql =
DriverUtils.createSetQueryOptionString(QueryOptionKey.USE_MULTISTAGE_ENGINE, 2.5) + BASIC_TEST_QUERY;
Assert.assertEquals(_dummyPinotClientTransport.getLastQuery().substring(0, expectedSql.length()), expectedSql);
} |
@UdafFactory(description = "Compute sample standard deviation of column with type Integer.",
aggregateSchema = "STRUCT<SUM integer, COUNT bigint, M2 double>")
public static TableUdaf<Integer, Struct, Double> stdDevInt() {
return getStdDevImplementation(
0,
STRUCT_INT,
(agg, newValue) -> newValue + agg.getInt32(SUM),
(agg, newValue) ->
Double.valueOf(newValue * (agg.getInt64(COUNT) + 1) - (agg.getInt32(SUM) + newValue)),
(agg1, agg2) ->
agg1.getInt32(SUM).doubleValue() / agg1.getInt64(COUNT).doubleValue()
- agg2.getInt32(SUM).doubleValue() / agg2.getInt64(COUNT).doubleValue(),
(agg1, agg2) -> agg1.getInt32(SUM) + agg2.getInt32(SUM),
(agg, valueToRemove) -> agg.getInt32(SUM) - valueToRemove);
} | @Test
public void shouldIgnoreNull() {
final TableUdaf<Integer, Struct, Double> udaf = stdDevInt();
Struct agg = udaf.initialize();
final Integer[] values = new Integer[] {60, 64, 70};
for (final int thisValue : values) {
agg = udaf.aggregate(thisValue, agg);
}
agg = udaf.aggregate(null, agg);
final double standardDev = udaf.map(agg);
assertThat(standardDev, equalTo(5.033222956847166));
} |
UriEndpoint createUriEndpoint(String url, boolean isWs) {
return createUriEndpoint(url, isWs, connectAddress);
} | @Test
void createUriEndpointRelativeSslSupport() {
String test1 = this.builder.sslSupport()
.build()
.createUriEndpoint("/foo", false)
.toExternalForm();
String test2 = this.builder.sslSupport()
.build()
.createUriEndpoint("/foo", true)
.toExternalForm();
assertThat(test1).isEqualTo("https://localhost/foo");
assertThat(test2).isEqualTo("wss://localhost/foo");
} |
public WorkProcessor<Page> merge(List<Type> keyTypes, List<Type> allTypes, List<WorkProcessor<Page>> pages, DriverYieldSignal driverYieldSignal)
{
return merge(keyTypes, null, allTypes, pages, driverYieldSignal);
} | @Test
public void testBinaryMergeIteratorOverPageWithDifferentHashes()
{
Page page = rowPagesBuilder(BIGINT)
.row(42)
.row(42)
.row(52)
.row(60)
.build().get(0);
WorkProcessor<Page> mergedPages = new MergeHashSort(newSimpleAggregatedMemoryContext()).merge(
ImmutableList.of(BIGINT),
ImmutableList.of(BIGINT),
ImmutableList.of(ImmutableList.of(page).iterator()).stream()
.map(WorkProcessor::fromIterator)
.collect(toImmutableList()),
new DriverYieldSignal());
assertTrue(mergedPages.process());
Page resultPage = mergedPages.getResult();
assertEquals(resultPage.getPositionCount(), 4);
assertEquals(resultPage.getBlock(0).getLong(0), 42);
assertEquals(resultPage.getBlock(0).getLong(1), 42);
assertEquals(resultPage.getBlock(0).getLong(2), 52);
assertEquals(resultPage.getBlock(0).getLong(3), 60);
assertFinishes(mergedPages);
} |
@VisibleForTesting
static Configuration getConfiguration() {
Configuration conf = new HdfsConfiguration();
conf.addResource(FedBalance.FED_BALANCE_DEFAULT_XML);
conf.addResource(FedBalance.FED_BALANCE_SITE_XML);
return conf;
} | @Test
public void testDefaultConfigs() {
Configuration configuration = DFSRouter.getConfiguration();
String journalUri =
configuration.get(FedBalanceConfigs.SCHEDULER_JOURNAL_URI);
int workerThreads =
configuration.getInt(FedBalanceConfigs.WORK_THREAD_NUM, -1);
Assert.assertEquals("hdfs://localhost:8020/tmp/procedure", journalUri);
Assert.assertEquals(10, workerThreads);
} |
public boolean messageOffsetOnly() {
return segmentBaseOffset == UNIFIED_LOG_UNKNOWN_OFFSET && relativePositionInSegment == UNKNOWN_FILE_POSITION;
} | @Test
void testMessageOffsetOnly() {
LogOffsetMetadata metadata1 = new LogOffsetMetadata(1L);
LogOffsetMetadata metadata2 = new LogOffsetMetadata(1L, 0L, 1);
assertTrue(UNKNOWN_OFFSET_METADATA.messageOffsetOnly());
assertFalse(metadata2.messageOffsetOnly());
assertTrue(metadata1.messageOffsetOnly());
} |
public List<PartitionAssignment> assignments() {
return assignments;
} | @Test
public void testTopicAssignmentReplicas() {
List<Integer> replicasP0 = Arrays.asList(0, 1, 2);
List<Integer> replicasP1 = Arrays.asList(1, 2, 0);
List<PartitionAssignment> partitionAssignments = Arrays.asList(
partitionAssignment(replicasP0),
partitionAssignment(replicasP1)
);
assertEquals(partitionAssignments, new TopicAssignment(partitionAssignments).assignments());
} |
public static <T extends Serializable> SerializableCoder<T> of(TypeDescriptor<T> type) {
@SuppressWarnings("unchecked")
Class<T> clazz = (Class<T>) type.getRawType();
return new SerializableCoder<>(clazz, type);
} | @Test
public <T extends Serializable> void testSerializableCoderIsSerializableWithGenericTypeToken()
throws Exception {
SerializableCoder<T> coder = SerializableCoder.of(new TypeDescriptor<T>() {});
CoderProperties.coderSerializable(coder);
} |
public static NormalKey createFromSpec(String spec) {
if (spec == null || !spec.contains(":")) {
throw new IllegalArgumentException("Invalid spec format");
}
String[] parts = spec.split(":", 2);
if (parts.length != 2) {
throw new IllegalArgumentException("Invalid spec format");
}
String algorithmName = parts[0];
String base64Key = parts[1];
EncryptionAlgorithmPB algorithm;
if (algorithmName.equalsIgnoreCase("AES_128")) {
algorithm = EncryptionAlgorithmPB.AES_128;
} else {
throw new IllegalArgumentException("Unsupported algorithm: " + algorithmName);
}
byte[] plainKey;
try {
plainKey = Base64.getDecoder().decode(base64Key);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Invalid Base64 key", e);
}
if (plainKey.length != 16) {
throw new IllegalArgumentException("Invalid key length " + plainKey.length * 8);
}
return new NormalKey(algorithm, plainKey, null);
} | @Test
public void testCreateFromSpec_InvalidBase64Key() {
assertThrows(IllegalArgumentException.class, () -> {
NormalKey.createFromSpec("AES_128:invalid_base64");
});
} |
public static TriState from(String booleanLike) {
if (StringUtils.isBlank(booleanLike)) {
return UNSET;
}
if (booleanLike.equalsIgnoreCase("false")) {
return FALSE;
}
if (booleanLike.equalsIgnoreCase("true")) {
return TRUE;
}
throw new IllegalArgumentException(String.format("The string '%s' does not look like a boolean.", booleanLike));
} | @Test
public void testBadStringShouldRaiseError() {
assertThrows(IllegalArgumentException.class, () -> TriState.from("foo"));
} |
static JobVertexInputInfo computeVertexInputInfoForPointwise(
int sourceCount,
int targetCount,
Function<Integer, Integer> numOfSubpartitionsRetriever,
boolean isDynamicGraph) {
final List<ExecutionVertexInputInfo> executionVertexInputInfos = new ArrayList<>();
if (sourceCount >= targetCount) {
for (int index = 0; index < targetCount; index++) {
int start = index * sourceCount / targetCount;
int end = (index + 1) * sourceCount / targetCount;
IndexRange partitionRange = new IndexRange(start, end - 1);
IndexRange subpartitionRange =
computeConsumedSubpartitionRange(
index,
1,
() -> numOfSubpartitionsRetriever.apply(start),
isDynamicGraph,
false);
executionVertexInputInfos.add(
new ExecutionVertexInputInfo(index, partitionRange, subpartitionRange));
}
} else {
for (int partitionNum = 0; partitionNum < sourceCount; partitionNum++) {
int start = (partitionNum * targetCount + sourceCount - 1) / sourceCount;
int end = ((partitionNum + 1) * targetCount + sourceCount - 1) / sourceCount;
int numConsumers = end - start;
IndexRange partitionRange = new IndexRange(partitionNum, partitionNum);
// Variable used in lambda expression should be final or effectively final
final int finalPartitionNum = partitionNum;
for (int i = start; i < end; i++) {
IndexRange subpartitionRange =
computeConsumedSubpartitionRange(
i,
numConsumers,
() -> numOfSubpartitionsRetriever.apply(finalPartitionNum),
isDynamicGraph,
false);
executionVertexInputInfos.add(
new ExecutionVertexInputInfo(i, partitionRange, subpartitionRange));
}
}
}
return new JobVertexInputInfo(executionVertexInputInfos);
} | @Test
void testComputeVertexInputInfoForPointwiseWithNonDynamicGraph() {
final JobVertexInputInfo jobVertexInputInfo =
computeVertexInputInfoForPointwise(2, 3, ignored -> 3, false);
assertThat(jobVertexInputInfo.getExecutionVertexInputInfos())
.containsExactlyInAnyOrder(
new ExecutionVertexInputInfo(0, new IndexRange(0, 0), new IndexRange(0, 0)),
new ExecutionVertexInputInfo(1, new IndexRange(0, 0), new IndexRange(1, 1)),
new ExecutionVertexInputInfo(
2, new IndexRange(1, 1), new IndexRange(0, 0)));
} |
@Override
public ApplicationId
submitApplication(ApplicationSubmissionContext appContext)
throws YarnException, IOException {
ApplicationId applicationId = appContext.getApplicationId();
if (applicationId == null) {
throw new ApplicationIdNotProvidedException(
"ApplicationId is not provided in ApplicationSubmissionContext");
}
SubmitApplicationRequest request =
Records.newRecord(SubmitApplicationRequest.class);
request.setApplicationSubmissionContext(appContext);
// Automatically add the timeline DT into the CLC
// Only when the security and the timeline service are both enabled
if (isSecurityEnabled() && timelineV1ServiceEnabled &&
getConfig().get(YarnConfiguration.TIMELINE_HTTP_AUTH_TYPE)
.equals(KerberosAuthenticationHandler.TYPE)) {
addTimelineDelegationToken(appContext.getAMContainerSpec());
}
// Automatically add the DT for Log Aggregation path
// This is useful when a separate storage is used for log aggregation
try {
if (isSecurityEnabled()) {
addLogAggregationDelegationToken(appContext.getAMContainerSpec());
}
} catch (Exception e) {
LOG.warn("Failed to obtain delegation token for Log Aggregation Path", e);
}
//TODO: YARN-1763:Handle RM failovers during the submitApplication call.
rmClient.submitApplication(request);
int pollCount = 0;
long startTime = System.currentTimeMillis();
EnumSet<YarnApplicationState> waitingStates =
EnumSet.of(YarnApplicationState.NEW,
YarnApplicationState.NEW_SAVING,
YarnApplicationState.SUBMITTED);
EnumSet<YarnApplicationState> failToSubmitStates =
EnumSet.of(YarnApplicationState.FAILED,
YarnApplicationState.KILLED);
while (true) {
try {
ApplicationReport appReport = getApplicationReport(applicationId);
YarnApplicationState state = appReport.getYarnApplicationState();
if (!waitingStates.contains(state)) {
if(failToSubmitStates.contains(state)) {
throw new YarnException("Failed to submit " + applicationId +
" to YARN : " + appReport.getDiagnostics());
}
LOG.info("Submitted application {}", applicationId);
break;
}
long elapsedMillis = System.currentTimeMillis() - startTime;
if (enforceAsyncAPITimeout() &&
elapsedMillis >= asyncApiPollTimeoutMillis) {
throw new YarnException("Timed out while waiting for application " +
applicationId + " to be submitted successfully");
}
// Notify the client through the log every 10 polls, in case the client
// is blocked here too long.
if (++pollCount % 10 == 0) {
LOG.info("Application submission is not finished, " +
"submitted application {} is still in {}",
applicationId,
state);
}
try {
Thread.sleep(submitPollIntervalMillis);
} catch (InterruptedException ie) {
String msg = "Interrupted while waiting for application "
+ applicationId + " to be successfully submitted.";
LOG.error(msg);
throw new YarnException(msg, ie);
}
} catch (ApplicationNotFoundException ex) {
// FailOver or RM restart happens before RMStateStore saves
// ApplicationState
LOG.info("Re-submit application {} with the" +
" same ApplicationSubmissionContext", applicationId);
rmClient.submitApplication(request);
}
}
return applicationId;
} | @Test
public void testAutomaticLogAggregationDelegationToken()
throws Exception {
Configuration conf = getConf();
SecurityUtil.setAuthenticationMethod(
UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
conf.set(YarnConfiguration.RM_PRINCIPAL, YARN_RM);
String remoteRootLogPath = "/tmp/app-logs";
MiniDFSCluster hdfsCluster = null;
try {
// Step 1: Start a MiniDFSCluster for Log Aggregation Path
HdfsConfiguration hdfsConfig = new HdfsConfiguration();
hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig)
.numDataNodes(1).build();
Path remoteRootLogDir = new Path(remoteRootLogPath);
FileSystem fs = hdfsCluster.getFileSystem();
fs.mkdirs(remoteRootLogDir);
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
fs.getFileStatus(remoteRootLogDir).getPath().toString());
// Step 2: Prepare a Mock FileSystem which returns Delegation Token
// when YarnClientImpl invokes
DelegationTokenIdentifier hdfsDT = new DelegationTokenIdentifier(new Text(
"test"), new Text(YARN_RM), null);
final Token<DelegationTokenIdentifier> dToken =
new Token<>(hdfsDT.getBytes(), new byte[0], hdfsDT.getKind(),
new Text());
FileSystem mockFs = mock(FileSystem.class);
doAnswer(new Answer<Token<?>[]>() {
@Override
public Token<?>[] answer(InvocationOnMock invocation) {
Object[] args = invocation.getArguments();
((Credentials) args[1]).addToken(hdfsDT.getKind(), dToken);
return new Token[]{dToken};
}
}).when(mockFs).addDelegationTokens(any(), any());
FileSystemTestHelper.addFileSystemForTesting(fs.getUri(),
hdfsConfig, mockFs);
// Step 3: Prepare a Mock YarnClientImpl
YarnClientImpl client = spy(new YarnClientImpl() {
@Override
protected void serviceStart() {
rmClient = mock(ApplicationClientProtocol.class);
}
@Override
protected void serviceStop() {
}
@Override
public ApplicationReport getApplicationReport(ApplicationId appId) {
ApplicationReport report = mock(ApplicationReport.class);
when(report.getYarnApplicationState())
.thenReturn(YarnApplicationState.RUNNING);
return report;
}
@Override
public boolean isSecurityEnabled() {
return true;
}
});
client.init(conf);
client.start();
// Step 4: Prepare a ApplicationSubmissionContext and submit the app
ApplicationSubmissionContext context =
mock(ApplicationSubmissionContext.class);
ApplicationId applicationId = ApplicationId.newInstance(0, 1);
when(context.getApplicationId()).thenReturn(applicationId);
DataOutputBuffer dob = new DataOutputBuffer();
Credentials credentials = new Credentials();
credentials.writeTokenStorageToStream(dob);
ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
ContainerLaunchContext clc = ContainerLaunchContext.newInstance(
null, null, null, null, tokens, null);
when(context.getAMContainerSpec()).thenReturn(clc);
client.submitApplication(context);
// Step 5: Verify automatic addition of HDFS DT for log aggregation path
credentials = new Credentials();
DataInputByteBuffer dibb = new DataInputByteBuffer();
tokens = clc.getTokens();
if (tokens != null) {
dibb.reset(tokens);
credentials.readTokenStorageStream(dibb);
tokens.rewind();
}
Collection<Token<? extends TokenIdentifier>> dTokens =
credentials.getAllTokens();
Assert.assertEquals("Failed to place token for Log Aggregation Path",
1, dTokens.size());
Assert.assertEquals("Wrong Token for Log Aggregation",
hdfsDT.getKind(), dTokens.iterator().next().getKind());
} finally {
if (hdfsCluster != null) {
hdfsCluster.shutdown();
}
}
} |
@Override
public Table getTable(String dbName, String tblName) {
TableIdentifier identifier = TableIdentifier.of(dbName, tblName);
if (tables.containsKey(identifier)) {
return tables.get(identifier);
}
try {
org.apache.iceberg.Table icebergTable = icebergCatalog.getTable(dbName, tblName);
IcebergCatalogType catalogType = icebergCatalog.getIcebergCatalogType();
// Hive/Glue catalog table name is case-insensitive, normalize it to lower case
if (catalogType == IcebergCatalogType.HIVE_CATALOG || catalogType == IcebergCatalogType.GLUE_CATALOG) {
dbName = dbName.toLowerCase();
tblName = tblName.toLowerCase();
}
Table table = IcebergApiConverter.toIcebergTable(icebergTable, catalogName, dbName, tblName, catalogType.name());
table.setComment(icebergTable.properties().getOrDefault(COMMENT, ""));
tables.put(identifier, table);
return table;
} catch (StarRocksConnectorException e) {
LOG.error("Failed to get iceberg table {}", identifier, e);
return null;
} catch (NoSuchTableException e) {
return getView(dbName, tblName);
}
} | @Test
public void testNotExistTable(@Mocked IcebergHiveCatalog icebergHiveCatalog,
@Mocked HiveTableOperations hiveTableOperations) {
new Expectations() {
{
icebergHiveCatalog.getTable("db", "tbl");
result = new BaseTable(hiveTableOperations, "tbl");
minTimes = 0;
icebergHiveCatalog.getTable("db", "tbl2");
result = new StarRocksConnectorException("not found");
}
};
IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, icebergHiveCatalog,
Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null);
Assert.assertNull(metadata.getTable("db", "tbl2"));
} |
@Override
public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
writer.keyword("CREATE");
if (getReplace()) {
writer.keyword("OR REPLACE");
}
writer.keyword("EXTERNAL MAPPING");
if (ifNotExists) {
writer.keyword("IF NOT EXISTS");
}
name.unparse(writer, leftPrec, rightPrec);
if (externalName != null) {
writer.keyword("EXTERNAL NAME");
externalName.unparse(writer, leftPrec, rightPrec);
}
if (!columns.isEmpty()) {
SqlWriter.Frame frame = writer.startList("(", ")");
for (SqlNode column : columns) {
printIndent(writer);
column.unparse(writer, 0, 0);
}
writer.newlineAndIndent();
writer.endList(frame);
}
if (dataConnection != null) {
writer.newlineAndIndent();
writer.keyword("DATA CONNECTION");
dataConnection.unparse(writer, leftPrec, rightPrec);
} else {
assert connectorType != null;
writer.newlineAndIndent();
writer.keyword("TYPE");
connectorType.unparse(writer, leftPrec, rightPrec);
}
if (objectType != null) {
writer.newlineAndIndent();
writer.keyword("OBJECT TYPE");
objectType.unparse(writer, leftPrec, rightPrec);
}
unparseOptions(writer, options);
} | @Test
public void test_unparse() {
Mapping mapping = new Mapping(
"name",
"external-name",
null,
"Type",
null,
asList(
new MappingField("field1", QueryDataType.VARCHAR, "__key.field1"),
new MappingField("field2", QueryDataType.INT, "this.field2")
),
ImmutableMap.of("key1", "value1", "key2", "value2")
);
String sql = SqlCreateMapping.unparse(mapping);
assertThat(sql).isEqualTo("CREATE OR REPLACE EXTERNAL MAPPING \"hazelcast\".\"public\".\"name\" " +
"EXTERNAL NAME \"external-name\" (" + LE +
" \"field1\" VARCHAR EXTERNAL NAME \"__key.field1\"," + LE +
" \"field2\" INTEGER EXTERNAL NAME \"this.field2\"" + LE +
")" + LE +
"TYPE \"Type\"" + LE +
"OPTIONS (" + LE +
" 'key1'='value1'," + LE +
" 'key2'='value2'" + LE +
")"
);
} |
public void addIfValid(String headerName, String value) {
Objects.requireNonNull(headerName, "headerName");
Objects.requireNonNull(value, "value");
if (isValid(headerName) && isValid(value)) {
String normalName = HeaderName.normalize(headerName);
addNormal(headerName, normalName, value);
}
} | @Test
void addIfValid() {
Headers headers = new Headers();
headers.addIfValid("Via", "duct");
headers.addIfValid("Cookie", "abc=def");
headers.addIfValid("cookie", "uvw=xyz");
Truth.assertThat(headers.getAll("Via")).containsExactly("duct");
Truth.assertThat(headers.getAll("Cookie"))
.containsExactly("abc=def", "uvw=xyz")
.inOrder();
Truth.assertThat(headers.size()).isEqualTo(3);
} |
@JsonCreator
public static ContentPackInstallationRequest create(
@JsonProperty("parameters") @Nullable Map<String, ValueReference> parameters,
@JsonProperty("comment") @Nullable String comment) {
final Map<String, ValueReference> parameterMap = parameters == null ? Collections.emptyMap() : parameters;
return new AutoValue_ContentPackInstallationRequest(parameterMap, comment);
} | @Test
public void testSerialisation() {
final ImmutableMap<String, ValueReference> parameters = ImmutableMap.of(
"param1", ValueReference.of("string"),
"param2", ValueReference.of(42),
"param3", ValueReference.of(3.14d),
"param4", ValueReference.of(true));
final ContentPackInstallationRequest request = ContentPackInstallationRequest.create(parameters, "comment");
final JsonNode node = objectMapper.valueToTree(request);
assertThat(node.path("comment").asText()).isEqualTo("comment");
assertThat(node.path("parameters").path("param1").path("@type").asText()).isEqualTo("string");
assertThat(node.path("parameters").path("param1").path("@value").asText()).isEqualTo("string");
assertThat(node.path("parameters").path("param2").path("@type").asText()).isEqualTo("integer");
assertThat(node.path("parameters").path("param2").path("@value").asInt()).isEqualTo(42);
assertThat(node.path("parameters").path("param3").path("@type").asText()).isEqualTo("double");
assertThat(node.path("parameters").path("param3").path("@value").asDouble()).isEqualTo(3.14d);
assertThat(node.path("parameters").path("param4").path("@type").asText()).isEqualTo("boolean");
assertThat(node.path("parameters").path("param4").path("@value").asBoolean()).isEqualTo(true);
} |
@Override
public boolean isAvailable() {
return sonarRuntime.getEdition() == ENTERPRISE || sonarRuntime.getEdition() == DATACENTER;
} | @Test
@UseDataProvider("editionsAndMultipleAlmAvailability")
public void isEnabled_shouldOnlyBeEnabledInEnterpriseEditionPlus(SonarEdition edition, boolean shouldBeEnabled) {
when(sonarRuntime.getEdition()).thenReturn(edition);
boolean isAvailable = underTest.isAvailable();
assertThat(isAvailable).isEqualTo(shouldBeEnabled);
} |
@Override
public UndoLogDeleteRequestProto convert2Proto(UndoLogDeleteRequest undoLogDeleteRequest) {
final short typeCode = undoLogDeleteRequest.getTypeCode();
final AbstractMessageProto abstractMessage = AbstractMessageProto.newBuilder().setMessageType(
MessageTypeProto.forNumber(typeCode)).build();
final AbstractTransactionRequestProto abstractTransactionRequestProto = AbstractTransactionRequestProto
.newBuilder().setAbstractMessage(abstractMessage).build();
final UndoLogDeleteRequestProto undoLogDeleteRequestProto = UndoLogDeleteRequestProto.newBuilder()
.setAbstractTransactionRequest(abstractTransactionRequestProto).setSaveDays(
undoLogDeleteRequest.getSaveDays()).setBranchType(
BranchTypeProto.valueOf(undoLogDeleteRequest.getBranchType().name())).setResourceId(
undoLogDeleteRequest.getResourceId()).build();
return undoLogDeleteRequestProto;
} | @Test
public void convert2Proto() {
UndoLogDeleteRequest undoLogDeleteRequest = new UndoLogDeleteRequest();
undoLogDeleteRequest.setBranchType(BranchType.AT);
undoLogDeleteRequest.setResourceId(RESOURCE_ID);
undoLogDeleteRequest.setSaveDays(SAVE_DAYS);
UndoLogDeleteRequestConvertor undoLogDeleteRequestConvertor = new UndoLogDeleteRequestConvertor();
UndoLogDeleteRequestProto proto = undoLogDeleteRequestConvertor.convert2Proto(
undoLogDeleteRequest);
UndoLogDeleteRequest realRequest = undoLogDeleteRequestConvertor.convert2Model(proto);
assertThat(realRequest.getTypeCode()).isEqualTo(undoLogDeleteRequest.getTypeCode());
assertThat(realRequest.getBranchType()).isEqualTo(undoLogDeleteRequest.getBranchType());
assertThat(realRequest.getResourceId()).isEqualTo(undoLogDeleteRequest.getResourceId());
assertThat(realRequest.getSaveDays()).isEqualTo(undoLogDeleteRequest.getSaveDays());
} |
@Override
@PublicAPI(usage = ACCESS)
public Set<? extends JavaAnnotation<JavaPackage>> getAnnotations() {
return packageInfo
.map(it -> it.getAnnotations().stream().map(withSelfAsOwner).collect(toSet()))
.orElse(emptySet());
} | @Test
public void test_getAnnotations() {
JavaPackage annotatedPackage = importPackage("packageexamples.annotated");
JavaPackage nonAnnotatedPackage = importPackage("packageexamples");
JavaAnnotation<JavaPackage> annotation = getOnlyElement(annotatedPackage.getAnnotations());
assertThatType(annotation.getRawType()).matches(PackageLevelAnnotation.class);
assertThat(annotation.getOwner()).isEqualTo(annotatedPackage);
assertThat(nonAnnotatedPackage.getAnnotations()).isEmpty();
} |
public static ByteBuf copiedBuffer(byte[] array) {
if (array.length == 0) {
return EMPTY_BUFFER;
}
return wrappedBuffer(array.clone());
} | @Test
public void testCopiedBuffer() {
ByteBuf copied = copiedBuffer(ByteBuffer.allocateDirect(16));
assertEquals(16, copied.capacity());
copied.release();
assertEqualsAndRelease(wrappedBuffer(new byte[] { 1, 2, 3 }),
copiedBuffer(new byte[][] { new byte[] { 1, 2, 3 } }));
assertEqualsAndRelease(wrappedBuffer(new byte[] { 1, 2, 3 }),
copiedBuffer(new byte[] { 1 }, new byte[] { 2 }, new byte[] { 3 }));
assertEqualsAndRelease(wrappedBuffer(new byte[] { 1, 2, 3 }),
copiedBuffer(new ByteBuf[] { wrappedBuffer(new byte[] { 1, 2, 3 })}));
assertEqualsAndRelease(wrappedBuffer(new byte[] { 1, 2, 3 }),
copiedBuffer(wrappedBuffer(new byte[] { 1 }),
wrappedBuffer(new byte[] { 2 }), wrappedBuffer(new byte[] { 3 })));
assertEqualsAndRelease(wrappedBuffer(new byte[] { 1, 2, 3 }),
copiedBuffer(new ByteBuffer[] { ByteBuffer.wrap(new byte[] { 1, 2, 3 }) }));
assertEqualsAndRelease(wrappedBuffer(new byte[] { 1, 2, 3 }),
copiedBuffer(ByteBuffer.wrap(new byte[] { 1 }),
ByteBuffer.wrap(new byte[] { 2 }), ByteBuffer.wrap(new byte[] { 3 })));
} |
@Override public Table alterTable(String catName, String dbName, String tblName, Table newTable, String validWriteIds)
throws InvalidObjectException, MetaException {
newTable = rawStore.alterTable(catName, dbName, tblName, newTable, validWriteIds);
// in case of event based cache update, cache will be updated during commit.
if (canUseEvents) {
return newTable;
}
catName = normalizeIdentifier(catName);
dbName = normalizeIdentifier(dbName);
tblName = normalizeIdentifier(tblName);
String newTblName = normalizeIdentifier(newTable.getTableName());
if (!shouldCacheTable(catName, dbName, tblName) && !shouldCacheTable(catName, dbName, newTblName)) {
return newTable;
}
Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
if (tbl == null) {
// The table is not yet loaded in cache
return newTable;
}
if (shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) {
// If old table is in the cache and the new table can also be cached
sharedCache.alterTableInCache(catName, dbName, tblName, newTable);
} else if (!shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) {
// If old table is *not* in the cache but the new table can be cached
sharedCache.addTableToCache(catName, dbName, newTblName, newTable);
} else if (shouldCacheTable(catName, dbName, tblName) && !shouldCacheTable(catName, dbName, newTblName)) {
// If old table is in the cache but the new table *cannot* be cached
sharedCache.removeTableFromCache(catName, dbName, tblName);
}
return newTable;
} | @Test public void testAlterTable() throws Exception {
Configuration conf = MetastoreConf.newMetastoreConf();
MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb");
MetaStoreTestUtils.setConfForStandloneMode(conf);
CachedStore cachedStore = new CachedStore();
CachedStore.clearSharedCache();
cachedStore.setConfForTest(conf);
ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore();
// Prewarm CachedStore
CachedStore.setCachePrewarmedState(false);
CachedStore.prewarm(objectStore);
List<String> db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName());
Assert.assertEquals(2, db1Tables.size());
List<String> db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName());
Assert.assertEquals(2, db2Tables.size());
// Alter table db1Utbl1 via CachedStore and read via ObjectStore
Table db1Utbl1Read = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName());
String newOwner = "newOwner";
Table db1Utbl1ReadAlt = new Table(db1Utbl1Read);
db1Utbl1ReadAlt.setOwner(newOwner);
cachedStore
.alterTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName(), db1Utbl1ReadAlt, "0");
db1Utbl1Read =
cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName());
Table db1Utbl1ReadOS =
objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName());
Assert.assertEquals(db1Utbl1Read, db1Utbl1ReadOS);
// Alter table db2Utbl1 via ObjectStore and read via CachedStore
Table db2Utbl1Read = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName());
Table db2Utbl1ReadAlt = new Table(db2Utbl1Read);
db2Utbl1ReadAlt.setOwner(newOwner);
objectStore
.alterTable(DEFAULT_CATALOG_NAME, db2Utbl1Read.getDbName(), db2Utbl1Read.getTableName(), db2Utbl1ReadAlt, "0");
updateCache(cachedStore);
db2Utbl1Read =
objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1ReadAlt.getDbName(), db2Utbl1ReadAlt.getTableName());
Table d21Utbl1ReadCS =
cachedStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1ReadAlt.getDbName(), db2Utbl1ReadAlt.getTableName());
Assert.assertEquals(db2Utbl1Read, d21Utbl1ReadCS);
cachedStore.shutdown();
} |
@Operation(summary = "Start the process and generate the session ids")
@PostMapping(value = "new", consumes = "application/json", produces = "application/json")
public StartProcessResponse startProcessRestService(@Valid @RequestBody StartProcessRequest request) {
StartProcessResponse response = new StartProcessResponse();
// fill the response with the url for the app
response.setUrl(publicUrl);
final String host;
try {
host = new URL(request.getReturnUrl()).getHost();
} catch (MalformedURLException e) {
throw new ClientException("Malformed URL", e);
}
if (!whitelistPattern.matcher(host).matches()) {
logger.warn("The host given: {}, is not a white listed host!", host);
throw new ClientException("Invalid return url");
}
EidSession session = EidSession.create(request.getReturnUrl(), request.getConfirmId(),
request.getClientIpAddress(), timeout);
sessionRepo.save(session);
response.setSessionId(session.getId());
response.setConfirmSecret(session.getConfirmSecret());
response.setExpiration(session.getExpiration());
// Result OK
return response;
} | @Test
public void testStartProcessRestService() {
StartProcessRequest request = new StartProcessRequest();
request.setReturnUrl("http://localhost");
request.setConfirmId("confirmId");
// the test
StartProcessResponse response = controller.startProcessRestService(request);
// asserts
assertNotNull(response.getConfirmSecret());
assertNotNull(response.getSessionId());
assertEquals("SSSSSSSSSSSSSSSSSSSSSS", response.getUrl());
assertEquals(600, response.getExpiration());
} |
@Override
public SchemaAndValue get(final ProcessingLogConfig config) {
final Struct struct = new Struct(ProcessingLogMessageSchema.PROCESSING_LOG_SCHEMA)
.put(ProcessingLogMessageSchema.TYPE, MessageType.DESERIALIZATION_ERROR.getTypeId())
.put(ProcessingLogMessageSchema.DESERIALIZATION_ERROR, deserializationError(config));
return new SchemaAndValue(ProcessingLogMessageSchema.PROCESSING_LOG_SCHEMA, struct);
} | @Test
public void shouldSetNullRecordToNull() {
// Given:
final DeserializationError deserError = new DeserializationError(
error,
Optional.empty(),
"topic",
false
);
// When:
final SchemaAndValue msg = deserError.get(config);
// Then:
final Struct struct = (Struct) msg.value();
final Struct deserializationError = struct.getStruct(DESERIALIZATION_ERROR);
assertThat(deserializationError.get(DESERIALIZATION_ERROR_FIELD_RECORD_B64),
is(nullValue()));
} |
@Override
public boolean match(Message msg, StreamRule rule) {
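        // Presence rule: a message matches when the field exists and, for string fields, is non-blank;
        // an inverted rule flips the outcome.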
Object rawField = msg.getField(rule.getField());
if (rawField == null) {
return rule.getInverted();
}
if (rawField instanceof String) {
String field = (String) rawField;
            return rule.getInverted() ^ !field.trim().isEmpty();
}
return !rule.getInverted();
} | @Test
public void testInvertedBasicNonMatch() throws Exception {
StreamRule rule = getSampleRule();
rule.setField("nonexistentField");
rule.setType(StreamRuleType.PRESENCE);
rule.setInverted(true);
Message message = getSampleMessage();
StreamRuleMatcher matcher = getMatcher(rule);
Boolean result = matcher.match(message, rule);
assertTrue(result);
} |
public static boolean isValidRootUrl(String url) {
UrlValidator validator = new CustomUrlValidator();
return validator.isValid(url);
} | @Test
public void ipv4Allowed() {
assertTrue(UrlHelper.isValidRootUrl("http://172.52.125.12"));
assertTrue(UrlHelper.isValidRootUrl("http://172.52.125.12/jenkins"));
assertTrue(UrlHelper.isValidRootUrl("http://172.52.125.12:8080"));
assertTrue(UrlHelper.isValidRootUrl("http://172.52.125.12:8080/jenkins"));
} |
public static Type getArrayElementType(Type type) {
return GenericTypeReflector.getArrayComponentType(type);
} | @Test
public void getArrayElementType() {
assertThat(Reflection.getArrayElementType(int[].class)).isEqualTo(int.class);
assertThat(Reflection.getArrayElementType(Person[].class)).isEqualTo(Person.class);
var containerOfPerson = Types.parameterizedType(Container.class, Person.class);
assertThat(Reflection.getArrayElementType(Types.arrayOf(containerOfPerson)))
.isEqualTo(containerOfPerson);
} |
public void setInputChannels(InputChannel... channels) {
if (channels.length != numberOfInputChannels) {
throw new IllegalArgumentException(
"Expected "
+ numberOfInputChannels
+ " channels, "
+ "but got "
+ channels.length);
}
synchronized (requestLock) {
System.arraycopy(channels, 0, this.channels, 0, numberOfInputChannels);
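            // Index each channel by partition id and channel info; channels seen for the first time
            // that are still unknown are counted as uninitialized.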
for (InputChannel inputChannel : channels) {
if (inputChannels
.computeIfAbsent(
inputChannel.getPartitionId().getPartitionId(),
ignored -> new HashMap<>())
.put(inputChannel.getChannelInfo(), inputChannel)
== null
&& inputChannel instanceof UnknownInputChannel) {
numberOfUninitializedChannels++;
}
}
}
} | @Test
void testPartitionNotFoundExceptionWhileGetNextBuffer() throws Exception {
final SingleInputGate inputGate = InputChannelTestUtils.createSingleInputGate(1);
final LocalInputChannel localChannel =
createLocalInputChannel(inputGate, new ResultPartitionManager());
final ResultPartitionID partitionId = localChannel.getPartitionId();
inputGate.setInputChannels(localChannel);
localChannel.setError(new PartitionNotFoundException(partitionId));
assertThatThrownBy(inputGate::getNext)
.isInstanceOfSatisfying(
PartitionNotFoundException.class,
(notFoundException) ->
assertThat(notFoundException.getPartitionId())
.isEqualTo(partitionId));
} |
String getUrl() {
return "http://" + this.httpServer.getInetAddress().getHostAddress() + ":" + this.httpServer.getLocalPort();
} | @Test
public void return_http_response_with_code_500_and_exception_message_as_body_when_action_throws_exception() throws IOException {
Response response = call(underTest.getUrl() + "/failing");
assertThat(response.code()).isEqualTo(500);
assertThat(response.body().string()).isEqualTo(FAILING_ACTION.getMessage());
} |
@Override
public boolean contains(String word) {
return dict.contains(word);
} | @Test
public void testContains() {
System.out.println("contains");
assertTrue(EnglishPunctuations.getInstance().contains("["));
assertTrue(EnglishPunctuations.getInstance().contains("]"));
assertTrue(EnglishPunctuations.getInstance().contains("("));
assertTrue(EnglishPunctuations.getInstance().contains(")"));
assertTrue(EnglishPunctuations.getInstance().contains("{"));
assertTrue(EnglishPunctuations.getInstance().contains("}"));
assertTrue(EnglishPunctuations.getInstance().contains("<"));
assertTrue(EnglishPunctuations.getInstance().contains(">"));
assertTrue(EnglishPunctuations.getInstance().contains(":"));
assertTrue(EnglishPunctuations.getInstance().contains(","));
assertTrue(EnglishPunctuations.getInstance().contains(";"));
assertTrue(EnglishPunctuations.getInstance().contains("-"));
assertTrue(EnglishPunctuations.getInstance().contains("--"));
assertTrue(EnglishPunctuations.getInstance().contains("---"));
assertTrue(EnglishPunctuations.getInstance().contains("!"));
assertTrue(EnglishPunctuations.getInstance().contains("?"));
assertTrue(EnglishPunctuations.getInstance().contains("."));
assertTrue(EnglishPunctuations.getInstance().contains("..."));
assertTrue(EnglishPunctuations.getInstance().contains("`"));
assertTrue(EnglishPunctuations.getInstance().contains("'"));
assertTrue(EnglishPunctuations.getInstance().contains("\""));
assertTrue(EnglishPunctuations.getInstance().contains("/"));
assertFalse(EnglishPunctuations.getInstance().contains(""));
assertFalse(EnglishPunctuations.getInstance().contains("word"));
} |
public float toFloat(String name) {
return toFloat(name, 0.0f);
} | @Test
public void testToFloat_String_float() {
System.out.println("toFloat");
float expResult;
float result;
Properties props = new Properties();
props.put("value1", "12345.6789");
props.put("value2", "-9000.001");
props.put("empty", "");
props.put("str", "abc");
props.put("boolean", "true");
props.put("float", "24.98");
props.put("int", "12");
props.put("char", "a");
PropertyParser instance = new PropertyParser(props);
expResult = 12345.6789f;
result = instance.toFloat("value1", 0.123f);
assertEquals(expResult, result, 0);
expResult = -9000.001f;
result = instance.toFloat("value2", 0.123f);
assertEquals(expResult, result, 0);
expResult = 0.123f;
result = instance.toFloat("empty", 0.123f);
assertEquals(expResult, result, 0);
expResult = 0.123f;
result = instance.toFloat("str", 0.123f);
assertEquals(expResult, result, 0);
expResult = 0.123f;
result = instance.toFloat("boolean", 0.123f);
assertEquals(expResult, result, 0);
expResult = 24.98f;
result = instance.toFloat("float", 0.123f);
assertEquals(expResult, result, 0);
expResult = 12;
result = instance.toFloat("int", 0.123f);
assertEquals(expResult, result, 0);
expResult = 0.123f;
result = instance.toFloat("char", 0.123f);
assertEquals(expResult, result, 0);
expResult = 0.123f;
result = instance.toFloat("nonexistent", 0.123f);
assertEquals(expResult, result, 0);
} |
public abstract IOStreamPair execContainer(ContainerExecContext ctx)
throws ContainerExecutionException; | @Test
public void testExecContainer() throws Exception {
Container container = mock(Container.class);
try {
ContainerExecContext.Builder builder = new ContainerExecContext.Builder();
builder.setUser("foo").setAppId("app1").setContainer(container);
ContainerExecContext ctx = builder.build();
containerExecutor.execContainer(ctx);
} catch (Exception e) {
// socket exception should be thrown immediately, without RPC retries.
Assert.assertTrue(e instanceof ContainerExecutionException);
}
} |
@VisibleForTesting
Entity exportNativeEntity(Output output, EntityDescriptorIds entityDescriptorIds) {
final OutputEntity outputEntity = OutputEntity.create(
ValueReference.of(output.getTitle()),
ValueReference.of(output.getType()),
toReferenceMap(output.getConfiguration())
);
final JsonNode data = objectMapper.convertValue(outputEntity, JsonNode.class);
final Set<Constraint> constraints = versionConstraints(output);
return EntityV1.builder()
.id(ModelId.of(entityDescriptorIds.getOrThrow(output.getId(), ModelTypes.OUTPUT_V1)))
.type(ModelTypes.OUTPUT_V1)
.constraints(ImmutableSet.copyOf(constraints))
.data(data)
.build();
} | @Test
@MongoDBFixtures("OutputFacadeTest.json")
public void exportNativeEntity() throws NotFoundException {
final Output output = outputService.load("5adf239e4b900a0fdb4e5197");
final EntityDescriptor descriptor = EntityDescriptor.create(output.getId(), ModelTypes.OUTPUT_V1);
final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor);
final Entity entity = facade.exportNativeEntity(output, entityDescriptorIds);
assertThat(entity).isInstanceOf(EntityV1.class);
assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null)));
assertThat(entity.type()).isEqualTo(ModelTypes.OUTPUT_V1);
final EntityV1 entityV1 = (EntityV1) entity;
final OutputEntity outputEntity = objectMapper.convertValue(entityV1.data(), OutputEntity.class);
assertThat(outputEntity.title()).isEqualTo(ValueReference.of("STDOUT"));
assertThat(outputEntity.type()).isEqualTo(ValueReference.of("org.graylog2.outputs.LoggingOutput"));
assertThat(outputEntity.configuration()).containsEntry("prefix", ValueReference.of("Writing message: "));
} |
@Override
public void onMsg(TbContext ctx, TbMsg msg) throws TbNodeException {
try {
Alarm alarm = JacksonUtil.fromString(msg.getData(), Alarm.class);
Objects.requireNonNull(alarm, "alarm is null");
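            // Look up the alarm's latest state asynchronously and route the message depending on
            // whether its current status is in the configured status list.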
ListenableFuture<Alarm> latest = ctx.getAlarmService().findAlarmByIdAsync(ctx.getTenantId(), alarm.getId());
Futures.addCallback(latest, new FutureCallback<>() {
@Override
public void onSuccess(@Nullable Alarm result) {
if (result == null) {
ctx.tellFailure(msg, new TbNodeException("No such alarm found."));
return;
}
boolean isPresent = config.getAlarmStatusList().stream()
.anyMatch(alarmStatus -> result.getStatus() == alarmStatus);
ctx.tellNext(msg, isPresent ? TbNodeConnectionType.TRUE : TbNodeConnectionType.FALSE);
}
@Override
public void onFailure(Throwable t) {
ctx.tellFailure(msg, t);
}
}, ctx.getDbCallbackExecutor());
} catch (Exception e) {
if (e instanceof IllegalArgumentException || e instanceof NullPointerException) {
log.debug("[{}][{}] Failed to parse alarm: [{}] error [{}]", ctx.getTenantId(), ctx.getRuleChainName(), msg.getData(), e.getMessage());
} else {
log.error("[{}][{}] Failed to parse alarm: [{}]", ctx.getTenantId(), ctx.getRuleChainName(), msg.getData(), e);
}
throw new TbNodeException(e);
}
} | @Test
void givenDeletedAlarm_whenOnMsg_then_Failure() throws TbNodeException {
// GIVEN
var alarm = new Alarm();
alarm.setId(ALARM_ID);
alarm.setOriginator(DEVICE_ID);
alarm.setType("General Alarm");
alarm.setCleared(true);
String msgData = JacksonUtil.toString(alarm);
TbMsg msg = getTbMsg(msgData);
when(alarmService.findAlarmByIdAsync(TENANT_ID, ALARM_ID)).thenReturn(Futures.immediateFuture(null));
// WHEN
node.onMsg(ctx, msg);
// THEN
ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class);
ArgumentCaptor<Throwable> throwableCaptor = ArgumentCaptor.forClass(Throwable.class);
verify(ctx, times(1)).tellFailure(newMsgCaptor.capture(), throwableCaptor.capture());
verify(ctx, never()).tellSuccess(any());
TbMsg newMsg = newMsgCaptor.getValue();
assertThat(newMsg).isNotNull();
assertThat(newMsg).isSameAs(msg);
Throwable value = throwableCaptor.getValue();
assertThat(value).isInstanceOf(TbNodeException.class).hasMessage("No such alarm found.");
} |
static long sizeOf(Mutation m) {
if (m.getOperation() == Mutation.Op.DELETE) {
return sizeOf(m.getKeySet());
}
long result = 0;
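    // Sum the estimated encoded size of each value; struct values are rejected because mutations
    // cannot carry them.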
for (Value v : m.getValues()) {
switch (v.getType().getCode()) {
case ARRAY:
result += estimateArrayValue(v);
break;
case STRUCT:
throw new IllegalArgumentException("Structs are not supported in mutation.");
default:
result += estimatePrimitiveValue(v);
}
}
return result;
} | @Test
public void dates() throws Exception {
Mutation timestamp =
Mutation.newInsertOrUpdateBuilder("test").set("one").to(Timestamp.now()).build();
Mutation nullTimestamp =
Mutation.newInsertOrUpdateBuilder("test").set("one").to((Timestamp) null).build();
Mutation date =
Mutation.newInsertOrUpdateBuilder("test")
.set("one")
.to(Date.fromYearMonthDay(2017, 10, 10))
.build();
Mutation nullDate =
Mutation.newInsertOrUpdateBuilder("test").set("one").to((Date) null).build();
Mutation timestampArray =
Mutation.newInsertOrUpdateBuilder("test")
.set("one")
.toTimestampArray(Arrays.asList(Timestamp.now(), null))
.build();
Mutation dateArray =
Mutation.newInsertOrUpdateBuilder("test")
.set("one")
.toDateArray(
Arrays.asList(
null,
Date.fromYearMonthDay(2017, 1, 1),
null,
Date.fromYearMonthDay(2017, 1, 2)))
.build();
Mutation nullTimestampArray =
Mutation.newInsertOrUpdateBuilder("test").set("one").toTimestampArray(null).build();
Mutation nullDateArray =
Mutation.newInsertOrUpdateBuilder("test").set("one").toDateArray(null).build();
Mutation deleteTimestamp =
Mutation.delete("test", Key.of(Timestamp.parseTimestamp("2077-10-15T00:00:00Z")));
Mutation deleteDate = Mutation.delete("test", Key.of(Date.fromYearMonthDay(2017, 1, 1)));
assertThat(MutationSizeEstimator.sizeOf(timestamp), is(12L));
assertThat(MutationSizeEstimator.sizeOf(date), is(12L));
assertThat(MutationSizeEstimator.sizeOf(nullTimestamp), is(12L));
assertThat(MutationSizeEstimator.sizeOf(nullDate), is(12L));
assertThat(MutationSizeEstimator.sizeOf(timestampArray), is(24L));
assertThat(MutationSizeEstimator.sizeOf(dateArray), is(48L));
assertThat(MutationSizeEstimator.sizeOf(nullTimestampArray), is(0L));
assertThat(MutationSizeEstimator.sizeOf(nullDateArray), is(0L));
assertThat(MutationSizeEstimator.sizeOf(deleteTimestamp), is(12L));
assertThat(MutationSizeEstimator.sizeOf(deleteDate), is(12L));
} |
@Override
public PipelineState getLatestCheckpointByJobIdAndPipelineId(String jobId, String pipelineId)
throws CheckpointStorageException {
String parentPath = getStorageParentDirectory() + jobId;
Collection<File> fileList = new ArrayList<>();
try {
fileList = FileUtils.listFiles(new File(parentPath), FILE_EXTENSIONS, false);
} catch (Exception e) {
if (!(e.getCause() instanceof NoSuchFileException)) {
throw new CheckpointStorageException(ExceptionUtils.getMessage(e));
}
}
if (fileList.isEmpty()) {
log.info("No checkpoint found for job, job id is: " + jobId);
return null;
}
List<String> fileNames = fileList.stream().map(File::getName).collect(Collectors.toList());
String latestFileName =
getLatestCheckpointFileNameByJobIdAndPipelineId(fileNames, pipelineId);
AtomicReference<PipelineState> latestFile = new AtomicReference<>(null);
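        // Only the file whose name matches the latest checkpoint for this pipeline is read and deserialized.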
fileList.forEach(
file -> {
String fileName = file.getName();
if (fileName.equals(latestFileName)) {
try {
byte[] data = FileUtils.readFileToByteArray(file);
latestFile.set(deserializeCheckPointData(data));
} catch (IOException e) {
log.error(
"read checkpoint data from file " + file.getAbsolutePath(), e);
}
}
});
if (latestFile.get() == null) {
log.info(
"No checkpoint found for this job, the job id is: "
+ jobId
+ ", pipeline id is: "
+ pipelineId);
return null;
}
return latestFile.get();
} | @Test
public void testGetLatestCheckpointByJobIdAndPipelineId() throws CheckpointStorageException {
PipelineState state = STORAGE.getLatestCheckpointByJobIdAndPipelineId(JOB_ID, "1");
Assertions.assertEquals(2, state.getCheckpointId());
} |
@Override
public void close() {
nameResolver.close();
} | @Test
public void testCloseDelegates() {
@SuppressWarnings("unchecked")
NameResolver<InetAddress> nameResolver = mock(NameResolver.class);
InetSocketAddressResolver resolver = new InetSocketAddressResolver(
ImmediateEventExecutor.INSTANCE, nameResolver);
resolver.close();
verify(nameResolver, times(1)).close();
} |
DataTableType lookupTableTypeByType(Type type) {
return lookupTableTypeByType(type, Function.identity());
} | @Test
void null_integer_transformed_to_null() {
DataTableTypeRegistry registry = new DataTableTypeRegistry(Locale.ENGLISH);
DataTableType dataTableType = registry.lookupTableTypeByType(LIST_OF_LIST_OF_INTEGER);
assertEquals(
singletonList(singletonList(null)),
dataTableType.transform(singletonList(singletonList(null))));
} |
@Override
public Collection<ParameterRewriter> getParameterRewriters() {
Collection<ParameterRewriter> result = new LinkedList<>();
addParameterRewriter(result, new GeneratedKeyInsertValueParameterRewriter());
addParameterRewriter(result, new ShardingPaginationParameterRewriter());
return result;
} | @Test
void assertGetParameterRewritersWhenPaginationIsNotNeedRewrite() {
RouteContext routeContext = mock(RouteContext.class);
when(routeContext.isSingleRouting()).thenReturn(true);
SelectStatementContext statementContext = mock(SelectStatementContext.class, RETURNS_DEEP_STUBS);
when(statementContext.getPaginationContext().isHasPagination()).thenReturn(true);
assertTrue(new ShardingParameterRewriterBuilder(routeContext, Collections.singletonMap("test", mock(ShardingSphereSchema.class)), statementContext).getParameterRewriters().isEmpty());
} |
@Override
public V get()
throws InterruptedException, ExecutionException {
try {
return get(Long.MAX_VALUE, TimeUnit.SECONDS);
} catch (TimeoutException e) {
throw new ExecutionException(e);
}
} | @Test
public void completeDelegate_getWithTimeout_delegateAsked() throws Exception {
delegateFuture.run();
assertEquals(DELEGATE_RESULT, delegateFuture.get(10, TimeUnit.MILLISECONDS));
} |
public static KubernetesJobManagerSpecification buildKubernetesJobManagerSpecification(
FlinkPod podTemplate, KubernetesJobManagerParameters kubernetesJobManagerParameters)
throws IOException {
FlinkPod flinkPod = Preconditions.checkNotNull(podTemplate).copy();
List<HasMetadata> accompanyingResources = new ArrayList<>();
final List<KubernetesStepDecorator> stepDecorators =
new ArrayList<>(
Arrays.asList(
new InitJobManagerDecorator(kubernetesJobManagerParameters),
new EnvSecretsDecorator(kubernetesJobManagerParameters),
new MountSecretsDecorator(kubernetesJobManagerParameters),
new CmdJobManagerDecorator(kubernetesJobManagerParameters),
new InternalServiceDecorator(kubernetesJobManagerParameters),
new ExternalServiceDecorator(kubernetesJobManagerParameters)));
Configuration configuration = kubernetesJobManagerParameters.getFlinkConfiguration();
if (configuration.get(KUBERNETES_HADOOP_CONF_MOUNT_DECORATOR_ENABLED)) {
stepDecorators.add(new HadoopConfMountDecorator(kubernetesJobManagerParameters));
}
if (configuration.get(KUBERNETES_KERBEROS_MOUNT_DECORATOR_ENABLED)) {
stepDecorators.add(new KerberosMountDecorator(kubernetesJobManagerParameters));
}
stepDecorators.addAll(
Arrays.asList(
new FlinkConfMountDecorator(kubernetesJobManagerParameters),
new PodTemplateMountDecorator(kubernetesJobManagerParameters)));
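        // Apply each decorator in order; a decorator may adjust the pod spec and contribute
        // accompanying Kubernetes resources.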
for (KubernetesStepDecorator stepDecorator : stepDecorators) {
flinkPod = stepDecorator.decorateFlinkPod(flinkPod);
accompanyingResources.addAll(stepDecorator.buildAccompanyingKubernetesResources());
}
final Deployment deployment =
createJobManagerDeployment(flinkPod, kubernetesJobManagerParameters);
return new KubernetesJobManagerSpecification(deployment, accompanyingResources);
} | @Test
void testKerberosKeytabSecret() throws IOException {
kubernetesJobManagerSpecification =
KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
flinkPod, kubernetesJobManagerParameters);
final Secret resultSecret =
(Secret)
this.kubernetesJobManagerSpecification.getAccompanyingResources().stream()
.filter(
x ->
x instanceof Secret
&& x.getMetadata()
.getName()
.equals(
KerberosMountDecorator
.getKerberosKeytabSecretName(
CLUSTER_ID)))
.collect(Collectors.toList())
.get(0);
final Map<String, String> resultDatas = resultSecret.getData();
assertThat(resultDatas).hasSize(1);
assertThat(resultDatas.get(KEYTAB_FILE))
.isEqualTo(Base64.getEncoder().encodeToString("some keytab".getBytes()));
} |
public void ready() {
sync.releaseShared(UNUSED);
} | @Test
public void testStartingGunInterruptions() throws InterruptedException {
StartingGun sg = new StartingGun();
Thread[] threads = startWaitingThreads(sg);
Thread.sleep(PAUSE);
allThreadsAlive(threads);
for (Thread thread : threads) thread.interrupt();
Thread.sleep(PAUSE);
sg.ready();
allThreadsDead(threads);
} |
@Override
public String getRootLoggerName() {
return ROOT_LOGGER_NAME;
} | @Test
public void getRootLoggerName_returns_rootLogger() {
assertThat(newLog4JPropertiesBuilder().getRootLoggerName()).isEqualTo("rootLogger");
} |
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
ParseContext context) throws IOException, SAXException, TikaException {
// Only outputting the MIME type as metadata
metadata.set(Metadata.CONTENT_TYPE, ENVI_MIME_TYPE);
// The following code was taken from the TXTParser
// Automatically detect the character encoding
try (AutoDetectReader reader = new AutoDetectReader(CloseShieldInputStream.wrap(stream),
metadata, getEncodingDetector(context))) {
Charset charset = reader.getCharset();
// deprecated, see TIKA-431
metadata.set(Metadata.CONTENT_ENCODING, charset.name());
xhtml = new XHTMLContentHandler(handler, metadata);
xhtml.startDocument();
readLines(reader, metadata);
xhtml.endDocument();
} catch (IOException | TikaException e) {
LOG.error("Error reading input data stream.", e);
}
} | @Test
public void testParseGlobalMetadata() throws Exception {
try (InputStream stream = EnviHeaderParser.class
.getResourceAsStream("/test-documents/envi_test_header.hdr")) {
assertNotNull(stream, "Test ENVI file 'envi_test_header.hdr' not found");
parser.parse(stream, handler, metadata, new ParseContext());
}
// Check content of test file
String content = handler.toString();
assertContains("<body><p>ENVI</p>", content);
assertContains("<p>samples = 2400</p>", content);
assertContains("<p>lines = 2400</p>", content);
assertContains(
"<p>map info = {Sinusoidal, 1.5000, 1.5000, -10007091.3643, " +
"5559289.2856, 4.6331271653e+02, 4.6331271653e+02, , units=Meters}</p>",
content);
assertContains("content=\"application/envi.hdr\"", content);
assertContains(
"projection info = {16, 6371007.2, 0.000000, 0.0, 0.0, Sinusoidal, units=Meters}",
content);
} |
public boolean processRow( StepMetaInterface smi, StepDataInterface sdi ) throws KettleException {
meta = (WriteToLogMeta) smi;
data = (WriteToLogData) sdi;
Object[] r = getRow(); // get row, set busy!
if ( r == null ) { // no more input to be expected...
setOutputDone();
return false;
}
// Limit hit? skip
if ( rowCounterLimitHit ) {
putRow( getInputRowMeta(), r ); // copy row to output
return true;
}
if ( first ) {
first = false;
if ( meta.getFieldName() != null && meta.getFieldName().length > 0 ) {
data.fieldnrs = new int[meta.getFieldName().length];
for ( int i = 0; i < data.fieldnrs.length; i++ ) {
data.fieldnrs[i] = getInputRowMeta().indexOfValue( meta.getFieldName()[i] );
if ( data.fieldnrs[i] < 0 ) {
logError( BaseMessages.getString( PKG, "WriteToLog.Log.CanNotFindField", meta.getFieldName()[i] ) );
throw new KettleException( BaseMessages.getString( PKG, "WriteToLog.Log.CanNotFindField", meta
.getFieldName()[i] ) );
}
}
} else {
data.fieldnrs = new int[getInputRowMeta().size()];
for ( int i = 0; i < data.fieldnrs.length; i++ ) {
data.fieldnrs[i] = i;
}
}
data.fieldnr = data.fieldnrs.length;
data.loglevel = meta.getLogLevelByDesc();
data.logmessage = Const.NVL( this.environmentSubstitute( meta.getLogMessage() ), "" );
if ( !Utils.isEmpty( data.logmessage ) ) {
data.logmessage += Const.CR + Const.CR;
}
} // end if first
// We don't need to calculate if step log level is lower than the run log level
if ( getLogLevel().getLevel() >= data.loglevel.getLevel() ) {
StringBuilder out = new StringBuilder();
out.append( Const.CR ).append( "------------> " )
.append( BaseMessages.getString(
PKG, "WriteToLog.Log.NLigne", "" + getLinesRead() ) )
.append( "------------------------------" )
.append( Const.CR );
out.append( getRealLogMessage() );
String[] fieldNames = {};
      // Obtaining the field name list is a heavy operation, so it was moved out of the loop.
// And, as it's only needed if the header is to be printed, I conditioned the calculation to that scenario.
if ( meta.isDisplayHeader() ) {
fieldNames = getInputRowMeta().getFieldNames();
}
// Loop through fields
for ( int i = 0; i < data.fieldnr; i++ ) {
String fieldValue = getInputRowMeta().getString( r, data.fieldnrs[ i ] );
if ( meta.isDisplayHeader() ) {
out.append( fieldNames[ data.fieldnrs[ i ] ] ).append( " = " );
}
out.append( fieldValue ).append( Const.CR );
}
out.append( Const.CR ).append( "====================" );
setLog( data.loglevel, out );
}
// Increment counter
if ( meta.isLimitRows() && ++rowCounter >= meta.getLimitRowsNumber() ) {
rowCounterLimitHit = true;
}
putRow( getInputRowMeta(), r ); // copy row to output
return true;
} | @Test
public void processRow_NullRow() throws Exception {
WriteToLog writeToLogSpy = spy( writeToLog );
doReturn( null ).when( writeToLogSpy ).getRow();
WriteToLogMeta meta = mock( WriteToLogMeta.class );
WriteToLogData data = mock( WriteToLogData.class );
assertFalse( writeToLogSpy.processRow( meta, data ) );
verify( writeToLogSpy, times( 0 ) ).getInputRowMeta();
verify( writeToLogSpy, times( 0 ) ).getLogLevel();
} |
static byte eightBitCharacter(final String asciiCharacter)
{
Verify.notNull(asciiCharacter, "asciiCharacter");
final byte[] bytes = asciiCharacter.getBytes(StandardCharsets.US_ASCII);
if (bytes.length != 1)
{
throw new IllegalArgumentException(
"String value `" + asciiCharacter + "` did not fit into a single 8-bit character");
}
return bytes[0];
} | @Test
void emptyParamToEightBitCharacterThrowsIAE()
{
assertThrows(IllegalArgumentException.class, () -> RustUtil.eightBitCharacter(""));
} |
@Override
public boolean addAggrConfigInfo(final String dataId, final String group, String tenant, final String datumId,
String appName, final String content) {
String appNameTmp = StringUtils.isBlank(appName) ? StringUtils.EMPTY : appName;
String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
final Timestamp now = new Timestamp(System.currentTimeMillis());
ConfigInfoAggrMapper configInfoAggrMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_AGGR);
String select = configInfoAggrMapper.select(Collections.singletonList("content"),
Arrays.asList("data_id", "group_id", "tenant_id", "datum_id"));
String insert = configInfoAggrMapper.insert(
Arrays.asList("data_id", "group_id", "tenant_id", "datum_id", "app_name", "content", "gmt_modified"));
String update = configInfoAggrMapper.update(Arrays.asList("content", "gmt_modified"),
Arrays.asList("data_id", "group_id", "tenant_id", "datum_id"));
try {
try {
String dbContent = jt.queryForObject(select, new Object[] {dataId, group, tenantTmp, datumId},
String.class);
if (dbContent != null && dbContent.equals(content)) {
return true;
} else {
return jt.update(update, content, now, dataId, group, tenantTmp, datumId) > 0;
}
} catch (EmptyResultDataAccessException ex) { // no data, insert
return jt.update(insert, dataId, group, tenantTmp, datumId, appNameTmp, content, now) > 0;
}
} catch (DataAccessException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e, e);
throw e;
}
} | @Test
void testAddAggrConfigInfoOfException() {
String dataId = "dataId111";
String group = "group";
String tenant = "tenant";
String datumId = "datumId";
String appName = "appname1234";
String content = "content1234";
        //mock query datumId and throw CannotGetJdbcConnectionException.
when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant, datumId}), eq(String.class))).thenThrow(
new CannotGetJdbcConnectionException("mock exp"));
try {
externalConfigInfoAggrPersistService.addAggrConfigInfo(dataId, group, tenant, datumId, appName, content);
assertTrue(false);
} catch (Exception exp) {
assertEquals("mock exp", exp.getMessage());
}
} |
public static String renderTemplate(String template, Map<String, Object> newContext)
throws IOException, ClassNotFoundException {
Map<String, Object> contextMap = getDefaultContextMap();
contextMap.putAll(newContext);
String templateRendered = GROOVY_TEMPLATE_ENGINE.createTemplate(template).make(contextMap).toString();
GROOVY_SHELL.resetLoadedClasses();
return templateRendered;
} | @Test
public void testRenderTemplateWithGivenContextMap()
throws IOException, ClassNotFoundException {
Map<String, Object> contextMap = new HashMap<>();
contextMap.put("first_date_2020", "2020-01-01");
contextMap.put("name", "xiang");
contextMap.put("ts", 1577836800);
contextMap.put("yyyy", "2020");
contextMap.put("YYYY", "1919");
contextMap.put("MM", "05");
contextMap.put("dd", "06");
Assert.assertEquals(GroovyTemplateUtils.renderTemplate("$first_date_2020", contextMap), "2020-01-01");
Assert.assertEquals(GroovyTemplateUtils.renderTemplate("${first_date_2020}", contextMap), "2020-01-01");
Assert.assertEquals(GroovyTemplateUtils.renderTemplate("${ name }", contextMap), "xiang");
Assert.assertEquals(GroovyTemplateUtils.renderTemplate("${ ts }", contextMap), "1577836800");
Assert.assertEquals(GroovyTemplateUtils.renderTemplate("/var/rawdata/${ yyyy }/${ MM }/${ dd }", contextMap),
"/var/rawdata/2020/05/06");
Assert.assertEquals(GroovyTemplateUtils.renderTemplate("/var/rawdata/${yyyy}/${MM}/${dd}", contextMap),
"/var/rawdata/2020/05/06");
Assert.assertEquals(GroovyTemplateUtils.renderTemplate("/var/rawdata/${YYYY}/${MM}/${dd}", contextMap),
"/var/rawdata/1919/05/06");
} |
public boolean addMetadataString(String rawMetadata) throws UnmarshallingException {
InputStream inputStream = new ByteArrayInputStream(rawMetadata.getBytes(UTF_8));
XMLObject metadata = super.unmarshallMetadata(inputStream);
if (!isValid(metadata)) {
return false;
}
if (metadata instanceof EntitiesDescriptor) {
this.entitiesDescriptor = (EntitiesDescriptor) metadata;
}
if (metadata instanceof EntityDescriptor) {
this.entityDescriptor = (EntityDescriptor) metadata;
}
return true;
} | @Test
public void addInvalidMetadataStringTest() throws UnmarshallingException {
assertFalse(stringMetadataResolver.addMetadataString(invalidMetadata));
} |
@Override
public ExecuteContext before(ExecuteContext context) {
RequestTag requestTag = ThreadLocalUtils.getRequestTag();
if (requestTag != null) {
requestTag.getTag().forEach((key, value) ->
RequestContext.getCurrentContext().addZuulRequestHeader(key, value.get(0)));
}
return context;
} | @Test
public void testBefore() {
// RequestTag is null
interceptor.before(null);
Assert.assertEquals(0, RequestContext.getCurrentContext().getZuulRequestHeaders().size());
        // RequestTag is not null
ThreadLocalUtils.addRequestTag(Collections.singletonMap("bar", Collections.singletonList("foo")));
interceptor.before(null);
Assert.assertEquals(1, RequestContext.getCurrentContext().getZuulRequestHeaders().size());
Assert.assertEquals("foo", RequestContext.getCurrentContext().getZuulRequestHeaders().get("bar"));
} |
public void showPerspective( final String perspectiveId ) {
changePerspectiveVisibility( perspectiveId, false );
} | @Test
public void showPerspective() {
SpoonPerspectiveManager.PerspectiveManager perspectiveManager = perspectiveManagerMap.get( perspective );
spoonPerspectiveManager.showPerspective( perspective.getId() );
verify( perspectiveManager ).setPerspectiveHidden( PERSPECTIVE_NAME, false );
} |
@Override
public void updateNextNode(int level, long node) {
Preconditions.checkArgument(
level >= 0 && level <= topLevel,
"invalid level " + level + " current top level is " + topLevel);
if (level == 0) {
nextNode = node;
} else {
levelIndex[level - 1] = node;
}
} | @Test
void testUpdateNextNode() {
// test update next node of level 0
int level = 0;
long node1 = random.nextLong(Long.MAX_VALUE);
heapHeadIndex.updateNextNode(level, node1);
assertThat(heapHeadIndex.getNextNode(level)).isEqualTo(node1);
// Increase one level and make sure everything still works
heapHeadIndex.updateLevel(++level);
long node2 = random.nextLong(Long.MAX_VALUE);
heapHeadIndex.updateNextNode(level, node2);
assertThat(heapHeadIndex.getNextNode(level)).isEqualTo(node2);
assertThat(heapHeadIndex.getNextNode(level - 1)).isEqualTo(node1);
} |
public boolean filter(Message message) {
if (!enabled) {
return false;
}
List<String> ipFields = getIpAddressFields(message);
for (String key : ipFields) {
Object fieldValue = message.getField(key);
final InetAddress address = getValidRoutableInetAddress(fieldValue);
if (address == null) {
continue;
}
// For reserved IPs just mark as reserved. Otherwise, enforce Graylog schema on only relevant IP fields
// or add legacy fields on all IP fields in the message if enforcement is disabled.
final String prefix = enforceGraylogSchema ? ipAddressFields.getOrDefault(key, key) : key;
if (ReservedIpChecker.getInstance().isReservedIpAddress(address.getHostAddress())) {
message.addField(prefix + "_reserved_ip", true);
} else if (enforceGraylogSchema) {
addGIMGeoIpDataIfPresent(message, address, prefix);
} else {
addLegacyGeoIpDataIfPresent(message, address, prefix);
}
}
return true;
} | @Test
public void testFilterIpInfo() {
when(ipInfoAsnResolver.isEnabled()).thenReturn(true);
when(ipInfoAsnResolver.getGeoIpData(publicIp)).thenReturn(Optional.of(ipInfoAsnInfo));
when(ipInfoCityResolver.isEnabled()).thenReturn(true);
when(ipInfoCityResolver.getGeoIpData(publicIp)).thenReturn(Optional.of(ipInfoLocationInfo));
when(geoIpVendorResolverService.createCityResolver(any(GeoIpResolverConfig.class), any(Timer.class)))
.thenReturn(ipInfoCityResolver);
when(geoIpVendorResolverService.createAsnResolver(any(GeoIpResolverConfig.class), any(Timer.class)))
.thenReturn(ipInfoAsnResolver);
GeoIpResolverConfig conf = config.toBuilder()
.databaseVendorType(DatabaseVendorType.IPINFO)
.build();
final GeoIpResolverEngine engine = new GeoIpResolverEngine(geoIpVendorResolverService, conf, s3GeoIpFileService, metricRegistry);
Map<String, Object> fields = new HashMap<>();
fields.put("_id", java.util.UUID.randomUUID().toString());
fields.put("source_ip", publicIp.getHostAddress());
Message message = messageFactory.createMessage(fields);
engine.filter(message);
String expectedGeoName = ipInfoLocationInfo.cityName() + ", " + ipInfoLocationInfo.countryIsoCode();
Assertions.assertEquals(expectedGeoName, message.getField("source_geo_name"));
Assertions.assertEquals(ipInfoLocationInfo.region(), message.getField("source_geo_region"));
Assertions.assertEquals(ipInfoLocationInfo.cityName(), message.getField("source_geo_city"));
Assertions.assertEquals(ipInfoLocationInfo.timeZone(), message.getField("source_geo_timezone"));
Assertions.assertFalse(message.hasField("source_geo_country"));
Assertions.assertEquals(ipInfoLocationInfo.countryIsoCode(), message.getField("source_geo_country_iso"));
Assertions.assertEquals(ipInfoAsnInfo.organization(), message.getField("source_as_organization"));
Assertions.assertEquals(ipInfoAsnInfo.asn(), message.getField("source_as_number"));
} |
@SuppressWarnings("unchecked")
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
NodeId nodeId = request.getNodeId();
String host = nodeId.getHost();
int cmPort = nodeId.getPort();
int httpPort = request.getHttpPort();
Resource capability = request.getResource();
String nodeManagerVersion = request.getNMVersion();
Resource physicalResource = request.getPhysicalResource();
NodeStatus nodeStatus = request.getNodeStatus();
RegisterNodeManagerResponse response = recordFactory
.newRecordInstance(RegisterNodeManagerResponse.class);
if (!minimumNodeManagerVersion.equals("NONE")) {
if (minimumNodeManagerVersion.equals("EqualToRM")) {
minimumNodeManagerVersion = YarnVersionInfo.getVersion();
}
if ((nodeManagerVersion == null) ||
(VersionUtil.compareVersions(nodeManagerVersion,minimumNodeManagerVersion)) < 0) {
String message =
"Disallowed NodeManager Version " + nodeManagerVersion
+ ", is less than the minimum version "
+ minimumNodeManagerVersion + " sending SHUTDOWN signal to "
+ "NodeManager.";
LOG.info(message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
return response;
}
}
if (checkIpHostnameInRegistration) {
InetSocketAddress nmAddress =
NetUtils.createSocketAddrForHost(host, cmPort);
InetAddress inetAddress = Server.getRemoteIp();
if (inetAddress != null && nmAddress.isUnresolved()) {
// Reject registration of unresolved nm to prevent resourcemanager
// getting stuck at allocations.
final String message =
"hostname cannot be resolved (ip=" + inetAddress.getHostAddress()
+ ", hostname=" + host + ")";
LOG.warn("Unresolved nodemanager registration: " + message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
return response;
}
}
// Check if this node is a 'valid' node
if (!this.nodesListManager.isValidNode(host) &&
!isNodeInDecommissioning(nodeId)) {
String message =
"Disallowed NodeManager from " + host
+ ", Sending SHUTDOWN signal to the NodeManager.";
LOG.info(message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
return response;
}
// check if node's capacity is load from dynamic-resources.xml
String nid = nodeId.toString();
Resource dynamicLoadCapability = loadNodeResourceFromDRConfiguration(nid);
if (dynamicLoadCapability != null) {
LOG.debug("Resource for node: {} is adjusted from: {} to: {} due to"
+ " settings in dynamic-resources.xml.", nid, capability,
dynamicLoadCapability);
capability = dynamicLoadCapability;
// sync back with new resource.
response.setResource(capability);
}
// Check if this node has minimum allocations
if (capability.getMemorySize() < minAllocMb
|| capability.getVirtualCores() < minAllocVcores) {
String message = "NodeManager from " + host
+ " doesn't satisfy minimum allocations, Sending SHUTDOWN"
+ " signal to the NodeManager. Node capabilities are " + capability
+ "; minimums are " + minAllocMb + "mb and " + minAllocVcores
+ " vcores";
LOG.info(message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
return response;
}
response.setContainerTokenMasterKey(containerTokenSecretManager
.getCurrentKey());
response.setNMTokenMasterKey(nmTokenSecretManager
.getCurrentKey());
RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort,
resolve(host), capability, nodeManagerVersion, physicalResource);
RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode);
if (oldNode == null) {
RMNodeStartedEvent startEvent = new RMNodeStartedEvent(nodeId,
request.getNMContainerStatuses(),
request.getRunningApplications(), nodeStatus);
if (request.getLogAggregationReportsForApps() != null
&& !request.getLogAggregationReportsForApps().isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found the number of previous cached log aggregation "
+ "status from nodemanager:" + nodeId + " is :"
+ request.getLogAggregationReportsForApps().size());
}
startEvent.setLogAggregationReportsForApps(request
.getLogAggregationReportsForApps());
}
this.rmContext.getDispatcher().getEventHandler().handle(
startEvent);
} else {
LOG.info("Reconnect from the node at: " + host);
this.nmLivelinessMonitor.unregister(nodeId);
if (CollectionUtils.isEmpty(request.getRunningApplications())
&& rmNode.getState() != NodeState.DECOMMISSIONING
&& rmNode.getHttpPort() != oldNode.getHttpPort()) {
// Reconnected node differs, so replace old node and start new node
switch (rmNode.getState()) {
case RUNNING:
ClusterMetrics.getMetrics().decrNumActiveNodes();
break;
case UNHEALTHY:
ClusterMetrics.getMetrics().decrNumUnhealthyNMs();
break;
default:
LOG.debug("Unexpected Rmnode state");
}
this.rmContext.getDispatcher().getEventHandler()
.handle(new NodeRemovedSchedulerEvent(rmNode));
this.rmContext.getRMNodes().put(nodeId, rmNode);
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMNodeStartedEvent(nodeId, null, null, nodeStatus));
} else {
// Reset heartbeat ID since node just restarted.
oldNode.resetLastNodeHeartBeatResponse();
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMNodeReconnectEvent(nodeId, rmNode,
request.getRunningApplications(),
request.getNMContainerStatuses()));
}
}
// On every node manager register we will be clearing NMToken keys if
// present for any running application.
this.nmTokenSecretManager.removeNodeKey(nodeId);
this.nmLivelinessMonitor.register(nodeId);
// Handle received container status, this should be processed after new
// RMNode inserted
if (!rmContext.isWorkPreservingRecoveryEnabled()) {
if (!request.getNMContainerStatuses().isEmpty()) {
LOG.info("received container statuses on node manager register :"
+ request.getNMContainerStatuses());
for (NMContainerStatus status : request.getNMContainerStatuses()) {
handleNMContainerStatus(status, nodeId);
}
}
}
// Update node's labels to RM's NodeLabelManager.
Set<String> nodeLabels = NodeLabelsUtils.convertToStringSet(
request.getNodeLabels());
if (isDistributedNodeLabelsConf && nodeLabels != null) {
try {
updateNodeLabelsFromNMReport(nodeLabels, nodeId);
response.setAreNodeLabelsAcceptedByRM(true);
} catch (IOException ex) {
// Ensure the exception is captured in the response
response.setDiagnosticsMessage(ex.getMessage());
response.setAreNodeLabelsAcceptedByRM(false);
}
} else if (isDelegatedCentralizedNodeLabelsConf) {
this.rmContext.getRMDelegatedNodeLabelsUpdater().updateNodeLabels(nodeId);
}
// Update node's attributes to RM's NodeAttributesManager.
if (request.getNodeAttributes() != null) {
try {
// update node attributes if necessary then update heartbeat response
updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
response.setAreNodeAttributesAcceptedByRM(true);
} catch (IOException ex) {
//ensure the error message is captured and sent across in response
String errorMsg = response.getDiagnosticsMessage() == null ?
ex.getMessage() :
response.getDiagnosticsMessage() + "\n" + ex.getMessage();
response.setDiagnosticsMessage(errorMsg);
response.setAreNodeAttributesAcceptedByRM(false);
}
}
StringBuilder message = new StringBuilder();
message.append("NodeManager from node ").append(host).append("(cmPort: ")
.append(cmPort).append(" httpPort: ");
message.append(httpPort).append(") ")
.append("registered with capability: ").append(capability);
message.append(", assigned nodeId ").append(nodeId);
if (response.getAreNodeLabelsAcceptedByRM()) {
message.append(", node labels { ").append(
StringUtils.join(",", nodeLabels) + " } ");
}
if (response.getAreNodeAttributesAcceptedByRM()) {
message.append(", node attributes { ")
.append(request.getNodeAttributes() + " } ");
}
LOG.info(message.toString());
response.setNodeAction(NodeAction.NORMAL);
response.setRMIdentifier(ResourceManager.getClusterTimeStamp());
response.setRMVersion(YarnVersionInfo.getVersion());
return response;
} | @Test
public void testNodeRegistrationWithInvalidLabels() throws Exception {
writeToHostsFile("host2");
Configuration conf = new Configuration();
conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,
hostFile.getAbsolutePath());
conf.set(YarnConfiguration.NODELABEL_CONFIGURATION_TYPE,
YarnConfiguration.DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE);
final RMNodeLabelsManager nodeLabelsMgr = new NullRMNodeLabelsManager();
rm = new MockRM(conf) {
@Override
protected RMNodeLabelsManager createNodeLabelManager() {
return nodeLabelsMgr;
}
};
rm.start();
try {
nodeLabelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("X", "Y", "Z"));
} catch (IOException e) {
Assert.fail("Caught Exception while initializing");
e.printStackTrace();
}
ResourceTrackerService resourceTrackerService =
rm.getResourceTrackerService();
RegisterNodeManagerRequest registerReq =
Records.newRecord(RegisterNodeManagerRequest.class);
NodeId nodeId = NodeId.newInstance("host2", 1234);
Resource capability = Resources.createResource(1024);
registerReq.setResource(capability);
registerReq.setNodeId(nodeId);
registerReq.setHttpPort(1234);
registerReq.setNMVersion(YarnVersionInfo.getVersion());
registerReq.setNodeLabels(toNodeLabelSet("A", "B", "C"));
RegisterNodeManagerResponse response =
resourceTrackerService.registerNodeManager(registerReq);
Assert.assertEquals(
"On Invalid Node Labels action is expected to be normal",
NodeAction.NORMAL, response.getNodeAction());
Assert.assertNull(nodeLabelsMgr.getNodeLabels().get(nodeId));
Assert.assertNotNull(response.getDiagnosticsMessage());
Assert.assertFalse("Node Labels should not accepted by RM If Invalid",
response.getAreNodeLabelsAcceptedByRM());
if (rm != null) {
rm.stop();
}
} |
@Override
public void error(String code, String cause, String extendedInformation, String msg) {
if (getDisabled()) {
return;
}
try {
getLogger().error(appendContextMessageWithInstructions(code, cause, extendedInformation, msg));
} catch (Throwable t) {
// ignored.
}
} | @Test
void testInstructionShownOrNot() {
LoggerFactory.setLoggerAdapter(FrameworkModel.defaultModel(), "jdk");
ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(FailsafeErrorTypeAwareLoggerTest.class);
logger.error(
REGISTRY_ADDRESS_INVALID,
"Registry center",
"May be it's offline.",
"error message",
new Exception("error"));
logger.error(
REGISTRY_ADDRESS_INVALID,
"Registry center",
"May be it's offline.",
"error message",
new Exception("error"));
} |
@Override
public CompletableFuture<SchemaVersion> putSchemaIfAbsent(String schemaId,
SchemaData schema,
SchemaCompatibilityStrategy strategy) {
try {
SchemaDataValidator.validateSchemaData(schema);
} catch (InvalidSchemaDataException e) {
return FutureUtil.failedFuture(e);
}
return service.putSchemaIfAbsent(schemaId, schema, strategy);
} | @Test
public void testPutSchemaIfAbsentWithGoodSchemaData() {
String schemaId = "test-schema-id";
SchemaCompatibilityStrategy strategy = SchemaCompatibilityStrategy.FULL;
CompletableFuture<SchemaVersion> future = new CompletableFuture<>();
when(underlyingService.putSchemaIfAbsent(eq(schemaId), any(SchemaData.class), eq(strategy)))
.thenReturn(future);
SchemaData schemaData = SchemaData.builder()
.type(SchemaType.BOOLEAN)
.data(new byte[0])
.build();
assertSame(future, service.putSchemaIfAbsent(schemaId, schemaData, strategy));
verify(underlyingService, times(1))
.putSchemaIfAbsent(eq(schemaId), same(schemaData), eq(strategy));
} |
public void setMinimalWidth() {
int nrNonEmptyFields = wFields.nrNonEmpty();
for ( int i = 0; i < nrNonEmptyFields; i++ ) {
TableItem item = wFields.getNonEmpty( i );
item.setText( 5, "" );
item.setText( 6, "" );
item.setText( 12, ValueMetaString.getTrimTypeDesc( ValueMetaInterface.TRIM_TYPE_BOTH ) );
int type = ValueMetaFactory.getIdForValueMeta( item.getText( 2 ) );
switch ( type ) {
case ValueMetaInterface.TYPE_STRING:
item.setText( 3, "" );
break;
case ValueMetaInterface.TYPE_INTEGER:
item.setText( 3, "0" );
break;
case ValueMetaInterface.TYPE_NUMBER:
item.setText( 3, "0.#####" );
break;
case ValueMetaInterface.TYPE_DATE:
break;
default:
break;
}
}
for ( int i = 0; i < input.inputFields.length; i++ ) {
input.inputFields[i].setTrimType( ValueMetaInterface.TRIM_TYPE_BOTH );
}
wFields.optWidth( true );
} | @Test
public void testMinimalWidth_PDI_14253() throws Exception {
final String virtualFile = "ram://pdi-14253.txt";
KettleVFS.getFileObject( virtualFile ).createFile();
final String content = "r1c1, r1c2\nr2c1 , r2c2 ";
ByteArrayOutputStream bos = new ByteArrayOutputStream();
bos.write( content.getBytes() );
OutputStream os = KettleVFS.getFileObject( virtualFile ).getContent().getOutputStream();
IOUtils.copy( new ByteArrayInputStream( bos.toByteArray() ), os );
os.close();
TextFileInputMeta meta = new TextFileInputMeta();
meta.content.lineWrapped = false;
meta.inputFields = new BaseFileField[]{
new BaseFileField( "col1", -1, -1 ),
new BaseFileField( "col2", -1, -1 )
};
meta.content.fileCompression = "None";
meta.content.fileType = "CSV";
meta.content.header = false;
meta.content.nrHeaderLines = -1;
meta.content.footer = false;
meta.content.nrFooterLines = -1;
TextFileInputData data = new TextFileInputData();
data.files = new FileInputList();
data.files.addFile( KettleVFS.getFileObject( virtualFile ) );
data.outputRowMeta = new RowMeta();
data.outputRowMeta.addValueMeta( new ValueMetaString( "col1" ) );
data.outputRowMeta.addValueMeta( new ValueMetaString( "col2" ) );
data.dataErrorLineHandler = mock( FileErrorHandler.class );
data.fileFormatType = TextFileInputMeta.FILE_FORMAT_UNIX;
data.separator = ",";
data.filterProcessor = new TextFileFilterProcessor( new TextFileFilter[0], new Variables() { } );
data.filePlayList = new FilePlayListAll();
TextFileInputDialog dialog =
new TextFileInputDialog( mock( Shell.class ), meta, mock( TransMeta.class ), "TFIMinimalWidthTest" );
TableView tv = mock( TableView.class );
when( tv.nrNonEmpty() ).thenReturn( 0 );
// click the Minimal width button
dialog.setMinimalWidth( tv );
RowSet output = new BlockingRowSet( 5 );
TextFileInput input = StepMockUtil.getStep( TextFileInput.class, TextFileInputMeta.class, "test" );
input.setOutputRowSets( Collections.singletonList( output ) );
while ( input.processRow( meta, data ) ) {
// wait until the step completes executing
}
Object[] row1 = output.getRowImmediate();
assertRow( row1, "r1c1", "r1c2" );
Object[] row2 = output.getRowImmediate();
assertRow( row2, "r2c1", "r2c2" );
KettleVFS.getFileObject( virtualFile ).delete();
} |
@Override
public TimestampedKeyValueStore<K, V> build() {
KeyValueStore<Bytes, byte[]> store = storeSupplier.get();
if (!(store instanceof TimestampedBytesStore)) {
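            // Adapt plain (non-timestamped) byte stores so callers always read and write timestamped values.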
if (store.persistent()) {
store = new KeyValueToTimestampedKeyValueByteStoreAdapter(store);
} else {
store = new InMemoryTimestampedKeyValueStoreMarker(store);
}
}
return new MeteredTimestampedKeyValueStore<>(
maybeWrapCaching(maybeWrapLogging(store)),
storeSupplier.metricsScope(),
time,
keySerde,
valueSerde);
} | @Test
public void shouldNotHaveChangeLoggingStoreWhenDisabled() {
setUp();
final TimestampedKeyValueStore<String, String> store = builder.withLoggingDisabled().build();
final StateStore next = ((WrappedStateStore) store).wrapped();
assertThat(next, CoreMatchers.equalTo(inner));
} |
@Nonnull
@SuppressFBWarnings(value = "RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE", justification =
"False positive on try-with-resources as of JDK11")
public static Resources resourcesOf(String... packages) {
String[] paths = stream(packages).map(ReflectionUtils::toPath).toArray(String[]::new);
ClassGraph classGraph = new ClassGraph()
.whitelistPackages(packages)
.whitelistPaths(paths)
.ignoreClassVisibility();
try (ScanResult scanResult = classGraph.scan()) {
Collection<ClassResource> classes =
scanResult.getAllClasses().stream().map(ClassResource::new).collect(toList());
Collection<URL> nonClasses = scanResult.getAllResources().nonClassFilesOnly().getURLs();
return new Resources(classes, nonClasses);
}
} | @Test
public void when_resourcesOf_then_returnsAllResources() throws ClassNotFoundException {
// When
Resources resources = ReflectionUtils.resourcesOf(OuterClass.class.getPackage().getName());
// Then
Collection<ClassResource> classes = resources.classes().collect(toList());
assertThat(classes).hasSizeGreaterThan(5);
assertThat(classes).contains(classResource(OuterClass.class));
assertThat(classes).contains(classResource(OuterClass.NestedClass.class));
assertThat(classes).contains(classResource(OuterClass.NestedClass.DeeplyNestedClass.class));
assertThat(classes).contains(classResource(Class.forName(OuterClass.class.getName() + "$1")));
assertThat(classes).contains(
classResource(Class.forName(OuterClass.NestedClass.DeeplyNestedClass.class.getName() + "$1"))
);
List<URL> nonClasses = resources.nonClasses().collect(toList());
assertThat(nonClasses).hasSize(5);
assertThat(nonClasses).map(v -> substringAfterLast(v.toString(), "/"))
.contains("file.json")
.contains("file_list.json")
.contains("file_pretty_printed.json")
.contains("file_list_pretty_printed.json")
.contains("package.properties");
} |
@Override
public void close() {
try {
LOG.debug("Closing JMS connection.");
session.close();
connection.close();
} catch (JMSException e) {
LOG.warn("Error closing JMS connection.", e);
}
} | @Test
public void testSerializability() throws IOException {
JmsSpout spout = new JmsSpout();
ByteArrayOutputStream out = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(out);
oos.writeObject(spout);
oos.close();
assertTrue(out.toByteArray().length > 0);
} |
@Override
public void saveProperty(DbSession session, PropertyDto property, @Nullable String userLogin,
@Nullable String projectKey, @Nullable String projectName, @Nullable String qualifier) {
// do nothing
} | @Test
public void saveProperty() {
underTest.saveProperty(oldPropertyDto);
assertNoInteraction();
} |
public LocalSession getLocalSession(long sessionId) {
return localSessionCache.get(sessionId);
} | @Test
public void require_that_local_sessions_belong_to_a_tenant() throws Exception {
setup();
// tenant is "default"
long firstSessionId = deploy();
long secondSessionId = deploy();
assertNotNull(sessionRepository.getLocalSession(firstSessionId));
assertNotNull(sessionRepository.getLocalSession(secondSessionId));
assertNull(sessionRepository.getLocalSession(secondSessionId + 1));
// tenant is "newTenant"
TenantName newTenant = TenantName.from("newTenant");
tenantRepository.addTenant(newTenant);
long sessionId = deploy(ApplicationId.from(newTenant.value(), "testapp", "default"), appJdiscOnly);
SessionRepository sessionRepository2 = tenantRepository.getTenant(newTenant).getSessionRepository();
assertNotNull(sessionRepository2.getLocalSession(sessionId));
} |
public static PTransformMatcher stateOrTimerParDo() {
return new PTransformMatcher() {
@Override
public boolean matches(AppliedPTransform<?, ?, ?> application) {
if (PTransformTranslation.PAR_DO_TRANSFORM_URN.equals(
PTransformTranslation.urnForTransformOrNull(application.getTransform()))) {
try {
return ParDoTranslation.usesStateOrTimers(application);
} catch (IOException e) {
throw new RuntimeException(
String.format(
"Transform with URN %s could not be translated",
PTransformTranslation.PAR_DO_TRANSFORM_URN),
e);
}
}
return false;
}
@Override
public String toString() {
return MoreObjects.toStringHelper("StateOrTimerParDoMatcher").toString();
}
};
} | @Test
public void parDoWithState() {
AppliedPTransform<?, ?, ?> statefulApplication =
getAppliedTransform(
ParDo.of(doFnWithState).withOutputTags(new TupleTag<>(), TupleTagList.empty()));
assertThat(PTransformMatchers.stateOrTimerParDo().matches(statefulApplication), is(true));
AppliedPTransform<?, ?, ?> splittableApplication =
getAppliedTransform(
ParDo.of(splittableDoFn).withOutputTags(new TupleTag<>(), TupleTagList.empty()));
assertThat(PTransformMatchers.stateOrTimerParDo().matches(splittableApplication), is(false));
} |
static List<InetAddress> filterPreferredAddresses(InetAddress[] allAddresses) {
List<InetAddress> preferredAddresses = new ArrayList<>();
Class<? extends InetAddress> clazz = null;
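// Keep only addresses of the same type (IPv4 or IPv6) as the first address returned.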
for (InetAddress address : allAddresses) {
if (clazz == null) {
clazz = address.getClass();
}
if (clazz.isInstance(address)) {
preferredAddresses.add(address);
}
}
return preferredAddresses;
} | @Test
public void testFilterPreferredAddresses() throws UnknownHostException {
InetAddress ipv4 = InetAddress.getByName("192.0.0.1");
InetAddress ipv6 = InetAddress.getByName("::1");
InetAddress[] ipv4First = new InetAddress[]{ipv4, ipv6, ipv4};
List<InetAddress> result = ClientUtils.filterPreferredAddresses(ipv4First);
assertTrue(result.contains(ipv4));
assertFalse(result.contains(ipv6));
assertEquals(2, result.size());
InetAddress[] ipv6First = new InetAddress[]{ipv6, ipv4, ipv4};
result = ClientUtils.filterPreferredAddresses(ipv6First);
assertTrue(result.contains(ipv6));
assertFalse(result.contains(ipv4));
assertEquals(1, result.size());
} |
public boolean overlaps(final BoundingBox pBoundingBox, double pZoom) {
//FIXME this is a total hack but it works around a number of issues related to vertical map
//replication and horizontal replication that can cause polygons to completely disappear when
//panning
if (pZoom < 3)
return true;
boolean latMatch = false;
boolean lonMatch = false;
//vertical wrapping detection
if (pBoundingBox.mLatSouth <= mLatNorth &&
pBoundingBox.mLatSouth >= mLatSouth)
latMatch = true;
//normal case, non overlapping
if (mLonWest >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
lonMatch = true;
//normal case, non overlapping
if (mLonEast >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
lonMatch = true;
//special case for when *this completely surrounds the pBoundingBox
if (mLonWest <= pBoundingBox.mLonWest &&
mLonEast >= pBoundingBox.mLonEast &&
mLatNorth >= pBoundingBox.mLatNorth &&
mLatSouth <= pBoundingBox.mLatSouth)
return true;
//normal case, non overlapping
if (mLatNorth >= pBoundingBox.mLatSouth && mLatNorth <= mLatSouth)
latMatch = true;
//normal case, non overlapping
if (mLatSouth >= pBoundingBox.mLatSouth && mLatSouth <= mLatSouth)
latMatch = true;
if (mLonWest > mLonEast) {
//the date line is included in the bounding box
//we want to match lon from the dateline to the eastern bounds of the box
//and the dateline to the western bounds of the box
if (mLonEast <= pBoundingBox.mLonEast && pBoundingBox.mLonWest >= mLonWest)
lonMatch = true;
if (mLonWest >= pBoundingBox.mLonEast &&
mLonEast <= pBoundingBox.mLonEast) {
lonMatch = true;
if (pBoundingBox.mLonEast < mLonWest &&
pBoundingBox.mLonWest < mLonWest)
lonMatch = false;
if (pBoundingBox.mLonEast > mLonEast &&
pBoundingBox.mLonWest > mLonEast)
lonMatch = false;
}
if (mLonWest >= pBoundingBox.mLonEast &&
mLonEast >= pBoundingBox.mLonEast) {
lonMatch = true;
}
/*
//that is completely within this
if (mLonWest>= pBoundingBox.mLonEast &&
mLonEast<= pBoundingBox.mLonEast) {
lonMatch = true;
if (pBoundingBox.mLonEast < mLonWest &&
pBoundingBox.mLonWest < mLonWest)
lonMatch = false;
if (pBoundingBox.mLonEast > mLonEast &&
pBoundingBox.mLonWest > mLonEast )
lonMatch = false;
}
if (mLonWest>= pBoundingBox.mLonEast &&
mLonEast>= pBoundingBox.mLonEast) {
lonMatch = true;
}*/
}
return latMatch && lonMatch;
} | @Test
public void testOverlapsDateLine() {
// ________________
// | | |
// |** | **|
// |-*----+-----*-|
// |** | **|
// | | |
// ----------------
//box is notated as *
//test area is notated as ?
BoundingBox box = new BoundingBox(45, -178, -45, 178);
Assert.assertTrue(box.overlaps(box, 4));
// ________________
// | | |
// |** | ?? **|
// |-*----+-----*-|
// |** | **|
// | | |
// ----------------
//box is notated as *
//test area is notated as ?
BoundingBox farAway = new BoundingBox(45, 45, 44, 44);
Assert.assertFalse(box.overlaps(farAway, 4));
// ________________
// | | |
// |** | ? **|
// |-*----+-----*-|
// |** | **|
// | | |
// ----------------
//box is notated as *
//test area is notated as ?
farAway = new BoundingBox(1, 45, 1, 44);
Assert.assertFalse(box.overlaps(farAway, 4));
// ________________
// | | |
// |** ?|? **|
// |-*---?+?----*-|
// |** ?|? **|
// | | |
// ----------------
//box is notated as *
//test area is notated as ?
farAway = new BoundingBox(2, 2, -2, -2);
Assert.assertFalse(box.overlaps(farAway, 4));
farAway = new BoundingBox(0.5, 0.5, -0.5, -0.5);
Assert.assertFalse(box.overlaps(farAway, 4));
farAway = new BoundingBox(1, -179, -1, 179);
Assert.assertTrue(box.overlaps(farAway, 4));
} |
public static String packageNameOfResource(String classpathResourceName) {
Path parent = Paths.get(classpathResourceName).getParent();
if (parent == null) {
return DEFAULT_PACKAGE_NAME;
}
String pathSeparator = parent.getFileSystem().getSeparator();
return parent.toString().replace(pathSeparator, PACKAGE_SEPARATOR_STRING);
} | @Test
void packageNameOfResource() {
String packageName = ClasspathSupport.packageNameOfResource("com/example/app.feature");
assertEquals("com.example", packageName);
} |
public static String generateDatabaseId(String baseString) {
checkArgument(baseString.length() != 0, "baseString cannot be empty!");
String databaseId =
generateResourceId(
baseString,
ILLEGAL_DATABASE_CHARS,
REPLACE_DATABASE_CHAR,
MAX_DATABASE_ID_LENGTH,
DATABASE_TIME_FORMAT);
// hyphens were replaced with underscores (so no backticks are needed); trim trailing underscores
String trimmed = CharMatcher.is('_').trimTrailingFrom(databaseId);
checkArgument(
trimmed.length() > 0,
"Database id is empty after removing illegal characters and trailing underscores");
// if first char is not a letter, replace with a padding letter, so it doesn't
// violate spanner's database naming rules
char padding = generatePadding();
if (!Character.isLetter(trimmed.charAt(0))) {
trimmed = padding + trimmed.substring(1);
}
return trimmed;
} | @Test
public void testGenerateDatabaseIdShouldReplaceDotWithUnderscore() {
String testBaseString = "test.database";
String actual = generateDatabaseId(testBaseString);
assertThat(actual).matches("test_da_\\d{8}_\\d{6}_\\d{6}");
} |
@VisibleForTesting
Schema convertSchema(org.apache.avro.Schema schema) {
return Schema.of(getFields(schema));
} | @Test
void convertSchema_lists() {
Field intListField = Field.newBuilder("intList", StandardSQLTypeName.INT64).setMode(Field.Mode.REPEATED).build();
Field requiredDoubleField = Field.newBuilder("requiredDouble", StandardSQLTypeName.FLOAT64)
.setMode(Field.Mode.REQUIRED)
.build();
Field optionalStringField = Field.newBuilder("optionalString", StandardSQLTypeName.STRING)
.setMode(Field.Mode.NULLABLE)
.build();
Field recordListField = Field.newBuilder("recordList", StandardSQLTypeName.STRUCT,
requiredDoubleField, optionalStringField).setMode(Field.Mode.REPEATED).build();
com.google.cloud.bigquery.Schema expected =
com.google.cloud.bigquery.Schema.of(intListField, recordListField);
Assertions.assertEquals(expected, SCHEMA_RESOLVER.convertSchema(LISTS));
} |
@Override
public Collection<String> split(String text) {
return Arrays.asList(text.split(splitPattern));
} | @Test
public void testSplit() {
Collection<String> split = patternSplitStrategy.split("hello" + PATTERN + "world");
assertEquals(2, split.size());
assertEquals("world", new ArrayList<>(split).get(1));
} |
public void appendTimestampToResponse(OutputStream outputStream) throws IOException {
AbstractMessageLite messageLite = protobufObjectGenerator.generateTimestampMessage(system2.now());
messageLite.writeDelimitedTo(outputStream);
} | @Test
public void appendTimestampToResponse_outputStreamIsCalledAtLeastOnce() throws IOException {
OutputStream outputStream = mock(OutputStream.class);
underTest.appendTimestampToResponse(outputStream);
verify(outputStream, atLeastOnce()).write(any(byte[].class), anyInt(), anyInt());
} |
public synchronized void createInstance(List<BigtableResourceManagerCluster> clusters)
throws BigtableResourceManagerException {
// Check to see if instance already exists, and throw error if it does
if (hasInstance) {
LOG.warn(
"Skipping instance creation. Instance was already created or static instance was passed. Reusing : {}.",
instanceId);
return;
}
LOG.info("Creating instance {} in project {}.", instanceId, projectId);
// Create instance request object and add all the given clusters to the request
CreateInstanceRequest request = CreateInstanceRequest.of(instanceId);
for (BigtableResourceManagerCluster cluster : clusters) {
request.addCluster(
cluster.clusterId(), cluster.zone(), cluster.numNodes(), cluster.storageType());
}
// Send the instance request to Google Cloud
try (BigtableInstanceAdminClient instanceAdminClient =
bigtableResourceManagerClientFactory.bigtableInstanceAdminClient()) {
instanceAdminClient.createInstance(request);
} catch (Exception e) {
throw new BigtableResourceManagerException(
"Failed to create instance " + instanceId + ".", e);
}
hasInstance = true;
this.clusters = clusters;
LOG.info("Successfully created instance {}.", instanceId);
} | @Test
public void testCreateInstanceShouldWorkWhenBigtableDoesNotThrowAnyError() {
testManager.createInstance(cluster);
verify(bigtableResourceManagerClientFactory.bigtableInstanceAdminClient())
.createInstance(any());
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof PiMatchKey)) {
return false;
}
PiMatchKey that = (PiMatchKey) o;
return Objects.equal(fieldMatches, that.fieldMatches);
} | @Test
public void testEquals() {
new EqualsTester()
.addEqualityGroup(piMatchKey1, sameAsPiMatchKey1)
.addEqualityGroup(PiMatchKey.EMPTY, PiMatchKey.EMPTY)
.addEqualityGroup(piMatchKey2)
.testEquals();
} |
public byte[] getTail() {
int size = (int) Math.min(tailSize, bytesRead);
byte[] result = new byte[size];
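// The tail buffer is circular: copy from the current index to the end of the buffer,
// then wrap around and copy the remainder from the start.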
System.arraycopy(tailBuffer, currentIndex, result, 0, size - currentIndex);
System.arraycopy(tailBuffer, 0, result, size - currentIndex, currentIndex);
return result;
} | @Test
public void testTailSingleByteReads() throws IOException {
final int count = 128;
TailStream stream = new TailStream(generateStream(0, 2 * count), count);
readStream(stream);
assertEquals(generateText(count, count), new String(stream.getTail(), UTF_8),
"Wrong buffer");
} |
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
final P4PacketMetadataModel other = (P4PacketMetadataModel) obj;
return Objects.equals(this.id, other.id)
&& Objects.equals(this.bitWidth, other.bitWidth);
} | @Test
public void testEquals() {
new EqualsTester()
.addEqualityGroup(metadataModel, sameAsMetadataModel)
.addEqualityGroup(metadataModel2)
.addEqualityGroup(metadataModel3)
.testEquals();
} |
@ScalarFunction
@Description("Calculates the great-circle distance between two points on the Earth's surface in kilometers")
@SqlType(DOUBLE)
public static double greatCircleDistance(
@SqlType(DOUBLE) double latitude1,
@SqlType(DOUBLE) double longitude1,
@SqlType(DOUBLE) double latitude2,
@SqlType(DOUBLE) double longitude2)
{
return SphericalGeographyUtils.greatCircleDistance(latitude1, longitude1, latitude2, longitude2);
} | @Test
public void testGreatCircleDistance()
{
assertFunctionWithError("great_circle_distance(36.12, -86.67, 33.94, -118.40)", DOUBLE, 2886.448973436703);
assertFunctionWithError("great_circle_distance(33.94, -118.40, 36.12, -86.67)", DOUBLE, 2886.448973436703);
assertFunctionWithError("great_circle_distance(42.3601, -71.0589, 42.4430, -71.2290)", DOUBLE, 16.73469743457461);
assertFunctionWithError("great_circle_distance(36.12, -86.67, 36.12, -86.67)", DOUBLE, 0.0);
assertInvalidFunction("great_circle_distance(100, 20, 30, 40)", "Latitude must be between -90 and 90");
assertInvalidFunction("great_circle_distance(10, 20, 300, 40)", "Latitude must be between -90 and 90");
assertInvalidFunction("great_circle_distance(10, 200, 30, 40)", "Longitude must be between -180 and 180");
assertInvalidFunction("great_circle_distance(10, 20, 30, 400)", "Longitude must be between -180 and 180");
assertInvalidFunction("great_circle_distance(nan(), -86.67, 33.94, -118.40)", "Latitude must be between -90 and 90");
assertInvalidFunction("great_circle_distance(infinity(), -86.67, 33.94, -118.40)", "Latitude must be between -90 and 90");
assertInvalidFunction("great_circle_distance(36.12, nan(), 33.94, -118.40)", "Longitude must be between -180 and 180");
assertInvalidFunction("great_circle_distance(36.12, infinity(), 33.94, -118.40)", "Longitude must be between -180 and 180");
assertInvalidFunction("great_circle_distance(36.12, -86.67, nan(), -118.40)", "Latitude must be between -90 and 90");
assertInvalidFunction("great_circle_distance(36.12, -86.67, infinity(), -118.40)", "Latitude must be between -90 and 90");
assertInvalidFunction("great_circle_distance(36.12, -86.67, 33.94, nan())", "Longitude must be between -180 and 180");
assertInvalidFunction("great_circle_distance(36.12, -86.67, 33.94, infinity())", "Longitude must be between -180 and 180");
} |