focal_method | test_case
---|---
@Override
public void transform(Message message, DataType fromType, DataType toType) {
AvroSchema schema = message.getExchange().getProperty(SchemaHelper.CONTENT_SCHEMA, AvroSchema.class);
if (schema == null) {
throw new CamelExecutionException("Missing proper avro schema for data type processing", message.getExchange());
}
try {
byte[] marshalled;
String contentClass = SchemaHelper.resolveContentClass(message.getExchange(), null);
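// With an explicit content class the body is marshalled as that type; otherwise it is first converted to a JsonNode.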
if (contentClass != null) {
Class<?> contentType
= message.getExchange().getContext().getClassResolver().resolveMandatoryClass(contentClass);
marshalled = Avro.mapper().writer().forType(contentType).with(schema)
.writeValueAsBytes(message.getBody());
} else {
marshalled = Avro.mapper().writer().forType(JsonNode.class).with(schema)
.writeValueAsBytes(getBodyAsJsonNode(message, schema));
}
message.setBody(marshalled);
message.setHeader(Exchange.CONTENT_TYPE, MimeType.AVRO_BINARY.type());
message.setHeader(SchemaHelper.CONTENT_SCHEMA, schema.getAvroSchema().getFullName());
} catch (InvalidPayloadException | IOException | ClassNotFoundException e) {
throw new CamelExecutionException("Failed to apply Avro binary data type on exchange", message.getExchange(), e);
}
} | @Test
void shouldHandleExplicitContentClass() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
AvroSchema avroSchema = getSchema();
exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, avroSchema);
exchange.setProperty(SchemaHelper.CONTENT_CLASS, Person.class.getName());
exchange.getMessage().setBody(new Person("Donald", 19));
transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);
JSONAssert.assertEquals("""
{"name":"Donald","age":19}
""", Json.mapper().writeValueAsString(
Avro.mapper().reader().with(avroSchema).readTree(exchange.getMessage().getBody(byte[].class))), true);
} |
public boolean removeNode(String key, String group) {
return zkClient.removeConfig(key, group);
} | @Test
public void testRemoveNode() {
int index = CHILD_ONE_PATH.lastIndexOf("/");
String parentPath = CHILD_ONE_PATH.substring(0, index);
String key = CHILD_ONE_PATH.substring(index + 1);
boolean deleteChildOne = zooKeeperBufferedClient.removeNode(key, parentPath);
Assert.assertTrue(deleteChildOne);
boolean deleteChildTwo = zooKeeperBufferedClient.removeNode(CHILE_TWO_PATh_KEY, CHILE_TWO_PATh_PARENT);
Assert.assertTrue(deleteChildTwo);
boolean deleteParentNode = zooKeeperBufferedClient.removeNode(PARENT_PATH, null);
Assert.assertTrue(deleteParentNode);
} |
@VisibleForTesting
static Estimate calculateDataSize(String column, Collection<PartitionStatistics> partitionStatistics, double totalRowCount)
{
List<PartitionStatistics> statisticsWithKnownRowCountAndDataSize = partitionStatistics.stream()
.filter(statistics -> {
if (!statistics.getBasicStatistics().getRowCount().isPresent()) {
return false;
}
HiveColumnStatistics columnStatistics = statistics.getColumnStatistics().get(column);
if (columnStatistics == null) {
return false;
}
return columnStatistics.getTotalSizeInBytes().isPresent();
})
.collect(toImmutableList());
if (statisticsWithKnownRowCountAndDataSize.isEmpty()) {
return Estimate.unknown();
}
long knownRowCount = 0;
long knownDataSize = 0;
for (PartitionStatistics statistics : statisticsWithKnownRowCountAndDataSize) {
long rowCount = statistics.getBasicStatistics().getRowCount().orElseThrow(() -> new VerifyException("rowCount is not present"));
verify(rowCount >= 0, "rowCount must be greater than or equal to zero");
HiveColumnStatistics columnStatistics = statistics.getColumnStatistics().get(column);
verify(columnStatistics != null, "columnStatistics is null");
long dataSize = columnStatistics.getTotalSizeInBytes().orElseThrow(() -> new VerifyException("totalSizeInBytes is not present"));
verify(dataSize >= 0, "dataSize must be greater than or equal to zero");
knownRowCount += rowCount;
knownDataSize += dataSize;
}
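// Check totalRowCount first: an empty table has data size zero even when the per-partition statistics are unusable.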
if (totalRowCount == 0) {
return Estimate.zero();
}
if (knownRowCount == 0) {
return Estimate.unknown();
}
double averageValueDataSizeInBytes = ((double) knownDataSize) / knownRowCount;
return Estimate.of(averageValueDataSizeInBytes * totalRowCount);
} | @Test
public void testCalculateDataSize()
{
assertEquals(calculateDataSize(COLUMN, ImmutableList.of(), 0), Estimate.unknown());
assertEquals(calculateDataSize(COLUMN, ImmutableList.of(), 1000), Estimate.unknown());
assertEquals(calculateDataSize(COLUMN, ImmutableList.of(PartitionStatistics.empty()), 1000), Estimate.unknown());
assertEquals(calculateDataSize(COLUMN, ImmutableList.of(rowsCount(1000)), 1000), Estimate.unknown());
assertEquals(calculateDataSize(COLUMN, ImmutableList.of(dataSize(1000)), 1000), Estimate.unknown());
assertEquals(calculateDataSize(COLUMN, ImmutableList.of(dataSize(1000), rowsCount(1000)), 1000), Estimate.unknown());
assertEquals(calculateDataSize(COLUMN, ImmutableList.of(rowsCountAndDataSize(500, 1000)), 2000), Estimate.of(4000));
assertEquals(calculateDataSize(COLUMN, ImmutableList.of(rowsCountAndDataSize(0, 0)), 2000), Estimate.unknown());
assertEquals(calculateDataSize(COLUMN, ImmutableList.of(rowsCountAndDataSize(0, 0)), 0), Estimate.zero());
assertEquals(calculateDataSize(COLUMN, ImmutableList.of(rowsCountAndDataSize(1000, 0)), 2000), Estimate.of(0));
assertEquals(
calculateDataSize(
COLUMN,
ImmutableList.of(
rowsCountAndDataSize(500, 1000),
rowsCountAndDataSize(1000, 5000)),
5000),
Estimate.of(20000));
assertEquals(
calculateDataSize(
COLUMN,
ImmutableList.of(
dataSize(1000),
rowsCountAndDataSize(500, 1000),
rowsCount(3000),
rowsCountAndDataSize(1000, 5000)),
5000),
Estimate.of(20000));
} |
@Override
public void doRegister(URL url) {
try {
checkDestroyed();
zkClient.create(toUrlPath(url), url.getParameter(DYNAMIC_KEY, true), true);
} catch (Throwable e) {
throw new RpcException(
"Failed to register " + url + " to zookeeper " + getUrl() + ", cause: " + e.getMessage(), e);
}
} | @Test
void testDoRegisterWithException() {
Assertions.assertThrows(RpcException.class, () -> {
URL errorUrl = URL.valueOf("multicast://0.0.0.0/");
zookeeperRegistry.doRegister(errorUrl);
});
} |
public static void ensureCorrectArgs(
final FunctionName functionName, final Object[] args, final Class<?>... argTypes
) {
if (args == null) {
throw new KsqlFunctionException("Null argument list for " + functionName.text() + ".");
}
if (args.length != argTypes.length) {
throw new KsqlFunctionException("Incorrect arguments for " + functionName.text() + ".");
}
for (int i = 0; i < argTypes.length; i++) {
if (args[i] == null) {
continue;
}
if (!argTypes[i].isAssignableFrom(args[i].getClass())) {
throw new KsqlFunctionException(
String.format(
"Incorrect arguments type for %s. "
+ "Expected %s for arg number %d but found %s.",
functionName.text(),
argTypes[i].getCanonicalName(),
i,
args[i].getClass().getCanonicalName()
));
}
}
} | @Test(expected = KsqlException.class)
public void shouldFailIfArgCountIsTooMany() {
final Object[] args = new Object[]{"testArg1", 10L};
UdfUtil.ensureCorrectArgs(FUNCTION_NAME, args, String.class);
} |
public static String clientTagPrefix(final String clientTagKey) {
return CLIENT_TAG_PREFIX + clientTagKey;
} | @Test
public void shouldThrowExceptionWhenClientTagKeyExceedMaxLimit() {
final String key = String.join("", nCopies(MAX_RACK_AWARE_ASSIGNMENT_TAG_KEY_LENGTH + 1, "k"));
props.put(StreamsConfig.clientTagPrefix(key), "eu-central-1a");
final ConfigException exception = assertThrows(ConfigException.class, () -> new StreamsConfig(props));
assertEquals(
String.format("Invalid value %s for configuration %s: Tag key exceeds maximum length of %s.",
key, StreamsConfig.CLIENT_TAG_PREFIX, StreamsConfig.MAX_RACK_AWARE_ASSIGNMENT_TAG_KEY_LENGTH),
exception.getMessage()
);
} |
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
List<@Nullable Object> expected =
(varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
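// The boolean flags the likely mistake of passing a single Iterable where individual varargs were intended.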
return containsExactlyElementsIn(
expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
} | @Test
public void iterableContainsExactlyWithOnlyNullPassedAsNullArray() {
// Truth is tolerant of this erroneous varargs call.
Iterable<Object> actual = asList((Object) null);
assertThat(actual).containsExactly((Object[]) null);
} |
public static Object getMessageAnnotation(String key, Message message) {
if (message != null && message.getMessageAnnotations() != null) {
Map<Symbol, Object> annotations = message.getMessageAnnotations().getValue();
return annotations.get(AmqpMessageSupport.getSymbol(key));
}
return null;
} | @Test
public void testGetMessageAnnotationWhenMessageHasEmptyAnnotationsMap() {
Map<Symbol, Object> messageAnnotationsMap = new HashMap<Symbol,Object>();
Message message = Proton.message();
message.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap));
assertNull(AmqpMessageSupport.getMessageAnnotation("x-opt-test", message));
} |
public static synchronized X509Certificate createX509V3Certificate(KeyPair kp, int days, String issuerCommonName,
String subjectCommonName, String domain,
String signAlgorithm)
throws GeneralSecurityException, IOException {
return createX509V3Certificate( kp, days, issuerCommonName, subjectCommonName, domain, signAlgorithm, null );
} | @Test
public void testGenerateCertificateIssuer() throws Exception
{
// Setup fixture.
final KeyPair keyPair = subjectKeyPair;
final int days = 2;
final String issuerCommonName = "issuer common name";
final String subjectCommonName = "subject common name";
final String domain = "domain.example.org";
final Set<String> sanDnsNames = Stream.of( "alternative-a.example.org", "alternative-b.example.org" ).collect( Collectors.toSet() );
// Execute system under test.
final X509Certificate result = CertificateManager.createX509V3Certificate( keyPair, days, issuerCommonName, subjectCommonName, domain, SIGNATURE_ALGORITHM, sanDnsNames );
// Verify results.
assertNotNull( result );
final Set<String> foundIssuerCNs = parse( result.getIssuerX500Principal().getName(), "CN" );
assertEquals( 1, foundIssuerCNs.size() );
assertEquals( issuerCommonName, foundIssuerCNs.iterator().next() );
} |
@SqlInvokedScalarFunction(value = "array_average", deterministic = true, calledOnNullInput = false)
@Description("Returns the average of all array elements, or null if the array is empty. Ignores null elements.")
@SqlParameter(name = "input", type = "array<double>")
@SqlType("double")
public static String arrayAverage()
{
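// The reduce state is a (running sum, non-null count) pair; nulls are skipped, and a zero count yields SQL NULL.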
return "RETURN reduce(" +
"input, " +
"(double '0.0', 0), " +
"(s, x) -> IF(x IS NOT NULL, (s[1] + x, s[2] + 1), s), " +
"s -> if(s[2] = 0, cast(null as double), s[1] / cast(s[2] as double)))";
} | @Test
public void testArrayAverage()
{
assertFunctionWithError("array_average(array[1, 2])", DOUBLE, 1.5);
assertFunctionWithError("array_average(array[1, bigint '2', smallint '3', tinyint '4', 5.0])", DOUBLE, 3.0);
assertFunctionWithError("array_average(array[1, null, 2, null])", DOUBLE, 1.5);
assertFunctionWithError("array_average(array[null, null, 1])", DOUBLE, 1.0);
assertFunction("array_average(array[null])", DOUBLE, null);
assertFunction("array_average(array[null, null])", DOUBLE, null);
assertFunction("array_average(null)", DOUBLE, null);
} |
@Override
public Object evaluate(final ProcessingDTO processingDTO) {
Number input = (Number) getFromPossibleSources(name, processingDTO)
.orElse(null);
if (input == null) {
return mapMissingTo;
}
return getFromDiscretizeBins(input).orElse(defaultValue);
} | @Test
void evaluateDefaultValue() {
KiePMMLDiscretize kiePMMLDiscretize = getKiePMMLDiscretize(null, null);
ProcessingDTO processingDTO = getProcessingDTO(List.of(new KiePMMLNameValue(NAME, 20)));
Object retrieved = kiePMMLDiscretize.evaluate(processingDTO);
assertThat(retrieved).isNull();
kiePMMLDiscretize = getKiePMMLDiscretize(MAP_MISSING_TO, DEFAULTVALUE);
processingDTO = getProcessingDTO(List.of(new KiePMMLNameValue(NAME, 20)));
retrieved = kiePMMLDiscretize.evaluate(processingDTO);
assertThat(retrieved).isNotNull();
assertThat(retrieved).isEqualTo(DEFAULTVALUE);
processingDTO = getProcessingDTO(List.of(new KiePMMLNameValue(NAME, 21)));
retrieved = kiePMMLDiscretize.evaluate(processingDTO);
assertThat(retrieved).isNotNull();
assertThat(retrieved).isEqualTo(DEFAULTVALUE);
processingDTO = getProcessingDTO(List.of(new KiePMMLNameValue(NAME, 40)));
retrieved = kiePMMLDiscretize.evaluate(processingDTO);
assertThat(retrieved).isNotNull();
assertThat(retrieved).isEqualTo(DEFAULTVALUE);
} |
public static void onGeTuiNotificationClicked(Object gtNotificationMessage) {
if (gtNotificationMessage == null) {
SALog.i(TAG, "gtNotificationMessage is null");
return;
}
if (!isTrackPushEnabled()) return;
try {
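// Access the GeTui message via reflection so there is no compile-time dependency on the GeTui SDK.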
String msgId = ReflectUtil.callMethod(gtNotificationMessage, "getMessageId");
String title = ReflectUtil.callMethod(gtNotificationMessage, "getTitle");
String content = ReflectUtil.callMethod(gtNotificationMessage, "getContent");
if (!TextUtils.isEmpty(msgId) &&
!TextUtils.isEmpty(title) &&
!TextUtils.isEmpty(content)) {
PushProcess.getInstance().trackGTClickDelayed(msgId, title, content);
}
} catch (Exception e) {
SALog.printStackTrace(e);
}
} | @Test
public void onGeTuiNotificationClicked() {
PushAutoTrackHelper.onGeTuiNotificationClicked(new GetTuiData());
} |
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat(
String groupId,
String memberId,
int memberEpoch,
String instanceId,
String rackId,
int rebalanceTimeoutMs,
String clientId,
String clientHost,
List<String> subscribedTopicNames,
String assignorName,
List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions
) throws ApiException {
final long currentTimeMs = time.milliseconds();
final List<CoordinatorRecord> records = new ArrayList<>();
// Get or create the consumer group.
boolean createIfNotExists = memberEpoch == 0;
final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records);
throwIfConsumerGroupIsFull(group, memberId);
// Get or create the member.
if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
final ConsumerGroupMember member;
if (instanceId == null) {
member = getOrMaybeSubscribeDynamicConsumerGroupMember(
group,
memberId,
memberEpoch,
ownedTopicPartitions,
createIfNotExists,
false
);
} else {
member = getOrMaybeSubscribeStaticConsumerGroupMember(
group,
memberId,
memberEpoch,
instanceId,
ownedTopicPartitions,
createIfNotExists,
false,
records
);
}
// 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue
// record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
// changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue
// record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
// changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition.
ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member)
.maybeUpdateInstanceId(Optional.ofNullable(instanceId))
.maybeUpdateRackId(Optional.ofNullable(rackId))
.maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs))
.maybeUpdateServerAssignorName(Optional.ofNullable(assignorName))
.maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
.setClientId(clientId)
.setClientHost(clientHost)
.setClassicMemberMetadata(null)
.build();
boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
groupId,
member,
updatedMember,
records
);
int groupEpoch = group.groupEpoch();
Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames();
SubscriptionType subscriptionType = group.subscriptionType();
if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
// The subscription metadata is updated in two cases:
// 1) The member has updated its subscriptions;
// 2) The refresh deadline has been reached.
subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
subscriptionMetadata = group.computeSubscriptionMetadata(
subscribedTopicNamesMap,
metadataImage.topics(),
metadataImage.cluster()
);
int numMembers = group.numMembers();
if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) {
numMembers++;
}
subscriptionType = ModernGroup.subscriptionType(
subscribedTopicNamesMap,
numMembers
);
if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
log.info("[GroupId {}] Computed new subscription metadata: {}.",
groupId, subscriptionMetadata);
bumpGroupEpoch = true;
records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
}
if (bumpGroupEpoch) {
groupEpoch += 1;
records.add(newConsumerGroupEpochRecord(groupId, groupEpoch));
log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
}
group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch);
}
// 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
// the existing and the new target assignment is persisted to the partition.
final int targetAssignmentEpoch;
final Assignment targetAssignment;
if (groupEpoch > group.assignmentEpoch()) {
targetAssignment = updateTargetAssignment(
group,
groupEpoch,
member,
updatedMember,
subscriptionMetadata,
subscriptionType,
records
);
targetAssignmentEpoch = groupEpoch;
} else {
targetAssignmentEpoch = group.assignmentEpoch();
targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId());
}
// 3. Reconcile the member's assignment with the target assignment if the member is not
// fully reconciled yet.
updatedMember = maybeReconcile(
groupId,
updatedMember,
group::currentPartitionEpoch,
targetAssignmentEpoch,
targetAssignment,
ownedTopicPartitions,
records
);
scheduleConsumerGroupSessionTimeout(groupId, memberId);
// Prepare the response.
ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData()
.setMemberId(updatedMember.memberId())
.setMemberEpoch(updatedMember.memberEpoch())
.setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId));
// The assignment is only provided in the following cases:
// 1. The member sent a full request. It does so when joining or rejoining the group with zero
// as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields
// (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request
// as those must be set in a full request.
// 2. The member's assignment has been updated.
boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null);
if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) {
response.setAssignment(createConsumerGroupResponseAssignment(updatedMember));
}
return new CoordinatorResult<>(records, response);
} | @Test
public void testLeavingMemberBumpsGroupEpoch() {
String groupId = "fooup";
// Pre-generate the member ids to keep the test deterministic.
String memberId1 = Uuid.randomUuid().toString();
String memberId2 = Uuid.randomUuid().toString();
Uuid fooTopicId = Uuid.randomUuid();
String fooTopicName = "foo";
Uuid barTopicId = Uuid.randomUuid();
String barTopicName = "bar";
Uuid zarTopicId = Uuid.randomUuid();
String zarTopicName = "zar";
MockPartitionAssignor assignor = new MockPartitionAssignor("range");
// Consumer group with two members.
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.withConsumerGroupAssignors(Collections.singletonList(assignor))
.withMetadataImage(new MetadataImageBuilder()
.addTopic(fooTopicId, fooTopicName, 6)
.addTopic(barTopicId, barTopicName, 3)
.addTopic(zarTopicId, zarTopicName, 1)
.addRacks()
.build())
.withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
.withMember(new ConsumerGroupMember.Builder(memberId1)
.setState(MemberState.STABLE)
.setMemberEpoch(10)
.setPreviousMemberEpoch(9)
.setClientId(DEFAULT_CLIENT_ID)
.setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setServerAssignorName("range")
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1, 2),
mkTopicAssignment(barTopicId, 0, 1)))
.build())
.withMember(new ConsumerGroupMember.Builder(memberId2)
.setState(MemberState.STABLE)
.setMemberEpoch(10)
.setPreviousMemberEpoch(9)
.setClientId(DEFAULT_CLIENT_ID)
.setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
// Use zar only here to ensure that metadata needs to be recomputed.
.setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar"))
.setServerAssignorName("range")
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(fooTopicId, 3, 4, 5),
mkTopicAssignment(barTopicId, 2)))
.build())
.withAssignment(memberId1, mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1, 2),
mkTopicAssignment(barTopicId, 0, 1)))
.withAssignment(memberId2, mkAssignment(
mkTopicAssignment(fooTopicId, 3, 4, 5),
mkTopicAssignment(barTopicId, 2)))
.withAssignmentEpoch(10))
.build();
// Member 2 leaves the consumer group.
CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(
new ConsumerGroupHeartbeatRequestData()
.setGroupId(groupId)
.setMemberId(memberId2)
.setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH)
.setRebalanceTimeoutMs(5000)
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setTopicPartitions(Collections.emptyList()));
assertResponseEquals(
new ConsumerGroupHeartbeatResponseData()
.setMemberId(memberId2)
.setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH),
result.response()
);
List<CoordinatorRecord> expectedRecords = Arrays.asList(
GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2),
GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2),
GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2),
// Subscription metadata is recomputed because zar is no longer there.
GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() {
{
put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6, mkMapOfPartitionRacks(6)));
put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3, mkMapOfPartitionRacks(3)));
}
}),
GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)
);
assertRecordsEquals(expectedRecords, result.records());
} |
public static String buildFormPostContent(final WebContext context) {
val requestedUrl = context.getFullRequestURL();
val parameters = context.getRequestParameters();
val buffer = new StringBuilder();
buffer.append("<html>\n");
buffer.append("<body>\n");
buffer.append("<form action=\"" + escapeHtml(requestedUrl) + "\" name=\"f\" method=\"post\">\n");
if (parameters != null) {
for (val entry : parameters.entrySet()) {
val values = entry.getValue();
if (values != null && values.length > 0) {
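// Note: the action URL and parameter names are HTML-escaped, but the first value is written as-is.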
buffer.append("<input type='hidden' name=\"" + escapeHtml(entry.getKey()) + "\" value=\"" + values[0] + "\" />\n");
}
}
}
buffer.append("<input value='POST' type='submit' />\n");
buffer.append("</form>\n");
buffer.append("<script type='text/javascript'>document.forms['f'].submit();</script>\n");
buffer.append("</body>\n");
buffer.append("</html>\n");
return buffer.toString();
} | @Test
public void testBuildFormPostContentWithData() {
val content = HttpActionHelper
.buildFormPostContent(MockWebContext.create().setFullRequestURL(CALLBACK_URL).addRequestParameter(NAME, VALUE));
assertEquals("<html>\n<body>\n<form action=\"" + CALLBACK_URL + "\" name=\"f\" method=\"post\">\n"
+ "<input type='hidden' name=\"" + NAME + "\" value=\"" + VALUE + "\" />\n" +
"<input value='POST' type='submit' />\n</form>\n" +
"<script type='text/javascript'>document.forms['f'].submit();</script>\n" +
"</body>\n</html>\n", content);
} |
public static ScanReport fromJson(String json) {
return JsonUtil.parse(json, ScanReportParser::fromJson);
} | @Test
public void invalidTableName() {
assertThatThrownBy(() -> ScanReportParser.fromJson("{\"table-name\":23}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse to a string value: table-name: 23");
} |
@Override
public void processElement(StreamRecord<MergeOnReadInputSplit> element) {
splits.add(element.getValue());
enqueueProcessSplits();
} | @Test
void testWriteRecords() throws Exception {
TestData.writeData(TestData.DATA_SET_INSERT, conf);
try (OneInputStreamOperatorTestHarness<MergeOnReadInputSplit, RowData> harness = createReader()) {
harness.setup();
harness.open();
SteppingMailboxProcessor processor = createLocalMailbox(harness);
StreamReadMonitoringFunction func = TestUtils.getMonitorFunc(conf);
List<MergeOnReadInputSplit> splits = generateSplits(func);
assertThat("Should have 4 splits", splits.size(), is(4));
for (MergeOnReadInputSplit split : splits) {
// Process this element to enqueue to mail-box.
harness.processElement(split, -1);
// Run the mail-box once to read all records from the given split.
assertThat("Should process 1 split", processor.runMailboxStep());
}
// Assert the output has expected elements.
TestData.assertRowDataEquals(harness.extractOutputValues(), TestData.DATA_SET_INSERT);
TestData.writeData(TestData.DATA_SET_UPDATE_INSERT, conf);
final List<MergeOnReadInputSplit> splits2 = generateSplits(func);
assertThat("Should have 4 splits", splits2.size(), is(4));
for (MergeOnReadInputSplit split : splits2) {
// Process this element to enqueue to mail-box.
harness.processElement(split, -1);
// Run the mail-box once to read all records from the given split.
assertThat("Should processed 1 split", processor.runMailboxStep());
}
// The result set behaves like append-only: DATA_SET_INSERT + DATA_SET_UPDATE_INSERT
List<RowData> expected = new ArrayList<>(TestData.DATA_SET_INSERT);
expected.addAll(TestData.DATA_SET_UPDATE_INSERT);
TestData.assertRowDataEquals(harness.extractOutputValues(), expected);
}
} |
public boolean isExist(final String key) {
try {
return null != client.checkExists().forPath(key);
} catch (Exception e) {
throw new ShenyuException(e);
}
} | @Test
void isExist() throws Exception {
assertThrows(ShenyuException.class, () -> client.isExist("/test"));
ExistsBuilderImpl existsBuilder = mock(ExistsBuilderImpl.class);
when(curatorFramework.checkExists()).thenReturn(existsBuilder);
when(existsBuilder.forPath(anyString())).thenReturn(new Stat());
boolean exist = client.isExist("/test");
assertTrue(exist);
} |
public static MessageExtBrokerInner buildTransactionalMessageFromHalfMessage(MessageExt msgExt) {
final MessageExtBrokerInner msgInner = new MessageExtBrokerInner();
msgInner.setWaitStoreMsgOK(false);
msgInner.setMsgId(msgExt.getMsgId());
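// Restore the real topic (and, below, the real queue id) stashed in properties when the half message was stored.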
msgInner.setTopic(msgExt.getProperty(MessageConst.PROPERTY_REAL_TOPIC));
msgInner.setBody(msgExt.getBody());
final String realQueueIdStr = msgExt.getProperty(MessageConst.PROPERTY_REAL_QUEUE_ID);
if (StringUtils.isNumeric(realQueueIdStr)) {
msgInner.setQueueId(Integer.parseInt(realQueueIdStr));
}
msgInner.setFlag(msgExt.getFlag());
msgInner.setTagsCode(MessageExtBrokerInner.tagsString2tagsCode(msgInner.getTags()));
msgInner.setBornTimestamp(msgExt.getBornTimestamp());
msgInner.setBornHost(msgExt.getBornHost());
msgInner.setTransactionId(msgExt.getProperty(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX));
MessageAccessor.setProperties(msgInner, msgExt.getProperties());
MessageAccessor.putProperty(msgInner, MessageConst.PROPERTY_TRANSACTION_PREPARED, "true");
MessageAccessor.clearProperty(msgInner, MessageConst.PROPERTY_TRANSACTION_PREPARED_QUEUE_OFFSET);
MessageAccessor.clearProperty(msgInner, MessageConst.PROPERTY_REAL_QUEUE_ID);
msgInner.setPropertiesString(MessageDecoder.messageProperties2String(msgInner.getProperties()));
int sysFlag = msgExt.getSysFlag();
sysFlag |= MessageSysFlag.TRANSACTION_PREPARED_TYPE;
msgInner.setSysFlag(sysFlag);
return msgInner;
} | @Test
public void testBuildTransactionalMessageFromHalfMessage() {
MessageExt halfMessage = new MessageExt();
halfMessage.setTopic(TransactionalMessageUtil.buildHalfTopic());
MessageAccessor.putProperty(halfMessage, MessageConst.PROPERTY_REAL_TOPIC, "real-topic");
halfMessage.setMsgId("msgId");
halfMessage.setTransactionId("tranId");
MessageAccessor.putProperty(halfMessage, MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX, "tranId");
MessageAccessor.putProperty(halfMessage, MessageConst.PROPERTY_PRODUCER_GROUP, "trans-producer-grp");
MessageExtBrokerInner msgExtInner = TransactionalMessageUtil.buildTransactionalMessageFromHalfMessage(halfMessage);
assertEquals("real-topic", msgExtInner.getTopic());
assertEquals("true", msgExtInner.getProperty(MessageConst.PROPERTY_TRANSACTION_PREPARED));
assertEquals(msgExtInner.getProperty(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX),
halfMessage.getProperty(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX));
assertEquals(msgExtInner.getMsgId(), halfMessage.getMsgId());
assertTrue(MessageSysFlag.check(msgExtInner.getSysFlag(), MessageSysFlag.TRANSACTION_PREPARED_TYPE));
assertEquals(msgExtInner.getProperty(MessageConst.PROPERTY_PRODUCER_GROUP), halfMessage.getProperty(MessageConst.PROPERTY_PRODUCER_GROUP));
} |
Record convert(Object data) {
return convert(data, null);
} | @Test
public void testMapConvert() {
Table table = mock(Table.class);
when(table.schema()).thenReturn(SCHEMA);
RecordConverter converter = new RecordConverter(table, config);
Map<String, Object> data = createMapData();
Record record = converter.convert(data);
assertRecordValues(record);
} |
@Override
public void monitor(RedisServer master) {
connection.sync(RedisCommands.SENTINEL_MONITOR, master.getName(), master.getHost(),
master.getPort().intValue(), master.getQuorum().intValue());
} | @Test
public void testMonitor() {
Collection<RedisServer> masters = connection.masters();
RedisServer master = masters.iterator().next();
master.setName(master.getName() + ":");
connection.monitor(master);
} |
public static Map<String, String> getProps(String properties) {
Map<String, String> configs = new HashMap<>();
if (StringUtils.isNotEmpty(properties)) {
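// Properties arrive as semicolon-separated "key:value" pairs, e.g. "retention.ms:604800000;partitions:1".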
for (String property : properties.split(";")) {
if (StringUtils.isNotEmpty(property)) {
int delimiterPosition = property.indexOf(":");
String key = property.substring(0, delimiterPosition);
String value = property.substring(delimiterPosition + 1);
configs.put(key, value);
}
}
}
return configs;
} | @Test
void givenKafkaTopicProperties_whenGetConfig_thenReturnMappedValues() {
assertThat(PropertyUtils.getProps("retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1"))
.isEqualTo(Map.of(
"retention.ms", "604800000",
"segment.bytes", "52428800",
"retention.bytes", "1048576000",
"partitions", "1",
"min.insync.replicas", "1"
));
} |
static void readFullyDirectBuffer(InputStream f, ByteBuffer buf, byte[] temp) throws IOException {
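// A direct ByteBuffer has no backing array, so bytes are copied through the heap temp array in chunks.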
int nextReadLength = Math.min(buf.remaining(), temp.length);
int bytesRead = 0;
while (nextReadLength > 0 && (bytesRead = f.read(temp, 0, nextReadLength)) >= 0) {
buf.put(temp, 0, bytesRead);
nextReadLength = Math.min(buf.remaining(), temp.length);
}
if (bytesRead < 0 && buf.remaining() > 0) {
throw new EOFException("Reached the end of stream with " + buf.remaining() + " bytes left to read");
}
} | @Test
public void testDirectReadFullyJustRight() throws Exception {
final ByteBuffer readBuffer = ByteBuffer.allocateDirect(10);
MockInputStream stream = new MockInputStream();
// reads all of the bytes available without EOFException
DelegatingSeekableInputStream.readFullyDirectBuffer(stream, readBuffer, TEMP.get());
Assert.assertEquals(10, readBuffer.position());
Assert.assertEquals(10, readBuffer.limit());
// trying to read 0 more bytes doesn't result in EOFException
DelegatingSeekableInputStream.readFullyDirectBuffer(stream, readBuffer, TEMP.get());
Assert.assertEquals(10, readBuffer.position());
Assert.assertEquals(10, readBuffer.limit());
readBuffer.flip();
Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY), readBuffer);
} |
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) {
return decoder.decodeFunctionResult(rawInput, outputParameters);
} | @Test
@SuppressWarnings("unchecked")
public void testDecodeStaticStructStaticArray() {
String rawInput =
"0x0000000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000000000"
+ "000000000000000000000000000000000000000000000000000000000000007b"
+ "000000000000000000000000000000000000000000000000000000000000007b"
+ "0000000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000000000";
assertEquals(
FunctionReturnDecoder.decode(
rawInput, AbiV2TestFixture.getBarStaticArrayFunction.getOutputParameters()),
Arrays.asList(
new StaticArray3(
AbiV2TestFixture.Bar.class,
new AbiV2TestFixture.Bar(
BigInteger.valueOf(0), BigInteger.valueOf(0)),
new AbiV2TestFixture.Bar(
BigInteger.valueOf(123), BigInteger.valueOf(123)),
new AbiV2TestFixture.Bar(
BigInteger.valueOf(0), BigInteger.valueOf(0)))));
} |
@Override
public HttpRestResult<String> httpDelete(String path, Map<String, String> headers, Map<String, String> paramValues,
String encode, long readTimeoutMs) throws Exception {
final long endTime = System.currentTimeMillis() + readTimeoutMs;
String currentServerAddr = serverListMgr.getCurrentServerAddr();
int maxRetry = this.maxRetry;
HttpClientConfig httpConfig = HttpClientConfig.builder()
.setReadTimeOutMillis(Long.valueOf(readTimeoutMs).intValue())
.setConTimeOutMillis(ConfigHttpClientManager.getInstance().getConnectTimeoutOrDefault(100)).build();
do {
try {
Header newHeaders = Header.newInstance();
if (headers != null) {
newHeaders.addAll(headers);
}
Query query = Query.newInstance().initParams(paramValues);
HttpRestResult<String> result = nacosRestTemplate.delete(getUrl(currentServerAddr, path), httpConfig,
newHeaders, query, String.class);
if (isFail(result)) {
LOGGER.error("[NACOS ConnectException] currentServerAddr: {}, httpCode: {}",
serverListMgr.getCurrentServerAddr(), result.getCode());
} else {
// Update the currently available server addr
serverListMgr.updateCurrentServerAddr(currentServerAddr);
return result;
}
} catch (ConnectException connectException) {
LOGGER.error("[NACOS ConnectException httpDelete] currentServerAddr:{}, err : {}",
serverListMgr.getCurrentServerAddr(), ExceptionUtil.getStackTrace(connectException));
} catch (SocketTimeoutException stoe) {
LOGGER.error("[NACOS SocketTimeoutException httpDelete] currentServerAddr:{}, err : {}",
serverListMgr.getCurrentServerAddr(), ExceptionUtil.getStackTrace(stoe));
} catch (Exception ex) {
LOGGER.error("[NACOS Exception httpDelete] currentServerAddr: " + serverListMgr.getCurrentServerAddr(),
ex);
throw ex;
}
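// Rotate to the next server on failure; once the list is exhausted, consume a retry and refresh the server list.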
if (serverListMgr.getIterator().hasNext()) {
currentServerAddr = serverListMgr.getIterator().next();
} else {
maxRetry--;
if (maxRetry < 0) {
throw new ConnectException(
"[NACOS HTTP-DELETE] The maximum number of tolerable server reconnection errors has been reached");
}
serverListMgr.refreshCurrentServerAddr();
}
} while (System.currentTimeMillis() <= endTime);
LOGGER.error("no available server");
throw new ConnectException("no available server");
} | @Test
void testHttpDeleteFailed() throws Exception {
assertThrows(ConnectException.class, () -> {
when(nacosRestTemplate.<String>delete(eq(SERVER_ADDRESS_1 + "/test"), any(HttpClientConfig.class),
any(Header.class), any(Query.class), eq(String.class))).thenReturn(mockResult);
when(mockResult.getCode()).thenReturn(HttpURLConnection.HTTP_NOT_FOUND);
serverHttpAgent.httpDelete("/test", Collections.emptyMap(), Collections.emptyMap(), "UTF-8", 1000);
});
} |
@Override
public Processor<K, Change<V>, KO, SubscriptionWrapper<K>> get() {
return new UnbindChangeProcessor();
} | @Test
public void leftJoinShouldPropagateChangeOfFKFromNonNullToNonNullValue() {
final MockInternalNewProcessorContext<String, SubscriptionWrapper<String>> context = new MockInternalNewProcessorContext<>();
leftJoinProcessor.init(context);
context.setRecordMetadata("topic", 0, 0);
final LeftValue leftRecordValue = new LeftValue(fk2);
leftJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, new LeftValue(fk1)), 0));
assertThat(context.forwarded().size(), is(2));
assertThat(
context.forwarded().get(1).record(),
is(new Record<>(fk2, new SubscriptionWrapper<>(hash(leftRecordValue), PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0))
);
} |
@Deprecated
public String getJSON() {
return getJSON(Long.MAX_VALUE);
} | @Test
public void testGetJSONFromZLIBCompressedMessage() throws Exception {
for (int level = -1; level <= 9; level++) {
final GELFMessage msg = new GELFMessage(TestHelper.zlibCompress(GELF_JSON, level));
assertEquals(GELF_JSON, msg.getJSON(1024));
}
} |
public RingbufferStoreConfig setFactoryImplementation(@Nonnull RingbufferStoreFactory factoryImplementation) {
this.factoryImplementation = checkNotNull(factoryImplementation, "Ringbuffer store factory cannot be null");
this.factoryClassName = null;
return this;
} | @Test
public void setFactoryImplementation() {
RingbufferStoreFactory factory = new RingbufferStoreFactory() {
@Override
public RingbufferStore newRingbufferStore(String name, Properties properties) {
return null;
}
};
config.setFactoryImplementation(factory);
assertEquals(factory, config.getFactoryImplementation());
} |
public void incrCounter(Enum<?> key, long amount) {
findCounter(key).increment(amount);
} | @SuppressWarnings("deprecation")
@Test
public void testCounterIteratorConcurrency() {
Counters counters = new Counters();
counters.incrCounter("group1", "counter1", 1);
Iterator<Group> iterator = counters.iterator();
counters.incrCounter("group2", "counter2", 1);
iterator.next();
} |
public CompletableFuture<QueryAssignmentResponse> queryAssignment(ProxyContext ctx,
QueryAssignmentRequest request) {
CompletableFuture<QueryAssignmentResponse> future = new CompletableFuture<>();
try {
validateTopicAndConsumerGroup(request.getTopic(), request.getGroup());
List<org.apache.rocketmq.proxy.common.Address> addressList = this.convertToAddressList(request.getEndpoints());
ProxyTopicRouteData proxyTopicRouteData = this.messagingProcessor.getTopicRouteDataForProxy(
ctx,
addressList,
request.getTopic().getName());
boolean fifo = false;
SubscriptionGroupConfig config = this.messagingProcessor.getSubscriptionGroupConfig(ctx,
request.getGroup().getName());
if (config != null && config.isConsumeMessageOrderly()) {
fifo = true;
}
List<Assignment> assignments = new ArrayList<>();
Map<String, Map<Long, Broker>> brokerMap = buildBrokerMap(proxyTopicRouteData.getBrokerDatas());
for (QueueData queueData : proxyTopicRouteData.getQueueDatas()) {
if (PermName.isReadable(queueData.getPerm()) && queueData.getReadQueueNums() > 0) {
Map<Long, Broker> brokerIdMap = brokerMap.get(queueData.getBrokerName());
if (brokerIdMap != null) {
Broker broker = brokerIdMap.get(MixAll.MASTER_ID);
Permission permission = this.convertToPermission(queueData.getPerm());
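// FIFO groups enumerate one assignment per readable queue; otherwise a single broker-level assignment with queue id -1 is returned.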
if (fifo) {
for (int i = 0; i < queueData.getReadQueueNums(); i++) {
MessageQueue defaultMessageQueue = MessageQueue.newBuilder()
.setTopic(request.getTopic())
.setId(i)
.setPermission(permission)
.setBroker(broker)
.build();
assignments.add(Assignment.newBuilder()
.setMessageQueue(defaultMessageQueue)
.build());
}
} else {
MessageQueue defaultMessageQueue = MessageQueue.newBuilder()
.setTopic(request.getTopic())
.setId(-1)
.setPermission(permission)
.setBroker(broker)
.build();
assignments.add(Assignment.newBuilder()
.setMessageQueue(defaultMessageQueue)
.build());
}
}
}
}
QueryAssignmentResponse response;
if (assignments.isEmpty()) {
response = QueryAssignmentResponse.newBuilder()
.setStatus(ResponseBuilder.getInstance().buildStatus(Code.FORBIDDEN, "no readable queue"))
.build();
} else {
response = QueryAssignmentResponse.newBuilder()
.addAllAssignments(assignments)
.setStatus(ResponseBuilder.getInstance().buildStatus(Code.OK, Code.OK.name()))
.build();
}
future.complete(response);
} catch (Throwable t) {
future.completeExceptionally(t);
}
return future;
} | @Test
public void testQueryAssignmentWithNoReadPerm() throws Throwable {
when(this.messagingProcessor.getTopicRouteDataForProxy(any(), any(), anyString()))
.thenReturn(createProxyTopicRouteData(2, 2, PermName.PERM_WRITE));
QueryAssignmentResponse response = this.routeActivity.queryAssignment(
createContext(),
QueryAssignmentRequest.newBuilder()
.setEndpoints(grpcEndpoints)
.setTopic(GRPC_TOPIC)
.setGroup(GRPC_GROUP)
.build()
).get();
assertEquals(Code.FORBIDDEN, response.getStatus().getCode());
} |
@Override
public MutableNetwork<Node, Edge> apply(MapTask mapTask) {
List<ParallelInstruction> parallelInstructions = Apiary.listOrEmpty(mapTask.getInstructions());
MutableNetwork<Node, Edge> network =
NetworkBuilder.directed()
.allowsSelfLoops(false)
.allowsParallelEdges(true)
.expectedNodeCount(parallelInstructions.size() * 2)
.build();
// Add all the instruction nodes and output nodes
ParallelInstructionNode[] instructionNodes =
new ParallelInstructionNode[parallelInstructions.size()];
InstructionOutputNode[][] outputNodes =
new InstructionOutputNode[parallelInstructions.size()][];
for (int i = 0; i < parallelInstructions.size(); ++i) {
// InstructionOutputNode's are the source of truth on instruction outputs.
// Clear the instruction's outputs to reduce chance for confusion.
List<InstructionOutput> outputs =
Apiary.listOrEmpty(parallelInstructions.get(i).getOutputs());
outputNodes[i] = new InstructionOutputNode[outputs.size()];
JsonFactory factory =
MoreObjects.firstNonNull(mapTask.getFactory(), Transport.getJsonFactory());
ParallelInstruction parallelInstruction =
clone(factory, parallelInstructions.get(i)).setOutputs(null);
ParallelInstructionNode instructionNode =
ParallelInstructionNode.create(parallelInstruction, Nodes.ExecutionLocation.UNKNOWN);
instructionNodes[i] = instructionNode;
network.addNode(instructionNode);
// Connect the instruction node output to the output PCollection node
for (int j = 0; j < outputs.size(); ++j) {
InstructionOutput instructionOutput = outputs.get(j);
InstructionOutputNode outputNode =
InstructionOutputNode.create(
instructionOutput, "generatedPcollection" + this.idGenerator.getId());
network.addNode(outputNode);
if (parallelInstruction.getParDo() != null) {
network.addEdge(
instructionNode,
outputNode,
MultiOutputInfoEdge.create(
parallelInstruction.getParDo().getMultiOutputInfos().get(j)));
} else {
network.addEdge(instructionNode, outputNode, DefaultEdge.create());
}
outputNodes[i][j] = outputNode;
}
}
// Connect PCollections as inputs to instructions
for (ParallelInstructionNode instructionNode : instructionNodes) {
ParallelInstruction parallelInstruction = instructionNode.getParallelInstruction();
if (parallelInstruction.getFlatten() != null) {
for (InstructionInput input :
Apiary.listOrEmpty(parallelInstruction.getFlatten().getInputs())) {
attachInput(input, network, instructionNode, outputNodes);
}
} else if (parallelInstruction.getParDo() != null) {
attachInput(
parallelInstruction.getParDo().getInput(), network, instructionNode, outputNodes);
} else if (parallelInstruction.getPartialGroupByKey() != null) {
attachInput(
parallelInstruction.getPartialGroupByKey().getInput(),
network,
instructionNode,
outputNodes);
} else if (parallelInstruction.getRead() != null) {
// Reads have no inputs so nothing to do
} else if (parallelInstruction.getWrite() != null) {
attachInput(
parallelInstruction.getWrite().getInput(), network, instructionNode, outputNodes);
} else {
throw new IllegalArgumentException(
String.format(
"Unknown type of instruction %s for map task %s", parallelInstruction, mapTask));
}
}
return network;
} | @Test
public void testFlatten() {
// ReadA --\
// |--> Flatten
// ReadB --/
InstructionOutput readOutputA = createInstructionOutput("ReadA.out");
ParallelInstruction readA = createParallelInstruction("ReadA", readOutputA);
readA.setRead(new ReadInstruction());
InstructionOutput readOutputB = createInstructionOutput("ReadB.out");
ParallelInstruction readB = createParallelInstruction("ReadB", readOutputB);
readB.setRead(new ReadInstruction());
FlattenInstruction flattenInstruction = new FlattenInstruction();
flattenInstruction.setInputs(
ImmutableList.of(
createInstructionInput(0, 0), // ReadA.out
createInstructionInput(1, 0))); // ReadB.out
InstructionOutput flattenOutput = createInstructionOutput("Flatten.out");
ParallelInstruction flatten = createParallelInstruction("Flatten", flattenOutput);
flatten.setFlatten(flattenInstruction);
MapTask mapTask = new MapTask();
mapTask.setInstructions(ImmutableList.of(readA, readB, flatten));
mapTask.setFactory(Transport.getJsonFactory());
Network<Node, Edge> network =
new MapTaskToNetworkFunction(IdGenerators.decrementingLongs()).apply(mapTask);
assertNetworkProperties(network);
assertEquals(6, network.nodes().size());
assertEquals(5, network.edges().size());
ParallelInstructionNode readANode = get(network, readA);
InstructionOutputNode readOutputANode = getOnlySuccessor(network, readANode);
assertEquals(readOutputA, readOutputANode.getInstructionOutput());
ParallelInstructionNode readBNode = get(network, readB);
InstructionOutputNode readOutputBNode = getOnlySuccessor(network, readBNode);
assertEquals(readOutputB, readOutputBNode.getInstructionOutput());
// Make sure the successors for both ReadA and ReadB output PCollections are the same
assertEquals(network.successors(readOutputANode), network.successors(readOutputBNode));
ParallelInstructionNode flattenNode = getOnlySuccessor(network, readOutputANode);
InstructionOutputNode flattenOutputNode = getOnlySuccessor(network, flattenNode);
assertEquals(flattenOutput, flattenOutputNode.getInstructionOutput());
} |
@Override
public Object apply(Object input) {
return PropertyOrFieldSupport.EXTRACTION.getValueOf(propertyOrFieldName, input);
} | @Test
void should_throw_exception_if_no_object_is_given() {
// GIVEN
ByNameSingleExtractor underTest = new ByNameSingleExtractor("id");
// WHEN
Throwable thrown = catchThrowable(() -> underTest.apply(null));
// THEN
then(thrown).isInstanceOf(IllegalArgumentException.class);
} |
@VisibleForTesting
static SwitchGenerationCase checkSwitchGenerationCase(Type type, List<RowExpression> values)
{
if (values.size() > 32) {
// 32 is chosen because
// * SET_CONTAINS performs worst when smaller than but close to power of 2
// * Benchmark shows performance of SET_CONTAINS is better at 50, but similar at 25.
return SwitchGenerationCase.SET_CONTAINS;
}
if (!(type instanceof IntegerType || type instanceof BigintType || type instanceof DateType)) {
return SwitchGenerationCase.HASH_SWITCH;
}
for (RowExpression expression : values) {
// For non-constant expressions, they will be added to the default case in the generated switch code. They do not affect any of
// the cases other than the default one. Therefore, it's okay to skip them when choosing between DIRECT_SWITCH and HASH_SWITCH.
// Same argument applies for nulls.
if (!(expression instanceof ConstantExpression)) {
continue;
}
Object constant = ((ConstantExpression) expression).getValue();
if (constant == null) {
continue;
}
long longConstant = ((Number) constant).longValue();
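// Java switch statements key on int, so constants outside the int range rule out DIRECT_SWITCH.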
if (longConstant < Integer.MIN_VALUE || longConstant > Integer.MAX_VALUE) {
return SwitchGenerationCase.HASH_SWITCH;
}
}
return SwitchGenerationCase.DIRECT_SWITCH;
} | @Test
public void testBigint()
{
FunctionAndTypeManager functionAndTypeManager = createTestMetadataManager().getFunctionAndTypeManager();
List<RowExpression> values = new ArrayList<>();
values.add(constant(Integer.MAX_VALUE + 1L, BIGINT));
values.add(constant(Integer.MIN_VALUE - 1L, BIGINT));
values.add(constant(3L, BIGINT));
assertEquals(checkSwitchGenerationCase(BIGINT, values), HASH_SWITCH);
values.add(constant(null, BIGINT));
assertEquals(checkSwitchGenerationCase(BIGINT, values), HASH_SWITCH);
values.add(new CallExpression(
CAST.name(),
functionAndTypeManager.lookupCast(CAST, DOUBLE, BIGINT),
BIGINT,
Collections.singletonList(constant(12345678901234.0, DOUBLE))));
assertEquals(checkSwitchGenerationCase(BIGINT, values), HASH_SWITCH);
for (long i = 6; i <= 32; ++i) {
values.add(constant(i, BIGINT));
}
assertEquals(checkSwitchGenerationCase(BIGINT, values), HASH_SWITCH);
values.add(constant(33L, BIGINT));
assertEquals(checkSwitchGenerationCase(BIGINT, values), SET_CONTAINS);
} |
@Override
public SparkTable loadTable(Identifier ident) throws NoSuchTableException {
Pair<Table, Long> table = load(ident);
return new SparkTable(table.first(), table.second(), false /* refresh eagerly */);
} | @TestTemplate
public void testTimeTravel() {
sql("CREATE TABLE %s (id INT, dep STRING) USING iceberg", tableName);
Table table = validationCatalog.loadTable(tableIdent);
sql("INSERT INTO TABLE %s VALUES (1, 'hr')", tableName);
table.refresh();
Snapshot firstSnapshot = table.currentSnapshot();
waitUntilAfter(firstSnapshot.timestampMillis());
sql("INSERT INTO TABLE %s VALUES (2, 'hr')", tableName);
table.refresh();
Snapshot secondSnapshot = table.currentSnapshot();
waitUntilAfter(secondSnapshot.timestampMillis());
sql("INSERT INTO TABLE %s VALUES (3, 'hr')", tableName);
table.refresh();
try {
TABLE_CACHE.add("key", table);
assertEquals(
"Should have expected rows in 3rd snapshot",
ImmutableList.of(row(1, "hr"), row(2, "hr"), row(3, "hr")),
sql("SELECT * FROM testcache.key ORDER BY id"));
assertEquals(
"Should have expected rows in 2nd snapshot",
ImmutableList.of(row(1, "hr"), row(2, "hr")),
sql(
"SELECT * FROM testcache.`key#at_timestamp_%s` ORDER BY id",
secondSnapshot.timestampMillis()));
assertEquals(
"Should have expected rows in 1st snapshot",
ImmutableList.of(row(1, "hr")),
sql(
"SELECT * FROM testcache.`key#snapshot_id_%d` ORDER BY id",
firstSnapshot.snapshotId()));
} finally {
TABLE_CACHE.remove("key");
}
} |
protected void notify(List<URL> urls) {
if (CollectionUtils.isEmpty(urls)) {
return;
}
for (Map.Entry<URL, Set<NotifyListener>> entry : getSubscribed().entrySet()) {
URL url = entry.getKey();
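// All urls in one notification belong to the same service, so matching the subscription against the first is sufficient.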
if (!UrlUtils.isMatch(url, urls.get(0))) {
continue;
}
Set<NotifyListener> listeners = entry.getValue();
if (listeners != null) {
for (NotifyListener listener : listeners) {
try {
notify(url, listener, filterEmpty(url, urls));
} catch (Throwable t) {
// 1-7: Failed to notify registry event.
logger.error(
REGISTRY_FAILED_NOTIFY_EVENT,
"consumer is offline",
"",
"Failed to notify registry event, urls: " + urls + ", cause: " + t.getMessage(),
t);
}
}
}
}
} | @Test
void testNotify() {
final AtomicReference<Boolean> notified = new AtomicReference<Boolean>(false);
NotifyListener listener1 = urls -> notified.set(Boolean.TRUE);
URL url1 = new ServiceConfigURL("dubbo", "192.168.0.1", 2200, parametersConsumer);
abstractRegistry.subscribe(url1, listener1);
NotifyListener listener2 = urls -> notified.set(Boolean.TRUE);
URL url2 = new ServiceConfigURL("dubbo", "192.168.0.2", 2201, parametersConsumer);
abstractRegistry.subscribe(url2, listener2);
NotifyListener listener3 = urls -> notified.set(Boolean.TRUE);
URL url3 = new ServiceConfigURL("dubbo", "192.168.0.3", 2202, parametersConsumer);
abstractRegistry.subscribe(url3, listener3);
List<URL> urls = new ArrayList<>();
urls.add(url1);
urls.add(url2);
urls.add(url3);
abstractRegistry.notify(url1, listener1, urls);
Map<URL, Map<String, List<URL>>> map = abstractRegistry.getNotified();
MatcherAssert.assertThat(true, Matchers.equalTo(map.containsKey(url1)));
MatcherAssert.assertThat(false, Matchers.equalTo(map.containsKey(url2)));
MatcherAssert.assertThat(false, Matchers.equalTo(map.containsKey(url3)));
} |
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
if (sqlStatement instanceof ShowFunctionStatusStatement) {
return Optional.of(new ShowFunctionStatusExecutor((ShowFunctionStatusStatement) sqlStatement));
}
if (sqlStatement instanceof ShowProcedureStatusStatement) {
return Optional.of(new ShowProcedureStatusExecutor((ShowProcedureStatusStatement) sqlStatement));
}
if (sqlStatement instanceof ShowTablesStatement) {
return Optional.of(new ShowTablesExecutor((ShowTablesStatement) sqlStatement, sqlStatementContext.getDatabaseType()));
}
return Optional.empty();
} | @Test
void assertCreateWithShowTables() {
when(sqlStatementContext.getSqlStatement()).thenReturn(new MySQLShowTablesStatement());
Optional<DatabaseAdminExecutor> actual = new MySQLAdminExecutorCreator().create(sqlStatementContext);
assertTrue(actual.isPresent());
assertThat(actual.get(), instanceOf(ShowTablesExecutor.class));
} |
static ByteBuffer epochEntriesAsByteBuffer(List<EpochEntry> epochEntries) throws IOException {
ByteArrayOutputStream stream = new ByteArrayOutputStream();
try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(stream, StandardCharsets.UTF_8))) {
CheckpointFile.CheckpointWriteBuffer<EpochEntry> writeBuffer =
new CheckpointFile.CheckpointWriteBuffer<>(writer, 0, LeaderEpochCheckpointFile.FORMATTER);
writeBuffer.write(epochEntries);
writer.flush();
}
return ByteBuffer.wrap(stream.toByteArray());
} | @Test
public void testEpochEntriesAsByteBuffer() throws Exception {
int expectedEpoch = 0;
long expectedStartOffset = 1L;
int expectedVersion = 0;
List<EpochEntry> epochs = Arrays.asList(new EpochEntry(expectedEpoch, expectedStartOffset));
ByteBuffer buffer = RemoteLogManager.epochEntriesAsByteBuffer(epochs);
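// Checkpoint layout: a version line, an entry-count line, then one "epoch startOffset" line per entry.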
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(buffer.array()), StandardCharsets.UTF_8));
assertEquals(String.valueOf(expectedVersion), bufferedReader.readLine());
assertEquals(String.valueOf(epochs.size()), bufferedReader.readLine());
assertEquals(expectedEpoch + " " + expectedStartOffset, bufferedReader.readLine());
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
DelegatingScheduledFutureStripper<?> that = (DelegatingScheduledFutureStripper<?>) o;
return original.equals(that.original);
} | @Test
public void equals() {
ScheduledFuture<Future<Integer>> original = taskScheduler.schedule(new SimpleCallableTestTask(), 0, TimeUnit.SECONDS);
ScheduledFuture<Future<Integer>> joker = taskScheduler.schedule(new SimpleCallableTestTask(), 1, TimeUnit.SECONDS);
ScheduledFuture testA = new DelegatingScheduledFutureStripper<Future<Integer>>(original);
ScheduledFuture testB = new DelegatingScheduledFutureStripper<Future<Integer>>(original);
ScheduledFuture testC = new DelegatingScheduledFutureStripper<Future<Integer>>(joker);
assertNotNull(testA);
assertEquals(testA, testA);
assertEquals(testA, testB);
assertNotEquals(testA, testC);
} |
public List<PartitionInfo> getTopicMetadata(String topic, boolean allowAutoTopicCreation, Timer timer) {
MetadataRequest.Builder request = new MetadataRequest.Builder(Collections.singletonList(topic), allowAutoTopicCreation);
Map<String, List<PartitionInfo>> topicMetadata = getTopicMetadata(request, timer);
return topicMetadata.get(topic);
} | @Test
public void testGetTopicMetadataUnknownTopic() {
buildFetcher();
assignFromUser(singleton(tp0));
client.prepareResponse(newMetadataResponse(Errors.UNKNOWN_TOPIC_OR_PARTITION));
List<PartitionInfo> topicMetadata = topicMetadataFetcher.getTopicMetadata(topicName, true, time.timer(5000L));
assertNull(topicMetadata);
} |
@Override
public void reconcileExecutionDeployments(
ResourceID taskExecutorHost,
ExecutionDeploymentReport executionDeploymentReport,
Map<ExecutionAttemptID, ExecutionDeploymentState> expectedDeployedExecutions) {
final Set<ExecutionAttemptID> unknownExecutions =
new HashSet<>(executionDeploymentReport.getExecutions());
final Set<ExecutionAttemptID> missingExecutions = new HashSet<>();
for (Map.Entry<ExecutionAttemptID, ExecutionDeploymentState> execution :
expectedDeployedExecutions.entrySet()) {
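// remove() doubles as a containment check: reported executions are not unknown, and unreported expected ones are missing unless still PENDING.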
boolean deployed = unknownExecutions.remove(execution.getKey());
if (!deployed && execution.getValue() != ExecutionDeploymentState.PENDING) {
missingExecutions.add(execution.getKey());
}
}
if (!unknownExecutions.isEmpty()) {
handler.onUnknownDeploymentsOf(unknownExecutions, taskExecutorHost);
}
if (!missingExecutions.isEmpty()) {
handler.onMissingDeploymentsOf(missingExecutions, taskExecutorHost);
}
} | @Test
void testMissingDeployments() {
TestingExecutionDeploymentReconciliationHandler handler =
new TestingExecutionDeploymentReconciliationHandler();
DefaultExecutionDeploymentReconciler reconciler =
new DefaultExecutionDeploymentReconciler(handler);
ResourceID resourceId = generate();
ExecutionAttemptID attemptId = createExecutionAttemptId();
reconciler.reconcileExecutionDeployments(
resourceId,
new ExecutionDeploymentReport(Collections.emptySet()),
Collections.singletonMap(attemptId, ExecutionDeploymentState.DEPLOYED));
assertThat(handler.getUnknownExecutions()).isEmpty();
assertThat(handler.getMissingExecutions()).contains(attemptId);
} |
@Override
public Date getDate(final int columnIndex) throws SQLException {
return (Date) ResultSetUtils.convertValue(mergeResultSet.getValue(columnIndex, Date.class), Date.class);
} | @Test
void assertGetDateWithColumnIndex() throws SQLException {
when(mergeResultSet.getValue(1, Date.class)).thenReturn(new Date(0L));
assertThat(shardingSphereResultSet.getDate(1), is(new Date(0L)));
} |
public static SqlMap of(final SqlType keyType, final SqlType valueType) {
return new SqlMap(keyType, valueType);
} | @SuppressWarnings("UnstableApiUsage")
@Test
public void shouldImplementHashCodeAndEqualsProperly() {
new EqualsTester()
.addEqualityGroup(SqlMap.of(SOME_TYPE, SOME_TYPE), SqlMap.of(SOME_TYPE, SOME_TYPE))
.addEqualityGroup(SqlMap.of(OTHER_TYPE, SOME_TYPE))
.addEqualityGroup(SqlMap.of(SOME_TYPE, OTHER_TYPE))
.addEqualityGroup(SOME_TYPE)
.testEquals();
} |
public static boolean isValidSigned(byte[] signedContent, byte[] signature, Certificate[] signingCertificateChain) {
if (signedContent == null || signature == null || signingCertificateChain == null) {
return false;
}
try {
CMSSignedData signedData
= new CMSSignedData(new CMSProcessableByteArray(signedContent), new ByteArrayInputStream(signature));
SignerInformationVerifierProvider sivp = (SignerId sid) -> {
for (Certificate knownCert : signingCertificateChain) {
SignerInformationVerifier siv
= new JcaSimpleSignerInfoVerifierBuilder().setProvider("BC").build((X509Certificate) knownCert);
if (siv.getAssociatedCertificate().getIssuer().equals(sid.getIssuer())
&& siv.getAssociatedCertificate().getSerialNumber().equals(sid.getSerialNumber())) {
return siv;
}
}
throw new RuntimeException("Signature was created with an unknown certificate");
};
return signedData.verifySignatures(sivp);
} catch (CMSException e) {
            // Probably the signature was created with an unknown certificate or something else is wrong with the signature
LOG.debug(e.getMessage(), e);
} catch (Exception e) {
LOG.debug(e.getMessage(), e);
}
return false;
} | @Test
public void isValidSignedTest() throws Exception {
AS2SignedDataGenerator gen = SigningUtils.createSigningGenerator(AS2SignatureAlgorithm.SHA1WITHRSA,
new Certificate[] { signingCert }, signingKP.getPrivate());
CMSProcessableByteArray sData = new CMSProcessableByteArray(MESSAGE.getBytes(StandardCharsets.UTF_8));
CMSSignedData signedData = gen.generate(sData, true);
assertTrue(SigningUtils.isValidSigned(MESSAGE.getBytes(StandardCharsets.UTF_8), signedData.getEncoded(),
new Certificate[] { signingCert }), "Message must be valid");
assertFalse(SigningUtils.isValidSigned(MESSAGE.getBytes(StandardCharsets.UTF_8), signedData.getEncoded(),
new Certificate[] { evilSigningCert }), "Message must be invalid");
} |
public Future<?> scheduleWithFixedDelay(Runnable task, long initialDelay, long delay, TimeUnit unit) {
Preconditions.checkState(isOpen.get(), "CloseableExecutorService is closed");
ScheduledFuture<?> scheduledFuture =
scheduledExecutorService.scheduleWithFixedDelay(task, initialDelay, delay, unit);
return new InternalScheduledFutureTask(scheduledFuture);
} | @Test
public void testCloseableScheduleWithFixedDelay() throws InterruptedException {
CloseableScheduledExecutorService service = new CloseableScheduledExecutorService(executorService);
final CountDownLatch latch = new CountDownLatch(QTY);
service.scheduleWithFixedDelay(
new Runnable() {
@Override
public void run() {
latch.countDown();
}
},
DELAY_MS,
DELAY_MS,
TimeUnit.MILLISECONDS);
assertTrue(latch.await((QTY * 2) * DELAY_MS, TimeUnit.MILLISECONDS));
} |
@Override
public void append(final LogEvent event) {
if(null == event.getMessage()) {
return;
}
final StringBuilder buffer = new StringBuilder();
buffer.append(new String(getLayout().toByteArray(event), StandardCharsets.UTF_8));
if(ignoreExceptions()) {
final Throwable thrown = event.getThrown();
if(thrown != null) {
buffer.append(Strings.LINE_SEPARATOR);
final String[] trace = ExceptionUtils.getStackFrames(thrown);
for(final String t : trace) {
buffer.append(t).append(Strings.LINE_SEPARATOR);
}
}
}
library.NSLog("%@", buffer.toString());
} | @Test
public void testAppend() {
final SystemLogAppender a = new SystemLogAppender(PatternLayout.newBuilder().withPattern("%level - %m%n").build());
a.append(new Log4jLogEvent.Builder().setLoggerName(SystemLogAppender.class.getCanonicalName()).setLevel(Level.ERROR).setThrown(new RuntimeException()).setMessage(new SimpleMessage("Test")).build());
} |
public List<URL> getCacheUrls(URL url) {
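        // Prefer URLs already notified in memory; otherwise fall back to entries parsed from the locally cached properties.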
Map<String, List<URL>> categoryNotified = notified.get(url);
if (CollectionUtils.isNotEmptyMap(categoryNotified)) {
List<URL> urls = categoryNotified.values().stream()
.flatMap(Collection::stream)
.collect(Collectors.toList());
return urls;
}
for (Map.Entry<Object, Object> entry : properties.entrySet()) {
String key = (String) entry.getKey();
String value = (String) entry.getValue();
if (StringUtils.isNotEmpty(key)
&& key.equals(url.getServiceKey())
&& (Character.isLetter(key.charAt(0)) || key.charAt(0) == '_')
&& StringUtils.isNotEmpty(value)) {
String[] arr = value.trim().split(URL_SPLIT);
List<URL> urls = new ArrayList<>();
for (String u : arr) {
urls.add(URL.valueOf(u));
}
return urls;
}
}
return null;
} | @Test
void getCacheUrlsTest() {
List<URL> urls = new ArrayList<>();
urls.add(testUrl);
// check if notify successfully
Assertions.assertFalse(notifySuccess);
abstractRegistry.notify(testUrl, listener, urls);
Assertions.assertTrue(notifySuccess);
List<URL> cacheUrl = abstractRegistry.getCacheUrls(testUrl);
Assertions.assertEquals(1, cacheUrl.size());
URL nullUrl = URL.valueOf("http://1.2.3.4:9090/registry?check=false&file=N/A&interface=com.testa");
cacheUrl = abstractRegistry.getCacheUrls(nullUrl);
Assertions.assertTrue(Objects.isNull(cacheUrl));
} |
public List<T> pollAll() {
List<T> retList = new ArrayList<T>(size);
for (int i = 0; i < entries.length; i++) {
LinkedElement<T> current = entries[i];
while (current != null) {
retList.add(current.element);
current = current.next;
}
}
this.clear();
return retList;
} | @Test
public void testPollAll() {
LOG.info("Test poll all");
for (Integer i : list) {
assertTrue(set.add(i));
}
// remove all elements by polling
List<Integer> poll = set.pollAll();
assertEquals(0, set.size());
assertTrue(set.isEmpty());
// the deleted elements should not be there
for (int i = 0; i < NUM; i++) {
assertFalse(set.contains(list.get(i)));
}
// we should get all original items
for (Integer i : poll) {
assertTrue(list.contains(i));
}
Iterator<Integer> iter = set.iterator();
assertFalse(iter.hasNext());
LOG.info("Test poll all - DONE");
} |
public static IntArrayList shuffle(IntArrayList list, Random random) {
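        // Swaps each element of the first half with a randomly chosen element of the second half (a partial shuffle, not Fisher-Yates).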
int maxHalf = list.size() / 2;
for (int x1 = 0; x1 < maxHalf; x1++) {
int x2 = random.nextInt(maxHalf) + maxHalf;
int tmp = list.buffer[x1];
list.buffer[x1] = list.buffer[x2];
list.buffer[x2] = tmp;
}
return list;
} | @Test
public void testShuffle() {
assertEquals(from(4, 1, 3, 2), ArrayUtil.shuffle(from(1, 2, 3, 4), new Random(0)));
assertEquals(from(4, 3, 2, 1, 5), ArrayUtil.shuffle(from(1, 2, 3, 4, 5), new Random(1)));
} |
@Override
protected List<Object[]> rows() {
List<Object[]> rows = new ArrayList<>(dataConnectionCatalogEntries.size());
for (DataConnectionCatalogEntry dl : dataConnectionCatalogEntries) {
final Map<String, String> options;
if (!securityEnabled) {
options = dl.options();
} else {
options = new TreeMap<>();
Set<String> safeOptions = connectorCache.forType(dl.type()).nonSensitiveConnectorOptions();
for (Map.Entry<String, String> e : dl.options().entrySet()) {
if (safeOptions.contains(e.getKey())) {
options.put(e.getKey(), e.getValue());
}
}
}
Object[] row = new Object[]{
catalog(),
dataConnectionSchema,
dl.name(),
dl.type(),
dl.isShared(),
uncheckCall(() -> JsonUtil.toJson(options)),
dl.source().name()
};
rows.add(row);
}
return rows;
} | @Test
public void test_rows_enabledSecurity() {
when(connectorCache.forType("Kafka")).thenReturn(new KafkaSqlConnector());
// given
DataConnectionCatalogEntry dc = new DataConnectionCatalogEntry(
"dc-name",
"Kafka",
false,
Map.of(OPTION_BOOTSTRAP_SERVERS, "value", "password", "secret")
);
DataConnectionsTable dcTable = new DataConnectionsTable(
"catalog",
"public",
"dc-schema",
singletonList(dc),
connectorCache,
true);
// when
List<Object[]> rows = dcTable.rows();
// then
assertThat(rows).containsExactly(new Object[]{
"catalog"
, "dc-schema"
, "dc-name"
, "Kafka"
, false
, "{\"" + OPTION_BOOTSTRAP_SERVERS + "\":\"value\"}"
, "SQL"
});
} |
@Override
public void print(Iterator<RowData> it, PrintWriter printWriter) {
if (!it.hasNext()) {
printEmptyResult(it, printWriter);
return;
}
long numRows = printTable(it, printWriter);
printFooter(printWriter, numRows);
} | @Test
void testPrintWithEmptyResultAndRowKind() {
PrintStyle.tableauWithTypeInferredColumnWidths(
getSchema(),
getConverter(),
PrintStyle.DEFAULT_MAX_COLUMN_WIDTH,
true,
true)
.print(Collections.emptyIterator(), new PrintWriter(outContent));
assertThat(outContent.toString()).isEqualTo("Empty set" + System.lineSeparator());
} |
void commitOffsetsOrTransaction(final Map<Task, Map<TopicPartition, OffsetAndMetadata>> offsetsPerTask) {
log.debug("Committing task offsets {}", offsetsPerTask.entrySet().stream().collect(Collectors.toMap(t -> t.getKey().id(), Entry::getValue))); // avoid logging actual Task objects
final Set<TaskId> corruptedTasks = new HashSet<>();
if (executionMetadata.processingMode() == EXACTLY_ONCE_ALPHA) {
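            // EOS-alpha uses one producer per task, so each task's offsets are committed in their own transaction.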
for (final Task task : taskManager.activeRunningTaskIterable()) {
final Map<TopicPartition, OffsetAndMetadata> taskOffsetsToCommit = offsetsPerTask.getOrDefault(task, emptyMap());
if (!taskOffsetsToCommit.isEmpty() || taskManager.streamsProducerForTask(task.id()).transactionInFlight()) {
try {
taskManager.streamsProducerForTask(task.id())
.commitTransaction(taskOffsetsToCommit, taskManager.consumerGroupMetadata());
updateTaskCommitMetadata(taskOffsetsToCommit);
} catch (final TimeoutException timeoutException) {
log.error(
String.format("Committing task %s failed.", task.id()),
timeoutException
);
corruptedTasks.add(task.id());
}
}
}
} else if (executionMetadata.processingMode() == EXACTLY_ONCE_V2) {
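            // EOS-v2 shares a single thread-level producer, so all task offsets are committed in one transaction.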
if (!offsetsPerTask.isEmpty() || taskManager.threadProducer().transactionInFlight()) {
final Map<TopicPartition, OffsetAndMetadata> allOffsets = offsetsPerTask.values().stream()
.flatMap(e -> e.entrySet().stream()).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
try {
taskManager.threadProducer().commitTransaction(allOffsets, taskManager.consumerGroupMetadata());
updateTaskCommitMetadata(allOffsets);
} catch (final TimeoutException timeoutException) {
log.error(
String.format("Committing task(s) %s failed.",
offsetsPerTask
.keySet()
.stream()
.map(t -> t.id().toString())
.collect(Collectors.joining(", "))),
timeoutException
);
offsetsPerTask
.keySet()
.forEach(task -> corruptedTasks.add(task.id()));
}
}
} else {
// processingMode == ALOS
if (!offsetsPerTask.isEmpty()) {
final Map<TopicPartition, OffsetAndMetadata> allOffsets = offsetsPerTask.values().stream()
.flatMap(e -> e.entrySet().stream()).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
try {
taskManager.consumerCommitSync(allOffsets);
updateTaskCommitMetadata(allOffsets);
} catch (final CommitFailedException error) {
throw new TaskMigratedException("Consumer committing offsets failed, " +
"indicating the corresponding thread is no longer part of the group", error);
} catch (final TimeoutException timeoutException) {
log.error(
String.format("Committing task(s) %s failed.",
offsetsPerTask
.keySet()
.stream()
.map(t -> t.id().toString())
.collect(Collectors.joining(", "))),
timeoutException
);
throw timeoutException;
} catch (final KafkaException error) {
throw new StreamsException("Error encountered committing offsets via consumer", error);
}
}
}
if (!corruptedTasks.isEmpty()) {
throw new TaskCorruptedException(corruptedTasks);
}
} | @Test
public void testCommitWithOpenTransactionButNoOffsetsEOSV2() {
final Tasks tasks = mock(Tasks.class);
final TaskManager taskManager = mock(TaskManager.class);
final ConsumerGroupMetadata groupMetadata = mock(ConsumerGroupMetadata.class);
when(taskManager.consumerGroupMetadata()).thenReturn(groupMetadata);
final TaskExecutionMetadata metadata = mock(TaskExecutionMetadata.class);
final StreamsProducer producer = mock(StreamsProducer.class);
when(metadata.processingMode()).thenReturn(EXACTLY_ONCE_V2);
when(taskManager.threadProducer()).thenReturn(producer);
when(producer.transactionInFlight()).thenReturn(true);
final TaskExecutor taskExecutor = new TaskExecutor(tasks, taskManager, metadata, new LogContext());
taskExecutor.commitOffsetsOrTransaction(Collections.emptyMap());
verify(producer).commitTransaction(Collections.emptyMap(), groupMetadata);
} |
public static String addUUID(String pathStr, String uuid) {
Preconditions.checkArgument(StringUtils.isNotEmpty(pathStr), "empty path");
Preconditions.checkArgument(StringUtils.isNotEmpty(uuid), "empty uuid");
// In some cases, Spark will add the UUID to the filename itself.
if (pathStr.contains(uuid)) {
return pathStr;
}
int dot; // location of the first '.' in the file name
int lastSlash = pathStr.lastIndexOf('/');
if (lastSlash >= 0) {
Preconditions.checkState(lastSlash + 1 < pathStr.length(),
"Bad path: " + pathStr);
dot = pathStr.indexOf('.', lastSlash);
} else {
dot = pathStr.indexOf('.');
}
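    // Insert the UUID just before the first extension, or append it when the file name has none.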
if (dot >= 0) {
return pathStr.substring(0, dot) + "-" + uuid + pathStr.substring(dot);
} else {
return pathStr + "-" + uuid;
}
} | @Test
public void testEmptyUUID() throws Throwable {
intercept(IllegalArgumentException.class,
() -> addUUID("part-0000.gz", ""));
} |
@JsonCreator
public static StageSource create(@JsonProperty("stage") @Min(0) int stage,
@JsonProperty("match") @Nullable Stage.Match match,
@JsonProperty("rules") List<String> rules) {
return builder()
.stage(stage)
.match(match)
.rules(rules)
.build();
} | @Test
public void testSerialization() throws Exception {
final StageSource stageSource = StageSource.create(23, Stage.Match.ALL, Collections.singletonList("some-rule"));
final JsonNode json = objectMapper.convertValue(stageSource, JsonNode.class);
assertThat(json.path("stage").asInt()).isEqualTo(23);
assertThat(json.path("match").asText()).isEqualTo("ALL");
assertThat(json.path("rules").isArray()).isTrue();
assertThat(json.path("rules")).hasSize(1);
assertThat(json.path("rules").get(0).asText()).isEqualTo("some-rule");
} |
@Override
public Object adapt(final HttpAction action, final WebContext context) {
if (action != null) {
var code = action.getCode();
val response = ((JEEContext) context).getNativeResponse();
if (code < 400) {
response.setStatus(code);
} else {
try {
response.sendError(code);
} catch (final IOException e) {
throw new TechnicalException(e);
}
}
if (action instanceof WithLocationAction withLocationAction) {
context.setResponseHeader(HttpConstants.LOCATION_HEADER, withLocationAction.getLocation());
} else if (action instanceof WithContentAction withContentAction) {
val content = withContentAction.getContent();
if (content != null) {
try {
response.getWriter().write(content);
} catch (final IOException e) {
throw new TechnicalException(e);
}
}
}
return null;
}
throw new TechnicalException("No action provided");
} | @Test
public void testActionWithContent() {
JEEHttpActionAdapter.INSTANCE.adapt(new OkAction(TestsConstants.VALUE), context);
verify(response).setStatus(200);
verify(writer).write(TestsConstants.VALUE);
} |
public static void validate(ProjectMeasuresQuery query) {
validateFilterKeys(query.getMetricCriteria().stream().map(MetricCriterion::getMetricKey).collect(Collectors.toSet()));
validateSort(query.getSort());
} | @Test
public void query_with_empty_metrics_is_valid() {
ProjectMeasuresQueryValidator.validate(new ProjectMeasuresQuery());
} |
@Override public SlotAssignmentResult ensure(long key1, int key2) {
return super.ensure0(key1, key2);
} | @Test(expected = AssertionError.class)
@RequireAssertEnabled
public void testPut_whenDisposed() {
hsa.dispose();
hsa.ensure(1, 1);
} |
SortMergeSubpartitionReader createSubpartitionReader(
BufferAvailabilityListener availabilityListener,
int targetSubpartition,
PartitionedFile resultFile)
throws IOException {
synchronized (lock) {
checkState(!isReleased, "Partition is already released.");
PartitionedFileReader fileReader = createFileReader(resultFile, targetSubpartition);
SortMergeSubpartitionReader subpartitionReader =
new SortMergeSubpartitionReader(availabilityListener, fileReader);
if (allReaders.isEmpty()) {
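                // First reader for this partition: register as a buffer requester with the shared pool.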
bufferPool.registerRequester(this);
}
allReaders.add(subpartitionReader);
sortedReaders.add(subpartitionReader);
subpartitionReader
.getReleaseFuture()
.thenRun(() -> releaseSubpartitionReader(subpartitionReader));
mayTriggerReading();
return subpartitionReader;
}
} | @Test
@Timeout(60)
void testCreateSubpartitionReader() throws Exception {
ManuallyTriggeredScheduledExecutorService ioExecutor =
new ManuallyTriggeredScheduledExecutorService();
readScheduler =
new SortMergeResultPartitionReadScheduler(bufferPool, ioExecutor, new Object());
SortMergeSubpartitionReader subpartitionReader =
readScheduler.createSubpartitionReader(
new NoOpBufferAvailablityListener(), 0, partitionedFile);
assertThat(readScheduler.isRunning()).isTrue();
assertThat(readScheduler.getDataFileChannel().isOpen()).isTrue();
assertThat(readScheduler.getIndexFileChannel().isOpen()).isTrue();
assertThat(ioExecutor.numQueuedRunnables()).isEqualTo(1);
int numBuffersRead = 0;
while (numBuffersRead < numBuffersPerSubpartition) {
ioExecutor.triggerAll();
ResultSubpartition.BufferAndBacklog bufferAndBacklog =
subpartitionReader.getNextBuffer();
if (bufferAndBacklog != null) {
int numBytes = bufferAndBacklog.buffer().readableBytes();
MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(numBytes);
Buffer fullBuffer =
((CompositeBuffer) bufferAndBacklog.buffer()).getFullBufferData(segment);
assertThat(ByteBuffer.wrap(dataBytes)).isEqualTo(fullBuffer.getNioBufferReadable());
fullBuffer.recycleBuffer();
++numBuffersRead;
}
}
} |
public ZMsg append(String stringValue)
{
add(stringValue);
return this;
} | @Test
public void testAppend()
{
ZMsg msg = new ZMsg().append((ZMsg) null).append(ZMsg.newStringMsg("123"));
assertThat(msg.popString(), is("123"));
msg.append(ZMsg.newStringMsg("123")).append(msg);
assertThat(msg.size(), is(2));
assertThat(msg.contentSize(), is(6L));
} |
public boolean isAlive(E endpoint) {
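        // An endpoint counts as alive while it has a ping record and its attempt count stays below maxPingAttempts.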
AtomicInteger attempts = pingAttempts.get(endpoint);
return attempts != null && attempts.get() < maxPingAttempts;
} | @Test
public void member_isNotAlive_whenNoHeartbeat() {
Member member = newMember(5000);
assertFalse(failureDetector.isAlive(member));
} |
static <T, R> CheckedFunction<T, R> decorateCheckedFunction(Observation observation,
CheckedFunction<T, R> function) {
return (T t) -> observation.observeChecked(() -> function.apply(t));
} | @Test
public void shouldDecorateCheckedFunctionAndReturnWithSuccess() throws Throwable {
given(helloWorldService.returnHelloWorldWithNameWithException("Tom"))
.willReturn("Hello world Tom");
CheckedFunction<String, String> function = Observations.decorateCheckedFunction(observation,
helloWorldService::returnHelloWorldWithNameWithException);
String result = function.apply("Tom");
assertThat(result).isEqualTo("Hello world Tom");
assertThatObservationWasStartedAndFinishedWithoutErrors();
then(helloWorldService).should().returnHelloWorldWithNameWithException("Tom");
} |
private Values() {} | @Test
@Category(NeedsRunner.class)
public void testValues() {
PCollection<KV<String, Integer>> input =
p.apply(
Create.of(Arrays.asList(TABLE))
.withCoder(KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of())));
PCollection<Integer> output = input.apply(Values.create());
PAssert.that(output).containsInAnyOrder(1, 2, 3, 4, 4);
p.run();
} |
static int evaluateLevenshteinDistanceAllHits(LevenshteinDistance levenshteinDistance, List<String> terms,
List<String> texts) {
logger.debug("evaluateLevenshteinDistanceAllHits {} {}", terms, texts);
int batchSize = terms.size();
int limit = texts.size() - batchSize + 1;
String toSearch = String.join(" ", terms);
int toReturn = 0;
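        // Slide a window of batchSize tokens across the text, counting every window within the allowed edit distance.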
for (int i = 0; i < limit; i++) {
String subText = String.join(" ", texts.subList(i, i + batchSize));
int distance = evaluateLevenshteinDistance(levenshteinDistance, toSearch, subText);
if (distance > -1) {
toReturn++;
}
}
return toReturn;
} | @Test
void evaluateLevenshteinDistanceAllHits() {
String wordSeparatorCharacterRE = "\\s+"; // brown-foxy does not match
Pattern pattern = Pattern.compile(wordSeparatorCharacterRE);
List<String> terms = KiePMMLTextIndex.splitText(TERM_0, pattern);
List<String> texts = KiePMMLTextIndex.splitText(TEXT_0, pattern);
LevenshteinDistance levenshteinDistance = new LevenshteinDistance(0);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistanceAllHits(levenshteinDistance, terms, texts)).isEqualTo(1);
levenshteinDistance = new LevenshteinDistance(1);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistanceAllHits(levenshteinDistance, terms, texts)).isEqualTo(2);
levenshteinDistance = new LevenshteinDistance(2);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistanceAllHits(levenshteinDistance, terms, texts)).isEqualTo(3);
//---
wordSeparatorCharacterRE = "[\\s\\-]"; // brown-foxy match
pattern = Pattern.compile(wordSeparatorCharacterRE);
terms = KiePMMLTextIndex.splitText(TERM_0, pattern);
texts = KiePMMLTextIndex.splitText(TEXT_0, pattern);
levenshteinDistance = new LevenshteinDistance(0);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistanceAllHits(levenshteinDistance, terms, texts)).isEqualTo(1);
levenshteinDistance = new LevenshteinDistance(1);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistanceAllHits(levenshteinDistance, terms, texts)).isEqualTo(3);
levenshteinDistance = new LevenshteinDistance(2);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistanceAllHits(levenshteinDistance, terms, texts)).isEqualTo(4);
} |
ValidationResult getElasticProfileValidationResultResponseFromBody(String responseBody) {
return new JSONResultMessageHandler().toValidationResult(responseBody);
} | @Test
public void shouldHandleValidationResponse() {
String responseBody = "[{\"key\":\"key-one\",\"message\":\"error on key one\"}, {\"key\":\"key-two\",\"message\":\"error on key two\"}]";
ValidationResult result = new ElasticAgentExtensionConverterV4().getElasticProfileValidationResultResponseFromBody(responseBody);
assertThat(result.isSuccessful(), is(false));
assertThat(result.getErrors().size(), is(2));
assertThat(result.getErrors().get(0).getKey(), is("key-one"));
assertThat(result.getErrors().get(0).getMessage(), is("error on key one"));
assertThat(result.getErrors().get(1).getKey(), is("key-two"));
assertThat(result.getErrors().get(1).getMessage(), is("error on key two"));
} |
public static String sign(String metadata, String key) throws RuntimeException {
return sign(metadata.getBytes(StandardCharsets.UTF_8), key);
} | @Test
void testEncryptWithNoParameters() {
String encryptWithNoParams = SignatureUtils.sign(null, "TestMethod#hello", "TOKEN");
Assertions.assertEquals(encryptWithNoParams, "2DGkTcyXg4plU24rY8MZkEJwOMRW3o+wUP3HssRc3EE=");
} |
@Override
public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options) {
final KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
final long now = time.milliseconds();
final Call call = new Call("unregisterBroker", calcDeadlineMs(now, options.timeoutMs()),
new LeastLoadedNodeProvider()) {
@Override
UnregisterBrokerRequest.Builder createRequest(int timeoutMs) {
UnregisterBrokerRequestData data =
new UnregisterBrokerRequestData().setBrokerId(brokerId);
return new UnregisterBrokerRequest.Builder(data);
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
final UnregisterBrokerResponse response =
(UnregisterBrokerResponse) abstractResponse;
Errors error = Errors.forCode(response.data().errorCode());
switch (error) {
case NONE:
future.complete(null);
break;
case REQUEST_TIMED_OUT:
throw error.exception();
default:
log.error("Unregister broker request for broker ID {} failed: {}",
brokerId, error.message());
future.completeExceptionally(error.exception());
break;
}
}
@Override
void handleFailure(Throwable throwable) {
future.completeExceptionally(throwable);
}
};
runnable.call(call, now);
return new UnregisterBrokerResult(future);
} | @Test
public void testUnregisterBrokerTimeoutAndSuccessRetry() throws ExecutionException, InterruptedException {
int nodeId = 1;
try (final AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(
NodeApiVersions.create(ApiKeys.UNREGISTER_BROKER.id, (short) 0, (short) 0));
env.kafkaClient().prepareResponse(prepareUnregisterBrokerResponse(Errors.REQUEST_TIMED_OUT, 0));
env.kafkaClient().prepareResponse(prepareUnregisterBrokerResponse(Errors.NONE, 0));
UnregisterBrokerResult result = env.adminClient().unregisterBroker(nodeId);
// Validate response
assertNotNull(result.all());
result.all().get();
}
} |
private AlarmEntityId(final URI uri) {
super(uri);
} | @Test(expected = IllegalArgumentException.class)
public void verifyCorruptSchemaRejected() {
alarmEntityId("other:");
} |
@Override
public void editSchedule() {
// grab all changed metrics and update bindings
collector.clear();
queueMetrics.getMetrics(collector, false);
jvmMetrics.getMetrics(collector, false);
for (MetricsRecord record : collector.getRecords()) {
for (AbstractMetric am : record.metrics()) {
bindings.put(am.name().replace(' ', '_'), am.value());
}
}
// evaluate all invariants with new bindings
try {
// fastpath check all invariants at once (much faster)
boolean allInvHold = (boolean) combinedInvariants.eval(bindings);
// if any fails, check individually to produce more insightful log
if (!allInvHold) {
for (Map.Entry<String, CompiledScript> e : invariants.entrySet()) {
boolean invariantsHold = (boolean) e.getValue().eval(bindings);
if (!invariantsHold) {
// filter bindings to produce minimal set
Map<String, Object> matchingBindings =
extractMatchingBindings(e.getKey(), bindings);
logOrThrow("Invariant \"" + e.getKey()
+ "\" is NOT holding, with bindings: " + matchingBindings);
}
}
}
} catch (ScriptException e) {
logOrThrow(e.getMessage());
}
} | @Test(timeout = 5000)
public void testManyRuns() {
QueueMetrics qm =
QueueMetrics.forQueue(metricsSystem, "root", null, false, conf);
qm.setAvailableResourcesToQueue(RMNodeLabelsManager.NO_LABEL,
Resource.newInstance(1, 1));
int numIterations = 1000;
long start = System.currentTimeMillis();
for (int i = 0; i < numIterations; i++) {
ic.editSchedule();
}
long end = System.currentTimeMillis();
System.out.println("Runtime per iteration (avg of " + numIterations
+ " iterations): " + (end - start) + " tot time");
} |
public boolean isWatchStart() {
return isWatchStart;
} | @Test
public void isWatchStartTest() {
PlainPermissionManager plainPermissionManager = new PlainPermissionManager();
Assert.assertTrue(plainPermissionManager.isWatchStart());
} |
public static List<String> getAllConfiguredLoggers() {
LoggerContext context = (LoggerContext) LogManager.getContext(false);
Configuration config = context.getConfiguration();
return config.getLoggers().values().stream().map(LoggerConfig::toString).collect(Collectors.toList());
} | @Test
public void testGetAllConfiguredLoggers() {
List<String> allLoggers = LoggerUtils.getAllConfiguredLoggers();
assertEquals(allLoggers.size(), 2);
assertTrue(allLoggers.contains(ROOT));
assertTrue(allLoggers.contains(PINOT));
} |
@Override
public synchronized T getValue(int index) {
BarSeries series = getBarSeries();
if (series == null) {
// Series is null; the indicator doesn't need cache.
// (e.g. simple computation of the value)
// --> Calculating the value
T result = calculate(index);
if (log.isTraceEnabled()) {
log.trace("{}({}): {}", this, index, result);
}
return result;
}
// Series is not null
final int removedBarsCount = series.getRemovedBarsCount();
final int maximumResultCount = series.getMaximumBarCount();
T result;
if (index < removedBarsCount) {
// Result already removed from cache
if (log.isTraceEnabled()) {
log.trace("{}: result from bar {} already removed from cache, use {}-th instead",
getClass().getSimpleName(), index, removedBarsCount);
}
increaseLengthTo(removedBarsCount, maximumResultCount);
highestResultIndex = removedBarsCount;
result = results.get(0);
if (result == null) {
// It should be "result = calculate(removedBarsCount);".
// We use "result = calculate(0);" as a workaround
// to fix issue #120 (https://github.com/mdeverdelhan/ta4j/issues/120).
result = calculate(0);
results.set(0, result);
}
} else {
if (index == series.getEndIndex()) {
// Don't cache result if last bar
result = calculate(index);
} else {
increaseLengthTo(index, maximumResultCount);
if (index > highestResultIndex) {
// Result not calculated yet
highestResultIndex = index;
result = calculate(index);
results.set(results.size() - 1, result);
} else {
// Result covered by current cache
int resultInnerIndex = results.size() - 1 - (highestResultIndex - index);
result = results.get(resultInnerIndex);
if (result == null) {
result = calculate(index);
results.set(resultInnerIndex, result);
}
}
}
}
if (log.isTraceEnabled()) {
log.trace("{}({}): {}", this, index, result);
}
return result;
} | @Test
public void recursiveCachedIndicatorOnMovingBarSeriesShouldNotCauseStackOverflow() {
// Added to check issue #120: https://github.com/mdeverdelhan/ta4j/issues/120
// See also: CachedIndicator#getValue(int index)
series = new MockBarSeries(numFunction);
series.setMaximumBarCount(5);
assertEquals(5, series.getBarCount());
ZLEMAIndicator zlema = new ZLEMAIndicator(new ClosePriceIndicator(series), 1);
try {
assertNumEquals(4996, zlema.getValue(8));
} catch (Throwable t) {
fail(t.getMessage());
}
} |
public CompletableFuture<Triple<MessageExt, String, Boolean>> getMessageAsync(String topic, long offset, int queueId, String brokerName, boolean deCompressBody) {
MessageStore messageStore = brokerController.getMessageStoreByBrokerName(brokerName);
if (messageStore != null) {
return messageStore.getMessageAsync(innerConsumerGroupName, topic, queueId, offset, 1, null)
.thenApply(result -> {
if (result == null) {
LOG.warn("getMessageResult is null , innerConsumerGroupName {}, topic {}, offset {}, queueId {}", innerConsumerGroupName, topic, offset, queueId);
return Triple.of(null, "getMessageResult is null", false); // local store, so no retry
}
List<MessageExt> list = decodeMsgList(result, deCompressBody);
if (list == null || list.isEmpty()) {
// OFFSET_FOUND_NULL returned by TieredMessageStore indicates exception occurred
boolean needRetry = GetMessageStatus.OFFSET_FOUND_NULL.equals(result.getStatus())
&& messageStore instanceof TieredMessageStore;
LOG.warn("Can not get msg , topic {}, offset {}, queueId {}, needRetry {}, result is {}",
topic, offset, queueId, needRetry, result);
return Triple.of(null, "Can not get msg", needRetry);
}
return Triple.of(list.get(0), "", false);
});
} else {
return getMessageFromRemoteAsync(topic, offset, queueId, brokerName);
}
} | @Test
public void getMessageAsyncTest_localStore_decodeNothing_TieredMessageStore() throws Exception {
when(brokerController.getMessageStoreByBrokerName(any())).thenReturn(tieredMessageStore);
for (GetMessageStatus status : GetMessageStatus.values()) {
GetMessageResult getMessageResult = new GetMessageResult();
getMessageResult.setStatus(status);
when(tieredMessageStore.getMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyInt(), any()))
.thenReturn(CompletableFuture.completedFuture(getMessageResult));
Triple<MessageExt, String, Boolean> rst = escapeBridge.getMessageAsync(TEST_TOPIC, 0, DEFAULT_QUEUE_ID, BROKER_NAME, false).join();
Assert.assertNull(rst.getLeft());
Assert.assertEquals("Can not get msg", rst.getMiddle());
if (GetMessageStatus.OFFSET_FOUND_NULL.equals(status)) {
Assert.assertTrue(rst.getRight()); // TieredMessageStore returns OFFSET_FOUND_NULL, need retry
} else {
Assert.assertFalse(rst.getRight()); // other status, like DefaultMessageStore, no retry
}
}
} |
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
ParseContext context) throws IOException, SAXException, TikaException {
//set OfficeParserConfig if the user hasn't specified one
configure(context);
// Have the OOXML file processed
OOXMLExtractorFactory.parse(stream, handler, metadata, context);
} | @Test
public void testNoFormat() throws Exception {
ContentHandler handler = new BodyContentHandler();
Metadata metadata = new Metadata();
try (InputStream stream = getResourceAsStream("/test-documents/testWORD_no_format.docx")) {
new OOXMLParser().parse(stream, handler, metadata, new ParseContext());
}
String content = handler.toString();
assertContains("This is a piece of text that causes an exception", content);
} |
@Override
public int read() throws EOFException {
return (pos < size) ? (data[pos++] & 0xff) : -1;
} | @Test(expected = IndexOutOfBoundsException.class)
public void testReadForBOffLen_negativeOffset() throws Exception {
in.read(INIT_DATA, -10, 1);
} |
@Override
public Object saveContent(@NonNull StaplerRequest staplerRequest, @NonNull Item item) {
JSONObject body;
try {
body = JSONObject.fromObject(IOUtils.toString(staplerRequest.getReader()));
} catch (IOException e) {
throw new ServiceException.UnexpectedErrorException("Failed to read request body");
}
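        // Pin the $class property so Stapler binds the JSON payload to GithubScmSaveFileRequest below.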
body.put("$class", "io.jenkins.blueocean.blueocean_github_pipeline.GithubScmSaveFileRequest");
GithubScmSaveFileRequest request = staplerRequest.bindJSON(GithubScmSaveFileRequest.class, body);
if(request == null){
throw new ServiceException.BadRequestException(new ErrorMessage(400, "Failed to bind request"));
}
ScmContentProvider scmContentProvider = ScmContentProvider.resolve(item);
if(scmContentProvider != null){
return saveContent(request, item);
}
throw new ServiceException.BadRequestException("No save scm content provider found for pipeline: " + item.getFullName());
} | @Test
public void unauthorizedSaveContentToMbpGHEShouldFail() throws UnirestException, IOException {
User alice = User.get("alice");
alice.setFullName("Alice Cooper");
alice.addProperty(new Mailer.UserProperty("[email protected]"));
String aliceCredentialId = createGithubEnterpriseCredential(alice);
StaplerRequest staplerRequest = mockStapler(GithubEnterpriseScm.ID);
GitContent content = new GitContent.Builder().autoCreateBranch(true).base64Data("c2xlZXAgMTUKbm9kZSB7CiAgY2hlY2tvdXQgc2NtCiAgc2ggJ2xzIC1sJwp9\\nCnNsZWVwIDE1Cg==\\n")
.branch("test1").message("another commit").owner("cloudbeers").path("Jankinsfile").repo("PR-demo").sha("e23b8ef5c2c4244889bf94db6c05cc08ea138aef").build();
when(staplerRequest.bindJSON(Mockito.eq(GithubScmSaveFileRequest.class), Mockito.any(JSONObject.class))).thenReturn(new GithubScmSaveFileRequest(content));
MultiBranchProject mbp = mockMbp(aliceCredentialId, user, GithubEnterpriseScm.DOMAIN_NAME);
String request = "{\n" +
" \"content\" : {\n" +
" \"message\" : \"first commit\",\n" +
" \"path\" : \"Jenkinsfile\",\n" +
" \"branch\" : \"test1\",\n" +
" \"repo\" : \"PR-demo\",\n" +
" \"sha\" : \"e23b8ef5c2c4244889bf94db6c05cc08ea138aef\",\n" +
" \"base64Data\" : "+"\"c2xlZXAgMTUKbm9kZSB7CiAgY2hlY2tvdXQgc2NtCiAgc2ggJ2xzIC1sJwp9\\nCnNsZWVwIDE1Cg==\\n\""+
" }\n" +
"}";
when(staplerRequest.getReader()).thenReturn(new BufferedReader(new StringReader(request), request.length()));
try {
//Bob trying to access content but his credential is not setup so should fail
new GithubScmContentProvider().saveContent(staplerRequest, mbp);
}catch (ServiceException.PreconditionRequired e){
assertEquals("Can't access content from github: no credential found", e.getMessage());
return;
}
fail("Should have failed with PreConditionException");
} |
@Override
public List<ImportValidationFeedback> verifyRule( Object subject ) {
List<ImportValidationFeedback> feedback = new ArrayList<>();
if ( !isEnabled() || !( subject instanceof JobMeta ) ) {
return feedback;
}
JobMeta jobMeta = (JobMeta) subject;
String description = jobMeta.getDescription();
if ( null != description && minLength <= description.length() ) {
feedback.add( new ImportValidationFeedback(
this, ImportValidationResultType.APPROVAL, "A description is present" ) );
} else {
feedback.add( new ImportValidationFeedback(
this, ImportValidationResultType.ERROR, "A description is not present or too short" ) );
}
return feedback;
} | @Test
public void testVerifyRule_ShortDescription_DisabledRule() {
JobHasDescriptionImportRule importRule = getImportRule( 10, false );
JobMeta jobMeta = new JobMeta();
jobMeta.setDescription( "short" );
List<ImportValidationFeedback> feedbackList = importRule.verifyRule( null );
assertNotNull( feedbackList );
assertTrue( feedbackList.isEmpty() );
} |
public static long elapsed(long started, long finished) {
return Times.elapsed(started, finished, true);
} | @Test
void testPositiveStartandFinishTimes() {
long elapsed = Times.elapsed(5, 10, true);
assertEquals(5, elapsed, "Elapsed time is not 5");
elapsed = Times.elapsed(5, 10, false);
assertEquals(5, elapsed, "Elapsed time is not 5");
} |
Record deserialize(Object data) {
return (Record) fieldDeserializer.value(data);
} | @Test
public void testSchemaDeserialize() {
StandardStructObjectInspector schemaObjectInspector =
ObjectInspectorFactory.getStandardStructObjectInspector(
Arrays.asList("0:col1", "1:col2"),
Arrays.asList(
PrimitiveObjectInspectorFactory.writableLongObjectInspector,
PrimitiveObjectInspectorFactory.writableStringObjectInspector));
Deserializer deserializer =
new Deserializer.Builder()
.schema(CUSTOMER_SCHEMA)
.writerInspector((StructObjectInspector) IcebergObjectInspector.create(CUSTOMER_SCHEMA))
.sourceInspector(schemaObjectInspector)
.build();
Record expected = GenericRecord.create(CUSTOMER_SCHEMA);
expected.set(0, 1L);
expected.set(1, "Bob");
Record actual = deserializer.deserialize(new Object[] {new LongWritable(1L), new Text("Bob")});
assertThat(actual).isEqualTo(expected);
} |
public static void resetDeepLinkProcessor() {
mDeepLinkProcessor = null;
} | @Test
public void resetDeepLinkProcessor() {
DeepLinkManager.resetDeepLinkProcessor();
} |
@SafeVarargs
public static void rethrowFromCollection(Collection<?> values, Class<? extends Throwable> ... ignored) throws Throwable {
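        // Rethrows the first Throwable found in the collection whose type is not assignable to any ignored class.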
outerLoop:
for (Object value : values) {
if (value instanceof Throwable throwable) {
for (Class<? extends Throwable> ignoredClass : ignored) {
if (ignoredClass.isAssignableFrom(value.getClass())) {
continue outerLoop;
}
}
throw throwable;
}
}
} | @Test
public void testRethrowFromCollection_when_notIgnoredThrowableOnList_then_isRethrown() {
assertThatExceptionOfType(TestException.class)
.isThrownBy(() -> rethrowFromCollection(Collections.singleton(new TestException())));
assertThatExceptionOfType(TestException.class)
.isThrownBy(() -> rethrowFromCollection(Collections.singleton(new TestException()), NullPointerException.class));
assertThatExceptionOfType(TestException.class)
.isThrownBy(() -> rethrowFromCollection(asList(new NullPointerException(), new TestException()), NullPointerException.class));
} |
@Override
public BasicTypeDefine reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case BOOLEAN:
builder.columnType(PG_BOOLEAN);
builder.dataType(PG_BOOLEAN);
break;
case TINYINT:
case SMALLINT:
builder.columnType(PG_SMALLINT);
builder.dataType(PG_SMALLINT);
break;
case INT:
builder.columnType(PG_INTEGER);
builder.dataType(PG_INTEGER);
break;
case BIGINT:
builder.columnType(PG_BIGINT);
builder.dataType(PG_BIGINT);
break;
case FLOAT:
builder.columnType(PG_REAL);
builder.dataType(PG_REAL);
break;
case DOUBLE:
builder.columnType(PG_DOUBLE_PRECISION);
builder.dataType(PG_DOUBLE_PRECISION);
break;
case DECIMAL:
if (column.getSourceType() != null
&& column.getSourceType().equalsIgnoreCase(PG_MONEY)) {
builder.columnType(PG_MONEY);
builder.dataType(PG_MONEY);
} else {
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.columnType(String.format("%s(%s,%s)", PG_NUMERIC, precision, scale));
builder.dataType(PG_NUMERIC);
builder.precision(precision);
builder.scale(scale);
}
break;
case BYTES:
builder.columnType(PG_BYTEA);
builder.dataType(PG_BYTEA);
break;
case STRING:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(PG_TEXT);
builder.dataType(PG_TEXT);
} else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
builder.columnType(
String.format("%s(%s)", PG_VARCHAR, column.getColumnLength()));
builder.dataType(PG_VARCHAR);
} else {
builder.columnType(PG_TEXT);
builder.dataType(PG_TEXT);
}
break;
case DATE:
builder.columnType(PG_DATE);
builder.dataType(PG_DATE);
break;
case TIME:
Integer timeScale = column.getScale();
if (timeScale != null && timeScale > MAX_TIME_SCALE) {
timeScale = MAX_TIME_SCALE;
log.warn(
"The time column {} type time({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to time({})",
column.getName(),
column.getScale(),
                            MAX_TIME_SCALE,
timeScale);
}
if (timeScale != null && timeScale > 0) {
builder.columnType(String.format("%s(%s)", PG_TIME, timeScale));
} else {
builder.columnType(PG_TIME);
}
builder.dataType(PG_TIME);
builder.scale(timeScale);
break;
case TIMESTAMP:
Integer timestampScale = column.getScale();
if (timestampScale != null && timestampScale > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
if (timestampScale != null && timestampScale > 0) {
builder.columnType(String.format("%s(%s)", PG_TIMESTAMP, timestampScale));
} else {
builder.columnType(PG_TIMESTAMP);
}
builder.dataType(PG_TIMESTAMP);
builder.scale(timestampScale);
break;
case ARRAY:
ArrayType arrayType = (ArrayType) column.getDataType();
SeaTunnelDataType elementType = arrayType.getElementType();
switch (elementType.getSqlType()) {
case BOOLEAN:
builder.columnType(PG_BOOLEAN_ARRAY);
builder.dataType(PG_BOOLEAN_ARRAY);
break;
case TINYINT:
case SMALLINT:
builder.columnType(PG_SMALLINT_ARRAY);
builder.dataType(PG_SMALLINT_ARRAY);
break;
case INT:
builder.columnType(PG_INTEGER_ARRAY);
builder.dataType(PG_INTEGER_ARRAY);
break;
case BIGINT:
builder.columnType(PG_BIGINT_ARRAY);
builder.dataType(PG_BIGINT_ARRAY);
break;
case FLOAT:
builder.columnType(PG_REAL_ARRAY);
builder.dataType(PG_REAL_ARRAY);
break;
case DOUBLE:
builder.columnType(PG_DOUBLE_PRECISION_ARRAY);
builder.dataType(PG_DOUBLE_PRECISION_ARRAY);
break;
case BYTES:
builder.columnType(PG_BYTEA);
builder.dataType(PG_BYTEA);
break;
case STRING:
builder.columnType(PG_TEXT_ARRAY);
builder.dataType(PG_TEXT_ARRAY);
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.POSTGRESQL,
elementType.getSqlType().name(),
column.getName());
}
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.POSTGRESQL,
column.getDataType().getSqlType().name(),
column.getName());
}
return builder.build();
} | @Test
public void testReconvertLong() {
Column column = PhysicalColumn.builder().name("test").dataType(BasicType.LONG_TYPE).build();
BasicTypeDefine typeDefine = PostgresTypeConverter.INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals(PostgresTypeConverter.PG_BIGINT, typeDefine.getColumnType());
Assertions.assertEquals(PostgresTypeConverter.PG_BIGINT, typeDefine.getDataType());
} |
public static String getS3EncryptionContextBase64Encoded(
String bucket,
Configuration conf,
boolean propagateExceptions) throws IOException {
try {
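            // Split the configured context into key=value pairs, render them as JSON, and Base64-encode the bytes.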
final String encryptionContextValue = getS3EncryptionContext(bucket, conf);
if (StringUtils.isBlank(encryptionContextValue)) {
return "";
}
final Map<String, String> encryptionContextMap = S3AUtils
.getTrimmedStringCollectionSplitByEquals(encryptionContextValue);
if (encryptionContextMap.isEmpty()) {
return "";
}
final String encryptionContextJson = JacksonUtil.getSharedWriter()
.writeValueAsString(encryptionContextMap);
return Base64.encodeBase64String(encryptionContextJson.getBytes(StandardCharsets.UTF_8));
} catch (IOException e) {
if (propagateExceptions) {
throw e;
}
LOG.warn("Cannot retrieve {} for bucket {}",
S3_ENCRYPTION_CONTEXT, bucket, e);
return "";
}
} | @Test
public void testGetS3EncryptionContextBase64Encoded() throws IOException {
Configuration configuration = new Configuration(false);
configuration.set(S3_ENCRYPTION_CONTEXT, GLOBAL_CONTEXT);
final String result = S3AEncryption.getS3EncryptionContextBase64Encoded("bucket",
configuration, true);
final String decoded = new String(Base64.decodeBase64(result), StandardCharsets.UTF_8);
final TypeReference<Map<String, String>> typeRef = new TypeReference<Map<String, String>>() {};
final Map<String, String> resultMap = new ObjectMapper().readValue(decoded, typeRef);
Assert.assertEquals("hadoop", resultMap.get("project"));
Assert.assertEquals("HADOOP-19197", resultMap.get("jira"));
} |
@ConstantFunction(name = "milliseconds_add", argTypes = {DATETIME, INT}, returnType = DATETIME, isMonotonic = true)
public static ConstantOperator millisecondsAdd(ConstantOperator date, ConstantOperator millisecond) {
return ConstantOperator.createDatetimeOrNull(date.getDatetime().plus(millisecond.getInt(), ChronoUnit.MILLIS));
} | @Test
public void millisecondsAdd() {
assertEquals("2015-03-23T09:23:55.010",
ScalarOperatorFunctions.millisecondsAdd(O_DT_20150323_092355, O_INT_10).getDatetime().toString());
} |
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
if (resourceInfo.getResourceMethod().isAnnotationPresent(SupportedSearchVersion.class) ||
resourceInfo.getResourceMethod().isAnnotationPresent(SupportedSearchVersions.class)) {
checkVersion(resourceInfo.getResourceMethod().getAnnotationsByType(SupportedSearchVersion.class));
} else if (resourceInfo.getResourceClass().isAnnotationPresent(SupportedSearchVersion.class) ||
resourceInfo.getResourceClass().isAnnotationPresent(SupportedSearchVersions.class)) {
checkVersion(resourceInfo.getResourceClass().getAnnotationsByType(SupportedSearchVersion.class));
}
} | @Test
public void testFilterWithInvalidDistribution() throws Exception {
final Method resourceMethod = TestResourceWithMethodAnnotation.class.getMethod("methodWithAnnotation");
when(resourceInfo.getResourceMethod()).thenReturn(resourceMethod);
when(versionProvider.get()).thenReturn(elasticSearchV6);
Exception exception = assertThrows(InternalServerErrorException.class, () -> {
filter.filter(requestContext);
});
assertTrue(exception.getMessage().contains("OpenSearch"));
verify(versionProvider, times(1)).get();
} |
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = trees.length;
double[][] prediction = new double[ntrees][n];
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = b;
for (int i = 0; i < ntrees; i++) {
base += shrinkage * trees[i].predict(xj);
prediction[i][j] = base;
}
}
return prediction;
} | @Test
public void testAbaloneHuber() {
test(Loss.huber(0.9), "abalone", Abalone.formula, Abalone.train, 2.2228);
} |
public static List<String> parseFilterColumnNameList(String filterExpression) {
if (isNullOrWhitespaceOnly(filterExpression)) {
return new ArrayList<>();
}
SqlSelect sqlSelect = parseFilterExpression(filterExpression);
if (!sqlSelect.hasWhere()) {
return new ArrayList<>();
}
SqlNode where = sqlSelect.getWhere();
return parseColumnNameList(where);
} | @Test
public void testParseFilterColumnNameList() {
List<String> computedColumnNames =
TransformParser.parseFilterColumnNameList(" uniq_id > 10 and id is not null");
Assertions.assertThat(computedColumnNames.toArray())
.isEqualTo(new String[] {"uniq_id", "id"});
} |
public static Formatter forDates(@Nonnull String format) {
return new DateFormat(format);
} | @Test
public void testWeekDates() {
Formatter f = forDates("IYYY-\"W\"IW-FMID");
check(LocalDate.of(2022, 10, 25), f, "2022-W43-2");
check(LocalDate.of(2019, 12, 30), f, "2020-W01-1");
f = forDates("YYYY-MM-DD \"is the\" FMIDDDth \"day of week-year\" FMIYYY.");
check(LocalDate.of(2008, 12, 29), f, "2008-12-29 is the 1st day of week-year 2009.");
check(LocalDate.of(2010, 1, 3), f, "2010-01-03 is the 371st day of week-year 2009.");
f = forDates("IYYY IYY IY I - FMIYYY FMIYY FMIY FMI");
check(LocalDate.of(1, 1, 1), f, "0001 001 01 1 - 1 1 1 1");
} |
@Override
public boolean canFastDuplicate(StreamStateHandle stateHandle) throws IOException {
if (!(stateHandle instanceof FileStateHandle)) {
return false;
}
final Path srcPath = ((FileStateHandle) stateHandle).getFilePath();
final Path dst = getNewDstPath(srcPath.getName());
return fs.canCopyPaths(srcPath, dst);
} | @Test
void testCanDuplicate() throws IOException {
final FsCheckpointStateToolset stateToolset =
new FsCheckpointStateToolset(
new Path("test-path"), new TestDuplicatingFileSystem());
final boolean canFastDuplicate =
stateToolset.canFastDuplicate(
new FileStateHandle(new Path("old-test-path", "test-file"), 0));
assertThat(canFastDuplicate).isTrue();
} |
CompletableFuture<Void> beginExecute(
@Nonnull List<? extends Tasklet> tasklets,
@Nonnull CompletableFuture<Void> cancellationFuture,
@Nonnull ClassLoader jobClassLoader
) {
final ExecutionTracker executionTracker = new ExecutionTracker(tasklets.size(), cancellationFuture);
try {
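            // Partition tasklets by cooperativeness, evaluating isCooperative() under the job classloader.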
final Map<Boolean, List<Tasklet>> byCooperation =
tasklets.stream().collect(partitioningBy(
tasklet -> doWithClassLoader(jobClassLoader, tasklet::isCooperative)
));
submitCooperativeTasklets(executionTracker, jobClassLoader, byCooperation.get(true));
submitBlockingTasklets(executionTracker, jobClassLoader, byCooperation.get(false));
} catch (Throwable t) {
executionTracker.future.internalCompleteExceptionally(t);
}
return executionTracker.future;
} | @Test
public void when_tryCancelOnReturnedFuture_then_fails() {
// Given
final MockTasklet t = new MockTasklet().callsBeforeDone(Integer.MAX_VALUE);
CompletableFuture<Void> f = tes.beginExecute(singletonList(t), cancellationFuture, classLoader);
// When - Then
assertThrows(UnsupportedOperationException.class, () -> f.cancel(true));
} |
@Override
public boolean overlap(final Window other) throws IllegalArgumentException {
if (getClass() != other.getClass()) {
throw new IllegalArgumentException("Cannot compare windows of different type. Other window has type "
+ other.getClass() + ".");
}
final TimeWindow otherWindow = (TimeWindow) other;
return startMs < otherWindow.endMs && otherWindow.startMs < endMs;
} | @Test
public void shouldNotOverlapIfOtherWindowIsBeforeThisWindow() {
/*
         * This:        [-------)
         * Other: [-----)
*/
assertFalse(window.overlap(new TimeWindow(0, 25)));
assertFalse(window.overlap(new TimeWindow(0, start - 1)));
assertFalse(window.overlap(new TimeWindow(0, start)));
} |
public OpenAPI read(Class<?> cls) {
return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
} | @Test(description = "RequestBody with filter")
public void testRequestBodyWithFilter() {
Components components = new Components();
components.addRequestBodies("User", new RequestBody());
OpenAPI oas = new OpenAPI()
.info(new Info().description("info"))
.components(components);
Reader reader = new Reader(oas);
OpenAPI openAPI = reader.read(SimpleRequestBodyResource.class);
OpenAPISpecFilter filterImpl = new RefRequestBodyFilter();
SpecFilter f = new SpecFilter();
openAPI = f.filter(openAPI, filterImpl, null, null, null);
String yaml = "openapi: 3.0.1\n" +
"info:\n" +
" description: info\n" +
"paths:\n" +
" /:\n" +
" get:\n" +
" summary: Simple get operation\n" +
" description: Defines a simple get operation with a payload complex input object\n" +
" operationId: sendPayload\n" +
" requestBody:\n" +
" $ref: '#/components/requestBodies/User'\n" +
" responses:\n" +
" default:\n" +
" description: default response\n" +
" content:\n" +
" '*/*': {}\n" +
" deprecated: true\n" +
"components:\n" +
" schemas:\n" +
" User:\n" +
" type: object\n" +
" properties:\n" +
" id:\n" +
" type: integer\n" +
" format: int64\n" +
" username:\n" +
" type: string\n" +
" firstName:\n" +
" type: string\n" +
" lastName:\n" +
" type: string\n" +
" email:\n" +
" type: string\n" +
" password:\n" +
" type: string\n" +
" phone:\n" +
" type: string\n" +
" userStatus:\n" +
" type: integer\n" +
" description: User Status\n" +
" format: int32\n" +
" xml:\n" +
" name: User\n" +
" requestBodies:\n" +
" User: {}\n";
SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
} |
@Override
public Predicate negate() {
return new NotEqualPredicate(attributeName, value);
} | @Test
public void negate_thenReturnNotEqualPredicate() {
EqualPredicate equalPredicate = new EqualPredicate("foo", 1);
NotEqualPredicate negate = (NotEqualPredicate) equalPredicate.negate();
assertEquals("foo", negate.attributeName);
assertEquals(1, negate.value);
} |
public String lookup(final String name, final String uriParamName, final boolean isReLookup)
{
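        // Track lookup duty-cycle timing; the finally block measures and logs even when the delegate throws.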
final long beginNs = clock.nanoTime();
maxTimeTracker.update(beginNs);
String resolvedName = null;
try
{
resolvedName = delegateResolver.lookup(name, uriParamName, isReLookup);
return resolvedName;
}
finally
{
final long endNs = clock.nanoTime();
maxTimeTracker.measureAndUpdate(endNs);
logLookup(delegateResolver.getClass().getSimpleName(), endNs - beginNs, name, isReLookup, resolvedName);
}
} | @Test
void lookupShouldMeasureExecutionTimeEvenIfExceptionIsThrown()
{
final NameResolver delegateResolver = mock(NameResolver.class);
final Error error = new Error("broken");
when(delegateResolver.lookup(anyString(), anyString(), anyBoolean())).thenThrow(error);
final NanoClock clock = mock(NanoClock.class);
final long beginNs = 236745823658245L;
final long endNs = 7534957349857893459L;
when(clock.nanoTime()).thenReturn(beginNs, endNs);
final DutyCycleTracker maxTime = mock(DutyCycleTracker.class);
final TimeTrackingNameResolver resolver = new TimeTrackingNameResolver(delegateResolver, clock, maxTime);
final String name = "test:555";
final String endpoint = "control";
final boolean isReLookup = true;
final Error exception = assertThrowsExactly(Error.class, () -> resolver.lookup(name, endpoint, isReLookup));
assertSame(error, exception);
final InOrder inOrder = inOrder(delegateResolver, clock, maxTime);
inOrder.verify(clock).nanoTime();
inOrder.verify(maxTime).update(beginNs);
inOrder.verify(delegateResolver).lookup(name, endpoint, isReLookup);
inOrder.verify(clock).nanoTime();
inOrder.verify(maxTime).measureAndUpdate(endNs);
inOrder.verifyNoMoreInteractions();
} |
@VisibleForTesting
static Optional<Catalog> loadCatalog(Configuration conf, String catalogName) {
String catalogType = getCatalogType(conf, catalogName);
if (NO_CATALOG_TYPE.equalsIgnoreCase(catalogType)) {
return Optional.empty();
} else {
String name = catalogName == null ? ICEBERG_DEFAULT_CATALOG_NAME : catalogName;
return Optional.of(
CatalogUtil.buildIcebergCatalog(
name, getCatalogProperties(conf, name, catalogType), conf));
}
} | @Test
public void testLoadCatalogUnknown() {
String catalogName = "barCatalog";
conf.set(
InputFormatConfig.catalogPropertyConfigKey(catalogName, CatalogUtil.ICEBERG_CATALOG_TYPE),
"fooType");
assertThatThrownBy(() -> Catalogs.loadCatalog(conf, catalogName))
.isInstanceOf(UnsupportedOperationException.class)
.hasMessage("Unknown catalog type: fooType");
} |
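
A hedged sketch of driving loadCatalog from job configuration; the iceberg.catalog.<name>.* key layout and the "warehouse" property are assumptions inferred from the catalogPropertyConfigKey call above, not verified constants.
Configuration conf = new Configuration();
conf.set("iceberg.catalog.prod.type", "hadoop");                        // assumed key layout
conf.set("iceberg.catalog.prod.warehouse", "hdfs://nn:8020/warehouse"); // assumed key layout
Optional<Catalog> catalog = Catalogs.loadCatalog(conf, "prod");         // Optional.empty() only for the no-catalog type
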
static InvokerResult convertToResult(Query query, SearchProtocol.SearchReply protobuf,
DocumentDatabase documentDatabase, int partId, int distKey)
{
InvokerResult result = new InvokerResult(query, protobuf.getHitsCount());
result.getResult().setTotalHitCount(protobuf.getTotalHitCount());
result.getResult().setCoverage(convertToCoverage(protobuf));
convertSearchReplyErrors(result.getResult(), protobuf.getErrorsList());
List<String> featureNames = protobuf.getMatchFeatureNamesList();
var haveMatchFeatures = ! featureNames.isEmpty();
MatchFeatureData matchFeatures = haveMatchFeatures ? new MatchFeatureData(featureNames) : null;
var haveGrouping = ! protobuf.getGroupingBlob().isEmpty();
if (haveGrouping) {
BufferSerializer buf = new BufferSerializer(new GrowableByteBuffer(protobuf.getGroupingBlob().asReadOnlyByteBuffer()));
int cnt = buf.getInt(null);
ArrayList<Grouping> list = new ArrayList<>(cnt);
for (int i = 0; i < cnt; i++) {
Grouping g = new Grouping();
g.deserialize(buf);
list.add(g);
}
GroupingListHit hit = new GroupingListHit(list, documentDatabase, query);
result.getResult().hits().add(hit);
}
for (var replyHit : protobuf.getHitsList()) {
LeanHit hit = (replyHit.getSortData().isEmpty())
? new LeanHit(replyHit.getGlobalId().toByteArray(), partId, distKey, replyHit.getRelevance())
: new LeanHit(replyHit.getGlobalId().toByteArray(), partId, distKey, replyHit.getRelevance(), replyHit.getSortData().toByteArray());
if (haveMatchFeatures) {
var hitFeatures = matchFeatures.addHit();
var featureList = replyHit.getMatchFeaturesList();
if (featureList.size() == featureNames.size()) {
int idx = 0;
for (SearchProtocol.Feature value : featureList) {
ByteString tensorBlob = value.getTensor();
if (tensorBlob.isEmpty()) {
hitFeatures.set(idx++, value.getNumber());
} else {
hitFeatures.set(idx++, tensorBlob.toByteArray());
}
}
hit.addMatchFeatures(hitFeatures);
} else {
result.getResult().hits().addError(ErrorMessage.createBackendCommunicationError("mismatch in match feature sizes"));
}
}
result.getLeanHits().add(hit);
}
var slimeTrace = protobuf.getSlimeTrace();
if ( ! slimeTrace.isEmpty()) {
var traces = new Value.ArrayValue();
traces.add(new SlimeAdapter(BinaryFormat.decode(slimeTrace.toByteArray()).get()));
query.trace(traces, query.getTrace().getLevel());
}
return result;
} | @Test
void testSearchReplyDecodingWithSortData() {
Query q = new Query("search/?query=test");
InvokerResult result = ProtobufSerialization.convertToResult(q, createSearchReply(5, true), null, 1, 2);
assertEquals(result.getResult().getTotalHitCount(), 7);
List<LeanHit> hits = result.getLeanHits();
assertEquals(5, hits.size());
int hitNum = 0;
for (LeanHit hit : hits) {
assertEquals('a', hit.getGid()[0]);
assertEquals(hitNum, hit.getGid()[11]);
assertEquals(0.0, hit.getRelevance(), DELTA);
assertEquals(1, hit.getPartId());
assertEquals(2, hit.getDistributionKey());
assertTrue(hit.hasSortData());
assertEquals('b', hit.getSortData()[0]);
assertEquals(hitNum, hit.getSortData()[11]);
hitNum++;
}
} |
static Properties adminClientConfiguration(String bootstrapHostnames, PemTrustSet kafkaCaTrustSet, PemAuthIdentity authIdentity, Properties config) {
if (config == null) {
throw new InvalidConfigurationException("The config parameter should not be null");
}
config.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapHostnames);
// configuring TLS encryption if requested
if (kafkaCaTrustSet != null) {
config.putIfAbsent(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL");
config.setProperty(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "PEM");
config.setProperty(SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_CONFIG, kafkaCaTrustSet.trustedCertificatesString());
}
// configuring TLS client authentication
if (authIdentity != null) {
config.putIfAbsent(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL");
config.setProperty(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "PEM");
config.setProperty(SslConfigs.SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG, authIdentity.certificateChainAsPem());
config.setProperty(SslConfigs.SSL_KEYSTORE_KEY_CONFIG, authIdentity.privateKeyAsPem());
}
config.putIfAbsent(AdminClientConfig.METADATA_MAX_AGE_CONFIG, "30000");
config.putIfAbsent(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "10000");
config.putIfAbsent(AdminClientConfig.RETRIES_CONFIG, "3");
config.putIfAbsent(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, "40000");
return config;
} | @Test
public void testCustomConfig() {
Properties customConfig = new Properties();
customConfig.setProperty(AdminClientConfig.RETRIES_CONFIG, "5"); // Override a value we have default for
customConfig.setProperty(AdminClientConfig.RECONNECT_BACKOFF_MS_CONFIG, "13000"); // Override a value we do not use
Properties config = DefaultAdminClientProvider.adminClientConfiguration("my-kafka:9092", null, null, customConfig);
assertThat(config.size(), is(6));
assertThat(config.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG), is("my-kafka:9092"));
assertThat(config.get(AdminClientConfig.METADATA_MAX_AGE_CONFIG), is("30000"));
assertThat(config.get(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG), is("10000"));
assertThat(config.get(AdminClientConfig.RETRIES_CONFIG), is("5"));
assertThat(config.get(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG), is("40000"));
assertThat(config.get(AdminClientConfig.RECONNECT_BACKOFF_MS_CONFIG), is("13000"));
} |
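
A minimal sketch of the plain-listener path, assuming the standard Kafka Admin factory: with no trust set or identity, the returned properties hold only the bootstrap address plus the four defaulted timeout/retry settings.
Properties props = DefaultAdminClientProvider.adminClientConfiguration("my-kafka:9092", null, null, new Properties());
Admin admin = Admin.create(props); // org.apache.kafka.clients.admin.Admin
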
public FEELFnResult<List<Object>> invoke(@ParameterName("list") Object[] lists) {
if ( lists == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
// spec requires us to return a new list
final List<Object> result = new ArrayList<>();
for ( Object list : lists ) {
if ( list == null ) {
            // TODO review according to spec; original behavior was: return null;
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "lists", "one of the elements in the list is null"));
} else if ( list instanceof Collection ) {
            result.addAll( (Collection<?>) list );
} else {
result.add( list );
}
}
return FEELFnResult.ofResult( result );
} | @Test
void invokeNull() {
FunctionTestUtil.assertResultError(concatenateFunction.invoke(null), InvalidParametersEvent.class);
} |
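
A hedged invocation sketch; the INSTANCE singleton field is an assumption mirroring other FEEL functions. Collections are flattened, scalars are appended, and a null element short-circuits into an InvalidParametersEvent error.
FEELFnResult<List<Object>> res = ConcatenateFunction.INSTANCE.invoke(new Object[] { Arrays.asList(1, 2), 3 });
// success value: [1, 2, 3]
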
public boolean isValid(String value) {
if (value == null) {
return false;
}
URI uri; // ensure value is a valid URI
try {
uri = new URI(value);
} catch (URISyntaxException e) {
return false;
}
    // OK, perform additional validation
String scheme = uri.getScheme();
if (!isValidScheme(scheme)) {
return false;
}
String authority = uri.getRawAuthority();
if ("file".equals(scheme) && (authority == null || "".equals(authority))) { // Special case - file: allows an empty authority
return true; // this is a local file - nothing more to do here
} else if ("file".equals(scheme) && authority != null && authority.contains(":")) {
return false;
} else {
// Validate the authority
if (!isValidAuthority(authority)) {
return false;
}
}
if (!isValidPath(uri.getRawPath())) {
return false;
}
if (!isValidQuery(uri.getRawQuery())) {
return false;
}
if (!isValidFragment(uri.getRawFragment())) {
return false;
}
return true;
} | @Test
public void testValidator467() {
UrlValidator validator = new UrlValidator(UrlValidator.ALLOW_2_SLASHES);
assertTrue(validator.isValid("https://example.com/some_path/path/"));
assertTrue(validator.isValid("https://example.com//somepath/path/"));
assertTrue(validator.isValid("https://example.com//some_path/path/"));
assertTrue(validator.isValid("http://example.com//_test")); // VALIDATOR-429
} |
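
A short sketch of the file: special cases encoded above, using the schemes-array constructor (file: is not in the validator's default scheme list).
UrlValidator validator = new UrlValidator(new String[] { "file", "https" });
validator.isValid("file:///etc/hosts");   // true: an empty authority is allowed for file:
validator.isValid("file://host:8080/x");  // false: a ':' in a file: authority is rejected
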
public void putDefaultNullValue(String fieldName, Object defaultNullValue) {
_fieldToValueMap.put(fieldName, defaultNullValue);
_nullValueFields.add(fieldName);
} | @Test
public void testNullValueFieldsNotEqual() {
GenericRow first = new GenericRow();
first.putDefaultNullValue("one", 1);
GenericRow second = new GenericRow();
second.putDefaultNullValue("one", 2);
Assert.assertNotEquals(first, second);
first = new GenericRow();
first.putDefaultNullValue("one", 1);
second = new GenericRow();
second.putDefaultNullValue("one", null);
Assert.assertNotEquals(first, second);
} |
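
A hedged sketch of the intent: the default value stands in for the missing datum while the field stays tracked as null. The getValue and isNullValue accessors are assumptions based on the _fieldToValueMap and _nullValueFields members above.
GenericRow row = new GenericRow();
row.putDefaultNullValue("age", Integer.MIN_VALUE);
row.getValue("age");    // Integer.MIN_VALUE, the stand-in default (assumed accessor)
row.isNullValue("age"); // true (assumed accessor over _nullValueFields)
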
public void validate(ExternalIssueReport report, Path reportPath) {
if (report.rules != null && report.issues != null) {
Set<String> ruleIds = validateRules(report.rules, reportPath);
validateIssuesCctFormat(report.issues, ruleIds, reportPath);
} else if (report.rules == null && report.issues != null) {
String documentationLink = documentationLinkGenerator.getDocumentationLink(DOCUMENTATION_SUFFIX);
LOGGER.warn("External issues were imported with a deprecated format which will be removed soon. " +
"Please switch to the newest format to fully benefit from Clean Code: {}", documentationLink);
validateIssuesDeprecatedFormat(report.issues, reportPath);
} else {
throw new IllegalStateException(String.format("Failed to parse report '%s': invalid report detected.", reportPath));
}
} | @Test
public void validate_whenContainsDeprecatedTypeEntry_shouldThrowException() throws IOException {
ExternalIssueReport report = read(REPORTS_LOCATION);
report.issues[0].type = "BUG";
assertThatThrownBy(() -> validator.validate(report, reportPath))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Deprecated 'type' field found in the following report: 'report-path'.");
} |
public DataPoint addDataPoint(Object label) {
DataPoint dp = new DataPoint();
labels.add(label);
dataPoints.add(dp);
return dp;
} | @Test(expected = IllegalArgumentException.class)
public void dataPointBandSeries() {
cm = new ChartModel(FOO, BAR);
cm.addDataPoint(System.currentTimeMillis())
.data(ZOO, VALUES3[0]);
} |
@Override
public CompletableFuture<JobResult> requestJobResult(JobID jobId, Time timeout) {
final CompletableFuture<JobResult> jobResultFuture = super.requestJobResult(jobId, timeout);
if (executionMode == ClusterEntrypoint.ExecutionMode.NORMAL) {
// terminate the MiniDispatcher once we served the first JobResult successfully
jobResultFuture.thenAccept(
(JobResult result) -> {
ApplicationStatus status =
result.getSerializedThrowable().isPresent()
? ApplicationStatus.FAILED
: ApplicationStatus.SUCCEEDED;
if (!ApplicationStatus.UNKNOWN.equals(result.getApplicationStatus())) {
log.info(
"Shutting down cluster because someone retrieved the job result"
+ " and the status is globally terminal.");
shutDownFuture.complete(status);
}
});
} else {
log.info("Not shutting down cluster after someone retrieved the job result.");
}
return jobResultFuture;
} | @Test
public void testJobResultRetrieval() throws Exception {
final MiniDispatcher miniDispatcher =
createMiniDispatcher(ClusterEntrypoint.ExecutionMode.NORMAL);
miniDispatcher.start();
try {
// wait until we have submitted the job
final TestingJobManagerRunner testingJobManagerRunner =
testingJobManagerRunnerFactory.takeCreatedJobManagerRunner();
testingJobManagerRunner.completeResultFuture(executionGraphInfo);
assertFalse(miniDispatcher.getTerminationFuture().isDone());
final DispatcherGateway dispatcherGateway =
miniDispatcher.getSelfGateway(DispatcherGateway.class);
final CompletableFuture<JobResult> jobResultFuture =
dispatcherGateway.requestJobResult(jobGraph.getJobID(), timeout);
final JobResult jobResult = jobResultFuture.get();
assertThat(jobResult.getJobId(), is(jobGraph.getJobID()));
} finally {
RpcUtils.terminateRpcEndpoint(miniDispatcher);
}
} |