focal_method | test_case |
---|---|
public double interpolate(double... x) {
if (x.length != this.x[0].length) {
throw new IllegalArgumentException(String.format("Invalid input vector size: %d, expected: %d", x.length, this.x[0].length));
}
double sum = 0.0, sumw = 0.0;
for (int i = 0; i < this.x.length; i++) {
double f = rbf.f(MathEx.distance(x, this.x[i]));
sumw += w[i] * f;
sum += f;
}
return normalized ? sumw / sum : sumw;
} | @Test
public void testInterpolate() {
System.out.println("interpolate");
double[][] x = {{0, 0}, {1, 1}};
double[] y = {0, 1};
RBFInterpolation instance = new RBFInterpolation(x, y, new GaussianRadialBasis());
double[] x1 = {0.5, 0.5};
assertEquals(0, instance.interpolate(x[0]), 1E-7);
assertEquals(1, instance.interpolate(x[1]), 1E-7);
assertEquals(0.569349, instance.interpolate(x1), 1E-6);
} |
public static GlobalLockConfig setAndReturnPrevious(GlobalLockConfig config) {
GlobalLockConfig previous = holder.get();
holder.set(config);
return previous;
} | @Test
void setAndReturnPrevious() {
GlobalLockConfig config1 = new GlobalLockConfig();
assertNull(GlobalLockConfigHolder.setAndReturnPrevious(config1), "should return null");
assertSame(config1, GlobalLockConfigHolder.getCurrentGlobalLockConfig(), "holder fail to store config");
GlobalLockConfig config2 = new GlobalLockConfig();
assertSame(config1, GlobalLockConfigHolder.setAndReturnPrevious(config2), "fail to get previous config");
assertSame(config2, GlobalLockConfigHolder.getCurrentGlobalLockConfig(), "holder fail to store latest config");
} |
@Override
public ScheduledFuture<?> scheduleWithRepetition(Runnable command, long initialDelay, long period, TimeUnit unit) {
return internalExecutor.scheduleAtFixedRate(command, initialDelay, period, unit);
} | @Test
public void testScheduleWithRepetition() {
TestRunnable runnable = new TestRunnable(5);
ScheduledFuture<?> future = executionService.scheduleWithRepetition(runnable, 0, 100, MILLISECONDS);
runnable.await();
boolean result = future.cancel(true);
assertTrue(result);
} |
public static SchemeHandler get() {
return new SchemeHandlerFactory().create();
} | @Test
public void testGet() {
assertNotNull(SchemeHandlerFactory.get());
} |
static List<RawMetric> constructMetricsList(ObjectName jmxMetric,
MBeanAttributeInfo[] attributes,
Object[] attrValues) {
String domain = fixIllegalChars(jmxMetric.getDomain());
LinkedHashMap<String, String> labels = getLabelsMap(jmxMetric);
String firstLabel = labels.keySet().iterator().next();
String firstLabelValue = fixIllegalChars(labels.get(firstLabel));
labels.remove(firstLabel); // removing first label since its value will be in the name
List<RawMetric> result = new ArrayList<>(attributes.length);
for (int i = 0; i < attributes.length; i++) {
String attrName = fixIllegalChars(attributes[i].getName());
convertNumericValue(attrValues[i]).ifPresent(convertedValue -> {
String name = String.format("%s_%s_%s", domain, firstLabelValue, attrName);
var metric = RawMetric.create(name, labels, convertedValue);
result.add(metric);
});
}
return result;
} | @Test
void convertsJmxMetricsAccordingToJmxExporterFormat() throws Exception {
List<RawMetric> metrics = JmxMetricsFormatter.constructMetricsList(
new ObjectName(
"kafka.server:type=Some.BrokerTopic-Metrics,name=BytesOutPer-Sec,topic=test,some-lbl=123"),
new MBeanAttributeInfo[] {
createMbeanInfo("FifteenMinuteRate"),
createMbeanInfo("Mean"),
createMbeanInfo("Calls-count"),
createMbeanInfo("SkipValue"),
},
new Object[] {
123.0,
100.0,
10L,
"string values not supported"
}
);
assertThat(metrics).hasSize(3);
assertMetricsEqual(
RawMetric.create(
"kafka_server_Some_BrokerTopic_Metrics_FifteenMinuteRate",
Map.of("name", "BytesOutPer-Sec", "topic", "test", "some_lbl", "123"),
BigDecimal.valueOf(123.0)
),
metrics.get(0)
);
assertMetricsEqual(
RawMetric.create(
"kafka_server_Some_BrokerTopic_Metrics_Mean",
Map.of("name", "BytesOutPer-Sec", "topic", "test", "some_lbl", "123"),
BigDecimal.valueOf(100.0)
),
metrics.get(1)
);
assertMetricsEqual(
RawMetric.create(
"kafka_server_Some_BrokerTopic_Metrics_Calls_count",
Map.of("name", "BytesOutPer-Sec", "topic", "test", "some_lbl", "123"),
BigDecimal.valueOf(10)
),
metrics.get(2)
);
} |
public List<Address> getWatchedAddresses() {
keyChainGroupLock.lock();
try {
List<Address> addresses = new LinkedList<>();
for (Script script : watchedScripts)
if (ScriptPattern.isP2PKH(script))
addresses.add(script.getToAddress(network));
return addresses;
} finally {
keyChainGroupLock.unlock();
}
} | @Test
public void getWatchedAddresses() {
Address watchedAddress = new ECKey().toAddress(ScriptType.P2PKH, TESTNET);
wallet.addWatchedAddress(watchedAddress);
List<Address> watchedAddresses = wallet.getWatchedAddresses();
assertEquals(1, watchedAddresses.size());
assertEquals(watchedAddress, watchedAddresses.get(0));
} |
@Async
@Transactional
public SamlMetadataProcessResult startCollectMetadata(Connection con, Map<String, String> map) {
SamlMetadataProcessResult result = new SamlMetadataProcessResult(con.getId());
EntitiesDescriptor descriptor;
try {
String metadataXML = getMetadataFromConnection(con);
descriptor = convertMetadataXMLtoEntitiesDescriptor(metadataXML);
String hash = getSignatureValue(descriptor.getSignature());
Optional<SamlMetadataProcessResult> process = samlMetadataProcessResultRepository.findByConnectionIdAndHash(con.getId(), hash);
if (process.isPresent()) return result;
updateMetadata(descriptor, con, map, result);
result.setMetadata(metadataXML);
if (result.allEntriesSuccessful()) {
result.setHash(hash);
}
} catch (InitializationException | ComponentInitializationException | UnmarshallingException | IOException | MetadataParseException e) {
map.put("status", "failed");
LOGGER.error("Failed to collect/parse metadata: {}", e.getMessage());
result.addProcessError(e.getMessage(), "");
}
samlMetadataProcessResultRepository.saveAndFlush(result);
return result;
} | @Test
public void startCollectMetadataWithUnknownServiceEntityIDTest() throws IOException {
Connection connection = newConnection();
when(httpClientMock.execute(any(HttpGet.class))).thenReturn(httpResponseMock);
when(httpResponseMock.getEntity()).thenReturn(httpEntityMock);
when(httpEntityMock.getContent()).thenReturn(getClass().getClassLoader().getResourceAsStream("metadata/valid-metadata.xml"));
when(serviceServiceMock.findAllowedServiceById(anyLong(), anyString())).thenReturn(null);
SamlMetadataProcessResult result = metadataProcessorServiceMock.startCollectMetadata(connection, null);
assertEquals(0, result.getTotalUpdated());
assertEquals(3, result.getTotalErrors());
assertEquals(3, result.getTotalProcessed());
assertEquals(3, result.getSamlMetadataProcessErrors().size());
assertEquals("Dienst: entityID bestaat niet", result.getSamlMetadataProcessErrors().get(0).getErrorReason());
assertNotNull(result.getSamlMetadataProcessErrors().get(0).getService());
assertNotNull(result.getMetadata());
assertEquals(CertificateType.SIGNING, connection.getCertificates().get(0).getCertType());
} |
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
EntityType originatorEntityType = msg.getOriginator().getEntityType();
if (!EntityType.DEVICE.equals(originatorEntityType)) {
ctx.tellFailure(msg, new IllegalArgumentException(
"Unsupported originator entity type: [" + originatorEntityType + "]. Only DEVICE entity type is supported."
));
return;
}
DeviceId originator = new DeviceId(msg.getOriginator().getId());
rateLimits.compute(originator, (__, rateLimit) -> {
if (rateLimit == null) {
rateLimit = new TbRateLimits(rateLimitConfig);
}
boolean isNotRateLimited = rateLimit.tryConsume();
if (isNotRateLimited) {
sendEventAndTell(ctx, originator, msg);
} else {
ctx.tellNext(msg, "Rate limited");
}
return rateLimit;
});
} | @Test
public void givenMetadataDoesNotContainTs_whenOnMsg_thenMsgTsIsUsedAsEventTs() {
// GIVEN
given(ctxMock.getDeviceStateNodeRateLimitConfig()).willReturn("1:1");
try {
initNode(TbMsgType.ACTIVITY_EVENT);
} catch (TbNodeException e) {
fail("Node failed to initialize!", e);
}
given(ctxMock.getTenantId()).willReturn(TENANT_ID);
given(ctxMock.getDeviceStateManager()).willReturn(deviceStateManagerMock);
long msgTs = METADATA_TS + 1;
msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, DEVICE_ID, TbMsgMetaData.EMPTY, TbMsg.EMPTY_JSON_OBJECT, msgTs);
// WHEN
node.onMsg(ctxMock, msg);
// THEN
then(deviceStateManagerMock).should().onDeviceActivity(eq(TENANT_ID), eq(DEVICE_ID), eq(msgTs), any());
} |
public static String encode(String s) {
return encode(s, UTF_8);
} | @Test
public void testEncodeDecode2() {
String origin = null;
String encode1 = UrlUtils.encode(origin);
assertThat(encode1).isNull();
origin = "";
encode1 = UrlUtils.encode(origin);
assertThat(encode1).isEqualTo(origin);
} |
public MetricsBuilder exportServicePort(Integer exportServicePort) {
this.exportServicePort = exportServicePort;
return getThis();
} | @Test
void exportServicePort() {
MetricsBuilder builder = MetricsBuilder.newBuilder();
builder.exportServicePort(2999);
Assertions.assertEquals(2999, builder.build().getExportServicePort());
} |
@Override
public SarifSchema210 deserialize(Path reportPath) {
try {
return mapper
.enable(JsonParser.Feature.INCLUDE_SOURCE_IN_LOCATION)
.addHandler(new DeserializationProblemHandler() {
@Override
public Object handleInstantiationProblem(DeserializationContext ctxt, Class<?> instClass, Object argument, Throwable t) throws IOException {
if (!instClass.equals(SarifSchema210.Version.class)) {
return NOT_HANDLED;
}
throw new UnsupportedSarifVersionException(format(UNSUPPORTED_VERSION_MESSAGE_TEMPLATE, argument), t);
}
})
.readValue(reportPath.toFile(), SarifSchema210.class);
} catch (UnsupportedSarifVersionException e) {
throw new IllegalStateException(e.getMessage(), e);
} catch (JsonMappingException | JsonParseException e) {
throw new IllegalStateException(format(SARIF_JSON_SYNTAX_ERROR, reportPath), e);
} catch (IOException e) {
throw new IllegalStateException(format(SARIF_REPORT_ERROR, reportPath), e);
}
} | @Test
public void deserialize_shouldFail_whenSarifVersionIsNotSupported() throws URISyntaxException {
URL sarifResource = requireNonNull(getClass().getResource("unsupported-sarif-version-abc.json"));
Path sarif = Paths.get(sarifResource.toURI());
assertThatThrownBy(() -> serializer.deserialize(sarif))
.isInstanceOf(IllegalStateException.class)
.hasMessage(format(UNSUPPORTED_VERSION_MESSAGE_TEMPLATE, "A.B.C"));
} |
@Override
public int updateAllNotifyMessageRead(Long userId, Integer userType) {
return notifyMessageMapper.updateListRead(userId, userType);
} | @Test
public void testUpdateAllNotifyMessageRead() {
// mock data
NotifyMessageDO dbNotifyMessage = randomPojo(NotifyMessageDO.class, o -> { // will be found by the query later
o.setUserId(1L);
o.setUserType(UserTypeEnum.ADMIN.getValue());
o.setReadStatus(false);
o.setReadTime(null);
o.setTemplateParams(randomTemplateParams());
});
notifyMessageMapper.insert(dbNotifyMessage);
// test userId mismatch
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserId(2L)));
// test userType mismatch
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserType(UserTypeEnum.MEMBER.getValue())));
// test readStatus mismatch
notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setReadStatus(true)));
// prepare parameters
Long userId = 1L;
Integer userType = UserTypeEnum.ADMIN.getValue();
// invoke
int updateCount = notifyMessageService.updateAllNotifyMessageRead(userId, userType);
// assert
assertEquals(1, updateCount);
NotifyMessageDO notifyMessage = notifyMessageMapper.selectById(dbNotifyMessage.getId());
assertTrue(notifyMessage.getReadStatus());
assertNotNull(notifyMessage.getReadTime());
} |
public boolean hasProjectSubscribersForTypes(String projectUuid, Set<Class<? extends Notification>> notificationTypes) {
Set<String> dispatcherKeys = handlers.stream()
.filter(handler -> notificationTypes.stream().anyMatch(notificationType -> handler.getNotificationClass() == notificationType))
.map(NotificationHandler::getMetadata)
.filter(Optional::isPresent)
.map(Optional::get)
.map(NotificationDispatcherMetadata::getDispatcherKey)
.collect(Collectors.toSet());
return dbClient.propertiesDao().hasProjectNotificationSubscribersForDispatchers(projectUuid, dispatcherKeys);
} | @Test
public void hasProjectSubscribersForType_returns_false_if_there_are_no_handler() {
String projectUuid = randomAlphabetic(7);
NotificationService underTest = new NotificationService(dbClient);
assertThat(underTest.hasProjectSubscribersForTypes(projectUuid, ImmutableSet.of(Notification1.class))).isFalse();
assertThat(underTest.hasProjectSubscribersForTypes(projectUuid, ImmutableSet.of(Notification2.class))).isFalse();
} |
@Override
public String getDistinctId() {
return null;
} | @Test
public void getDistinctId() {
Assert.assertNull(mSensorsAPI.getDistinctId());
} |
public static JSONObject parseObj(String jsonStr) {
return new JSONObject(jsonStr);
} | @Test
public void parseObjTest() {
// test escaping
final JSONObject jsonObject = JSONUtil.parseObj("{\n" +
" \"test\": \"\\\\地库地库\",\n" +
"}");
assertEquals("\\地库地库", jsonObject.getObj("test"));
} |
public List<Class<?>> getAllCommandClass() {
final Set<String> commandList =
frameworkModel.getExtensionLoader(BaseCommand.class).getSupportedExtensions();
final List<Class<?>> classes = new ArrayList<>();
for (String commandName : commandList) {
BaseCommand command =
frameworkModel.getExtensionLoader(BaseCommand.class).getExtension(commandName);
classes.add(command.getClass());
}
return classes;
} | @Test
void testGetAllCommandClass() {
List<Class<?>> classes = commandHelper.getAllCommandClass();
// update this list when introducing a new command
List<Class<?>> expectedClasses = new LinkedList<>();
expectedClasses.add(GreetingCommand.class);
expectedClasses.add(Help.class);
expectedClasses.add(Live.class);
expectedClasses.add(Ls.class);
expectedClasses.add(Offline.class);
expectedClasses.add(OfflineApp.class);
expectedClasses.add(OfflineInterface.class);
expectedClasses.add(Online.class);
expectedClasses.add(OnlineApp.class);
expectedClasses.add(OnlineInterface.class);
expectedClasses.add(PublishMetadata.class);
expectedClasses.add(Quit.class);
expectedClasses.add(Ready.class);
expectedClasses.add(Startup.class);
expectedClasses.add(Version.class);
expectedClasses.add(ChangeTelnet.class);
expectedClasses.add(CountTelnet.class);
expectedClasses.add(InvokeTelnet.class);
expectedClasses.add(SelectTelnet.class);
expectedClasses.add(PortTelnet.class);
expectedClasses.add(PwdTelnet.class);
expectedClasses.add(ShutdownTelnet.class);
expectedClasses.add(EnableDetailProfiler.class);
expectedClasses.add(DisableDetailProfiler.class);
expectedClasses.add(EnableSimpleProfiler.class);
expectedClasses.add(DisableSimpleProfiler.class);
expectedClasses.add(SetProfilerWarnPercent.class);
expectedClasses.add(GetRouterSnapshot.class);
expectedClasses.add(GetEnabledRouterSnapshot.class);
expectedClasses.add(EnableRouterSnapshot.class);
expectedClasses.add(DisableRouterSnapshot.class);
expectedClasses.add(GetRecentRouterSnapshot.class);
expectedClasses.add(LoggerInfo.class);
expectedClasses.add(SwitchLogger.class);
expectedClasses.add(SwitchLogLevel.class);
expectedClasses.add(SerializeCheckStatus.class);
expectedClasses.add(SerializeWarnedClasses.class);
expectedClasses.add(GetConfig.class);
expectedClasses.add(GetAddress.class);
expectedClasses.add(GracefulShutdown.class);
expectedClasses.add(DefaultMetricsReporterCmd.class);
assertThat(classes, containsInAnyOrder(expectedClasses.toArray(new Class<?>[0])));
} |
static LinkedHashMap<String, Double> getFixedProbabilityMap(final LinkedHashMap<String, Double> probabilityResultMap) {
LinkedHashMap<String, Double> toReturn = new LinkedHashMap<>();
String[] resultMapKeys = probabilityResultMap.keySet().toArray(new String[0]);
AtomicReference<Double> sumCounter = new AtomicReference<>(0.0);
for (int i = 0; i < probabilityResultMap.size(); i++) {
String key = resultMapKeys[i];
double value = probabilityResultMap.get(key);
if (i < resultMapKeys.length - 1) {
sumCounter.accumulateAndGet(value, Double::sum);
toReturn.put(key, value);
} else {
// last element
toReturn.put(key, 1 - sumCounter.get());
}
}
return toReturn;
} | @Test
void getFixedProbabilityMap() {
double initialTotalProbability = 0.99;
AtomicReference<Double> totalReference = new AtomicReference<>(initialTotalProbability);
Random rand = new Random();
List<Double> doubles = IntStream.range(0, 3).mapToDouble(value -> {
double remainingProbability = totalReference.get();
double toReturn = invalidBound(remainingProbability) ? 0.00 :
Precision.round(rand.nextDouble(remainingProbability), 2);
totalReference.set(Precision.round((remainingProbability - toReturn), 2));
return toReturn;
}).boxed().sorted((f1, f2) -> Double.compare(f2, f1)).toList();
LinkedHashMap<String, Double> probabilityResultMap = new LinkedHashMap<>();
int counter = 0;
for (Double toPut : doubles) {
probabilityResultMap.put("Element-" + counter, toPut);
counter++;
}
double initialProbability = Precision.round(probabilityResultMap.values().stream().mapToDouble(x -> x).sum(),
2);
assertThat(initialProbability).isLessThanOrEqualTo(initialTotalProbability);
LinkedHashMap<String, Double> retrieved = PMMLRuntimeContextImpl.getFixedProbabilityMap(probabilityResultMap);
double totalProbability = retrieved.values().stream().mapToDouble(x -> x).sum();
assertThat(totalProbability).isCloseTo(1.0, Percentage.withPercentage(0.01));
} |
@Override
public ObjectNode queryService(String namespaceId, String serviceName) throws NacosException {
Service service = getServiceFromGroupedServiceName(namespaceId, serviceName, true);
if (!ServiceManager.getInstance().containSingleton(service)) {
throw new NacosApiException(NacosException.INVALID_PARAM, ErrorCode.SERVICE_NOT_EXIST,
"service not found, namespace: " + namespaceId + ", serviceName: " + serviceName);
}
ObjectNode result = JacksonUtils.createEmptyJsonNode();
ServiceMetadata serviceMetadata = metadataManager.getServiceMetadata(service).orElse(new ServiceMetadata());
setServiceMetadata(result, serviceMetadata, service);
ArrayNode clusters = JacksonUtils.createEmptyArrayNode();
for (String each : serviceStorage.getClusters(service)) {
ClusterMetadata clusterMetadata =
serviceMetadata.getClusters().containsKey(each) ? serviceMetadata.getClusters().get(each)
: new ClusterMetadata();
clusters.add(newClusterNode(each, clusterMetadata));
}
result.set(FieldsConstants.CLUSTERS, clusters);
return result;
} | @Test
void testQueryService() throws NacosException {
ClusterMetadata clusterMetadata = new ClusterMetadata();
Map<String, ClusterMetadata> clusterMetadataMap = new HashMap<>(2);
clusterMetadataMap.put("D", clusterMetadata);
ServiceMetadata metadata = new ServiceMetadata();
metadata.setClusters(clusterMetadataMap);
Mockito.when(metadataManager.getServiceMetadata(Mockito.any())).thenReturn(Optional.of(metadata));
Mockito.when(serviceStorage.getClusters(Mockito.any())).thenReturn(Collections.singleton("D"));
ObjectNode objectNode = serviceOperatorV2.queryService("A", "B@@C");
assertEquals("A", objectNode.get(FieldsConstants.NAME_SPACE_ID).asText());
assertEquals("C", objectNode.get(FieldsConstants.NAME).asText());
assertEquals(1, objectNode.get(FieldsConstants.CLUSTERS).size());
} |
@GwtIncompatible("java.util.regex.Pattern")
public void doesNotContainMatch(@Nullable Pattern regex) {
checkNotNull(regex);
if (actual == null) {
failWithActual("expected a string that does not contain a match for", regex);
return;
}
Matcher matcher = regex.matcher(actual);
if (matcher.find()) {
failWithoutActual(
fact("expected not to contain a match for", regex),
fact("but contained", matcher.group()),
fact("full string", actualCustomStringRepresentationForPackageMembersToCall()));
}
} | @Test
public void stringDoesNotContainMatchStringUsesFind() {
expectFailureWhenTestingThat("aba").doesNotContainMatch("[b]");
assertFailureValue("expected not to contain a match for", "[b]");
} |
public static String rtrim( String source ) {
if ( source == null ) {
return null;
}
int max = source.length();
while ( max > 0 && isSpace( source.charAt( max - 1 ) ) ) {
max--;
}
return source.substring( 0, max );
} | @Test
public void testRtrim() {
assertEquals( null, Const.rtrim( null ) );
assertEquals( "", Const.rtrim( "" ) );
assertEquals( "", Const.rtrim( " " ) );
assertEquals( "test", Const.rtrim( "test " ) );
assertEquals( "test ", Const.ltrim( " test " ) );
} |
@Override
public SendResult send(
Message msg) throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
msg.setTopic(withNamespace(msg.getTopic()));
if (this.getAutoBatch() && !(msg instanceof MessageBatch)) {
return sendByAccumulator(msg, null, null);
} else {
return sendDirect(msg, null, null);
}
} | @Test
public void testSendMessageSync_Success() throws RemotingException, InterruptedException, MQBrokerException, MQClientException {
when(mQClientAPIImpl.getTopicRouteInfoFromNameServer(anyString(), anyLong())).thenReturn(createTopicRoute());
SendResult sendResult = producer.send(message);
assertThat(sendResult.getSendStatus()).isEqualTo(SendStatus.SEND_OK);
assertThat(sendResult.getOffsetMsgId()).isEqualTo("123");
assertThat(sendResult.getQueueOffset()).isEqualTo(456L);
} |
@Override
public ConfigErrors errors() {
return errors;
} | @Test
public void shouldValidateMissingLabel() {
PipelineConfig pipelineConfig = createAndValidatePipelineLabel(null);
assertThat(pipelineConfig.errors().on(PipelineConfig.LABEL_TEMPLATE), is(PipelineConfig.BLANK_LABEL_TEMPLATE_ERROR_MESSAGE));
pipelineConfig = createAndValidatePipelineLabel("");
assertThat(pipelineConfig.errors().on(PipelineConfig.LABEL_TEMPLATE), is(PipelineConfig.BLANK_LABEL_TEMPLATE_ERROR_MESSAGE));
} |
public boolean hasSameNameAs(Header header) {
AssertParameter.notNull(header, Header.class);
return this.name.equalsIgnoreCase(header.getName());
} | @Test
public void header_has_same_name_as_expected() {
final Header header1 = new Header("foo", "bar");
final Header header2 = new Header("Foo", "baz");
assertThat(header2.hasSameNameAs(header1)).isTrue();
} |
public static ServiceListResponse buildSuccessResponse(int count, List<String> serviceNames) {
return new ServiceListResponse(count, serviceNames, "success");
} | @Test
void testSerializeSuccessResponse() throws JsonProcessingException {
ServiceListResponse response = ServiceListResponse.buildSuccessResponse(10, Collections.singletonList("a"));
String json = mapper.writeValueAsString(response);
assertTrue(json.contains("\"count\":10"));
assertTrue(json.contains("\"serviceNames\":[\"a\"]"));
assertTrue(json.contains("\"resultCode\":200"));
assertTrue(json.contains("\"errorCode\":0"));
assertTrue(json.contains("\"success\":true"));
} |
@Override
public byte[] get(byte[] key) {
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
} | @Test
public void testGeo() {
RedisTemplate<String, String> redisTemplate = new RedisTemplate<>();
redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson));
redisTemplate.afterPropertiesSet();
String key = "test_geo_key";
Point point = new Point(116.401001, 40.119499);
redisTemplate.opsForGeo().add(key, point, "a");
point = new Point(111.545998, 36.133499);
redisTemplate.opsForGeo().add(key, point, "b");
point = new Point(111.483002, 36.030998);
redisTemplate.opsForGeo().add(key, point, "c");
Circle within = new Circle(116.401001, 40.119499, 80000);
RedisGeoCommands.GeoRadiusCommandArgs args = RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs().includeCoordinates();
GeoResults<RedisGeoCommands.GeoLocation<String>> res = redisTemplate.opsForGeo().radius(key, within, args);
assertThat(res.getContent().get(0).getContent().getName()).isEqualTo("a");
} |
public static StreamedRow toRowFromDelimited(final Buffer buff) {
try {
final QueryResponseMetadata metadata = deserialize(buff, QueryResponseMetadata.class);
return StreamedRow.header(new QueryId(Strings.nullToEmpty(metadata.queryId)),
createSchema(metadata));
} catch (KsqlRestClientException e) {
// Not a {@link QueryResponseMetadata}
}
try {
final KsqlErrorMessage error = deserialize(buff, KsqlErrorMessage.class);
return StreamedRow.error(new RuntimeException(error.getMessage()), error.getErrorCode());
} catch (KsqlRestClientException e) {
// Not a {@link KsqlErrorMessage}
}
try {
final PushContinuationToken continuationToken
= deserialize(buff, PushContinuationToken.class);
return StreamedRow.continuationToken(continuationToken);
} catch (KsqlRestClientException e) {
// Not a {@link PushContinuationToken}
}
try {
final List<?> row = deserialize(buff, List.class);
return StreamedRow.pushRow(GenericRow.fromList(row));
} catch (KsqlRestClientException e) {
// Not a {@link List}
}
throw new IllegalStateException("Couldn't parse message: " + buff.toString());
} | @Test
public void shouldParseRow() {
// When:
final StreamedRow row = KsqlTargetUtil.toRowFromDelimited(Buffer.buffer(
"[3467362496,5.5]"));
// Then:
assertThat(row.getRow().isPresent(), is(true));
assertThat(row.getRow().get().getColumns().size(), is(2));
assertThat(row.getRow().get().getColumns().get(0), is(3467362496L));
assertThat(row.getRow().get().getColumns().get(1), is(BigDecimal.valueOf(5.5d)));
} |
T getFunction(final List<SqlArgument> arguments) {
// first try to get the candidates without any implicit casting
Optional<T> candidate = findMatchingCandidate(arguments, false);
if (candidate.isPresent()) {
return candidate.get();
} else if (!supportsImplicitCasts) {
throw createNoMatchingFunctionException(arguments);
}
// if none were found (candidate isn't present) try again with implicit casting
candidate = findMatchingCandidate(arguments, true);
if (candidate.isPresent()) {
return candidate.get();
}
throw createNoMatchingFunctionException(arguments);
} | @Test
public void shouldNotChooseSpecificWhenTrickyVarArgLoop() {
// Given:
givenFunctions(
function(EXPECTED, -1, STRING, INT),
function("two", 0, STRING_VARARGS)
);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> udfIndex.getFunction(ImmutableList.of(SqlArgument.of(SqlTypes.STRING), SqlArgument.of(INTEGER), SqlArgument.of(SqlTypes.STRING)))
);
// Then:
assertThat(e.getMessage(), containsString("Function 'name' does not accept parameters "
+ "(STRING, INTEGER, STRING)"));
} |
public synchronized void releaseWriteLock() {
status = 0;
} | @Test
public void releaseWriteLockTest() {
SimpleReadWriteLock simpleReadWriteLock = new SimpleReadWriteLock();
simpleReadWriteLock.tryWriteLock();
simpleReadWriteLock.releaseWriteLock();
boolean result = simpleReadWriteLock.tryReadLock();
Assert.isTrue(result);
} |
@VisibleForTesting
static String toString(@Nullable TaskManagerLocation location) {
// '(unassigned)' being the default value is added to support backward-compatibility for the
// deprecated fields
return location != null ? location.getEndpoint() : "(unassigned)";
} | @Test
void testArchivedTaskManagerLocationFallbackHandling() {
assertThat(
JobExceptionsHandler.toString(
(ExceptionHistoryEntry.ArchivedTaskManagerLocation) null))
.isNull();
} |
String zone(String podName) {
String nodeUrlString = String.format("%s/api/v1/nodes/%s", kubernetesMaster, nodeName(podName));
return extractZone(callGet(nodeUrlString));
} | @Test
public void zone() throws JsonProcessingException {
// given
String podName = "pod-name";
stub(String.format("/api/v1/namespaces/%s/pods/%s", NAMESPACE, podName), pod("hazelcast-0", NAMESPACE, "node-name"));
//language=JSON
String nodeResponse = """
{
"kind": "Node",
"metadata": {
"labels": {
"failure-domain.beta.kubernetes.io/region": "deprecated-region",
"failure-domain.beta.kubernetes.io/zone": "deprecated-zone",
"topology.kubernetes.io/region": "us-central1",
"topology.kubernetes.io/zone": "us-central1-a"
}
}
}""";
stub("/api/v1/nodes/node-name", nodeResponse);
// when
String zone = kubernetesClient.zone(podName);
// then
assertEquals("us-central1-a", zone);
} |
@Override
public TableScan appendsBetween(long fromSnapshotId, long toSnapshotId) {
validateSnapshotIdsRefinement(fromSnapshotId, toSnapshotId);
return new IncrementalDataTableScan(
table(),
schema(),
context().fromSnapshotIdExclusive(fromSnapshotId).toSnapshotId(toSnapshotId));
} | @TestTemplate
public void testInvalidScans() {
add(table.newAppend(), files("A"));
assertThatThrownBy(() -> appendsBetweenScan(1, 1))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("from and to snapshot ids cannot be the same");
add(table.newAppend(), files("B"));
add(table.newAppend(), files("C"));
add(table.newAppend(), files("D"));
add(table.newAppend(), files("E"));
assertThatThrownBy(() -> table.newScan().appendsBetween(2, 5).appendsBetween(1, 4))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("from snapshot id 1 not in existing snapshot ids range (2, 4]");
assertThatThrownBy(() -> table.newScan().appendsBetween(1, 2).appendsBetween(1, 3))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("to snapshot id 3 not in existing snapshot ids range (1, 2]");
} |
@Override
public List<QualityProfile> load(String projectKey) {
StringBuilder url = new StringBuilder(WS_URL + "?project=").append(encodeForUrl(projectKey));
return handleErrors(url, () -> String.format("Failed to load the quality profiles of project '%s'", projectKey), true);
} | @Test
public void load_tries_default_if_no_profiles_found_for_project() throws IOException {
HttpException e = new HttpException("", 404, "{\"errors\":[{\"msg\":\"No project found with key 'foo'\"}]}");
WsTestUtil.mockException(wsClient, "/api/qualityprofiles/search.protobuf?project=foo", e);
WsTestUtil.mockStream(wsClient, "/api/qualityprofiles/search.protobuf?defaults=true", createStreamOfProfiles("qp"));
underTest.load("foo");
verifyCalledPath("/api/qualityprofiles/search.protobuf?project=foo");
verifyCalledPath("/api/qualityprofiles/search.protobuf?defaults=true");
} |
@SuppressWarnings("checkstyle:NestedIfDepth")
@Nullable
public PartitioningStrategy getPartitioningStrategy(
String mapName,
PartitioningStrategyConfig config,
final List<PartitioningAttributeConfig> attributeConfigs
) {
if (attributeConfigs != null && !attributeConfigs.isEmpty()) {
return cache.computeIfAbsent(mapName, k -> createAttributePartitionStrategy(attributeConfigs));
}
if (config != null && config.getPartitioningStrategy() != null) {
return config.getPartitioningStrategy();
}
if (config != null && config.getPartitioningStrategyClass() != null) {
PartitioningStrategy<?> strategy = cache.get(mapName);
if (strategy != null) {
return strategy;
}
try {
// We don't use computeIfAbsent intentionally so that the map isn't blocked if the instantiation takes a
// long time - it's user code
strategy = ClassLoaderUtil.newInstance(configClassLoader, config.getPartitioningStrategyClass());
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
cache.putIfAbsent(mapName, strategy);
return strategy;
}
return null;
} | @Test
public void whenStrategyInstantiationThrowsException_getPartitioningStrategy_rethrowsException() {
PartitioningStrategyConfig cfg = new PartitioningStrategyConfig();
cfg.setPartitioningStrategyClass("NonExistentPartitioningStrategy");
// while attempting to get partitioning strategy, ClassNotFound exception will be thrown and wrapped in HazelcastException
// use a random UUID as map name, to avoid obtaining the PartitioningStrategy from cache.
assertThatThrownBy(() -> partitioningStrategyFactory.getPartitioningStrategy(UUID.randomUUID().toString(), cfg, null))
.has(rootCause(ClassNotFoundException.class));
} |
@Override
protected IMetaStoreClient newClient() {
try {
try {
return GET_CLIENT.invoke(
hiveConf, (HiveMetaHookLoader) tbl -> null, HiveMetaStoreClient.class.getName());
} catch (RuntimeException e) {
// any MetaException would be wrapped into RuntimeException during reflection, so let's
// double-check type here
if (e.getCause() instanceof MetaException) {
throw (MetaException) e.getCause();
}
throw e;
}
} catch (MetaException e) {
throw new RuntimeMetaException(e, "Failed to connect to Hive Metastore");
} catch (Throwable t) {
if (t.getMessage() != null
&& t.getMessage().contains("Another instance of Derby may have already booted")) {
throw new RuntimeMetaException(
t,
"Failed to start an embedded metastore because embedded "
+ "Derby supports only one client at a time. To fix this, use a metastore that supports "
+ "multiple clients.");
}
throw new RuntimeMetaException(t, "Failed to connect to Hive Metastore");
}
} | @Test
public void testGetTablesFailsForNonReconnectableException() throws Exception {
HiveMetaStoreClient hmsClient = Mockito.mock(HiveMetaStoreClient.class);
Mockito.doReturn(hmsClient).when(clients).newClient();
Mockito.doThrow(new MetaException("Another meta exception"))
.when(hmsClient)
.getTables(Mockito.anyString(), Mockito.anyString());
assertThatThrownBy(() -> clients.run(client -> client.getTables("default", "t")))
.isInstanceOf(MetaException.class)
.hasMessage("Another meta exception");
} |
@Override
public Mono<List<ListedAuthProvider>> listAll() {
return client.list(AuthProvider.class, provider ->
provider.getMetadata().getDeletionTimestamp() == null,
defaultComparator()
)
.map(this::convertTo)
.collectList()
.flatMap(providers -> listMyConnections()
.map(connection -> connection.getSpec().getRegistrationId())
.collectList()
.map(connectedNames -> providers.stream()
.peek(provider -> {
boolean isBound = connectedNames.contains(provider.getName());
provider.setIsBound(isBound);
})
.collect(Collectors.toList())
)
.defaultIfEmpty(providers)
)
.flatMap(providers -> fetchEnabledAuthProviders()
.map(names -> providers.stream()
.peek(provider -> {
boolean enabled = names.contains(provider.getName());
provider.setEnabled(enabled);
})
.collect(Collectors.toList())
)
.defaultIfEmpty(providers)
);
} | @Test
@WithMockUser(username = "admin")
void listAll() {
AuthProvider github = createAuthProvider("github");
github.getSpec().setBindingUrl("fake-binding-url");
AuthProvider gitlab = createAuthProvider("gitlab");
gitlab.getSpec().setBindingUrl("fake-binding-url");
AuthProvider gitee = createAuthProvider("gitee");
when(client.list(eq(AuthProvider.class), any(), any()))
.thenReturn(Flux.just(github, gitlab, gitee));
when(client.list(eq(UserConnection.class), any(), any())).thenReturn(Flux.empty());
ConfigMap configMap = new ConfigMap();
configMap.setData(new HashMap<>());
configMap.getData().put(SystemSetting.AuthProvider.GROUP, "{\"enabled\":[\"github\"]}");
when(client.fetch(eq(ConfigMap.class), eq(SystemSetting.SYSTEM_CONFIG)))
.thenReturn(Mono.just(configMap));
authProviderService.listAll()
.as(StepVerifier::create)
.consumeNextWith(result -> {
assertThat(result).hasSize(3);
try {
JSONAssert.assertEquals("""
[{
"name": "github",
"displayName": "github",
"bindingUrl": "fake-binding-url",
"enabled": true,
"isBound": false,
"supportsBinding": false,
"privileged": false
}, {
"name": "gitlab",
"displayName": "gitlab",
"bindingUrl": "fake-binding-url",
"enabled": false,
"isBound": false,
"supportsBinding": false,
"privileged": false
},{
"name": "gitee",
"displayName": "gitee",
"enabled": false,
"isBound": false,
"supportsBinding": false,
"privileged": false
}]
""",
JsonUtils.objectToJson(result),
true);
} catch (JSONException e) {
throw new RuntimeException(e);
}
})
.verifyComplete();
} |
@Override
protected Map get(ResultSet rs, int position, SharedSessionContractImplementor session, Object owner) throws SQLException {
return (Map) rs.getObject(position);
} | @Test
public void test() {
doInJPA(entityManager -> {
Book book = new Book();
book.setIsbn("978-9730228236");
book.getProperties().put("title", "High-Performance Java Persistence");
book.getProperties().put("author", "Vlad Mihalcea");
book.getProperties().put("publisher", "Amazon");
book.getProperties().put("price", "$44.95");
entityManager.persist(book);
});
doInJPA(entityManager -> {
Book book = entityManager.unwrap(Session.class)
.bySimpleNaturalId(Book.class)
.load("978-9730228236");
assertEquals("High-Performance Java Persistence", book.getProperties().get("title"));
assertEquals("Vlad Mihalcea", book.getProperties().get("author"));
});
} |
@Override
public Http2AllocationStrategy copy() {
return new Http2AllocationStrategy(this);
} | @Test
void copy() {
builder.maxConcurrentStreams(2).maxConnections(2).minConnections(1);
Http2AllocationStrategy strategy = builder.build();
Http2AllocationStrategy copy = strategy.copy();
assertThat(copy.maxConcurrentStreams()).isEqualTo(strategy.maxConcurrentStreams());
assertThat(copy.permitMaximum()).isEqualTo(strategy.permitMaximum());
assertThat(copy.permitMinimum()).isEqualTo(strategy.permitMinimum());
} |
public static ExecutionEnvironment createBatchExecutionEnvironment(FlinkPipelineOptions options) {
return createBatchExecutionEnvironment(
options,
MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
options.getFlinkConfDir());
} | @Test
public void useDefaultParallelismFromContextBatch() {
FlinkPipelineOptions options = getDefaultPipelineOptions();
options.setRunner(TestFlinkRunner.class);
ExecutionEnvironment bev = FlinkExecutionEnvironments.createBatchExecutionEnvironment(options);
assertThat(bev, instanceOf(LocalEnvironment.class));
assertThat(options.getParallelism(), is(LocalStreamEnvironment.getDefaultLocalParallelism()));
assertThat(bev.getParallelism(), is(LocalStreamEnvironment.getDefaultLocalParallelism()));
} |
public Set<Analysis.AliasedDataSource> extractDataSources(final AstNode node) {
new Visitor().process(node, null);
return getAllSources();
} | @Test
public void shouldExtractUnaliasedDataSources() {
// Given:
final AstNode stmt = givenQuery("SELECT * FROM TEST1;");
// When:
extractor.extractDataSources(stmt);
// Then:
assertContainsAlias(TEST1);
} |
public static <T> NavigableSet<Point<T>> fastKNearestPoints(SortedSet<Point<T>> points, Instant time, int k) {
checkNotNull(points, "The input SortedSet of Points cannot be null");
checkNotNull(time, "The input time cannot be null");
checkArgument(k >= 0, "k (" + k + ") must be non-negative");
if (k >= points.size()) {
return newTreeSet(points);
}
Point<T> stub = points.first();
Point<T> searchPoint = Point.builder(stub).time(time).latLong(0.0, 0.0).build();
//create two iterators, one goes up from the searchPoint, one goes down from the searchPoint
NavigableSet<Point<T>> headSet = ((NavigableSet<Point<T>>) points).headSet(searchPoint, true);
NavigableSet<Point<T>> tailSet = ((NavigableSet<Point<T>>) points).tailSet(searchPoint, false);
Iterator<Point<T>> headIter = headSet.descendingIterator();
Iterator<Point<T>> tailIter = tailSet.iterator();
TreeSet<Point<T>> results = newTreeSet();
Point<T> up = (headIter.hasNext()) ? headIter.next() : null;
Point<T> down = (tailIter.hasNext()) ? tailIter.next() : null;
while (results.size() < k) {
//add an element from the "down set" when we are out of elements in the "up set"
if (up == null) {
results.add(down);
down = tailIter.next();
continue;
}
//add an element from the "up set" when we are out of elements in the "down set"
if (down == null) {
results.add(up);
up = headIter.next();
continue;
}
//add the nearest point when we can choose between the "up set" and the "down set"
Duration upDistance = Duration.between(up.time(), time);
Duration downDistance = Duration.between(time, down.time());
if (theDuration(upDistance).isLessThanOrEqualTo(downDistance)) {
results.add(up);
up = (headIter.hasNext()) ? headIter.next() : null;
} else {
results.add(down);
down = (tailIter.hasNext()) ? tailIter.next() : null;
}
}
return results;
} | @Test
public void testFastKNearestPoints_3() {
NavigableSet<Point<String>> knn = fastKNearestPoints(points, EPOCH.plusMillis(5), 3);
assertEquals(3, knn.size());
Point one = knn.pollFirst();
Point two = knn.pollFirst();
Point three = knn.pollFirst();
assertFalse(one == two, "These objects are different");
assertEquals(one.time(), EPOCH, "Both match the search time");
assertEquals(two.time(), EPOCH, "Both match the search time");
assertTrue(three == p3);
assertEquals(three.time(), EPOCH.plusSeconds(1));
} |
public static <T> PTransform<PCollection<T>, PCollection<T>> intersectAll(
PCollection<T> rightCollection) {
checkNotNull(rightCollection, "rightCollection argument is null");
return new SetImpl<>(rightCollection, intersectAll());
} | @Test
@Category(NeedsRunner.class)
public void testIntersectionAll() {
PAssert.that(first.apply("strings", Sets.intersectAll(second)))
.containsInAnyOrder("a", "a", "b", "b", "c", "d", "d");
PCollection<Row> results = firstRows.apply("rows", Sets.intersectAll(secondRows));
PAssert.that(results).containsInAnyOrder(toRows("a", "a", "b", "b", "c", "d", "d"));
assertEquals(schema, results.getSchema());
p.run();
} |
@Override
public void checkBeforeUpdate(final AlterEncryptRuleStatement sqlStatement) {
checkToBeAlteredRules(sqlStatement);
checkColumnNames(sqlStatement);
checkToBeAlteredEncryptors(sqlStatement);
} | @Test
void assertCheckSQLStatementWithConflictColumnNames() {
EncryptRule rule = mock(EncryptRule.class);
when(rule.getAllTableNames()).thenReturn(Collections.singleton("t_encrypt"));
executor.setRule(rule);
assertThrows(InvalidRuleConfigurationException.class, () -> executor.checkBeforeUpdate(createConflictColumnNameSQLStatement()));
} |
@Override
public ElasticAgentPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
String pluginId = descriptor.id();
PluggableInstanceSettings pluggableInstanceSettings = null;
if (!extension.supportsClusterProfiles(pluginId)) {
pluggableInstanceSettings = getPluginSettingsAndView(descriptor, extension);
}
return new ElasticAgentPluginInfo(descriptor,
elasticElasticAgentProfileSettings(pluginId),
elasticClusterProfileSettings(pluginId),
image(pluginId),
pluggableInstanceSettings,
capabilities(pluginId));
} | @Test
public void shouldGetCapabilitiesForAPlugin() {
GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
when(pluginManager.resolveExtensionVersion("plugin1", ELASTIC_AGENT_EXTENSION, SUPPORTED_VERSIONS)).thenReturn("2.0");
Capabilities capabilities = new Capabilities(true);
when(extension.getCapabilities(descriptor.id())).thenReturn(capabilities);
ElasticAgentPluginInfo pluginInfo = new ElasticAgentPluginInfoBuilder(extension).pluginInfoFor(descriptor);
assertThat(pluginInfo.getCapabilities(), is(capabilities));
} |
static String toDatabaseName(Namespace namespace, boolean skipNameValidation) {
if (!skipNameValidation) {
validateNamespace(namespace);
}
return namespace.level(0);
} | @Test
public void testSkipNamespaceValidation() {
List<Namespace> acceptableNames =
Lists.newArrayList(Namespace.of("db-1"), Namespace.of("db-1-1-1"));
for (Namespace name : acceptableNames) {
assertThat(IcebergToGlueConverter.toDatabaseName(name, true)).isEqualTo(name.toString());
}
} |
public <T> List<T> toList(Class<T> elementType) {
return JSONConverter.toList(this, elementType);
} | @Test
public void toListTest() {
String jsonStr = FileUtil.readString("exam_test.json", CharsetUtil.CHARSET_UTF_8);
JSONArray array = JSONUtil.parseArray(jsonStr);
List<Exam> list = array.toList(Exam.class);
assertFalse(list.isEmpty());
assertSame(Exam.class, list.get(0).getClass());
} |
public TExecPlanFragmentParams plan(TUniqueId loadId) throws UserException {
boolean isPrimaryKey = destTable.getKeysType() == KeysType.PRIMARY_KEYS;
resetAnalyzer();
// construct tuple descriptor, used for scanNode and dataSink
TupleDescriptor tupleDesc = descTable.createTupleDescriptor("DstTableTuple");
boolean negative = streamLoadInfo.getNegative();
if (isPrimaryKey) {
if (negative) {
throw new DdlException("Primary key table does not support negative load");
}
if (destTable.hasRowStorageType() && streamLoadInfo.isPartialUpdate() &&
streamLoadInfo.getPartialUpdateMode() != TPartialUpdateMode.ROW_MODE) {
throw new DdlException("column with row table only support row mode partial update");
}
} else {
if (streamLoadInfo.isPartialUpdate()) {
throw new DdlException("Only primary key table support partial update");
}
}
List<Pair<Integer, ColumnDict>> globalDicts = Lists.newArrayList();
List<Column> destColumns;
List<Boolean> missAutoIncrementColumn = Lists.newArrayList();
if (streamLoadInfo.isPartialUpdate()) {
destColumns = Load.getPartialUpateColumns(destTable, streamLoadInfo.getColumnExprDescs(),
missAutoIncrementColumn);
} else {
destColumns = destTable.getFullSchema();
}
for (Column col : destColumns) {
SlotDescriptor slotDesc = descTable.addSlotDescriptor(tupleDesc);
slotDesc.setIsMaterialized(true);
slotDesc.setColumn(col);
slotDesc.setIsNullable(col.isAllowNull());
if (negative && !col.isKey() && col.getAggregationType() != AggregateType.SUM) {
throw new DdlException("Column is not SUM AggreateType. column:" + col.getName());
}
if (col.getType().isVarchar() && Config.enable_dict_optimize_stream_load &&
IDictManager.getInstance().hasGlobalDict(destTable.getId(),
col.getColumnId())) {
Optional<ColumnDict> dict = IDictManager.getInstance().getGlobalDict(destTable.getId(), col.getColumnId());
dict.ifPresent(columnDict -> globalDicts.add(new Pair<>(slotDesc.getId().asInt(), columnDict)));
}
}
if (isPrimaryKey) {
// add op type column
SlotDescriptor slotDesc = descTable.addSlotDescriptor(tupleDesc);
slotDesc.setIsMaterialized(true);
slotDesc.setColumn(new Column(Load.LOAD_OP_COLUMN, Type.TINYINT));
slotDesc.setIsNullable(false);
}
// create scan node
StreamLoadScanNode scanNode =
new StreamLoadScanNode(loadId, new PlanNodeId(0), tupleDesc, destTable, streamLoadInfo);
scanNode.setUseVectorizedLoad(true);
scanNode.init(analyzer);
scanNode.finalizeStats(analyzer);
scanNode.setWarehouseId(streamLoadInfo.getWarehouseId());
descTable.computeMemLayout();
// create dest sink
TWriteQuorumType writeQuorum = destTable.writeQuorum();
List<Long> partitionIds = getAllPartitionIds();
boolean enableAutomaticPartition;
if (streamLoadInfo.isSpecifiedPartitions()) {
enableAutomaticPartition = false;
} else {
enableAutomaticPartition = destTable.supportedAutomaticPartition();
}
OlapTableSink olapTableSink = new OlapTableSink(destTable, tupleDesc, partitionIds, writeQuorum,
destTable.enableReplicatedStorage(), scanNode.nullExprInAutoIncrement(),
enableAutomaticPartition, streamLoadInfo.getWarehouseId());
if (missAutoIncrementColumn.size() == 1 && missAutoIncrementColumn.get(0) == Boolean.TRUE) {
olapTableSink.setMissAutoIncrementColumn();
}
if (destTable.getAutomaticBucketSize() > 0) {
olapTableSink.setAutomaticBucketSize(destTable.getAutomaticBucketSize());
}
olapTableSink.init(loadId, streamLoadInfo.getTxnId(), db.getId(), streamLoadInfo.getTimeout());
Load.checkMergeCondition(streamLoadInfo.getMergeConditionStr(), destTable, destColumns,
olapTableSink.missAutoIncrementColumn());
olapTableSink.setPartialUpdateMode(streamLoadInfo.getPartialUpdateMode());
olapTableSink.complete(streamLoadInfo.getMergeConditionStr());
// for stream load, we only need one fragment, ScanNode -> DataSink.
// OlapTableSink can dispatch data to corresponding node.
PlanFragment fragment = new PlanFragment(new PlanFragmentId(0), scanNode, DataPartition.UNPARTITIONED);
fragment.setSink(olapTableSink);
// At present, we only support dop=1 for the olap table sink,
// because tablet writing needs to know the number of senders in advance
// and requires a guaranteed order of data writing.
// Parallel writing is only possible in some scenarios, so use dop=1 for simplicity for now.
fragment.setPipelineDop(1);
// After data loading, we need to check whether the global dict for low-cardinality
// string columns needs to be updated.
fragment.setLoadGlobalDicts(globalDicts);
fragment.createDataSink(TResultSinkType.MYSQL_PROTOCAL);
TExecPlanFragmentParams params = new TExecPlanFragmentParams();
params.setProtocol_version(InternalServiceVersion.V1);
params.setFragment(fragment.toThrift());
params.setDesc_tbl(analyzer.getDescTbl().toThrift());
TPlanFragmentExecParams execParams = new TPlanFragmentExecParams();
// use load id (streamLoadInfo.id) as query id
execParams.setQuery_id(loadId);
execParams.setFragment_instance_id(new TUniqueId(loadId.hi, loadId.lo + 1));
execParams.per_exch_num_senders = Maps.newHashMap();
execParams.destinations = Lists.newArrayList();
Map<Integer, List<TScanRangeParams>> perNodeScanRange = Maps.newHashMap();
List<TScanRangeParams> scanRangeParams = Lists.newArrayList();
for (TScanRangeLocations locations : scanNode.getScanRangeLocations(0)) {
scanRangeParams.add(new TScanRangeParams(locations.getScan_range()));
}
// For stream load, only one sender
execParams.setSender_id(0);
execParams.setNum_senders(1);
perNodeScanRange.put(scanNode.getId().asInt(), scanRangeParams);
execParams.setPer_node_scan_ranges(perNodeScanRange);
params.setParams(execParams);
TQueryOptions queryOptions = new TQueryOptions();
queryOptions.setQuery_type(TQueryType.LOAD);
queryOptions.setQuery_timeout(streamLoadInfo.getTimeout());
queryOptions.setLoad_transmission_compression_type(streamLoadInfo.getTransmisionCompressionType());
queryOptions.setLog_rejected_record_num(streamLoadInfo.getLogRejectedRecordNum());
// Disable load_dop for LakeTable temporarily, because BE's `LakeTabletsChannel` does not support
// parallel send from a single sender.
if (streamLoadInfo.getLoadParallelRequestNum() != 0 && !destTable.isCloudNativeTableOrMaterializedView()) {
// only dup_keys can use parallel write, since for other table types the order of writes is important
if (destTable.getKeysType() == KeysType.DUP_KEYS) {
queryOptions.setLoad_dop(streamLoadInfo.getLoadParallelRequestNum());
} else {
queryOptions.setLoad_dop(1);
}
}
// for stream load, we use exec_mem_limit to limit the memory usage of load channel.
queryOptions.setMem_limit(streamLoadInfo.getExecMemLimit());
queryOptions.setLoad_mem_limit(streamLoadInfo.getLoadMemLimit());
if (connectContext.getSessionVariable().isEnableLoadProfile()) {
queryOptions.setEnable_profile(true);
queryOptions.setLoad_profile_collect_second(Config.stream_load_profile_collect_second);
}
params.setQuery_options(queryOptions);
TQueryGlobals queryGlobals = new TQueryGlobals();
queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
queryGlobals.setTimestamp_ms(new Date().getTime());
queryGlobals.setTime_zone(streamLoadInfo.getTimezone());
params.setQuery_globals(queryGlobals);
// Since stream load has only one fragment,
// the backend number can be directly assigned to 0
params.setBackend_num(0);
TNetworkAddress coordAddress = new TNetworkAddress(FrontendOptions.getLocalHostAddress(), Config.rpc_port);
params.setCoord(coordAddress);
LOG.info("load job id: {}, txn id: {}, parallel: {}, compress: {}, replicated: {}, quorum: {}",
DebugUtil.printId(loadId), streamLoadInfo.getTxnId(), queryOptions.getLoad_dop(),
queryOptions.getLoad_transmission_compression_type(), destTable.enableReplicatedStorage(), writeQuorum);
this.execPlanFragmentParams = params;
return params;
} | @Test
public void testPartialUpdatePlan() throws UserException {
List<Column> columns = Lists.newArrayList();
Column c1 = new Column("c1", Type.BIGINT, false);
columns.add(c1);
Column c2 = new Column("c2", Type.BIGINT, true);
columns.add(c2);
new Expectations() {
{
destTable.getKeysType();
minTimes = 0;
result = KeysType.PRIMARY_KEYS;
destTable.getBaseSchema();
minTimes = 0;
result = columns;
destTable.getPartitions();
minTimes = 0;
result = Arrays.asList(partition);
scanNode.init((Analyzer) any);
minTimes = 0;
scanNode.getChildren();
minTimes = 0;
result = Lists.newArrayList();
scanNode.getId();
minTimes = 0;
result = new PlanNodeId(5);
partition.getId();
minTimes = 0;
result = 0;
}
};
TStreamLoadPutRequest request = new TStreamLoadPutRequest();
request.setTxnId(1);
request.setLoadId(new TUniqueId(2, 3));
request.setFileType(TFileType.FILE_STREAM);
request.setFormatType(TFileFormatType.FORMAT_CSV_PLAIN);
request.setPartial_update(true);
request.setColumns("c1");
StreamLoadInfo streamLoadInfo = StreamLoadInfo.fromTStreamLoadPutRequest(request, db);
StreamLoadPlanner planner = new StreamLoadPlanner(db, destTable, streamLoadInfo);
planner.plan(streamLoadInfo.getId());
} |
@Override
public void close() throws IOException {
super.close();
try {
final Reply response = this.getStatus();
if(response != null) {
if(log.isDebugEnabled()) {
log.debug(String.format("Closed stream %s with response value %s", this, response));
}
status.withResponse(attributes.toAttributes(response)).setComplete();
}
}
catch(BackgroundException e) {
throw new IOException(e.getDetail(), e);
}
} | @Test(expected = IOException.class)
public void testClose() throws Exception {
try {
new HttpResponseOutputStream<Void>(NullOutputStream.NULL_OUTPUT_STREAM, new VoidAttributesAdapter(), new TransferStatus()) {
@Override
public Void getStatus() throws BackgroundException {
throw new InteroperabilityException("d");
}
}.close();
}
catch(IOException e) {
assertEquals("d. Please contact your web hosting service provider for assistance.", e.getMessage());
throw e;
}
} |
public void shutdown() {
DefaultMetricsSystem.shutdown();
} | @Test(timeout = 300000)
public void testTransactionSinceLastCheckpointMetrics() throws Exception {
Random random = new Random();
int retryCount = 0;
while (retryCount < 5) {
try {
int basePort = 10060 + random.nextInt(100) * 2;
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
HdfsConfiguration conf2 = new HdfsConfiguration();
// Lower the checkpoint condition for purpose of testing.
conf2.setInt(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
100);
// Check for checkpoint condition very often, for purpose of testing.
conf2.setInt(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,
1);
// Poll and follow ANN txns very often, for purpose of testing.
conf2.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf2)
.nnTopology(topology).numDataNodes(1).build();
cluster2.waitActive();
DistributedFileSystem fs2 = cluster2.getFileSystem(0);
NameNode nn0 = cluster2.getNameNode(0);
NameNode nn1 = cluster2.getNameNode(1);
cluster2.transitionToActive(0);
fs2.mkdirs(new Path("/tmp-t1"));
fs2.mkdirs(new Path("/tmp-t2"));
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
// Test to ensure tracking works before the first-ever
// checkpoint.
assertEquals("SBN failed to track 2 transactions pre-checkpoint.",
4L, // 2 txns added further when catch-up is called.
cluster2.getNameNode(1).getNamesystem()
.getTransactionsSinceLastCheckpoint());
// Complete up to the boundary required for
// an auto-checkpoint. Using 94 to expect fsimage
// rounded at 100, as 4 + 94 + 2 (catch-up call) = 100.
for (int i = 1; i <= 94; i++) {
fs2.mkdirs(new Path("/tmp-" + i));
}
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
// Assert 100 transactions in checkpoint.
HATestUtil.waitForCheckpoint(cluster2, 1, ImmutableList.of(100));
// Test to ensure number tracks the right state of
// uncheckpointed edits, and does not go negative
// (as fixed in HDFS-7501).
assertEquals("Should be zero right after the checkpoint.",
0L,
cluster2.getNameNode(1).getNamesystem()
.getTransactionsSinceLastCheckpoint());
fs2.mkdirs(new Path("/tmp-t3"));
fs2.mkdirs(new Path("/tmp-t4"));
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
// Test to ensure we track the right numbers after
// the checkpoint resets it to zero again.
assertEquals("SBN failed to track 2 added txns after the ckpt.",
4L,
cluster2.getNameNode(1).getNamesystem()
.getTransactionsSinceLastCheckpoint());
cluster2.shutdown();
break;
} catch (Exception e) {
LOG.warn("Unable to set up HA cluster, exception thrown: " + e);
retryCount++;
}
}
} |
public boolean isMatch(boolean input) {
for (BoolMatch boolMatch : oneof) {
if (boolMatch.isMatch(input)) {
return true;
}
}
return false;
} | @Test
void isMatch() {
ListBoolMatch listBoolMatch = new ListBoolMatch();
List<BoolMatch> oneof = new ArrayList<>();
BoolMatch boolMatch1 = new BoolMatch();
boolMatch1.setExact(true);
oneof.add(boolMatch1);
listBoolMatch.setOneof(oneof);
assertTrue(listBoolMatch.isMatch(true));
assertFalse(listBoolMatch.isMatch(false));
BoolMatch boolMatch2 = new BoolMatch();
boolMatch2.setExact(false);
oneof.add(boolMatch2);
listBoolMatch.setOneof(oneof);
assertTrue(listBoolMatch.isMatch(false));
} |
@Override
public void e(String tag, String message, Object... args) {
Log.e(tag, formatString(message, args));
} | @Test
public void errorWithThrowableLoggedCorrectly() {
String expectedMessage = "Hello World";
Throwable t = new Throwable("Test Throwable");
logger.e(t, tag, "Hello %s", "World");
assertLogged(ERROR, tag, expectedMessage, t);
} |
public Builder asBuilder() {
return new Builder(columns);
} | @Test
public void shouldDetectDuplicateKeysViaAsBuilder() {
// Given:
final Builder builder = SOME_SCHEMA.asBuilder();
// When:
assertThrows(
KsqlException.class,
() -> builder.keyColumn(K0, STRING)
);
} |
@Override
public int expireEntries(Set<K> keys, Duration ttl) {
return get(expireEntriesAsync(keys, ttl));
} | @Test
public void testExpireEntries() {
RMapCacheNative<String, String> testMap = redisson.getMapCacheNative("map");
testMap.put("key1", "value");
testMap.put("key2", "value");
testMap.expireEntries(new HashSet<>(Arrays.asList("key1", "key2")), Duration.ofMillis(20000));
assertThat(testMap.remainTimeToLive("key1")).isBetween(19800L, 20000L);
} |
public static Protocol parse(File file) throws IOException {
try (JsonParser jsonParser = Schema.FACTORY.createParser(file)) {
return parse(jsonParser);
}
} | @Test
void normalization() {
final String schema = "{\n" + " \"type\":\"record\", \"name\": \"Main\", " + " \"fields\":[\n"
+ " { \"name\":\"f1\", \"type\":\"Sub\" },\n" // use Sub
+ " { \"name\":\"f2\", " + " \"type\":{\n" + " \"type\":\"enum\", \"name\":\"Sub\",\n" // define
// Sub
+ " \"symbols\":[\"OPEN\",\"CLOSE\"]\n" + " }\n" + " }\n" + " ]\n" + "}";
Schema s = new Schema.Parser().parse(schema);
assertNotNull(s);
String parsingForm = SchemaNormalization.toParsingForm(s);
assertEquals(
"{\"name\":\"Main\",\"type\":\"record\",\"fields\":[{\"name\":\"f1\",\"type\":{\"name\":\"Sub\",\"type\":\"enum\",\"symbols\":[\"OPEN\",\"CLOSE\"]}},{\"name\":\"f2\",\"type\":\"Sub\"}]}",
parsingForm);
} |
public static <T> Write<T> write() {
return new AutoValue_XmlIO_Write.Builder<T>().setCharset(StandardCharsets.UTF_8.name()).build();
} | @Test
public void testWriteDisplayData() {
XmlIO.Write<Integer> write =
XmlIO.<Integer>write().withRootElement("bird").withRecordClass(Integer.class);
DisplayData displayData = DisplayData.from(write);
assertThat(displayData, hasDisplayItem("rootElement", "bird"));
assertThat(displayData, hasDisplayItem("recordClass", Integer.class));
} |
public SendResult sendMessage(
final String addr,
final String brokerName,
final Message msg,
final SendMessageRequestHeader requestHeader,
final long timeoutMillis,
final CommunicationMode communicationMode,
final SendMessageContext context,
final DefaultMQProducerImpl producer
) throws RemotingException, MQBrokerException, InterruptedException {
return sendMessage(addr, brokerName, msg, requestHeader, timeoutMillis, communicationMode, null, null, null, 0, context, producer);
} | @Test
public void testSendMessageAsync_WithException() throws RemotingException, InterruptedException, MQBrokerException {
doThrow(new RemotingTimeoutException("Remoting Exception in Test")).when(remotingClient)
.invokeAsync(anyString(), any(RemotingCommand.class), anyLong(), any(InvokeCallback.class));
SendMessageContext sendMessageContext = new SendMessageContext();
sendMessageContext.setProducer(new DefaultMQProducerImpl(new DefaultMQProducer()));
mqClientAPI.sendMessage(brokerAddr, brokerName, msg, new SendMessageRequestHeader(), 3 * 1000, CommunicationMode.ASYNC,
new SendCallback() {
@Override
public void onSuccess(SendResult sendResult) {
}
@Override
public void onException(Throwable e) {
assertThat(e).hasMessage("Remoting Exception in Test");
}
}, null, null, 0, sendMessageContext, defaultMQProducerImpl);
doThrow(new InterruptedException("Interrupted Exception in Test")).when(remotingClient)
.invokeAsync(anyString(), any(RemotingCommand.class), anyLong(), any(InvokeCallback.class));
mqClientAPI.sendMessage(brokerAddr, brokerName, msg, new SendMessageRequestHeader(), 3 * 1000, CommunicationMode.ASYNC,
new SendCallback() {
@Override
public void onSuccess(SendResult sendResult) {
}
@Override
public void onException(Throwable e) {
assertThat(e).hasMessage("Interrupted Exception in Test");
}
}, null, null, 0, sendMessageContext, defaultMQProducerImpl);
} |
public ProcessContinuation run(
PartitionRecord partitionRecord,
RestrictionTracker<StreamProgress, StreamProgress> tracker,
OutputReceiver<KV<ByteString, ChangeStreamRecord>> receiver,
ManualWatermarkEstimator<Instant> watermarkEstimator)
throws IOException {
BytesThroughputEstimator<KV<ByteString, ChangeStreamRecord>> throughputEstimator =
new BytesThroughputEstimator<>(sizeEstimator, Instant.now());
// Lock the partition
if (tracker.currentRestriction().isEmpty()) {
boolean lockedPartition = metadataTableDao.lockAndRecordPartition(partitionRecord);
// Clean up NewPartition on the first run regardless of the locking result. If locking fails it
// means this partition is already being streamed, so cleaning up NewPartitions here avoids
// leaving lingering NewPartitions behind.
for (NewPartition newPartition : partitionRecord.getParentPartitions()) {
metadataTableDao.deleteNewPartition(newPartition);
}
if (!lockedPartition) {
LOG.info(
"RCSP {} : Could not acquire lock with uid: {}, because this is a "
+ "duplicate and another worker is working on this partition already.",
formatByteStringRange(partitionRecord.getPartition()),
partitionRecord.getUuid());
StreamProgress streamProgress = new StreamProgress();
streamProgress.setFailToLock(true);
metrics.decPartitionStreamCount();
tracker.tryClaim(streamProgress);
return ProcessContinuation.stop();
}
} else if (tracker.currentRestriction().getCloseStream() == null
&& !metadataTableDao.doHoldLock(
partitionRecord.getPartition(), partitionRecord.getUuid())) {
// We only verify the lock if we are not holding CloseStream because if this is a retry of
// CloseStream we might have already cleaned up the lock in a previous attempt.
// Correctness check failed: this worker does not hold the lock on this partition. This shouldn't
// happen, because the existence of a restriction tracker means this worker has already acquired
// the lock, and once it has acquired the lock it shouldn't fail the lock check.
LOG.warn(
"RCSP {} : Subsequent run that doesn't hold the lock {}. This is not unexpected and "
+ "should probably be reviewed.",
formatByteStringRange(partitionRecord.getPartition()),
partitionRecord.getUuid());
StreamProgress streamProgress = new StreamProgress();
streamProgress.setFailToLock(true);
metrics.decPartitionStreamCount();
tracker.tryClaim(streamProgress);
return ProcessContinuation.stop();
}
// Process CloseStream if it exists
CloseStream closeStream = tracker.currentRestriction().getCloseStream();
if (closeStream != null) {
LOG.debug("RCSP: Processing CloseStream");
metrics.decPartitionStreamCount();
if (closeStream.getStatus().getCode() == Status.Code.OK) {
// We need to update the watermark here. We're terminating this stream because we have reached
// endTime: Instant.now is greater than or equal to endTime. The goal here is that
// DNP needs to know this stream has passed the endTime so DNP can eventually terminate.
Instant terminatingWatermark = Instant.ofEpochMilli(Long.MAX_VALUE);
Instant endTime = partitionRecord.getEndTime();
if (endTime != null) {
terminatingWatermark = endTime;
}
watermarkEstimator.setWatermark(terminatingWatermark);
metadataTableDao.updateWatermark(
partitionRecord.getPartition(), watermarkEstimator.currentWatermark(), null);
LOG.info(
"RCSP {}: Reached end time, terminating...",
formatByteStringRange(partitionRecord.getPartition()));
return ProcessContinuation.stop();
}
if (closeStream.getStatus().getCode() != Status.Code.OUT_OF_RANGE) {
LOG.error(
"RCSP {}: Reached unexpected terminal state: {}",
formatByteStringRange(partitionRecord.getPartition()),
closeStream.getStatus());
return ProcessContinuation.stop();
}
// Release the lock only if the uuid matches. In normal operation this doesn't change
// anything. However, it's possible for this RCSP to crash while processing CloseStream but
// after the side effects of writing the new partitions to the metadata table. New partitions
// can be created while this RCSP restarts from the previous checkpoint and processes the
// CloseStream again. In certain race scenarios the child partitions may merge back to this
// partition, but as a new RCSP. The new partition (same as this partition) would write the
// exact same content to the metadata table but with a different uuid. We don't want to
// accidentally delete the StreamPartition because it now belongs to the new RCSP.
// If the uuid is the same (meaning this race scenario did not take place) we release the lock
// and mark the StreamPartition to be deleted, so we can delete it after we have written the
// NewPartitions.
metadataTableDao.releaseStreamPartitionLockForDeletion(
partitionRecord.getPartition(), partitionRecord.getUuid());
// The partitions in the continuation tokens must cover the same key space as this partition.
// If there's only 1 token, then the token's partition is equal to this partition.
// If there is more than 1 token, then the tokens form a continuous row range equal to this
// partition.
List<ByteStringRange> childPartitions = new ArrayList<>();
List<ByteStringRange> tokenPartitions = new ArrayList<>();
// Check if the NewPartitions field exists; if not, we default to using just the
// ChangeStreamContinuationTokens.
boolean useNewPartitionsField =
closeStream.getNewPartitions().size()
== closeStream.getChangeStreamContinuationTokens().size();
for (int i = 0; i < closeStream.getChangeStreamContinuationTokens().size(); i++) {
ByteStringRange childPartition;
if (useNewPartitionsField) {
childPartition = closeStream.getNewPartitions().get(i);
} else {
childPartition = closeStream.getChangeStreamContinuationTokens().get(i).getPartition();
}
childPartitions.add(childPartition);
ChangeStreamContinuationToken token =
getTokenWithCorrectPartition(
partitionRecord.getPartition(),
closeStream.getChangeStreamContinuationTokens().get(i));
tokenPartitions.add(token.getPartition());
metadataTableDao.writeNewPartition(
new NewPartition(
childPartition, Collections.singletonList(token), watermarkEstimator.getState()));
}
LOG.info(
"RCSP {}: Split/Merge into {}",
formatByteStringRange(partitionRecord.getPartition()),
partitionsToString(childPartitions));
if (!coverSameKeySpace(tokenPartitions, partitionRecord.getPartition())) {
LOG.warn(
"RCSP {}: CloseStream has tokens {} that don't cover the entire keyspace",
formatByteStringRange(partitionRecord.getPartition()),
partitionsToString(tokenPartitions));
}
// Perform the real cleanup. This step is a no-op if the race mentioned above occurs (the splits
// and merges result in this same partition again) because when we register the "new" partition,
// we unset the deletion bit.
metadataTableDao.deleteStreamPartitionRow(partitionRecord.getPartition());
return ProcessContinuation.stop();
}
// Update the metadata table with the watermark
metadataTableDao.updateWatermark(
partitionRecord.getPartition(),
watermarkEstimator.getState(),
tracker.currentRestriction().getCurrentToken());
// Start to stream the partition.
ServerStream<ChangeStreamRecord> stream = null;
try {
stream =
changeStreamDao.readChangeStreamPartition(
partitionRecord,
tracker.currentRestriction(),
partitionRecord.getEndTime(),
heartbeatDuration);
for (ChangeStreamRecord record : stream) {
Optional<ProcessContinuation> result =
changeStreamAction.run(
partitionRecord,
record,
tracker,
receiver,
watermarkEstimator,
throughputEstimator);
// changeStreamAction will usually return Optional.empty() except for when a checkpoint
// (either runner or pipeline initiated) is required.
if (result.isPresent()) {
return result.get();
}
}
} catch (Exception e) {
throw e;
} finally {
if (stream != null) {
stream.cancel();
}
}
return ProcessContinuation.resume();
} | @Test
public void testLockingRowSucceed() throws IOException {
final ServerStream<ChangeStreamRecord> responses = mock(ServerStream.class);
final Iterator<ChangeStreamRecord> responseIterator = mock(Iterator.class);
when(responses.iterator()).thenReturn(responseIterator);
Heartbeat mockHeartBeat = Mockito.mock(Heartbeat.class);
when(responseIterator.next()).thenReturn(mockHeartBeat);
when(responseIterator.hasNext()).thenReturn(true);
when(changeStreamDao.readChangeStreamPartition(any(), any(), any(), any()))
.thenReturn(responses);
when(changeStreamAction.run(any(), any(), any(), any(), any(), any()))
.thenReturn(Optional.of(DoFn.ProcessContinuation.stop()));
final DoFn.ProcessContinuation result =
action.run(partitionRecord, tracker, receiver, watermarkEstimator);
assertEquals(DoFn.ProcessContinuation.stop(), result);
// Verify that on successful lock, we don't tryClaim on the tracker.
verify(tracker, never()).tryClaim(any());
verify(changeStreamAction).run(any(), any(), any(), any(), any(), any());
} |
@Override
public void createNode(OpenstackNode osNode) {
checkNotNull(osNode, ERR_NULL_NODE);
OpenstackNode updatedNode;
if (osNode.intgBridge() == null && osNode.type() != CONTROLLER) {
String deviceIdStr = genDpid(deviceIdCounter.incrementAndGet());
checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID);
updatedNode = osNode.updateIntbridge(DeviceId.deviceId(deviceIdStr));
checkArgument(!hasIntgBridge(updatedNode.intgBridge(), updatedNode.hostname()),
NOT_DUPLICATED_MSG, updatedNode.intgBridge());
} else {
updatedNode = osNode;
checkArgument(!hasIntgBridge(updatedNode.intgBridge(), updatedNode.hostname()),
NOT_DUPLICATED_MSG, updatedNode.intgBridge());
}
osNodeStore.createNode(updatedNode);
log.info(String.format(MSG_NODE, osNode.hostname(), MSG_CREATED));
} | @Test(expected = IllegalArgumentException.class)
public void testCreateNodeWithDuplicateIntgBridge() {
target.createNode(COMPUTE_1);
target.createNode(COMPUTE_1_DUP_INT);
} |
public void container(Action<? super ContainerParameters> action) {
action.execute(container);
} | @Test
public void testContainer() {
assertThat(testJibExtension.getContainer().getJvmFlags()).isEmpty();
assertThat(testJibExtension.getContainer().getEnvironment()).isEmpty();
assertThat(testJibExtension.getContainer().getExtraClasspath()).isEmpty();
assertThat(testJibExtension.getContainer().getExpandClasspathDependencies()).isFalse();
assertThat(testJibExtension.getContainer().getMainClass()).isNull();
assertThat(testJibExtension.getContainer().getArgs()).isNull();
assertThat(testJibExtension.getContainer().getFormat()).isSameInstanceAs(ImageFormat.Docker);
assertThat(testJibExtension.getContainer().getPorts()).isEmpty();
assertThat(testJibExtension.getContainer().getLabels().get()).isEmpty();
assertThat(testJibExtension.getContainer().getAppRoot()).isEmpty();
assertThat(testJibExtension.getContainer().getFilesModificationTime().get())
.isEqualTo("EPOCH_PLUS_SECOND");
assertThat(testJibExtension.getContainer().getCreationTime().get()).isEqualTo("EPOCH");
testJibExtension.container(
container -> {
container.setJvmFlags(Arrays.asList("jvmFlag1", "jvmFlag2"));
container.setEnvironment(ImmutableMap.of("var1", "value1", "var2", "value2"));
container.setEntrypoint(Arrays.asList("foo", "bar", "baz"));
container.setExtraClasspath(Arrays.asList("/d1", "/d2", "/d3"));
container.setExpandClasspathDependencies(true);
container.setMainClass("mainClass");
container.setArgs(Arrays.asList("arg1", "arg2", "arg3"));
container.setPorts(Arrays.asList("1000", "2000-2010", "3000"));
container.setFormat(ImageFormat.OCI);
container.setAppRoot("some invalid appRoot value");
container.getFilesModificationTime().set("some invalid time value");
container.getCreationTime().set("some other invalid time value");
});
ContainerParameters container = testJibExtension.getContainer();
assertThat(container.getEntrypoint()).containsExactly("foo", "bar", "baz").inOrder();
assertThat(container.getJvmFlags()).containsExactly("jvmFlag1", "jvmFlag2").inOrder();
assertThat(container.getEnvironment())
.containsExactly("var1", "value1", "var2", "value2")
.inOrder();
assertThat(container.getExtraClasspath()).containsExactly("/d1", "/d2", "/d3").inOrder();
assertThat(testJibExtension.getContainer().getExpandClasspathDependencies()).isTrue();
assertThat(testJibExtension.getContainer().getMainClass()).isEqualTo("mainClass");
assertThat(container.getArgs()).containsExactly("arg1", "arg2", "arg3").inOrder();
assertThat(container.getPorts()).containsExactly("1000", "2000-2010", "3000").inOrder();
assertThat(container.getFormat()).isSameInstanceAs(ImageFormat.OCI);
assertThat(container.getAppRoot()).isEqualTo("some invalid appRoot value");
assertThat(container.getFilesModificationTime().get()).isEqualTo("some invalid time value");
assertThat(container.getCreationTime().get()).isEqualTo("some other invalid time value");
testJibExtension.container(
extensionContainer -> {
extensionContainer.getFilesModificationTime().set((String) null);
extensionContainer.getCreationTime().set((String) null);
});
container = testJibExtension.getContainer();
assertThat(container.getFilesModificationTime().get()).isEqualTo("EPOCH_PLUS_SECOND");
assertThat(container.getCreationTime().get()).isEqualTo("EPOCH");
} |
@Override
public Object getValue() {
return lazyMetric == null ? null : lazyMetric.getValue();
} | @Test
public void getValue() {
//Long
LazyDelegatingGauge gauge = new LazyDelegatingGauge("bar", 99l);
assertThat(gauge.getValue()).isEqualTo(99l);
assertThat(gauge.getType()).isEqualTo(MetricType.GAUGE_NUMBER);
//Double
gauge = new LazyDelegatingGauge("bar", 99.0);
assertThat(gauge.getValue()).isEqualTo(99.0);
assertThat(gauge.getType()).isEqualTo(MetricType.GAUGE_NUMBER);
//Boolean
gauge = new LazyDelegatingGauge("bar", true);
assertThat(gauge.getValue()).isEqualTo(true);
assertThat(gauge.getType()).isEqualTo(MetricType.GAUGE_BOOLEAN);
//Text
gauge = new LazyDelegatingGauge("bar", "something");
assertThat(gauge.getValue()).isEqualTo("something");
assertThat(gauge.getType()).isEqualTo(MetricType.GAUGE_TEXT);
//Ruby Hash
gauge = new LazyDelegatingGauge("bar", RUBY_HASH);
assertThat(gauge.getValue().toString()).isEqualTo(RUBY_HASH_AS_STRING);
assertThat(gauge.getType()).isEqualTo(MetricType.GAUGE_RUBYHASH);
//Ruby Timestamp
gauge = new LazyDelegatingGauge("bar", RUBY_TIMESTAMP);
assertThat(gauge.getValue()).isEqualTo(TIMESTAMP);
assertThat(gauge.getType()).isEqualTo(MetricType.GAUGE_RUBYTIMESTAMP);
//Unknown
gauge = new LazyDelegatingGauge("bar", Collections.singleton("value"));
assertThat(gauge.getValue()).isEqualTo(Collections.singleton("value"));
assertThat(gauge.getValue()).isEqualTo(gauge.get());
assertThat(gauge.getType()).isEqualTo(MetricType.GAUGE_UNKNOWN);
//Null
gauge = new LazyDelegatingGauge("bar");
assertThat(gauge.getValue()).isNull();
assertThat(gauge.get()).isNull();
assertThat(gauge.getType()).isNull();
assertThat(gauge.getName()).isNotEmpty();
} |
public static String addQuery(String url, String... parameters) {
if (parameters.length % 2 != 0)
throw new IllegalArgumentException("Expected an even number of parameters.");
var result = new StringBuilder(url);
var printedParams = 0;
for (var i = 0; i < parameters.length; i += 2) {
var key = parameters[i];
var value = parameters[i + 1];
if (key == null)
throw new NullPointerException("Parameter key must not be null");
if (value != null) {
if (printedParams == 0)
result.append('?');
else
result.append('&');
result.append(key).append('=').append(UriUtils.encodeQueryParam(value, StandardCharsets.UTF_8));
printedParams++;
}
}
return result.toString();
} | @Test
public void testQuery() throws Exception {
var url = "http://localhost/api/foo";
assertThat(UrlUtil.addQuery(url, "a", "1", "b", null, "c", "b\ta/r"))
.isEqualTo("http://localhost/api/foo?a=1&c=b%09a/r");
} |
public CompletionStage<Void> migrate(MigrationSet set) {
InterProcessLock lock = new InterProcessSemaphoreMutex(client.unwrap(), ZKPaths.makePath(lockPath, set.id()));
CompletionStage<Void> lockStage = lockAsync(lock, lockMax.toMillis(), TimeUnit.MILLISECONDS, executor);
return lockStage.thenCompose(__ -> runMigrationInLock(lock, set));
} | @Test
public void testChecksumDataError() {
CuratorOp op1 = client.transactionOp().create().forPath("/test");
CuratorOp op2 = client.transactionOp().create().forPath("/test/bar", "first".getBytes());
Migration migration = () -> Arrays.asList(op1, op2);
MigrationSet migrationSet = MigrationSet.build("1", Collections.singletonList(migration));
complete(manager.migrate(migrationSet));
CuratorOp op2Changed = client.transactionOp().create().forPath("/test/bar", "second".getBytes());
migration = () -> Arrays.asList(op1, op2Changed);
migrationSet = MigrationSet.build("1", Collections.singletonList(migration));
try {
complete(manager.migrate(migrationSet));
fail("Should throw");
} catch (Throwable e) {
assertTrue(Throwables.getRootCause(e) instanceof MigrationException);
}
} |
@Override
public void doClean() {
long currentTime = System.currentTimeMillis();
for (ExpiredMetadataInfo each : metadataManager.getExpiredMetadataInfos()) {
if (currentTime - each.getCreateTime() > GlobalConfig.getExpiredMetadataExpiredTime()) {
removeExpiredMetadata(each);
}
}
} | @Test
void testDoClean() {
expiredMetadataCleaner.doClean();
verify(metadataManagerMock).getExpiredMetadataInfos();
verify(metadataOperateServiceMock).deleteServiceMetadata(expiredMetadataInfoMock.getService());
} |
static <E extends Enum<E>> String enumName(final E state)
{
return null == state ? "null" : state.name();
} | @Test
void stateNameReturnsNameOfTheEnumConstant()
{
final ChronoUnit state = ChronoUnit.CENTURIES;
assertEquals(state.name(), enumName(state));
} |
public static void setProtectedFieldValue(String protectedField, Object object, Object newValue) {
try {
// acegi would silently fail to write to final fields
// FieldUtils.writeField(Object, field, true) only sets accessible on *non* public fields
// and then fails with IllegalAccessException (even if you make the field accessible in the interim!)
// for backwards compatibility we need to use a few steps
Field field = org.apache.commons.lang.reflect.FieldUtils.getField(object.getClass(), protectedField, true);
field.setAccessible(true);
field.set(object, newValue);
} catch (Exception x) {
throw new RuntimeException(x);
}
} | @Test
public void setProtectedFieldValue_Should_Succeed() {
InnerClassWithProtectedField sut = new InnerClassWithProtectedField();
FieldUtils.setProtectedFieldValue("myProtectedField", sut, "test");
assertEquals("test", sut.getMyNonFinalField());
} |
void doSubmit(final Runnable action) {
CONTINUATION.get().submit(action);
} | @Test
public void testRecursiveOrder() {
final Continuations CONT = new Continuations();
final StringBuilder result = new StringBuilder();
CONT.doSubmit(() -> {
result.append("BEGIN{");
recursivePostOrder(CONT, result, "root", 0);
});
CONT.doSubmit(() -> {
result.append("}END");
});
assertEquals(result.toString(),
"BEGIN{[done(rootLL:2)][done(rootLR:2)][done(rootL:1)][done(rootRL:2)][done(rootRR:2)][done(rootR:1)][done(root:0)]}END");
} |
public static String[] splitIPPortStr(String address) {
if (StringUtils.isBlank(address)) {
throw new IllegalArgumentException("ip and port string cannot be empty!");
}
if (address.charAt(0) == '[') {
address = removeBrackets(address);
}
String[] serverAddArr = null;
int i = address.lastIndexOf(Constants.IP_PORT_SPLIT_CHAR);
if (i > -1) {
serverAddArr = new String[2];
String hostAddress = address.substring(0, i);
if (hostAddress.contains("%")) {
hostAddress = hostAddress.substring(0, hostAddress.indexOf("%"));
}
serverAddArr[0] = hostAddress;
serverAddArr[1] = address.substring(i + 1);
}
return serverAddArr;
} | @Test
public void testSplitIPPortStr() {
String[] ipPort = new String[]{"127.0.0.1","8080"};
assertThat(NetUtil.splitIPPortStr("127.0.0.1:8080")).isEqualTo(ipPort);
ipPort = new String[]{"::","8080"};
assertThat(NetUtil.splitIPPortStr("[::]:8080")).isEqualTo(ipPort);
ipPort = new String[]{"2000:0000:0000:0000:0001:2345:6789:abcd","8080"};
assertThat(NetUtil.splitIPPortStr("2000:0000:0000:0000:0001:2345:6789:abcd%10:8080")).isEqualTo(ipPort);
ipPort = new String[]{"2000:0000:0000:0000:0001:2345:6789:abcd","8080"};
assertThat(NetUtil.splitIPPortStr("[2000:0000:0000:0000:0001:2345:6789:abcd]:8080")).isEqualTo(ipPort);
ipPort = new String[]{"::FFFF:192.168.1.2","8080"};
assertThat(NetUtil.splitIPPortStr("::FFFF:192.168.1.2:8080")).isEqualTo(ipPort);
ipPort = new String[]{"::FFFF:192.168.1.2","8080"};
assertThat(NetUtil.splitIPPortStr("[::FFFF:192.168.1.2]:8080")).isEqualTo(ipPort);
} |
public Analysis analyze(Statement statement)
{
return analyze(statement, false);
} | @Test
public void testCreateTableAsColumns()
{
// TODO: validate output
analyze("CREATE TABLE test(a) AS SELECT 123");
analyze("CREATE TABLE test(a, b) AS SELECT 1, 2");
analyze("CREATE TABLE test(a) AS (VALUES 1)");
assertFails(COLUMN_NAME_NOT_SPECIFIED, "CREATE TABLE test AS SELECT 123");
assertFails(DUPLICATE_COLUMN_NAME, "CREATE TABLE test AS SELECT 1 a, 2 a");
assertFails(COLUMN_TYPE_UNKNOWN, "CREATE TABLE test AS SELECT null a");
assertFails(MISMATCHED_COLUMN_ALIASES, 1, 19, "CREATE TABLE test(x) AS SELECT 1, 2");
assertFails(MISMATCHED_COLUMN_ALIASES, 1, 19, "CREATE TABLE test(x, y) AS SELECT 1");
assertFails(MISMATCHED_COLUMN_ALIASES, 1, 19, "CREATE TABLE test(x, y) AS (VALUES 1)");
assertFails(DUPLICATE_COLUMN_NAME, 1, 24, "CREATE TABLE test(abc, AbC) AS SELECT 1, 2");
assertFails(COLUMN_TYPE_UNKNOWN, 1, 1, "CREATE TABLE test(x) AS SELECT null");
assertFails(MISSING_ATTRIBUTE, ".*'y' cannot be resolved", "CREATE TABLE test(x) WITH (p1 = y) AS SELECT null");
assertFails(DUPLICATE_PROPERTY, ".* Duplicate property: p1", "CREATE TABLE test(x) WITH (p1 = 'p1', p2 = 'p2', p1 = 'p3') AS SELECT null");
assertFails(DUPLICATE_PROPERTY, ".* Duplicate property: p1", "CREATE TABLE test(x) WITH (p1 = 'p1', \"p1\" = 'p2') AS SELECT null");
} |
static boolean containsIn(CloneGroup first, CloneGroup second) {
if (first.getCloneUnitLength() > second.getCloneUnitLength()) {
return false;
}
List<ClonePart> firstParts = first.getCloneParts();
List<ClonePart> secondParts = second.getCloneParts();
return SortedListsUtils.contains(secondParts, firstParts, new ContainsInComparator(second.getCloneUnitLength(), first.getCloneUnitLength()))
&& SortedListsUtils.contains(firstParts, secondParts, ContainsInComparator.RESOURCE_ID_COMPARATOR);
} | @Test
public void length_of_C1_bigger_than_length_of_C2() {
CloneGroup c1 = spy(newCloneGroup(3,
newClonePart("a", 0)));
CloneGroup c2 = spy(newCloneGroup(1,
newClonePart("a", 0)));
assertThat(Filter.containsIn(c1, c2), is(false));
// containsIn method should check only origin and length - no need to compare all parts
verify(c1).getCloneUnitLength();
verify(c2).getCloneUnitLength();
verifyNoMoreInteractions(c1);
verifyNoMoreInteractions(c2);
} |
@Override
public boolean execute(Workflow workflow, Task task, WorkflowExecutor executor) {
Map<String, Task> taskMap = TaskHelper.getTaskMap(workflow);
Optional<Task.Status> done = executeJoin(task, taskMap);
if (done.isPresent() && confirmDone(workflow, task)) { // update task status if it is done
task.setStatus(done.get());
return true;
}
return false;
} | @Test
public void testExecuteDone() {
StepRuntimeState state = new StepRuntimeState();
state.setStatus(StepInstance.Status.COMPLETED_WITH_ERROR);
when(stepInstanceDao.getStepStates(anyString(), anyLong(), anyLong(), anyList()))
.thenReturn(Collections.singletonMap("job1", state));
assertTrue(gateTask.execute(workflow, joinTask, null));
assertEquals(Task.Status.FAILED, joinTask.getStatus());
} |
@Override
public void batchDeregisterService(String serviceName, String groupName, List<Instance> instances) {
throw new UnsupportedOperationException(
"Do not support persistent instances to perform batch de registration methods.");
} | @Test
void testBatchDeregisterService() {
assertThrows(UnsupportedOperationException.class, () -> {
clientProxy.batchDeregisterService("a", "b", null);
});
} |
public static synchronized String getEcosystem(String vendor, String product, String identifiedEcosystem) {
final Pair<String, String> key = new Pair<>(vendor, product);
final String current = cache.get(key);
String result = null;
if (current == null) {
if (!StringUtils.isBlank(identifiedEcosystem)) {
cache.put(key, identifiedEcosystem);
changed.put(key, identifiedEcosystem);
result = identifiedEcosystem;
}
} else if (MULTIPLE_ECOSYSTEMS_IDENTIFIED.equals(current)) {
//do nothing - result is already null
} else if (current.equals(identifiedEcosystem) || identifiedEcosystem == null) {
result = current;
} else {
cache.put(key, MULTIPLE_ECOSYSTEMS_IDENTIFIED);
changed.put(key, MULTIPLE_ECOSYSTEMS_IDENTIFIED);
}
return result;
} | @Test
public void testGetEcosystem() {
Pair<String, String> key = new Pair<>("apache", "zookeeper");
Map<Pair<String, String>, String> map = new HashMap<>();
map.put(key, "java");
CpeEcosystemCache.setCache(map);
String expected = "java";
String result = CpeEcosystemCache.getEcosystem("apache", "zookeeper", null);
assertEquals(expected, result);
// changes to MULTIPLE - which is returned as null
result = CpeEcosystemCache.getEcosystem("apache", "zookeeper", "c++");
assertNull(result);
result = CpeEcosystemCache.getEcosystem("pivotal", "spring-framework", null);
assertNull(result);
expected = "java";
result = CpeEcosystemCache.getEcosystem("pivotal", "spring-framework", "java");
assertEquals(expected, result);
expected = "java";
result = CpeEcosystemCache.getEcosystem("pivotal", "spring-framework", "java");
assertEquals(expected, result);
result = CpeEcosystemCache.getEcosystem("microsoft", "word", null );
assertNull(result);
result = CpeEcosystemCache.getEcosystem("microsoft", "word", null );
assertNull(result);
result = CpeEcosystemCache.getEcosystem("microsoft", "word", "" );
assertNull(result);
} |
public static Find find(String regex) {
return find(regex, 0);
} | @Test
@Category(NeedsRunner.class)
public void testFindNameNone() {
PCollection<String> output =
p.apply(Create.of("a", "b", "c", "d"))
.apply(Regex.find("(?<namedgroup>[xyz])", "namedgroup"));
PAssert.that(output).empty();
p.run();
} |
@Override
public List<String> listPartitionNamesByValue(String databaseName, String tableName,
List<Optional<String>> partitionValues) {
List<Partition> partitions = get(partitionCache, OdpsTableName.of(databaseName, tableName));
ImmutableList.Builder<String> builder = ImmutableList.builder();
if (partitions == null || partitions.isEmpty()) {
return builder.build();
}
List<PartitionSpec> partitionSpecs =
partitions.stream().map(Partition::getPartitionSpec).collect(Collectors.toList());
List<String> keys = new ArrayList<>(partitionSpecs.get(0).keys());
for (PartitionSpec partitionSpec : partitionSpecs) {
boolean present = true;
for (int index = 0; index < keys.size(); index++) {
String value = keys.get(index);
if (partitionValues.get(index).isPresent() && partitionSpec.get(value) != null) {
if (!partitionSpec.get(value).equals(partitionValues.get(index).get())) {
present = false;
break;
}
}
}
if (present) {
builder.add(partitionSpec.toString(false, true));
}
}
return builder.build();
} | @Test
public void testListPartitionNamesByValue() {
List<String> partitions = odpsMetadata.listPartitionNamesByValue("project", "tableName",
ImmutableList.of(Optional.of("a"), Optional.empty()));
Assert.assertEquals(Collections.singletonList("p1=a/p2=b"), partitions);
partitions = odpsMetadata.listPartitionNamesByValue("project", "tableName",
ImmutableList.of(Optional.empty(), Optional.of("b")));
Assert.assertEquals(Collections.singletonList("p1=a/p2=b"), partitions);
} |
public static <K, V> Reshuffle<K, V> of() {
return new Reshuffle<>();
} | @Test
public void testRequestOldUpdateCompatibility() {
pipeline.enableAbandonedNodeEnforcement(false);
pipeline.getOptions().as(StreamingOptions.class).setUpdateCompatibilityVersion("2.53.0");
pipeline.apply(Create.of(KV.of("arbitrary", "kv"))).apply(Reshuffle.of());
OldTransformSeeker seeker = new OldTransformSeeker();
pipeline.traverseTopologically(seeker);
assertTrue(seeker.isOldTransformFound);
} |
public static TypeDescriptor javaTypeForFieldType(FieldType fieldType) {
switch (fieldType.getTypeName()) {
case LOGICAL_TYPE:
// TODO: shouldn't we handle this differently?
return javaTypeForFieldType(fieldType.getLogicalType().getBaseType());
case ARRAY:
return TypeDescriptors.lists(javaTypeForFieldType(fieldType.getCollectionElementType()));
case ITERABLE:
return TypeDescriptors.iterables(
javaTypeForFieldType(fieldType.getCollectionElementType()));
case MAP:
return TypeDescriptors.maps(
javaTypeForFieldType(fieldType.getMapKeyType()),
javaTypeForFieldType(fieldType.getMapValueType()));
case ROW:
return TypeDescriptors.rows();
default:
return PRIMITIVE_MAPPING.get(fieldType.getTypeName());
}
} | @Test
public void testMapTypeToJavaType() {
assertEquals(
TypeDescriptors.maps(TypeDescriptors.strings(), TypeDescriptors.longs()),
FieldTypeDescriptors.javaTypeForFieldType(
FieldType.map(FieldType.STRING, FieldType.INT64)));
assertEquals(
TypeDescriptors.maps(
TypeDescriptors.strings(), TypeDescriptors.lists(TypeDescriptors.longs())),
FieldTypeDescriptors.javaTypeForFieldType(
FieldType.map(FieldType.STRING, FieldType.array(FieldType.INT64))));
} |
public TopicConnection topicConnection(TopicConnection connection) {
// It is common to implement both interfaces
if (connection instanceof XATopicConnection) {
return xaTopicConnection((XATopicConnection) connection);
}
return TracingConnection.create(connection, this);
} | @Test void topicConnection_doesntDoubleWrap() {
TopicConnection wrapped = jmsTracing.topicConnection(mock(TopicConnection.class));
assertThat(jmsTracing.topicConnection(wrapped))
.isSameAs(wrapped);
} |
@Override
public int deleteById(final String userId) {
final User user = loadById(userId);
if (user == null) {
return 0;
}
DBObject query = new BasicDBObject();
query.put("_id", new ObjectId(userId));
final int deleteCount = destroy(query, UserImpl.COLLECTION_NAME);
accesstokenService.deleteAllForUser(user.getName()); //TODO: probably should go through listener subscribing to delete event
serverEventBus.post(UserDeletedEvent.create(userId, user.getName()));
return deleteCount;
} | @Test
@MongoDBFixtures("UserServiceImplTest.json")
public void testDeleteById() throws Exception {
assertThat(userService.deleteById("54e3deadbeefdeadbeef0001")).isEqualTo(1);
assertThat(userService.deleteById("54e3deadbeefdeadbeef0003")).isEqualTo(1);
assertThat(userService.deleteById("00000eadbeefdeadbee00000")).isEqualTo(0);
} |
public String getClientLatency() {
if (!enabled) {
return null;
}
Instant trackerStart = Instant.now();
String latencyDetails = queue.poll(); // non-blocking pop
if (LOG.isDebugEnabled()) {
Instant stop = Instant.now();
long elapsed = Duration.between(trackerStart, stop).toMillis();
LOG.debug("Dequeued latency info [{} ms]: {}", elapsed, latencyDetails);
}
return latencyDetails;
} | @Test
public void verifyGettingLatencyRecordsIsCheapWhenEnabled() throws Exception {
final double maxLatencyWhenDisabledMs = 5000;
final double minLatencyWhenDisabledMs = 0;
final long numTasks = 1000;
long aggregateLatency = 0;
AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, true);
List<Callable<Long>> tasks = new ArrayList<>();
for (int i = 0; i < numTasks; i++) {
tasks.add(() -> {
Instant startRecord = Instant.now();
abfsPerfTracker.getClientLatency();
long latencyRecord = Duration.between(startRecord, Instant.now()).toMillis();
LOG.debug("Spent {} ms in recording latency.", latencyRecord);
return latencyRecord;
});
}
for (Future<Long> fr: executorService.invokeAll(tasks)) {
aggregateLatency += fr.get();
}
double averageRecordLatency = aggregateLatency / numTasks;
assertThat(averageRecordLatency).describedAs("Average time for getting latency records should be bounded")
.isBetween(minLatencyWhenDisabledMs, maxLatencyWhenDisabledMs);
} |
@Override
public String format(final Schema schema) {
final String converted = SchemaWalker.visit(schema, new Converter()) + typePostFix(schema);
return options.contains(Option.AS_COLUMN_LIST)
? stripTopLevelStruct(converted)
: converted;
} | @Test
public void shouldFormatRequiredStructAsColumns() {
// Given:
final Schema structSchema = SchemaBuilder.struct()
.field("COL1", Schema.OPTIONAL_STRING_SCHEMA)
.field("COL4", SchemaBuilder
.array(Schema.OPTIONAL_FLOAT64_SCHEMA)
.optional()
.build())
.field("COL5", SchemaBuilder
.map(Schema.OPTIONAL_STRING_SCHEMA, Schema.OPTIONAL_FLOAT64_SCHEMA)
.optional()
.build())
.build();
final SqlSchemaFormatter formatter = new SqlSchemaFormatter(
DO_NOT_ESCAPE_COLUMN_NAMES,
Option.AS_COLUMN_LIST,
Option.APPEND_NOT_NULL
);
// When:
final String result = formatter.format(structSchema);
// Then:
assertThat(result, is(
"COL1 VARCHAR, "
+ "COL4 ARRAY<DOUBLE>, "
+ "COL5 MAP<VARCHAR, DOUBLE>"));
} |
public Histogram(BinScheme binScheme) {
this.hist = new float[binScheme.bins()];
this.count = 0.0f;
this.binScheme = binScheme;
} | @Test
public void testHistogram() {
BinScheme scheme = new ConstantBinScheme(10, -5, 5);
Histogram hist = new Histogram(scheme);
for (int i = -5; i < 5; i++)
hist.record(i);
for (int i = 0; i < 10; i++)
assertEquals(scheme.fromBin(i), hist.value(i / 10.0 + EPS), EPS);
} |
public List<String> getTaintRepositories() {
return taintRepositories;
} | @Test
public void test_getTaintRepositories_withExtraReposFromConfiguration() {
when(configuration.hasKey(EXTRA_TAINT_REPOSITORIES)).thenReturn(true);
when(configuration.getStringArray(EXTRA_TAINT_REPOSITORIES)).thenReturn(new String[]{"extra-1", "extra-2"});
TaintChecker underTest = new TaintChecker(configuration);
assertThat(underTest.getTaintRepositories())
.hasSize(8)
.containsExactlyInAnyOrder("roslyn.sonaranalyzer.security.cs", "javasecurity", "jssecurity",
"tssecurity", "phpsecurity", "pythonsecurity", "extra-1", "extra-2");
} |
@Override
public double rand() {
if (rng == null) {
rng = new RejectionLogLogistic();
}
return rng.rand();
} | @Test
public void testSd() {
System.out.println("sd");
BetaDistribution instance = new BetaDistribution(2, 5);
instance.rand();
assertEquals(0.1597191, instance.sd(), 1E-7);
} |
static long getNodeHashRangeOnLevel(int level) {
int nodesOnLevel = getNodesOnLevel(level);
return INT_RANGE / nodesOnLevel;
} | @Test
public void testGetHashStepForLevel() {
assertEquals(1L << 32, MerkleTreeUtil.getNodeHashRangeOnLevel(0));
assertEquals(1L << 31, MerkleTreeUtil.getNodeHashRangeOnLevel(1));
assertEquals(1L << 30, MerkleTreeUtil.getNodeHashRangeOnLevel(2));
assertEquals(1L << 29, MerkleTreeUtil.getNodeHashRangeOnLevel(3));
assertEquals(1L << 28, MerkleTreeUtil.getNodeHashRangeOnLevel(4));
} |
static long deleteObsoleteJRobinFiles(String application) {
final Calendar nowMinusThreeMonthsAndADay = Calendar.getInstance();
nowMinusThreeMonthsAndADay.add(Calendar.DAY_OF_YEAR, -getObsoleteGraphsDays());
nowMinusThreeMonthsAndADay.add(Calendar.DAY_OF_YEAR, -1);
final long timestamp = Util.getTimestamp(nowMinusThreeMonthsAndADay);
final int counterRequestIdLength = new CounterRequest("", "").getId().length();
long diskUsage = 0;
final Map<String, Long> lastUpdateTimesByPath = new HashMap<>();
final List<File> rrdFiles = new ArrayList<>(listRrdFiles(application));
for (final File file : rrdFiles) {
// only delete the rrd files of requests (the other rrd files are few in number)
if (file.getName().length() > counterRequestIdLength
&& file.lastModified() < nowMinusThreeMonthsAndADay.getTimeInMillis()) {
final long lastUpdateTime = getLastUpdateTime(file);
lastUpdateTimesByPath.put(file.getPath(), lastUpdateTime);
final boolean obsolete = lastUpdateTime < timestamp;
boolean deleted = false;
if (obsolete) {
deleted = file.delete();
}
if (!deleted) {
diskUsage += file.length();
}
} else {
diskUsage += file.length();
}
}
final long maxRrdDiskUsage = getMaxRrdDiskUsageMb() * 1024L * 1024L;
if (diskUsage > maxRrdDiskUsage) {
// sort rrd files from least to most recently used
for (final File file : rrdFiles) {
if (lastUpdateTimesByPath.get(file.getPath()) == null) {
lastUpdateTimesByPath.put(file.getPath(), getLastUpdateTime(file));
}
}
rrdFiles.sort(Comparator.comparing(file -> lastUpdateTimesByPath.get(file.getPath())));
// delete least recently used rrd files until rrd disk usage < 20 MB
for (final File file : rrdFiles) {
if (diskUsage < maxRrdDiskUsage) {
break;
}
if (file.getName().length() > counterRequestIdLength) {
final long length = file.length();
if (file.delete()) {
diskUsage -= length;
}
}
}
}
return diskUsage;
} | @Test
public void testDeleteObsoleteJRobinFiles() {
JRobin.deleteObsoleteJRobinFiles(TEST_APPLICATION);
Utils.setProperty(Parameter.OBSOLETE_GRAPHS_DAYS, "1");
JRobin.deleteObsoleteJRobinFiles(TEST_APPLICATION);
} |
public Optional<ScimGroupDto> findByGroupUuid(DbSession dbSession, String groupUuid) {
return Optional.ofNullable(mapper(dbSession).findByGroupUuid(groupUuid));
} | @Test
void findByGroupUuid_whenScimUuidFound_shouldReturnDto() {
ScimGroupDto scimGroupDto = db.users().insertScimGroup(db.users().insertGroup());
db.users().insertScimGroup(db.users().insertGroup());
ScimGroupDto underTest = scimGroupDao.findByGroupUuid(db.getSession(), scimGroupDto.getGroupUuid())
.orElseGet(() -> fail("Group not found"));
assertThat(underTest.getScimGroupUuid()).isEqualTo(scimGroupDto.getScimGroupUuid());
assertThat(underTest.getGroupUuid()).isEqualTo(scimGroupDto.getGroupUuid());
} |
public boolean shouldRestartTask(TaskStatus status) {
return includeTasks && (!onlyFailed || status.state() == AbstractStatus.State.FAILED);
} | @Test
public void doNotRestartTasks() {
RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, false, false);
assertFalse(restartRequest.shouldRestartTask(createTaskStatus(AbstractStatus.State.FAILED)));
assertFalse(restartRequest.shouldRestartTask(createTaskStatus(AbstractStatus.State.RUNNING)));
restartRequest = new RestartRequest(CONNECTOR_NAME, true, false);
assertFalse(restartRequest.shouldRestartTask(createTaskStatus(AbstractStatus.State.FAILED)));
assertFalse(restartRequest.shouldRestartTask(createTaskStatus(AbstractStatus.State.RUNNING)));
} |
@Operation(summary = "get", description = "GET_WORKFLOWS_NOTES")
@GetMapping(value = "/{code}")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_PROCESS_DEFINITION_LIST)
public Result<ProcessDefinition> getWorkflow(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@PathVariable("code") Long code) {
ProcessDefinition processDefinition = processDefinitionService.getProcessDefinition(loginUser, code);
return Result.success(processDefinition);
} | @Test
public void testGetWorkflow() {
Mockito.when(processDefinitionService.getProcessDefinition(user, 1L))
.thenReturn(this.getProcessDefinition(name));
Result<ProcessDefinition> resourceResponse = workflowV2Controller.getWorkflow(user, 1L);
Assertions.assertEquals(this.getProcessDefinition(name), resourceResponse.getData());
} |
public List<String> toPrefix(String in) {
List<String> tokens = buildTokens(alignINClause(in));
List<String> output = new ArrayList<>();
List<String> stack = new ArrayList<>();
for (String token : tokens) {
if (isOperand(token)) {
if (token.equals(")")) {
while (openParanthesesFound(stack)) {
output.add(stack.remove(stack.size() - 1));
}
if (!stack.isEmpty()) {
// temporary fix for issue #189
stack.remove(stack.size() - 1);
}
} else {
while (openParanthesesFound(stack) && !hasHigherPrecedence(token, stack.get(stack.size() - 1))) {
output.add(stack.remove(stack.size() - 1));
}
stack.add(token);
}
} else {
output.add(token);
}
}
while (!stack.isEmpty()) {
output.add(stack.remove(stack.size() - 1));
}
return output;
} | @Test
public void testComplexStatementWithGreaterAndEquals() {
String s = "age>=5 AND ((( active = true ) AND (age = 23 )) OR age > 40) AND( salary>10 ) OR age=10";
List<String> list = parser.toPrefix(s);
assertEquals(Arrays.asList("age", "5", ">=", "active", "true", "=", "age", "23", "=", "AND", "age", "40", ">", "OR",
"AND", "salary", "10", ">", "AND", "age", "10", "=", "OR"), list);
} |
@Override
@Deprecated
public <K1, V1> KStream<K1, V1> flatTransform(final org.apache.kafka.streams.kstream.TransformerSupplier<? super K, ? super V, Iterable<KeyValue<K1, V1>>> transformerSupplier,
final String... stateStoreNames) {
Objects.requireNonNull(transformerSupplier, "transformerSupplier can't be null");
final String name = builder.newProcessorName(TRANSFORM_NAME);
return flatTransform(transformerSupplier, Named.as(name), stateStoreNames);
} | @Test
@SuppressWarnings("deprecation")
public void shouldNotAllowBadTransformerSupplierOnFlatTransformWithNamedAndStores() {
final org.apache.kafka.streams.kstream.Transformer<String, String, Iterable<KeyValue<String, String>>> transformer = flatTransformerSupplier.get();
final IllegalArgumentException exception = assertThrows(
IllegalArgumentException.class,
() -> testStream.flatTransform(() -> transformer, Named.as("flatTransformer"), "storeName")
);
assertThat(exception.getMessage(), containsString("#get() must return a new object each time it is called."));
} |
@VisibleForTesting
protected void copyFromHost(MapHost host) throws IOException {
// reset retryStartTime for a new host
retryStartTime = 0;
// Get completed maps on 'host'
List<TaskAttemptID> maps = scheduler.getMapsForHost(host);
// Sanity check to catch hosts with only 'OBSOLETE' maps,
// especially at the tail of large jobs
if (maps.size() == 0) {
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Fetcher " + id + " going to fetch from " + host + " for: " + maps);
}
// List of maps yet to be fetched
Set<TaskAttemptID> remaining = new HashSet<TaskAttemptID>(maps);
// Construct the url and connect
URL url = getMapOutputURL(host, maps);
DataInputStream input = null;
try {
input = openShuffleUrl(host, remaining, url);
if (input == null) {
return;
}
// Loop through available map-outputs and fetch them
// On any error, failedTasks is not null and we exit
// after putting back the remaining maps to the
// yet_to_be_fetched list and marking the failed tasks.
TaskAttemptID[] failedTasks = null;
while (!remaining.isEmpty() && failedTasks == null) {
try {
failedTasks = copyMapOutput(host, input, remaining, fetchRetryEnabled);
} catch (IOException e) {
IOUtils.cleanupWithLogger(LOG, input);
//
// Setup connection again if disconnected by NM
connection.disconnect();
// Get map output from remaining tasks only.
url = getMapOutputURL(host, remaining);
input = openShuffleUrl(host, remaining, url);
if (input == null) {
return;
}
}
}
if(failedTasks != null && failedTasks.length > 0) {
LOG.warn("copyMapOutput failed for tasks "+Arrays.toString(failedTasks));
scheduler.hostFailed(host.getHostName());
for(TaskAttemptID left: failedTasks) {
scheduler.copyFailed(left, host, true, false);
}
}
// Sanity check
if (failedTasks == null && !remaining.isEmpty()) {
throw new IOException("server didn't return all expected map outputs: "
+ remaining.size() + " left.");
}
input.close();
input = null;
} finally {
if (input != null) {
IOUtils.cleanupWithLogger(LOG, input);
input = null;
}
for (TaskAttemptID left : remaining) {
scheduler.putBackKnownMapOutput(host, left);
}
}
} | @SuppressWarnings("unchecked")
@Test(timeout=10000)
public void testCopyFromHostCompressFailure() throws Exception {
InMemoryMapOutput<Text, Text> immo = mock(InMemoryMapOutput.class);
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
.thenReturn(replyHash);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getInputStream()).thenReturn(in);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
.thenReturn(immo);
doThrow(new java.lang.InternalError()).when(immo)
.shuffle(any(MapHost.class), any(InputStream.class), anyLong(),
anyLong(), any(ShuffleClientMetrics.class), any(Reporter.class));
underTest.copyFromHost(host);
verify(connection)
.addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH,
encHash);
verify(ss, times(1)).copyFailed(map1ID, host, true, false);
} |
public void setPrefix(String prefix) {
this.prefix = prefix;
} | @Test
public void customMetricsPrefix() throws Exception {
iqtp.setPrefix(PREFIX);
iqtp.start();
assertThat(metricRegistry.getNames())
.overridingErrorMessage("Custom metrics prefix doesn't match")
.allSatisfy(name -> assertThat(name).startsWith(PREFIX));
iqtp.stop();
assertThat(metricRegistry.getMetrics())
.overridingErrorMessage("The default metrics prefix was changed")
.isEmpty();
} |
public TrackingResult track(Component component, Input<DefaultIssue> rawInput, @Nullable Input<DefaultIssue> targetInput) {
if (analysisMetadataHolder.isPullRequest()) {
return standardResult(pullRequestTracker.track(component, rawInput, targetInput));
}
if (isFirstAnalysisSecondaryBranch()) {
Tracking<DefaultIssue, DefaultIssue> tracking = referenceBranchTracker.track(component, rawInput);
return new TrackingResult(tracking.getMatchedRaws(), emptyMap(), empty(), tracking.getUnmatchedRaws());
}
return standardResult(tracker.track(component, rawInput));
} | @Test
public void delegate_pull_request_tracker() {
Branch branch = mock(Branch.class);
when(branch.getType()).thenReturn(BranchType.PULL_REQUEST);
when(analysisMetadataHolder.getBranch()).thenReturn(mock(Branch.class));
when(analysisMetadataHolder.isPullRequest()).thenReturn(true);
underTest.track(component, rawInput, targetInput);
verify(prBranchTracker).track(component, rawInput, targetInput);
verifyNoInteractions(tracker);
verifyNoInteractions(mergeBranchTracker);
} |
public static Permission getPermission(String name, String serviceName, String... actions) {
PermissionFactory permissionFactory = PERMISSION_FACTORY_MAP.get(serviceName);
if (permissionFactory == null) {
throw new IllegalArgumentException("No permissions found for service: " + serviceName);
}
return permissionFactory.create(name, actions);
} | @Test
public void getPermission_Semaphore() {
Permission permission = ActionConstants.getPermission("foo", SemaphoreServiceUtil.SERVICE_NAME);
assertNotNull(permission);
assertTrue(permission instanceof SemaphorePermission);
} |
static boolean isProviderEnabled(Configuration configuration, String serviceName) {
return SecurityOptions.forProvider(configuration, serviceName)
.get(DELEGATION_TOKEN_PROVIDER_ENABLED);
} | @Test
public void isProviderEnabledMustGiveBackFalseWhenDisabled() {
Configuration configuration = new Configuration();
configuration.setBoolean(CONFIG_PREFIX + ".test.enabled", false);
assertFalse(DefaultDelegationTokenManager.isProviderEnabled(configuration, "test"));
} |
@Override
public <T> void register(Class<T> remoteInterface, T object) {
register(remoteInterface, object, 1);
} | @Test
public void testNoAckWithResultInvocations() throws InterruptedException {
RedissonClient server = createInstance();
RedissonClient client = createInstance();
try {
server.getRemoteService().register(RemoteInterface.class, new RemoteImpl());
// no ack but an execution timeout of 1 second
RemoteInvocationOptions options = RemoteInvocationOptions.defaults().noAck().expectResultWithin(1, TimeUnit.SECONDS);
RemoteInterface service = client.getRemoteService().get(RemoteInterface.class, options);
service.voidMethod("noAck", 100L);
assertThat(service.resultMethod(21L)).isEqualTo(42);
try {
service.errorMethod();
Assertions.fail();
} catch (IOException e) {
assertThat(e.getMessage()).isEqualTo("Checking error throw");
}
try {
service.errorMethodWithCause();
Assertions.fail();
} catch (Exception e) {
assertThat(e.getCause()).isInstanceOf(ArithmeticException.class);
assertThat(e.getCause().getMessage()).isEqualTo("/ by zero");
}
try {
service.timeoutMethod();
Assertions.fail("noAck option should still wait for the server to return a response and throw if the execution timeout is exceeded");
} catch (Exception e) {
assertThat(e).isInstanceOf(RemoteServiceTimeoutException.class);
}
} finally {
client.shutdown();
server.shutdown();
}
} |
@Override
public Mono<GetUnversionedProfileResponse> getUnversionedProfile(final GetUnversionedProfileAnonymousRequest request) {
final ServiceIdentifier targetIdentifier =
ServiceIdentifierUtil.fromGrpcServiceIdentifier(request.getRequest().getServiceIdentifier());
// Callers must be authenticated to request unversioned profiles by PNI
if (targetIdentifier.identityType() == IdentityType.PNI) {
throw Status.UNAUTHENTICATED.asRuntimeException();
}
final Mono<Account> account = switch (request.getAuthenticationCase()) {
case GROUP_SEND_TOKEN ->
groupSendTokenUtil.checkGroupSendToken(request.getGroupSendToken(), List.of(targetIdentifier))
.then(Mono.fromFuture(() -> accountsManager.getByServiceIdentifierAsync(targetIdentifier)))
.flatMap(Mono::justOrEmpty)
.switchIfEmpty(Mono.error(Status.NOT_FOUND.asException()));
case UNIDENTIFIED_ACCESS_KEY ->
getTargetAccountAndValidateUnidentifiedAccess(targetIdentifier, request.getUnidentifiedAccessKey().toByteArray());
default -> Mono.error(Status.INVALID_ARGUMENT.asException());
};
return account.map(targetAccount -> ProfileGrpcHelper.buildUnversionedProfileResponse(targetIdentifier,
null,
targetAccount,
profileBadgeConverter));
} | @Test
void getUnversionedProfileExpiredGroupSendEndorsement() throws Exception {
final AciServiceIdentifier serviceIdentifier = new AciServiceIdentifier(UUID.randomUUID());
// Expirations must be on a day boundary; pick one in the recent past
final Instant expiration = Instant.now().truncatedTo(ChronoUnit.DAYS);
final byte[] token = AuthHelper.validGroupSendToken(SERVER_SECRET_PARAMS, List.of(serviceIdentifier), expiration);
final GetUnversionedProfileAnonymousRequest request = GetUnversionedProfileAnonymousRequest.newBuilder()
.setGroupSendToken(ByteString.copyFrom(token))
.setRequest(GetUnversionedProfileRequest.newBuilder()
.setServiceIdentifier(
ServiceIdentifierUtil.toGrpcServiceIdentifier(serviceIdentifier)))
.build();
assertStatusException(Status.UNAUTHENTICATED, () -> unauthenticatedServiceStub().getUnversionedProfile(request));
} |
public RuleData obtainRuleData(final String pluginName, final String path) {
final Map<String, RuleData> lruMap = RULE_DATA_MAP.get(pluginName);
return Optional.ofNullable(lruMap).orElse(Maps.newHashMap()).get(path);
} | @Test
public void testObtainRuleData() throws NoSuchFieldException, IllegalAccessException {
RuleData cacheRuleData = RuleData.builder().id("1").pluginName(mockPluginName1).sort(1).build();
ConcurrentHashMap<String, WindowTinyLFUMap<String, RuleData>> ruleMap = getFieldByName(ruleMapStr);
ruleMap.put(mockPluginName1, new WindowTinyLFUMap<>(100, 100, Boolean.FALSE));
ruleMap.get(mockPluginName1).put(path1, cacheRuleData);
RuleData firstRuleDataCache = MatchDataCache.getInstance().obtainRuleData(mockPluginName1, path1);
assertEquals(cacheRuleData, firstRuleDataCache);
ruleMap.clear();
} |
public static BadRequestException accessKeyNotExists() {
return new BadRequestException("accessKey not exist.");
} | @Test
public void testAccessKeyNotExists(){
BadRequestException accessKeyNotExists = BadRequestException.accessKeyNotExists();
assertEquals("accessKey not exist.", accessKeyNotExists.getMessage());
} |
public static FunctionConfig validateUpdate(FunctionConfig existingConfig, FunctionConfig newConfig) {
FunctionConfig mergedConfig = existingConfig.toBuilder().build();
if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
throw new IllegalArgumentException("Tenants differ");
}
if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
throw new IllegalArgumentException("Namespaces differ");
}
if (!existingConfig.getName().equals(newConfig.getName())) {
throw new IllegalArgumentException("Function Names differ");
}
if (!StringUtils.isEmpty(newConfig.getClassName())) {
mergedConfig.setClassName(newConfig.getClassName());
}
if (!StringUtils.isEmpty(newConfig.getJar())) {
mergedConfig.setJar(newConfig.getJar());
}
if (newConfig.getInputSpecs() == null) {
newConfig.setInputSpecs(new HashMap<>());
}
if (mergedConfig.getInputSpecs() == null) {
mergedConfig.setInputSpecs(new HashMap<>());
}
if (newConfig.getInputs() != null) {
newConfig.getInputs().forEach((topicName -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder().isRegexPattern(false).build());
}));
}
if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
ConsumerConfig.builder()
.isRegexPattern(true)
.build());
}
if (newConfig.getCustomSerdeInputs() != null) {
newConfig.getCustomSerdeInputs().forEach((topicName, serdeClassName) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.serdeClassName(serdeClassName)
.isRegexPattern(false)
.build());
});
}
if (newConfig.getCustomSchemaInputs() != null) {
newConfig.getCustomSchemaInputs().forEach((topicName, schemaClassname) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.schemaType(schemaClassname)
.isRegexPattern(false)
.build());
});
}
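        // Input topics may only be re-configured, never added, and their regex flag must not change.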
if (!newConfig.getInputSpecs().isEmpty()) {
newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
if (!existingConfig.getInputSpecs().containsKey(topicName)) {
throw new IllegalArgumentException("Input Topics cannot be altered");
}
if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
throw new IllegalArgumentException(
"isRegexPattern for input topic " + topicName + " cannot be altered");
}
mergedConfig.getInputSpecs().put(topicName, consumerConfig);
});
}
if (!StringUtils.isEmpty(newConfig.getOutputSerdeClassName()) && !newConfig.getOutputSerdeClassName()
.equals(existingConfig.getOutputSerdeClassName())) {
throw new IllegalArgumentException("Output Serde mismatch");
}
if (!StringUtils.isEmpty(newConfig.getOutputSchemaType()) && !newConfig.getOutputSchemaType()
.equals(existingConfig.getOutputSchemaType())) {
throw new IllegalArgumentException("Output Schema mismatch");
}
if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
mergedConfig.setLogTopic(newConfig.getLogTopic());
}
if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
.equals(existingConfig.getProcessingGuarantees())) {
throw new IllegalArgumentException("Processing Guarantees cannot be altered");
}
if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
.equals(existingConfig.getRetainOrdering())) {
throw new IllegalArgumentException("Retain Ordering cannot be altered");
}
if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
.equals(existingConfig.getRetainKeyOrdering())) {
throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
}
if (!StringUtils.isEmpty(newConfig.getOutput())) {
mergedConfig.setOutput(newConfig.getOutput());
}
if (newConfig.getUserConfig() != null) {
mergedConfig.setUserConfig(newConfig.getUserConfig());
}
if (newConfig.getSecrets() != null) {
mergedConfig.setSecrets(newConfig.getSecrets());
}
if (newConfig.getRuntime() != null && !newConfig.getRuntime().equals(existingConfig.getRuntime())) {
throw new IllegalArgumentException("Runtime cannot be altered");
}
if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
throw new IllegalArgumentException("AutoAck cannot be altered");
}
if (newConfig.getMaxMessageRetries() != null) {
mergedConfig.setMaxMessageRetries(newConfig.getMaxMessageRetries());
}
if (!StringUtils.isEmpty(newConfig.getDeadLetterTopic())) {
mergedConfig.setDeadLetterTopic(newConfig.getDeadLetterTopic());
}
if (!StringUtils.isEmpty(newConfig.getSubName()) && !newConfig.getSubName()
.equals(existingConfig.getSubName())) {
throw new IllegalArgumentException("Subscription Name cannot be altered");
}
if (newConfig.getParallelism() != null) {
mergedConfig.setParallelism(newConfig.getParallelism());
}
if (newConfig.getResources() != null) {
mergedConfig
.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
}
if (newConfig.getWindowConfig() != null) {
mergedConfig.setWindowConfig(newConfig.getWindowConfig());
}
if (newConfig.getTimeoutMs() != null) {
mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
}
if (newConfig.getCleanupSubscription() != null) {
mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
}
if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
}
if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
}
if (newConfig.getProducerConfig() != null) {
mergedConfig.setProducerConfig(newConfig.getProducerConfig());
}
return mergedConfig;
} | @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "isRegexPattern for input topic test-input cannot be altered")
public void testMergeDifferentInputSpecWithRegexChange() {
FunctionConfig functionConfig = createFunctionConfig();
Map<String, ConsumerConfig> inputSpecs = new HashMap<>();
inputSpecs.put("test-input", ConsumerConfig.builder().isRegexPattern(false).serdeClassName("my-serde").build());
FunctionConfig newFunctionConfig = createUpdatedFunctionConfig("inputSpecs", inputSpecs);
FunctionConfigUtils.validateUpdate(functionConfig, newFunctionConfig);
} |
static String encodeNumeric(NumericType numericType) {
byte[] rawValue = toByteArray(numericType);
byte paddingValue = getPaddingValue(numericType);
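        // getPaddingValue is expected to yield 0xff for negative values, so the padded
        // 32-byte word keeps its two's-complement sign.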
byte[] paddedRawValue = new byte[MAX_BYTE_LENGTH];
        if (paddingValue != 0) {
            Arrays.fill(paddedRawValue, paddingValue);
        }
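        // Right-align the raw bytes within the 32-byte word (big-endian layout).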
System.arraycopy(
rawValue, 0, paddedRawValue, MAX_BYTE_LENGTH - rawValue.length, rawValue.length);
return Numeric.toHexStringNoPrefix(paddedRawValue);
} | @Test
public void testIntEncode() {
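        // For each Int<N>, zero, the maximum (2^(N-1) - 1), and the minimum (-2^(N-1))
        // must encode to a sign-extended 32-byte word.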
Int zero8 = new Int8(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero8),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max8 = new Int8(BigInteger.valueOf(127));
assertEquals(
TypeEncoder.encodeNumeric(max8),
("000000000000000000000000000000000000000000000000000000000000007f"));
Int min8 = new Int8(BigInteger.valueOf(-128));
assertEquals(
TypeEncoder.encodeNumeric(min8),
("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80"));
Int zero16 = new Int16(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero16),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max16 = new Int16(BigInteger.valueOf(32767));
assertEquals(
TypeEncoder.encodeNumeric(max16),
("0000000000000000000000000000000000000000000000000000000000007fff"));
Int min16 = new Int16(BigInteger.valueOf(-32768));
assertEquals(
TypeEncoder.encodeNumeric(min16),
("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8000"));
Int zero24 = new Int24(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero24),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max24 = new Int24(BigInteger.valueOf(8388607));
assertEquals(
TypeEncoder.encodeNumeric(max24),
("00000000000000000000000000000000000000000000000000000000007fffff"));
Int min24 = new Int24(BigInteger.valueOf(-8388608));
assertEquals(
TypeEncoder.encodeNumeric(min24),
("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff800000"));
Int zero32 = new Int32(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero32),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max32 = new Int32(BigInteger.valueOf(2147483647));
assertEquals(
TypeEncoder.encodeNumeric(max32),
("000000000000000000000000000000000000000000000000000000007fffffff"));
Int min32 = new Int32(BigInteger.valueOf(-2147483648));
assertEquals(
TypeEncoder.encodeNumeric(min32),
("ffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000"));
Int zero40 = new Int40(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero40),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max40 = new Int40(BigInteger.valueOf(549755813887L));
assertEquals(
TypeEncoder.encodeNumeric(max40),
("0000000000000000000000000000000000000000000000000000007fffffffff"));
Int min40 = new Int40(BigInteger.valueOf(-549755813888L));
assertEquals(
TypeEncoder.encodeNumeric(min40),
("ffffffffffffffffffffffffffffffffffffffffffffffffffffff8000000000"));
Int zero48 = new Int48(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero48),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max48 = new Int48(BigInteger.valueOf(140737488355327L));
assertEquals(
TypeEncoder.encodeNumeric(max48),
("00000000000000000000000000000000000000000000000000007fffffffffff"));
Int min48 = new Int48(BigInteger.valueOf(-140737488355328L));
assertEquals(
TypeEncoder.encodeNumeric(min48),
("ffffffffffffffffffffffffffffffffffffffffffffffffffff800000000000"));
        Int zero56 = new Int56(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero56),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max56 = new Int56(BigInteger.valueOf(36028797018963967L));
assertEquals(
TypeEncoder.encodeNumeric(max56),
("000000000000000000000000000000000000000000000000007fffffffffffff"));
Int min56 = new Int56(BigInteger.valueOf(-36028797018963968L));
assertEquals(
TypeEncoder.encodeNumeric(min56),
("ffffffffffffffffffffffffffffffffffffffffffffffffff80000000000000"));
Int zero64 = new Int64(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero64),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max64 = new Int64(BigInteger.valueOf(java.lang.Long.MAX_VALUE));
assertEquals(
TypeEncoder.encodeNumeric(max64),
("0000000000000000000000000000000000000000000000007fffffffffffffff"));
Int min64 = new Int64(BigInteger.valueOf(java.lang.Long.MIN_VALUE));
assertEquals(
TypeEncoder.encodeNumeric(min64),
("ffffffffffffffffffffffffffffffffffffffffffffffff8000000000000000"));
Int zero72 = new Int72(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero72),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max72 = new Int72(new BigInteger("2361183241434822606847"));
assertEquals(
TypeEncoder.encodeNumeric(max72),
("00000000000000000000000000000000000000000000007fffffffffffffffff"));
Int min72 = new Int72(new BigInteger("-2361183241434822606848"));
assertEquals(
TypeEncoder.encodeNumeric(min72),
("ffffffffffffffffffffffffffffffffffffffffffffff800000000000000000"));
Int zero80 = new Int80(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero80),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max80 = new Int80(new BigInteger("604462909807314587353087"));
assertEquals(
TypeEncoder.encodeNumeric(max80),
("000000000000000000000000000000000000000000007fffffffffffffffffff"));
Int min80 = new Int80(new BigInteger("-604462909807314587353088"));
assertEquals(
TypeEncoder.encodeNumeric(min80),
("ffffffffffffffffffffffffffffffffffffffffffff80000000000000000000"));
Int zero88 = new Int88(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero88),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max88 = new Int88(new BigInteger("154742504910672534362390527"));
assertEquals(
TypeEncoder.encodeNumeric(max88),
("0000000000000000000000000000000000000000007fffffffffffffffffffff"));
Int min88 = new Int88(new BigInteger("-154742504910672534362390528"));
assertEquals(
TypeEncoder.encodeNumeric(min88),
("ffffffffffffffffffffffffffffffffffffffffff8000000000000000000000"));
Int zero96 = new Int96(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero96),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max96 = new Int96(new BigInteger("39614081257132168796771975167"));
assertEquals(
TypeEncoder.encodeNumeric(max96),
("00000000000000000000000000000000000000007fffffffffffffffffffffff"));
Int min96 = new Int96(new BigInteger("-39614081257132168796771975168"));
assertEquals(
TypeEncoder.encodeNumeric(min96),
("ffffffffffffffffffffffffffffffffffffffff800000000000000000000000"));
Int zero104 = new Int104(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero104),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max104 = new Int104(new BigInteger("10141204801825835211973625643007"));
assertEquals(
TypeEncoder.encodeNumeric(max104),
("000000000000000000000000000000000000007fffffffffffffffffffffffff"));
Int min104 = new Int104(new BigInteger("-10141204801825835211973625643008"));
assertEquals(
TypeEncoder.encodeNumeric(min104),
("ffffffffffffffffffffffffffffffffffffff80000000000000000000000000"));
Int zero112 = new Int112(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero112),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max112 = new Int112(new BigInteger("2596148429267413814265248164610047"));
assertEquals(
TypeEncoder.encodeNumeric(max112),
("0000000000000000000000000000000000007fffffffffffffffffffffffffff"));
Int min112 = new Int112(new BigInteger("-2596148429267413814265248164610048"));
assertEquals(
TypeEncoder.encodeNumeric(min112),
("ffffffffffffffffffffffffffffffffffff8000000000000000000000000000"));
Int zero120 = new Int120(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero120),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max120 = new Int120(new BigInteger("664613997892457936451903530140172287"));
assertEquals(
TypeEncoder.encodeNumeric(max120),
("00000000000000000000000000000000007fffffffffffffffffffffffffffff"));
Int min120 = new Int120(new BigInteger("-664613997892457936451903530140172288"));
assertEquals(
TypeEncoder.encodeNumeric(min120),
("ffffffffffffffffffffffffffffffffff800000000000000000000000000000"));
Int zero128 = new Int128(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero128),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max128 = new Int128(new BigInteger("170141183460469231731687303715884105727"));
assertEquals(
TypeEncoder.encodeNumeric(max128),
("000000000000000000000000000000007fffffffffffffffffffffffffffffff"));
Int min128 = new Int128(new BigInteger("-170141183460469231731687303715884105728"));
assertEquals(
TypeEncoder.encodeNumeric(min128),
("ffffffffffffffffffffffffffffffff80000000000000000000000000000000"));
Int zero136 = new Int136(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero136),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max136 = new Int136(new BigInteger("43556142965880123323311949751266331066367"));
assertEquals(
TypeEncoder.encodeNumeric(max136),
("0000000000000000000000000000007fffffffffffffffffffffffffffffffff"));
Int min136 = new Int136(new BigInteger("-43556142965880123323311949751266331066368"));
assertEquals(
TypeEncoder.encodeNumeric(min136),
("ffffffffffffffffffffffffffffff8000000000000000000000000000000000"));
Int zero144 = new Int144(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero144),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max144 = new Int144(new BigInteger("11150372599265311570767859136324180752990207"));
assertEquals(
TypeEncoder.encodeNumeric(max144),
("00000000000000000000000000007fffffffffffffffffffffffffffffffffff"));
Int min144 = new Int144(new BigInteger("-11150372599265311570767859136324180752990208"));
assertEquals(
TypeEncoder.encodeNumeric(min144),
("ffffffffffffffffffffffffffff800000000000000000000000000000000000"));
Int zero152 = new Int152(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero152),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max152 = new Int152(new BigInteger("2854495385411919762116571938898990272765493247"));
assertEquals(
TypeEncoder.encodeNumeric(max152),
("000000000000000000000000007fffffffffffffffffffffffffffffffffffff"));
Int min152 = new Int152(new BigInteger("-2854495385411919762116571938898990272765493248"));
assertEquals(
TypeEncoder.encodeNumeric(min152),
("ffffffffffffffffffffffffff80000000000000000000000000000000000000"));
Int zero160 = new Int160(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero160),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max160 = new Int160(new BigInteger("730750818665451459101842416358141509827966271487"));
assertEquals(
TypeEncoder.encodeNumeric(max160),
("0000000000000000000000007fffffffffffffffffffffffffffffffffffffff"));
Int min160 =
new Int160(new BigInteger("-730750818665451459101842416358141509827966271488"));
assertEquals(
TypeEncoder.encodeNumeric(min160),
("ffffffffffffffffffffffff8000000000000000000000000000000000000000"));
Int zero168 = new Int168(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero168),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max168 =
new Int168(new BigInteger("187072209578355573530071658587684226515959365500927"));
assertEquals(
TypeEncoder.encodeNumeric(max168),
("00000000000000000000007fffffffffffffffffffffffffffffffffffffffff"));
Int min168 =
new Int168(new BigInteger("-187072209578355573530071658587684226515959365500928"));
assertEquals(
TypeEncoder.encodeNumeric(min168),
("ffffffffffffffffffffff800000000000000000000000000000000000000000"));
Int zero176 = new Int176(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero176),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max176 =
new Int176(new BigInteger("47890485652059026823698344598447161988085597568237567"));
assertEquals(
TypeEncoder.encodeNumeric(max176),
("000000000000000000007fffffffffffffffffffffffffffffffffffffffffff"));
Int min176 =
new Int176(
new BigInteger("-47890485652059026823698344598447161988085597568237568"));
assertEquals(
TypeEncoder.encodeNumeric(min176),
("ffffffffffffffffffff80000000000000000000000000000000000000000000"));
Int zero184 = new Int184(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero184),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max184 =
new Int184(
new BigInteger("12259964326927110866866776217202473468949912977468817407"));
assertEquals(
TypeEncoder.encodeNumeric(max184),
("0000000000000000007fffffffffffffffffffffffffffffffffffffffffffff"));
Int min184 =
new Int184(
new BigInteger(
"-12259964326927110866866776217202473468949912977468817408"));
assertEquals(
TypeEncoder.encodeNumeric(min184),
("ffffffffffffffffff8000000000000000000000000000000000000000000000"));
Int zero192 = new Int192(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero192),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max192 =
new Int192(
new BigInteger(
"3138550867693340381917894711603833208051177722232017256447"));
assertEquals(
TypeEncoder.encodeNumeric(max192),
("00000000000000007fffffffffffffffffffffffffffffffffffffffffffffff"));
Int min192 =
new Int192(
new BigInteger(
"-3138550867693340381917894711603833208051177722232017256448"));
assertEquals(
TypeEncoder.encodeNumeric(min192),
("ffffffffffffffff800000000000000000000000000000000000000000000000"));
Int zero200 = new Int200(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero200),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max200 =
new Int200(
new BigInteger(
"803469022129495137770981046170581301261101496891396417650687"));
assertEquals(
TypeEncoder.encodeNumeric(max200),
("000000000000007fffffffffffffffffffffffffffffffffffffffffffffffff"));
Int min200 =
new Int200(
new BigInteger(
"-803469022129495137770981046170581301261101496891396417650688"));
assertEquals(
TypeEncoder.encodeNumeric(min200),
("ffffffffffffff80000000000000000000000000000000000000000000000000"));
Int zero208 = new Int208(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero208),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max208 =
new Int208(
new BigInteger(
"205688069665150755269371147819668813122841983204197482918576127"));
assertEquals(
TypeEncoder.encodeNumeric(max208),
("0000000000007fffffffffffffffffffffffffffffffffffffffffffffffffff"));
Int min208 =
new Int208(
new BigInteger(
"-205688069665150755269371147819668813122841983204197482918576128"));
assertEquals(
TypeEncoder.encodeNumeric(min208),
("ffffffffffff8000000000000000000000000000000000000000000000000000"));
Int zero216 = new Int216(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero216),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max216 =
new Int216(
new BigInteger(
"52656145834278593348959013841835216159447547700274555627155488767"));
assertEquals(
TypeEncoder.encodeNumeric(max216),
("00000000007fffffffffffffffffffffffffffffffffffffffffffffffffffff"));
Int min216 =
new Int216(
new BigInteger(
"-52656145834278593348959013841835216159447547700274555627155488768"));
assertEquals(
TypeEncoder.encodeNumeric(min216),
("ffffffffff800000000000000000000000000000000000000000000000000000"));
Int zero224 = new Int224(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero224),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max224 =
new Int224(
new BigInteger(
"13479973333575319897333507543509815336818572211270286240551805124607"));
assertEquals(
TypeEncoder.encodeNumeric(max224),
("000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
Int min224 =
new Int224(
new BigInteger(
"-13479973333575319897333507543509815336818572211270286240551805124608"));
assertEquals(
TypeEncoder.encodeNumeric(min224),
("ffffffff80000000000000000000000000000000000000000000000000000000"));
Int zero232 = new Int232(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero232),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max232 =
new Int232(
new BigInteger(
"3450873173395281893717377931138512726225554486085193277581262111899647"));
assertEquals(
TypeEncoder.encodeNumeric(max232),
("0000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
Int min232 =
new Int232(
new BigInteger(
"-3450873173395281893717377931138512726225554486085193277581262111899648"));
assertEquals(
TypeEncoder.encodeNumeric(min232),
("ffffff8000000000000000000000000000000000000000000000000000000000"));
Int zero240 = new Int240(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero240),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max240 =
new Int240(
new BigInteger(
"883423532389192164791648750371459257913741948437809479060803100646309887"));
assertEquals(
TypeEncoder.encodeNumeric(max240),
("00007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
Int min240 =
new Int240(
new BigInteger(
"-883423532389192164791648750371459257913741948437809479060803100646309888"));
assertEquals(
TypeEncoder.encodeNumeric(min240),
("ffff800000000000000000000000000000000000000000000000000000000000"));
Int zero248 = new Int248(BigInteger.ZERO);
assertEquals(
TypeEncoder.encodeNumeric(zero248),
("0000000000000000000000000000000000000000000000000000000000000000"));
Int max248 =
new Int248(
new BigInteger(
"226156424291633194186662080095093570025917938800079226639565593765455331327"));
assertEquals(
TypeEncoder.encodeNumeric(max248),
("007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
Int min248 =
new Int248(
new BigInteger(
"-226156424291633194186662080095093570025917938800079226639565593765455331328"));
assertEquals(
TypeEncoder.encodeNumeric(min248),
("ff80000000000000000000000000000000000000000000000000000000000000"));
Int minusOne = new Int(BigInteger.valueOf(-1));
assertEquals(
TypeEncoder.encodeNumeric(minusOne),
("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"));
} |