focal_method | test_case
---|---
public PackageRepository findPackageRepositoryWithPackageIdOrBomb(String packageId) {
PackageRepository packageRepository = findPackageRepositoryHaving(packageId);
if (packageRepository == null) {
throw new RuntimeException(format("Could not find repository for given package id:[%s]", packageId));
}
return packageRepository;
} | @Test
void shouldThrowExceptionWhenRepositoryForGivenPackageNotFound() throws Exception {
PackageRepositories packageRepositories = new PackageRepositories();
try {
packageRepositories.findPackageRepositoryWithPackageIdOrBomb("invalid");
fail("should have thrown exception for not finding package repository");
} catch (RuntimeException e) {
assertThat(e.getMessage()).isEqualTo("Could not find repository for given package id:[invalid]");
}
} |
public static long estimateSize(StructType tableSchema, long totalRecords) {
if (totalRecords == Long.MAX_VALUE) {
return totalRecords;
}
long result;
try {
result = LongMath.checkedMultiply(tableSchema.defaultSize(), totalRecords);
} catch (ArithmeticException e) {
result = Long.MAX_VALUE;
}
return result;
} | @Test
public void testEstimateSize() throws IOException {
long tableSize = SparkSchemaUtil.estimateSize(SparkSchemaUtil.convert(TEST_SCHEMA), 1);
Assert.assertEquals("estimateSize matches with expected approximation", 24, tableSize);
} |
@Override
public String getFileId(final Path file) throws BackgroundException {
try {
if(StringUtils.isNotBlank(file.attributes().getFileId())) {
return file.attributes().getFileId();
}
final String cached = super.getFileId(file);
if(cached != null) {
if(log.isDebugEnabled()) {
log.debug(String.format("Return cached fileid %s for file %s", cached, file));
}
return cached;
}
if(file.isRoot()) {
return ROOT;
}
int offset = 0;
UiFsModel fsModel;
final int chunksize = new HostPreferences(session.getHost()).getInteger("eue.listing.chunksize");
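// Page through the parent's listing in chunks until a child with a matching (case-insensitive) name is found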
do {
final String parentResourceId = this.getFileId(file.getParent());
switch(parentResourceId) {
case EueResourceIdProvider.ROOT:
case EueResourceIdProvider.TRASH:
fsModel = new ListResourceAliasApi(new EueApiClient(session)).resourceAliasAliasGet(parentResourceId,
null, null, null, null, chunksize, offset, null, null);
break;
default:
fsModel = new ListResourceApi(new EueApiClient(session)).resourceResourceIdGet(parentResourceId,
null, null, null, null, chunksize, offset, null, null);
}
for(Children child : fsModel.getUifs().getChildren()) {
// Case insensitive
if(child.getUifs().getName().equalsIgnoreCase(normalizer.normalize(file.getName()).toString())) {
return getResourceIdFromResourceUri(child.getUifs().getResourceURI());
}
}
offset += chunksize;
}
while(fsModel.getUifs().getChildren().size() == chunksize);
throw new NotfoundException(file.getAbsolute());
}
catch(ApiException e) {
throw new EueExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
} | @Test
public void getFileIdTrash() throws Exception {
assertEquals(EueResourceIdProvider.TRASH, new EueResourceIdProvider(session).getFileId(new Path("Gelöschte Dateien", EnumSet.of(directory))));
} |
public ExecutionContext generateExecutionContext(final QueryContext queryContext, final RuleMetaData globalRuleMetaData,
final ConfigurationProperties props, final ConnectionContext connectionContext) {
check(queryContext);
RouteContext routeContext = route(queryContext, globalRuleMetaData, props, connectionContext);
SQLRewriteResult rewriteResult = rewrite(queryContext, globalRuleMetaData, props, routeContext, connectionContext);
ExecutionContext result = createExecutionContext(queryContext, routeContext, rewriteResult);
logSQL(queryContext, props, result);
return result;
} | @Test
void assertGenerateExecutionContext() {
DatabaseType databaseType = TypedSPILoader.getService(DatabaseType.class, "FIXTURE");
SQLStatementContext sqlStatementContext = mock(CommonSQLStatementContext.class);
when(sqlStatementContext.getSqlStatement()).thenReturn(mock(SelectStatement.class));
when(sqlStatementContext.getDatabaseType()).thenReturn(databaseType);
ConnectionContext connectionContext = mock(ConnectionContext.class);
when(connectionContext.getCurrentDatabaseName()).thenReturn(Optional.of(DefaultDatabase.LOGIC_NAME));
ShardingSphereMetaData metaData = mock(ShardingSphereMetaData.class);
ResourceMetaData resourceMetaData = mock(ResourceMetaData.class, RETURNS_DEEP_STUBS);
when(resourceMetaData.getStorageUnits()).thenReturn(Collections.emptyMap());
ShardingSphereDatabase database = new ShardingSphereDatabase(DefaultDatabase.LOGIC_NAME, databaseType, resourceMetaData, new RuleMetaData(mockShardingSphereRule()), Collections.emptyMap());
when(metaData.containsDatabase(DefaultDatabase.LOGIC_NAME)).thenReturn(true);
when(metaData.getDatabase(DefaultDatabase.LOGIC_NAME)).thenReturn(database);
QueryContext queryContext = new QueryContext(sqlStatementContext, "SELECT * FROM tbl", Collections.emptyList(), new HintValueContext(), connectionContext, metaData);
ConfigurationProperties props = new ConfigurationProperties(PropertiesBuilder.build(new Property(ConfigurationPropertyKey.SQL_SHOW.getKey(), Boolean.TRUE.toString())));
ExecutionContext actual = new KernelProcessor().generateExecutionContext(queryContext, new RuleMetaData(mockShardingSphereRule()), props, connectionContext);
assertThat(actual.getExecutionUnits().size(), is(1));
} |
public List<ParsedTerm> filterElementsContainingUsefulInformation(final Map<String, List<ParsedTerm>> parsedTermsGroupedByField) {
return parsedTermsGroupedByField.values()
.stream()
.map(this::filterElementsContainingUsefulInformation)
.flatMap(Collection::stream)
.collect(Collectors.toList());
} | @Test
void limitsToOneTermOnlyIfMultipleTermsWithoutPosition() {
final Map<String, List<ParsedTerm>> fieldTerms = Map.of("field", List.of(
ParsedTerm.create("field", "oh!"),
ParsedTerm.create("field", "ah!"),
ParsedTerm.create("field", "eh!")
));
assertThat(toTest.filterElementsContainingUsefulInformation(fieldTerms))
.hasSize(1)
.contains(ParsedTerm.create("field", "oh!"));
} |
@Override
protected void copy(List<AzfsResourceId> srcPaths, List<AzfsResourceId> destPaths)
throws IOException {
checkArgument(
srcPaths.size() == destPaths.size(),
"sizes of source paths and destination paths do not match");
Iterator<AzfsResourceId> sourcePathsIterator = srcPaths.iterator();
Iterator<AzfsResourceId> destinationPathsIterator = destPaths.iterator();
while (sourcePathsIterator.hasNext()) {
final AzfsResourceId sourcePath = sourcePathsIterator.next();
final AzfsResourceId destinationPath = destinationPathsIterator.next();
copy(sourcePath, destinationPath);
}
} | @Test
@SuppressWarnings("CheckReturnValue")
public void testCopy() throws IOException {
List<AzfsResourceId> src =
new ArrayList<>(
Arrays.asList(AzfsResourceId.fromComponents("account", "container", "from")));
List<AzfsResourceId> dest =
new ArrayList<>(Arrays.asList(AzfsResourceId.fromComponents("account", "container", "to")));
when(mockedBlobClient.exists()).thenReturn(true);
azureBlobStoreFileSystem.copy(src, dest);
verify(mockedBlobClient, times(1)).copyFromUrl(any(String.class));
} |
public void verifyAndValidate(final String jwt) {
try {
Jws<Claims> claimsJws = Jwts.parser()
.verifyWith(tokenConfigurationParameter.getPublicKey())
.build()
.parseSignedClaims(jwt);
// Log the claims for debugging purposes
Claims claims = claimsJws.getPayload();
log.info("Token claims: {}", claims);
// Additional checks (e.g., expiration, issuer, etc.)
if (claims.getExpiration().before(new Date())) {
throw new JwtException("Token has expired");
}
log.info("Token is valid");
} catch (ExpiredJwtException e) {
log.error("Token has expired", e);
throw new ResponseStatusException(HttpStatus.UNAUTHORIZED, "Token has expired", e);
} catch (JwtException e) {
log.error("Invalid JWT token", e);
throw new ResponseStatusException(HttpStatus.UNAUTHORIZED, "Invalid JWT token", e);
} catch (Exception e) {
log.error("Error validating token", e);
throw new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR, "Error validating token", e);
}
} | @Test
void givenTokens_whenVerifyAndValidate_thenValidateEachToken() {
// Given
Set<String> tokens = Set.of(
Jwts.builder().claim("user_id", "12345").issuedAt(new Date()).expiration(new Date(System.currentTimeMillis() + 86400000L)).signWith(keyPair.getPrivate()).compact(),
Jwts.builder().claim("user_id", "67890").issuedAt(new Date()).expiration(new Date(System.currentTimeMillis() + 86400000L)).signWith(keyPair.getPrivate()).compact()
);
// When
tokenService.verifyAndValidate(tokens);
} |
public UniVocityFixedDataFormat setFieldLengths(int[] fieldLengths) {
this.fieldLengths = fieldLengths;
return this;
} | @Test
public void shouldConfigureHeaderExtractionEnabled() {
UniVocityFixedDataFormat dataFormat = new UniVocityFixedDataFormat()
.setFieldLengths(new int[] { 1, 2, 3 })
.setHeaderExtractionEnabled(true);
assertTrue(dataFormat.getHeaderExtractionEnabled());
assertTrue(dataFormat.createAndConfigureParserSettings().isHeaderExtractionEnabled());
} |
public H3IndexResolution getResolution() {
return _resolution;
} | @Test
public void withDisabledNull()
throws JsonProcessingException {
String confStr = "{\"disabled\": null}";
H3IndexConfig config = JsonUtils.stringToObject(confStr, H3IndexConfig.class);
assertFalse(config.isDisabled(), "Unexpected disabled");
assertNull(config.getResolution(), "Unexpected resolution");
} |
public ConcurrentHashMap<String, DefaultExecutor> getExecutorMap() {
return executorMap;
} | @Test
void testInvalidCommand() throws InterpreterException {
if (System.getProperty("os.name").startsWith("Windows")) {
result = shell.interpret("invalid_command\ndir", context);
} else {
result = shell.interpret("invalid_command\nls", context);
}
assertEquals(Code.SUCCESS, result.code());
assertTrue(shell.getExecutorMap().isEmpty());
} |
@Override
public byte[] getValueFromText(String text) {
throw new UnsupportedOperationException(format("Type of input not handled: %s", JMSPublisherGui.USE_TEXT_RSC));
} | @Test
public void getValueFromText() {
Assertions.assertThrows(UnsupportedOperationException.class, () -> render.getValueFromText(""));
} |
@Override
public void init(TbContext ctx, TbNodeConfiguration configuration) throws TbNodeException {
this.config = TbNodeUtils.convert(configuration, TbCheckRelationNodeConfiguration.class);
if (config.isCheckForSingleEntity()) {
if (StringUtils.isEmpty(config.getEntityType()) || StringUtils.isEmpty(config.getEntityId())) {
throw new TbNodeException("Entity should be specified!");
}
this.singleEntityId = EntityIdFactory.getByTypeAndId(config.getEntityType(), config.getEntityId());
ctx.checkTenantEntity(singleEntityId);
}
} | @Test
void givenDefaultConfig_whenInit_then_throwException() {
// GIVEN
var config = new TbCheckRelationNodeConfiguration().defaultConfiguration();
// WHEN
var exception = assertThrows(TbNodeException.class, () -> node.init(ctx, new TbNodeConfiguration(JacksonUtil.valueToTree(config))));
// THEN
assertThat(exception.getMessage()).isEqualTo("Entity should be specified!");
} |
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback);
return bean;
} | @Test
void beansWithMethodsUsingJobContextAnnotatedWithRecurringCronAnnotationWillAutomaticallyBeRegistered() {
// GIVEN
final RecurringJobPostProcessor recurringJobPostProcessor = getRecurringJobPostProcessor();
// WHEN
recurringJobPostProcessor.postProcessAfterInitialization(new MyServiceWithRecurringCronJobUsingJobContext(), "not important");
// THEN
verify(jobScheduler).scheduleRecurrently(eq("my-recurring-job"), jobDetailsArgumentCaptor.capture(), eq(CronExpression.create("0 0/15 * * *")), any(ZoneId.class));
final JobDetails actualJobDetails = jobDetailsArgumentCaptor.getValue();
assertThat(actualJobDetails)
.isCacheable()
.hasClassName(MyServiceWithRecurringCronJobUsingJobContext.class.getName())
.hasMethodName("myRecurringMethod")
.hasJobContextArg();
} |
public CompletableFuture<Integer> read(ByteBuffer buf, long offset, long len, FileId fileId,
String ufsPath, UfsReadOptions options) {
Objects.requireNonNull(buf);
if (offset < 0 || len < 0 || len > buf.remaining()) {
throw new OutOfRangeRuntimeException(String.format(
"offset is negative, len is negative, or len is greater than buf remaining. "
+ "offset: %s, len: %s, buf remaining: %s", offset, len, buf.remaining()));
}
if (mReadQueue.size() >= READ_CAPACITY) {
throw new ResourceExhaustedRuntimeException("UFS read at capacity", true);
}
CompletableFuture<Integer> future = new CompletableFuture<>();
if (len == 0) {
future.complete(0);
return future;
}
Meter meter = mUfsBytesReadThroughputMetrics.computeIfAbsent(mUfsClient.getUfsMountPointUri(),
uri -> MetricsSystem.meterWithTags(MetricKey.WORKER_BYTES_READ_UFS_THROUGHPUT.getName(),
MetricKey.WORKER_BYTES_READ_UFS_THROUGHPUT.isClusterAggregated(), MetricInfo.TAG_UFS,
MetricsSystem.escape(mUfsClient.getUfsMountPointUri()), MetricInfo.TAG_USER,
options.getTag()));
mReadQueue.add(new ReadTask(buf, ufsPath, fileId, offset,
len, options, future, meter));
return future;
} | @Test
public void readOverlap() throws Exception {
mUfsIOManager.read(TEST_BUF, 2, TEST_BLOCK_SIZE - 2, FIRST_BLOCK_ID, mTestFilePath,
UfsReadOptions.getDefaultInstance()).get();
assertTrue(checkBuf(2, (int) TEST_BLOCK_SIZE - 2, TEST_BUF));
TEST_BUF.clear();
mUfsIOManager.read(TEST_BUF, TEST_BLOCK_SIZE, TEST_BLOCK_SIZE, SECOND_BLOCK_ID, mTestFilePath,
UfsReadOptions.getDefaultInstance()).get();
assertTrue(checkBuf((int) TEST_BLOCK_SIZE, (int) TEST_BLOCK_SIZE, TEST_BUF));
TEST_BUF.clear();
mUfsIOManager.read(TEST_BUF, 2 + TEST_BLOCK_SIZE, TEST_BLOCK_SIZE - 2, SECOND_BLOCK_ID,
mTestFilePath, UfsReadOptions.getDefaultInstance()).get();
assertTrue(checkBuf((int) TEST_BLOCK_SIZE + 2, (int) TEST_BLOCK_SIZE - 2, TEST_BUF));
TEST_BUF.clear();
} |
public boolean shouldDropFrame(final InetSocketAddress address, final UnsafeBuffer buffer, final int length)
{
return false;
} | @Test
void multiSmallGap()
{
final MultiGapLossGenerator generator = new MultiGapLossGenerator(0, 16, 8, 4);
assertFalse(generator.shouldDropFrame(null, null, 123, 456, 0, 0, 8));
assertFalse(generator.shouldDropFrame(null, null, 123, 456, 0, 8, 8));
assertTrue(generator.shouldDropFrame(null, null, 123, 456, 0, 16, 8));
assertFalse(generator.shouldDropFrame(null, null, 123, 456, 0, 24, 8));
assertTrue(generator.shouldDropFrame(null, null, 123, 456, 0, 32, 8));
assertFalse(generator.shouldDropFrame(null, null, 123, 456, 0, 40, 8));
assertTrue(generator.shouldDropFrame(null, null, 123, 456, 0, 48, 4));
assertTrue(generator.shouldDropFrame(null, null, 123, 456, 0, 52, 8));
assertFalse(generator.shouldDropFrame(null, null, 123, 456, 0, 60, 2));
assertTrue(generator.shouldDropFrame(null, null, 123, 456, 0, 62, 10));
} |
public static ConfigurableResource parseResourceConfigValue(String value)
throws AllocationConfigurationException {
return parseResourceConfigValue(value, Long.MAX_VALUE);
} | @Test
public void testAbsoluteMemoryNegativeFractional() throws Exception {
expectNegativeValueOfResource("memory");
parseResourceConfigValue(" -5120.3 mb, 2.35 vcores ");
} |
public boolean matchStage(StageConfigIdentifier stageIdentifier, StageEvent event) {
return this.event.include(event) && appliesTo(stageIdentifier.getPipelineName(), stageIdentifier.getStageName());
} | @Test
void allEventShouldMatchAnyEvents() {
NotificationFilter filter = new NotificationFilter("cruise", "dev", StageEvent.All, false);
assertThat(filter.matchStage(new StageConfigIdentifier("cruise", "dev"), StageEvent.Breaks)).isTrue();
} |
public static QueryableRequestSpecification query(RequestSpecification specification) {
if (specification instanceof QueryableRequestSpecification) {
return (QueryableRequestSpecification) specification;
}
throw new IllegalArgumentException("Cannot convert " + specification + " to a " + QueryableRequestSpecification.class.getName());
} | @Test public void
specification_querier_allows_querying_request_specifications() {
// Given
RequestSpecification spec = new RequestSpecBuilder().addHeader("header", "value").addCookie("cookie", "cookieValue").addParam("someparam", "somevalue").build();
// When
QueryableRequestSpecification queryable = SpecificationQuerier.query(spec);
// Then
softly.assertThat(queryable.getHeaders().getValue("header")).isEqualTo("value");
softly.assertThat(queryable.getCookies().getValue("cookie")).isEqualTo("cookieValue");
softly.assertThat(queryable.getRequestParams().get("someparam")).isEqualTo("somevalue");
} |
public static Optional<Boolean> parseBooleanExact(final String value) {
if (booleanStringMatches(value, true)) {
return Optional.of(true);
}
if (booleanStringMatches(value, false)) {
return Optional.of(false);
}
return Optional.empty();
} | @Test
public void shouldParseExactEverythingElseAsEmpty() {
assertThat(SqlBooleans.parseBooleanExact("No "), is(Optional.empty()));
assertThat(SqlBooleans.parseBooleanExact(" true"), is(Optional.empty()));
assertThat(SqlBooleans.parseBooleanExact(" f "), is(Optional.empty()));
assertThat(SqlBooleans.parseBooleanExact("/tfalse"), is(Optional.empty()));
assertThat(SqlBooleans.parseBooleanExact("what ever"), is(Optional.empty()));
} |
public RatingValue increment(Rating rating) {
if (value.compareTo(rating) > 0) {
value = rating;
}
this.set = true;
return this;
} | @Test
public void increment_has_no_effect_if_arg_is_null() {
verifyUnsetValue(new RatingValue().increment((RatingValue) null));
} |
public static URI getCanonicalUri(URI uri, int defaultPort) {
// skip if there is no authority, ie. "file" scheme or relative uri
String host = uri.getHost();
if (host == null) {
return uri;
}
String fqHost = canonicalizeHost(host);
int port = uri.getPort();
// short out if already canonical with a port
if (host.equals(fqHost) && port != -1) {
return uri;
}
// reconstruct the uri with the canonical host and port
try {
uri = new URI(uri.getScheme(), uri.getUserInfo(),
fqHost, (port == -1) ? defaultPort : port,
uri.getPath(), uri.getQuery(), uri.getFragment());
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
return uri;
} | @Test
public void testCanonicalUriWithNoAuthority() {
URI uri;
uri = NetUtils.getCanonicalUri(URI.create("scheme:/"), 2);
assertEquals("scheme:/", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme:/path"), 2);
assertEquals("scheme:/path", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme:///"), 2);
assertEquals("scheme:///", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme:///path"), 2);
assertEquals("scheme:///path", uri.toString());
} |
public static int toInt(final String str, final int defaultValue) {
if (str == null) {
return defaultValue;
}
try {
return Integer.parseInt(str);
} catch (NumberFormatException nfe) {
return defaultValue;
}
} | @Test
public void testToIntReturnDefaultValueWithNull() {
Assertions.assertEquals(10, NumberUtils.toInt(null, 10));
} |
void start(Iterable<ShardCheckpoint> checkpoints) {
LOG.info(
"Pool {} - starting for stream {} consumer {}. Checkpoints = {}",
poolId,
read.getStreamName(),
consumerArn,
checkpoints);
for (ShardCheckpoint shardCheckpoint : checkpoints) {
checkState(
!state.containsKey(shardCheckpoint.getShardId()),
"Duplicate shard id %s",
shardCheckpoint.getShardId());
ShardState shardState =
new ShardState(
initShardSubscriber(shardCheckpoint), shardCheckpoint, watermarkPolicyFactory);
state.put(shardCheckpoint.getShardId(), shardState);
}
} | @Test
public void poolReSubscribesAndSkipsAllRecordsWithAtTimestampGreaterThanRecords()
throws Exception {
kinesis = new EFOStubbedKinesisAsyncClient(10);
kinesis.stubSubscribeToShard("shard-000", eventWithRecords(3));
kinesis.stubSubscribeToShard("shard-001", eventWithRecords(11, 3));
Instant shard000ts = Instant.now().plus(Duration.standardHours(1));
Instant shard001ts = Instant.now().plus(Duration.standardHours(1));
KinesisReaderCheckpoint initialCheckpoint =
new KinesisReaderCheckpoint(
ImmutableList.of(
tsCheckpoint("shard-000", shard000ts), tsCheckpoint("shard-001", shard001ts)));
pool = new EFOShardSubscribersPool(readSpec, consumerArn, kinesis);
pool.start(initialCheckpoint);
PoolAssertion.assertPool(pool)
.givesCheckPointedRecords(
ShardAssertion.shard("shard-000").gives().withLastCheckpointSequenceNumber(2),
ShardAssertion.shard("shard-001").gives().withLastCheckpointSequenceNumber(13));
assertThat(kinesis.subscribeRequestsSeen())
.containsExactlyInAnyOrder(
subscribeAtTs("shard-000", shard000ts),
subscribeAtTs("shard-001", shard001ts),
subscribeAfterSeqNumber("shard-000", "2"),
subscribeAfterSeqNumber("shard-001", "13"));
} |
private boolean authenticate(final ChannelHandlerContext context, final ByteBuf message) {
try {
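// Delegate the handshake to the protocol-specific authentication engine and
// populate session state only once authentication has finished.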
AuthenticationResult authResult = databaseProtocolFrontendEngine.getAuthenticationEngine().authenticate(context,
databaseProtocolFrontendEngine.getCodecEngine().createPacketPayload(message, context.channel().attr(CommonConstants.CHARSET_ATTRIBUTE_KEY).get()));
if (authResult.isFinished()) {
connectionSession.setGrantee(new Grantee(authResult.getUsername(), authResult.getHostname()));
connectionSession.setCurrentDatabaseName(authResult.getDatabase());
connectionSession.setProcessId(processEngine.connect(connectionSession.getUsedDatabaseName(), connectionSession.getConnectionContext().getGrantee()));
}
return authResult.isFinished();
// CHECKSTYLE:OFF
} catch (final Exception ex) {
// CHECKSTYLE:ON
if (ExpectedExceptions.isExpected(ex.getClass())) {
log.debug("Exception occur: ", ex);
} else {
log.error("Exception occur: ", ex);
}
context.writeAndFlush(databaseProtocolFrontendEngine.getCommandExecuteEngine().getErrorPacket(ex));
context.close();
} finally {
message.release();
}
return false;
} | @Test
void assertChannelReadNotAuthenticated() throws Exception {
channel.register();
AuthenticationResult authenticationResult = AuthenticationResultBuilder.finished("username", "hostname", "database");
when(authenticationEngine.authenticate(any(ChannelHandlerContext.class), any(PacketPayload.class))).thenReturn(authenticationResult);
channel.writeInbound(Unpooled.EMPTY_BUFFER);
assertThat(connectionSession.getConnectionContext().getGrantee(), is(new Grantee("username", "hostname")));
assertThat(connectionSession.getUsedDatabaseName(), is("database"));
} |
public synchronized void transfer(int accountA, int accountB, int amount) {
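// Guarded by the object monitor so concurrent transfers cannot interleave; the transfer
// only proceeds when the source account covers the amount and the accounts differ.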
if (accounts[accountA] >= amount && accountA != accountB) {
accounts[accountB] += amount;
accounts[accountA] -= amount;
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
"Transferred from account: {} to account: {} , amount: {} , bank balance at: {}, source account balance: {}, destination account balance: {}",
accountA,
accountB,
amount,
getBalance(),
getBalance(accountA),
getBalance(accountB));
}
}
} | @Test
void transferMethodHasToTransferAmountFromAnAccountToOtherAccount() {
bank.transfer(0, 1, 1000);
int[] accounts = bank.getAccounts();
assertEquals(0, accounts[0]);
assertEquals(2000, accounts[1]);
} |
@Scheduled(initialDelay = 60, fixedRate = 60, timeUnit = TimeUnit.SECONDS)
public void refreshLocalCache() {
// Case 1: if the cache is empty, refresh it directly
if (CollUtil.isEmpty(sensitiveWordCache)) {
initLocalCache();
return;
}
// Case 2: if the cache has data, use updateTime to detect changes and refresh only when data changed
LocalDateTime maxTime = getMaxValue(sensitiveWordCache, SensitiveWordDO::getUpdateTime);
if (sensitiveWordMapper.selectCountByUpdateTimeGt(maxTime) > 0) {
initLocalCache();
}
} | @Test
public void testRefreshLocalCache() {
// mock data
SensitiveWordDO wordDO1 = randomPojo(SensitiveWordDO.class, o -> o.setName("傻瓜")
.setTags(singletonList("论坛")).setStatus(CommonStatusEnum.ENABLE.getStatus()));
wordDO1.setUpdateTime(LocalDateTime.now());
sensitiveWordMapper.insert(wordDO1);
sensitiveWordService.initLocalCache();
// mock data (2)
SensitiveWordDO wordDO2 = randomPojo(SensitiveWordDO.class, o -> o.setName("笨蛋")
.setTags(singletonList("蔬菜")).setStatus(CommonStatusEnum.ENABLE.getStatus()));
wordDO2.setUpdateTime(LocalDateTimeUtils.addTime(Duration.ofMinutes(1))); // avoid identical timestamps
sensitiveWordMapper.insert(wordDO2);
// invoke
sensitiveWordService.refreshLocalCache();
// assert the sensitiveWordTagsCache cache
assertEquals(SetUtils.asSet("论坛", "蔬菜"), sensitiveWordService.getSensitiveWordTagSet());
// assert the sensitiveWordCache
assertEquals(2, sensitiveWordService.getSensitiveWordCache().size());
assertPojoEquals(wordDO1, sensitiveWordService.getSensitiveWordCache().get(0));
assertPojoEquals(wordDO2, sensitiveWordService.getSensitiveWordCache().get(1));
// assert the tagSensitiveWordTries cache
assertNotNull(sensitiveWordService.getDefaultSensitiveWordTrie());
assertEquals(2, sensitiveWordService.getTagSensitiveWordTries().size());
assertNotNull(sensitiveWordService.getTagSensitiveWordTries().get("论坛"));
assertNotNull(sensitiveWordService.getTagSensitiveWordTries().get("蔬菜"));
} |
@Override
public ExpressionEvaluatorResult evaluateUnaryExpression(String rawExpression, Object resultValue, Class<?> resultClass) {
Map<String, Object> params = new HashMap<>();
params.put(ACTUAL_VALUE_IDENTIFIER, resultValue);
Object expressionResult = compileAndExecute(rawExpression, params);
if (!(expressionResult instanceof Boolean)) {
// try to compare via compare/equals operators
return ExpressionEvaluatorResult.of(compareValues(expressionResult, resultValue));
}
return ExpressionEvaluatorResult.of((boolean) expressionResult);
} | @Test
public void evaluateUnaryExpression() {
assertThat(evaluator.evaluateUnaryExpression(mvelExpression("java.util.Objects.equals(" + ACTUAL_VALUE_IDENTIFIER + ", \"Test\")"), "Test", String.class)).is(successful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression("java.util.Objects.equals(" + ACTUAL_VALUE_IDENTIFIER + ", " + "\"Test\")"), "Test1", String.class)).is(notSuccessful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression("1"), 1, Integer.class)).is(successful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression("2"), 1, Integer.class)).is(notSuccessful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression(""), null, String.class)).is(successful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression(""), "", String.class)).is(notSuccessful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression(null), null, String.class)).is(successful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression(null), "null", String.class)).is(notSuccessful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression("\"\""), "", String.class)).is(successful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression(null), "", String.class)).is(notSuccessful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression(ACTUAL_VALUE_IDENTIFIER + " == 123"), 123, Integer.class)).is(successful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression(ACTUAL_VALUE_IDENTIFIER + " != 123"), 321, Integer.class)).is(successful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression(ACTUAL_VALUE_IDENTIFIER + " == 123"), 321, Integer.class)).is(notSuccessful);
assertThat(evaluator.evaluateUnaryExpression(mvelExpression(ACTUAL_VALUE_IDENTIFIER + " != 123"), 123, Integer.class)).is(notSuccessful);
assertThatThrownBy(() -> evaluator.evaluateUnaryExpression(null, "", String.class))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageStartingWith("Malformed MVEL expression");
} |
@Override
public boolean hasPrivileges(final String database) {
return true;
} | @Test
void assertHasPrivileges() {
assertTrue(new AllPermittedPrivileges().hasPrivileges("foo_db"));
} |
String toLogMessage(TbMsg msg) {
return "\n" +
"Incoming message:\n" + msg.getData() + "\n" +
"Incoming metadata:\n" + JacksonUtil.toString(msg.getMetaData().getData());
} | @Test
void givenEmptyDataMsg_whenToLog_thenReturnString() {
TbLogNode node = new TbLogNode();
TbMsgMetaData metaData = new TbMsgMetaData(Collections.emptyMap());
TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, TenantId.SYS_TENANT_ID, metaData, "");
String logMessage = node.toLogMessage(msg);
log.info(logMessage);
assertThat(logMessage).isEqualTo("\n" +
"Incoming message:\n" +
"\n" +
"Incoming metadata:\n" +
"{}");
} |
public static DecimalType findMultiplicationDecimalType(
int precision1, int scale1, int precision2, int scale2) {
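// SQL-standard result type for multiplication: scales add, precisions add plus one carry digit,
// then both are adjusted to fit the maximum supported decimal precision.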
int scale = scale1 + scale2;
int precision = precision1 + precision2 + 1;
return adjustPrecisionScale(precision, scale);
} | @Test
void testFindMultiplicationDecimalType() {
assertThat(LogicalTypeMerging.findMultiplicationDecimalType(30, 10, 30, 10))
.hasPrecisionAndScale(38, 6);
assertThat(LogicalTypeMerging.findMultiplicationDecimalType(30, 20, 30, 20))
.hasPrecisionAndScale(38, 17);
assertThat(LogicalTypeMerging.findMultiplicationDecimalType(38, 2, 38, 3))
.hasPrecisionAndScale(38, 5);
} |
@Override
public void onMsg(TbContext ctx, TbMsg msg) throws TbNodeException {
try {
Alarm alarm = JacksonUtil.fromString(msg.getData(), Alarm.class);
Objects.requireNonNull(alarm, "alarm is null");
ListenableFuture<Alarm> latest = ctx.getAlarmService().findAlarmByIdAsync(ctx.getTenantId(), alarm.getId());
Futures.addCallback(latest, new FutureCallback<>() {
@Override
public void onSuccess(@Nullable Alarm result) {
if (result == null) {
ctx.tellFailure(msg, new TbNodeException("No such alarm found."));
return;
}
boolean isPresent = config.getAlarmStatusList().stream()
.anyMatch(alarmStatus -> result.getStatus() == alarmStatus);
ctx.tellNext(msg, isPresent ? TbNodeConnectionType.TRUE : TbNodeConnectionType.FALSE);
}
@Override
public void onFailure(Throwable t) {
ctx.tellFailure(msg, t);
}
}, ctx.getDbCallbackExecutor());
} catch (Exception e) {
if (e instanceof IllegalArgumentException || e instanceof NullPointerException) {
log.debug("[{}][{}] Failed to parse alarm: [{}] error [{}]", ctx.getTenantId(), ctx.getRuleChainName(), msg.getData(), e.getMessage());
} else {
log.error("[{}][{}] Failed to parse alarm: [{}]", ctx.getTenantId(), ctx.getRuleChainName(), msg.getData(), e);
}
throw new TbNodeException(e);
}
} | @Test
void givenActiveAlarm_whenOnMsg_then_True() throws TbNodeException {
// GIVEN
var alarm = new Alarm();
alarm.setId(ALARM_ID);
alarm.setOriginator(DEVICE_ID);
alarm.setType("General Alarm");
String msgData = JacksonUtil.toString(alarm);
TbMsg msg = getTbMsg(msgData);
when(alarmService.findAlarmByIdAsync(TENANT_ID, ALARM_ID)).thenReturn(Futures.immediateFuture(alarm));
// WHEN
node.onMsg(ctx, msg);
// THEN
ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class);
verify(ctx, times(1)).tellNext(newMsgCaptor.capture(), eq(TbNodeConnectionType.TRUE));
verify(ctx, never()).tellFailure(any(), any());
TbMsg newMsg = newMsgCaptor.getValue();
assertThat(newMsg).isNotNull();
assertThat(newMsg).isSameAs(msg);
} |
public float getProtectThreshold() {
return protectThreshold;
} | @Test
void testGetProtectThreshold() {
assertEquals(0.0F, serviceMetadata.getProtectThreshold(), 0);
} |
public static void ensureMarkFileLink(final File serviceDir, final File actualFile, final String linkFilename)
{
final String serviceDirPath;
final String markFileParentPath;
try
{
serviceDirPath = serviceDir.getCanonicalPath();
}
catch (final IOException ex)
{
throw new IllegalArgumentException("failed to resolve canonical path for archiveDir=" + serviceDir);
}
try
{
markFileParentPath = actualFile.getParentFile().getCanonicalPath();
}
catch (final IOException ex)
{
throw new IllegalArgumentException(
"failed to resolve canonical path for markFile parent dir of " + actualFile);
}
final Path linkFile = new File(serviceDirPath, linkFilename).toPath();
if (serviceDirPath.equals(markFileParentPath))
{
try
{
Files.deleteIfExists(linkFile);
}
catch (final IOException ex)
{
throw new RuntimeException("failed to remove old link file", ex);
}
}
else
{
try
{
Files.write(
linkFile,
markFileParentPath.getBytes(US_ASCII),
StandardOpenOption.CREATE,
StandardOpenOption.WRITE,
StandardOpenOption.TRUNCATE_EXISTING);
}
catch (final IOException ex)
{
throw new RuntimeException("failed to create link for mark file directory", ex);
}
}
} | @Test
void shouldRemoveLinkFileIfMarkFileIsInServiceDirectory() throws IOException
{
final String linkFilename = "markfile.lnk";
final File markFileLocation = new File(serviceDirectory, "markfile.dat");
final File linkFileLocation = new File(serviceDirectory, linkFilename);
assertTrue(linkFileLocation.createNewFile());
assertTrue(linkFileLocation.exists());
MarkFile.ensureMarkFileLink(serviceDirectory, markFileLocation, linkFilename);
assertFalse(linkFileLocation.exists());
} |
public static void format(Mode mode, AlluxioConfiguration alluxioConf) throws IOException {
NoopUfsManager noopUfsManager = new NoopUfsManager();
switch (mode) {
case MASTER:
URI journalLocation = JournalUtils.getJournalLocation();
LOG.info("Formatting master journal: {}", journalLocation);
JournalSystem journalSystem = new JournalSystem.Builder()
.setLocation(journalLocation).build(CommonUtils.ProcessType.MASTER);
for (String masterServiceName : ServiceUtils.getMasterServiceNames()) {
journalSystem.createJournal(new NoopMaster(masterServiceName, noopUfsManager));
}
journalSystem.format();
break;
case WORKER:
String workerDataFolder = Configuration.getString(PropertyKey.WORKER_DATA_FOLDER);
LOG.info("Formatting worker data folder: {}", workerDataFolder);
int storageLevels = Configuration.getInt(PropertyKey.WORKER_TIERED_STORE_LEVELS);
for (int level = 0; level < storageLevels; level++) {
PropertyKey tierLevelDirPath =
PropertyKey.Template.WORKER_TIERED_STORE_LEVEL_DIRS_PATH.format(level);
String[] dirPaths = Configuration.getString(tierLevelDirPath).split(",");
String name = "Data path for tier " + level;
for (String dirPath : dirPaths) {
String dirWorkerDataFolder = CommonUtils.getWorkerDataDirectory(dirPath, alluxioConf);
LOG.info("Formatting {}:{}", name, dirWorkerDataFolder);
formatWorkerDataFolder(dirWorkerDataFolder);
}
}
break;
default:
throw new RuntimeException(String.format("Unrecognized format mode: %s", mode));
}
} | @Test
public void formatWorkerDeleteFileSameName() throws Exception {
final int storageLevels = 1;
String workerDataFolder;
final File[] dirs = new File[] {
mTemporaryFolder.newFolder("level0")
};
// Have files of same name as the target worker data dir in each tier
for (File dir : dirs) {
workerDataFolder = CommonUtils.getWorkerDataDirectory(dir.getPath(),
Configuration.global());
FileUtils.createFile(workerDataFolder);
}
try (Closeable r = new ConfigurationRule(new HashMap<PropertyKey, Object>() {
{
put(PropertyKey.Template.WORKER_TIERED_STORE_LEVEL_DIRS_PATH.format(0), dirs[0].getPath());
put(PropertyKey.WORKER_TIERED_STORE_LEVELS, storageLevels);
}
}, Configuration.modifiableGlobal()).toResource()) {
final String perms = Configuration.getString(
PropertyKey.WORKER_DATA_FOLDER_PERMISSIONS);
Format.format(Format.Mode.WORKER, Configuration.global());
for (File dir : dirs) {
workerDataFolder = CommonUtils.getWorkerDataDirectory(dir.getPath(),
Configuration.global());
assertTrue(Files.isDirectory(Paths.get(workerDataFolder)));
assertEquals(PosixFilePermissions.fromString(perms), Files.getPosixFilePermissions(Paths
.get(workerDataFolder)));
try (DirectoryStream<Path> directoryStream =
Files.newDirectoryStream(Paths.get(workerDataFolder))) {
for (Path child : directoryStream) {
fail("No sub dirs or files are expected in " + child.toString());
}
}
}
}
} |
@Override
public WebhookDelivery call(Webhook webhook, WebhookPayload payload) {
WebhookDelivery.Builder builder = new WebhookDelivery.Builder();
long startedAt = system.now();
builder
.setAt(startedAt)
.setPayload(payload)
.setWebhook(webhook);
try {
HttpUrl url = HttpUrl.parse(webhook.getUrl());
if (url == null) {
throw new IllegalArgumentException("Webhook URL is not valid: " + webhook.getUrl());
}
builder.setEffectiveUrl(HttpUrlHelper.obfuscateCredentials(webhook.getUrl(), url));
Request request = buildHttpRequest(url, webhook, payload);
try (Response response = execute(request)) {
builder.setHttpStatus(response.code());
}
} catch (Exception e) {
builder.setError(e);
}
return builder
.setDurationInMs((int) (system.now() - startedAt))
.build();
} | @Test
public void silently_catch_error_when_external_server_does_not_answer() throws Exception {
Webhook webhook = new Webhook(WEBHOOK_UUID, PROJECT_UUID, CE_TASK_UUID,
randomAlphanumeric(40), "my-webhook", server.url("/ping").toString(), null);
server.shutdown();
WebhookDelivery delivery = newSender(false).call(webhook, PAYLOAD);
assertThat(delivery.getHttpStatus()).isEmpty();
assertThat(delivery.getDurationInMs().get()).isNotNegative();
// message can be "Connection refused" or "connect timed out"
assertThat(delivery.getErrorMessage().get()).matches("(.*Connection refused.*)|(.*connect timed out.*)");
assertThat(delivery.getAt()).isEqualTo(NOW);
assertThat(delivery.getWebhook()).isSameAs(webhook);
assertThat(delivery.getPayload()).isSameAs(PAYLOAD);
} |
public static boolean isPortable(SamzaPipelineOptions options) {
Map<String, String> override = options.getConfigOverride();
if (override == null) {
return false;
}
return Boolean.parseBoolean(override.getOrDefault(BEAM_PORTABLE_MODE, "false"));
} | @Test
public void testNonPortableModeNullConfig() {
SamzaPipelineOptions mockOptions = mock(SamzaPipelineOptions.class);
doReturn(null).when(mockOptions).getConfigOverride();
Assert.assertFalse(
"Expected false for portable mode ", PortableConfigUtils.isPortable(mockOptions));
} |
@Override
public synchronized void subscribe(final Subscriber<Collection<StreamedRow>> subscriber) {
final PullQuerySubscription subscription = new PullQuerySubscription(
exec, subscriber, result);
result.start();
final WebSocketSubscriber<StreamedRow> webSocketSubscriber =
(WebSocketSubscriber<StreamedRow>) subscriber;
webSocketSubscriber.onSubscribe(subscription, metricsCallbackHolder, startTimeNanos);
} | @Test
public void shouldSubscribe() {
// When:
publisher.subscribe(subscriber);
// Then:
verify(subscriber).onSubscribe(any(), any(), anyLong());
} |
public boolean denied(String name, MediaType mediaType) {
String suffix = (name.contains(".") ? name.substring(name.lastIndexOf(".") + 1) : "").toLowerCase(Locale.ROOT);
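// Check order: deny-by-extension, allow-by-extension, deny-by-media-type, allow-by-media-type.
// An explicit match returns immediately; a configured allow list flips the fall-through default to deny.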
boolean defaultDeny = false;
if (CollectionUtils.isNotEmpty(denyFiles)) {
if (denyFiles.contains(suffix)) {
return true;
}
defaultDeny = false;
}
if (CollectionUtils.isNotEmpty(allowFiles)) {
if (allowFiles.contains(suffix)) {
return false;
}
defaultDeny = true;
}
if (CollectionUtils.isNotEmpty(denyMediaType)) {
if (denyMediaType.contains(mediaType.toString())) {
return true;
}
defaultDeny = false;
}
if (CollectionUtils.isNotEmpty(allowMediaType)) {
if (allowMediaType.contains(mediaType.toString())) {
return false;
}
defaultDeny = true;
}
return defaultDeny;
} | @Test
public void testDenyWithDeny() {
FileUploadProperties uploadProperties = new FileUploadProperties();
uploadProperties.setDenyFiles(new HashSet<>(Arrays.asList("exe")));
assertFalse(uploadProperties.denied("test.xls", MediaType.ALL));
assertTrue(uploadProperties.denied("test.exe", MediaType.ALL));
} |
@Override
public Local create(final Path file) {
return this.create(String.format("%s-%s", new AlphanumericRandomStringService().random(), file.getName()));
} | @Test
public void testPathNotTooLong() {
final String temp = StringUtils.removeEnd(System.getProperty("java.io.tmpdir"), File.separator);
final String testPathDirectory = "/Lorem/ipsum/dolor/sit/amet/consetetur/sadipscing/elitr/sed/diam/nonumy/eirmod/tempor";
final String testPathFile = "takimata.sanc";
final String testPath = String.format("%s/%s", testPathDirectory, testPathFile);
final String testPathMD5 = DigestUtils.md5Hex(testPathDirectory);
Path file = new Path(testPath, EnumSet.of(Path.Type.file));
file.attributes().setVersionId("2");
final Local local = new FlatTemporaryFileService().create("UID", file);
assertTrue(local.getParent().exists());
final String localFile = local.getAbsolute();
assertEquals(String.format("%s/%s/1744299885-%s", temp, "UID", testPathFile).replace('/', File.separatorChar), localFile);
assertNotEquals(String.format("%s/%s%s/2/1744299885-%s", temp, "UID", testPathMD5, testPathFile).replace('/', File.separatorChar), localFile);
} |
public ConfigData get(String path) {
if (allowedPaths == null) {
throw new IllegalStateException("The provider has not been configured yet.");
}
Map<String, String> data = new HashMap<>();
if (path == null || path.isEmpty()) {
return new ConfigData(data);
}
Path filePath = allowedPaths.parseUntrustedPath(path);
if (filePath == null) {
log.warn("The path {} is not allowed to be accessed", path);
return new ConfigData(data);
}
try (Reader reader = reader(filePath)) {
Properties properties = new Properties();
properties.load(reader);
Enumeration<Object> keys = properties.keys();
while (keys.hasMoreElements()) {
String key = keys.nextElement().toString();
String value = properties.getProperty(key);
if (value != null) {
data.put(key, value);
}
}
return new ConfigData(data);
} catch (IOException e) {
log.error("Could not read properties from file {}", path, e);
throw new ConfigException("Could not read properties from file " + path);
}
} | @Test
public void testGetOneKeyAtPath() {
ConfigData configData = configProvider.get("dummy", Collections.singleton("testKey"));
Map<String, String> result = new HashMap<>();
result.put("testKey", "testResult");
assertEquals(result, configData.data());
assertNull(configData.ttl());
} |
@Override
@SuppressWarnings({"CastCanBeRemovedNarrowingVariableType", "unchecked"})
public E poll() {
final E[] buffer = consumerBuffer;
final long index = consumerIndex;
final long mask = consumerMask;
final long offset = modifiedCalcElementOffset(index, mask);
Object e = lvElement(buffer, offset);// LoadLoad
if (e == null) {
if (index != lvProducerIndex(this)) {
// poll() == null iff queue is empty, null element is not strong enough indicator, so we
// must check the producer index. If the queue is indeed not empty we spin until element is
// visible.
do {
e = lvElement(buffer, offset);
} while (e == null);
} else {
return null;
}
}
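// JUMP is a resize marker: the element stream continues in the next, larger buffer.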
if (e == JUMP) {
final E[] nextBuffer = getNextBuffer(buffer, mask);
return newBufferPoll(nextBuffer, index);
}
soElement(buffer, offset, null);
soConsumerIndex(this, index + 2);
return (E) e;
} | @Test(dataProvider = "populated")
public void poll_whenPopulated(MpscGrowableArrayQueue<Integer> queue) {
assertThat(queue.poll()).isNotNull();
assertThat(queue).hasSize(POPULATED_SIZE - 1);
} |
public BlockLease flatten(Block block)
{
requireNonNull(block, "block is null");
if (block instanceof DictionaryBlock) {
return flattenDictionaryBlock((DictionaryBlock) block);
}
if (block instanceof RunLengthEncodedBlock) {
return flattenRunLengthEncodedBlock((RunLengthEncodedBlock) block);
}
return newLease(block);
} | @Test
public void testNestedRLEs()
{
Block block = createTestRleBlock(createTestRleBlock(createTestRleBlock(createLongArrayBlock(5), 1), 1), 4);
assertEquals(block.getPositionCount(), 4);
try (BlockLease blockLease = flattener.flatten(block)) {
Block flattenedBlock = blockLease.get();
assertEquals(flattenedBlock.getClass(), RunLengthEncodedBlock.class);
assertEquals(flattenedBlock.getPositionCount(), block.getPositionCount());
assertEquals(((RunLengthEncodedBlock) flattenedBlock).getValue().getClass(), LongArrayBlock.class);
Block innerBlock = ((RunLengthEncodedBlock) flattenedBlock).getValue();
assertEquals(innerBlock.getPositionCount(), 1);
}
} |
public long getBacklogBytes(String streamName, Instant countSince)
throws TransientKinesisException {
return getBacklogBytes(streamName, countSince, new Instant());
} | @Test
public void shouldNotCallCloudWatchWhenSpecifiedPeriodTooShort() throws Exception {
Instant countSince = new Instant("2017-04-06T10:00:00.000Z");
Instant countTo = new Instant("2017-04-06T10:00:02.000Z");
long backlogBytes = underTest.getBacklogBytes(STREAM, countSince, countTo);
assertThat(backlogBytes).isEqualTo(0L);
verifyNoInteractions(cloudWatch);
} |
public static boolean writeFile(File file, byte[] content, boolean append) {
try (FileOutputStream fos = new FileOutputStream(file, append);
FileChannel fileChannel = fos.getChannel()) {
ByteBuffer buffer = ByteBuffer.wrap(content);
fileChannel.write(buffer);
return true;
} catch (IOException ioe) {
if (ioe.getMessage() != null) {
String errMsg = ioe.getMessage();
if (NO_SPACE_CN.equals(errMsg) || NO_SPACE_EN.equals(errMsg) || errMsg.contains(DISK_QUOTA_CN) || errMsg
.contains(DISK_QUOTA_EN)) {
LOGGER.warn("Disk is full, self-terminating");
System.exit(0);
}
}
}
return false;
} | @Test
void writeFile() {
assertTrue(DiskUtils.writeFile(testFile, "unit test".getBytes(StandardCharsets.UTF_8), false));
assertEquals("unit test", DiskUtils.readFile(testFile));
} |
public Statement buildStatement(final ParserRuleContext parseTree) {
return build(Optional.of(getSources(parseTree)), parseTree);
} | @Test
public void shouldHandleQualifiedSelectStarOnLeftJoinSource() {
// Given:
final SingleStatementContext stmt =
givenQuery("SELECT TEST1.* FROM TEST1 JOIN TEST2 WITHIN 1 SECOND ON TEST1.ID = TEST2.ID;");
// When:
final Query result = (Query) builder.buildStatement(stmt);
// Then:
assertThat(result.getSelect(),
is(new Select(ImmutableList.of(new AllColumns(Optional.of(TEST1_NAME))))));
} |
@Override
public Iterable<SingleTableInventoryCalculatedResult> calculate(final SingleTableInventoryCalculateParameter param) {
PipelineDataConsistencyCalculateSQLBuilder pipelineSQLBuilder = new PipelineDataConsistencyCalculateSQLBuilder(param.getDatabaseType());
List<CalculatedItem> calculatedItems = param.getColumnNames().stream().map(each -> calculateCRC32(pipelineSQLBuilder, param, each)).collect(Collectors.toList());
return Collections.singletonList(new CalculatedResult(calculatedItems.get(0).getRecordsCount(), calculatedItems.stream().map(CalculatedItem::getCrc32).collect(Collectors.toList())));
} | @Test
void assertCalculateFailed() throws SQLException {
when(connection.prepareStatement(anyString())).thenThrow(new SQLException(""));
assertThrows(PipelineTableDataConsistencyCheckLoadingFailedException.class, () -> new CRC32SingleTableInventoryCalculator().calculate(parameter));
} |
private PlantUmlDiagram createDiagram(List<String> rawDiagramLines) {
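// Strip comment lines first, then parse components before dependencies so aliases can be resolved.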
List<String> diagramLines = filterOutComments(rawDiagramLines);
Set<PlantUmlComponent> components = parseComponents(diagramLines);
PlantUmlComponents plantUmlComponents = new PlantUmlComponents(components);
List<ParsedDependency> dependencies = parseDependencies(plantUmlComponents, diagramLines);
return new PlantUmlDiagram.Builder(plantUmlComponents)
.withDependencies(dependencies)
.build();
} | @Test
public void rejects_a_component_with_an_illegal_alias() {
File file = TestDiagram.in(temporaryFolder)
.component("irrelevant").withAlias("ill[]egal").withStereoTypes("..irrelevant..")
.write();
assertThatThrownBy(() -> createDiagram(file))
.isInstanceOf(IllegalDiagramException.class)
.hasMessageContaining("Alias 'ill[]egal' should not contain character(s): '[' or ']' or '\"'");
} |
public Object toIdObject(String baseId) throws AmqpProtocolException {
if (baseId == null) {
return null;
}
try {
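// The prefix embedded in the string identifies the original AMQP type so the ID can be round-tripped.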
if (hasAmqpUuidPrefix(baseId)) {
String uuidString = strip(baseId, AMQP_UUID_PREFIX_LENGTH);
return UUID.fromString(uuidString);
} else if (hasAmqpUlongPrefix(baseId)) {
String longString = strip(baseId, AMQP_ULONG_PREFIX_LENGTH);
return UnsignedLong.valueOf(longString);
} else if (hasAmqpStringPrefix(baseId)) {
return strip(baseId, AMQP_STRING_PREFIX_LENGTH);
} else if (hasAmqpBinaryPrefix(baseId)) {
String hexString = strip(baseId, AMQP_BINARY_PREFIX_LENGTH);
byte[] bytes = convertHexStringToBinary(hexString);
return new Binary(bytes);
} else {
// We have a string without any type prefix, transmit it as-is.
return baseId;
}
} catch (IllegalArgumentException e) {
throw new AmqpProtocolException("Unable to convert ID value");
}
} | @Test
public void testToIdObjectWithEncodedUuid() throws Exception {
UUID uuid = UUID.randomUUID();
String provided = AMQPMessageIdHelper.AMQP_UUID_PREFIX + uuid.toString();
Object idObject = messageIdHelper.toIdObject(provided);
assertNotNull("null object should not have been returned", idObject);
assertEquals("expected id object was not returned", uuid, idObject);
} |
@Override
public void handleGlobalFailure(Throwable cause) {
final FailureEnricher.Context ctx =
DefaultFailureEnricherContext.forGlobalFailure(
jobInfo, jobManagerJobMetricGroup, ioExecutor, userCodeClassLoader);
final CompletableFuture<Map<String, String>> failureLabels =
FailureEnricherUtils.labelFailure(
cause, ctx, getMainThreadExecutor(), failureEnrichers);
state.handleGlobalFailure(cause, failureLabels);
} | @Test
void testExceptionHistoryWithGlobalFailureLabels() throws Exception {
final Exception expectedException = new Exception("Global Exception to label");
BiConsumer<AdaptiveScheduler, List<ExecutionAttemptID>> testLogic =
(scheduler, attemptIds) -> scheduler.handleGlobalFailure(expectedException);
final TestingFailureEnricher failureEnricher = new TestingFailureEnricher();
final Iterable<RootExceptionHistoryEntry> actualExceptionHistory =
new ExceptionHistoryTester(singleThreadMainThreadExecutor)
.withTestLogic(testLogic)
.withFailureEnrichers(Collections.singletonList(failureEnricher))
.run();
assertThat(actualExceptionHistory).hasSize(1);
final RootExceptionHistoryEntry failure = actualExceptionHistory.iterator().next();
assertThat(failure.getTaskManagerLocation()).isNull();
assertThat(failure.getFailingTaskName()).isNull();
assertThat(failureEnricher.getSeenThrowables()).containsExactly(expectedException);
assertThat(failure.getFailureLabels()).isEqualTo(failureEnricher.getFailureLabels());
assertThat(failure.getException().deserializeError(classLoader))
.isEqualTo(expectedException);
} |
public void updateAcl( UIRepositoryObjectAcl aclToUpdate ) {
List<ObjectAce> aces = obj.getAces();
for ( ObjectAce ace : aces ) {
if ( ace.getRecipient().getName().equals( aclToUpdate.getRecipientName() ) ) {
ace.setPermissions( aclToUpdate.getPermissionSet() );
}
}
UIRepositoryObjectAcl acl = getAcl( aclToUpdate.getRecipientName() );
acl.setPermissionSet( aclToUpdate.getPermissionSet() );
this.firePropertyChange( "acls", null, getAcls() ); //$NON-NLS-1$
// above firePropertyChange replaces all elements in the listBox and therefore clears any selected elements;
// however, the selectedAclList field is never updated because no selectedIndices event is ever called; manually
// update it to reflect the selected state of the user/role list now (no selection)
selectedAclList.clear();
// Setting the selected index
List<UIRepositoryObjectAcl> aclList = new ArrayList<UIRepositoryObjectAcl>();
aclList.add( aclToUpdate );
setSelectedAclList( aclList );
setModelDirty( true );
} | @Test
public void testUpdateAcl() {
List<UIRepositoryObjectAcl> originalUIAcls = Arrays.asList( new UIRepositoryObjectAcl[] { objectAcl1, objectAcl2 } );
repositoryObjectAcls.addAcls( originalUIAcls );
objectAcl2.addPermission( RepositoryFilePermission.DELETE );
repositoryObjectAcls.updateAcl( objectAcl2 );
// Assert that the delete permissions is added
for ( UIRepositoryObjectAcl uiAcl : repositoryObjectAcls.getAcls() ) {
if ( objectAcl2.getRecipientName().equals( uiAcl.getRecipientName() ) ) {
assertEquals( "Delete permission was not added", objectAcl2.getPermissionSet(), uiAcl.getPermissionSet() );
}
}
} |
public static URI parse(String gluePath) {
requireNonNull(gluePath, "gluePath may not be null");
if (gluePath.isEmpty()) {
return rootPackageUri();
}
// Legacy from the Cucumber Eclipse plugin
// Older versions of Cucumber allowed it.
if (CLASSPATH_SCHEME_PREFIX.equals(gluePath)) {
return rootPackageUri();
}
if (nonStandardPathSeparatorInUse(gluePath)) {
String standardized = replaceNonStandardPathSeparator(gluePath);
return parseAssumeClasspathScheme(standardized);
}
if (isProbablyPackage(gluePath)) {
String path = resourceNameOfPackageName(gluePath);
return parseAssumeClasspathScheme(path);
}
return parseAssumeClasspathScheme(gluePath);
} | @Test
void can_parse_root_package() {
URI uri = GluePath.parse("classpath:/");
assertAll(
() -> assertThat(uri.getScheme(), is("classpath")),
() -> assertThat(uri.getSchemeSpecificPart(), is("/")));
} |
@Override
public String getMediaType() {
return firstNonNull(
mediaTypeFromUrl(source.getRequestURI()),
firstNonNull(
acceptedContentTypeInResponse(),
MediaTypes.DEFAULT));
} | @Test
public void default_media_type_is_octet_stream() {
when(source.getRequestURI()).thenReturn("/path/to/resource/search");
assertThat(underTest.getMediaType()).isEqualTo(MediaTypes.DEFAULT);
} |
public double getHeight() {
return this.bottom - this.top;
} | @Test
public void getHeightTest() {
Rectangle rectangle = create(1, 2, 3, 4);
Assert.assertEquals(2, rectangle.getHeight(), 0);
} |
@Override
protected URIRegisterDTO buildURIRegisterDTO(final ApplicationContext context,
final Map<String, ServiceFactoryBean> beans) {
return URIRegisterDTO.builder()
.contextPath(this.getContextPath())
.appName(this.getAppName())
.rpcType(RpcTypeEnum.SOFA.getName())
.eventType(EventType.REGISTER)
.host(super.getHost())
.port(Integer.parseInt(getPort()))
.build();
} | @Test
public void testBuildURIRegisterDTO() {
URIRegisterDTO expectedURIRegisterDTO = URIRegisterDTO.builder()
.contextPath(CONTEXT_PATH)
.appName(APP_NAME)
.rpcType(RpcTypeEnum.SOFA.getName())
.eventType(EventType.REGISTER)
.host(HOST)
.port(Integer.parseInt(PORT))
.build();
Map<String, ServiceFactoryBean> beans = new HashMap<>();
URIRegisterDTO realURIRegisterDTO = sofaServiceEventListener.buildURIRegisterDTO(applicationContext, beans);
assertEquals(expectedURIRegisterDTO, realURIRegisterDTO);
} |
public static SMAppService mainAppService() {
return CLASS.mainAppService();
} | @Test
public void testMainAppService() {
assumeFalse(Factory.Platform.osversion.matches("(10|11|12)\\..*"));
assertNotNull(SMAppService.mainAppService());
} |
@SuppressWarnings("WeakerAccess")
public Map<String, Object> getMainConsumerConfigs(final String groupId, final String clientId, final int threadIdx) {
final Map<String, Object> consumerProps = getCommonConsumerConfigs();
// Get main consumer override configs
final Map<String, Object> mainConsumerProps = originalsWithPrefix(MAIN_CONSUMER_PREFIX);
consumerProps.putAll(mainConsumerProps);
// this is a hack to work around StreamsConfig constructor inside StreamsPartitionAssignor to avoid casting
consumerProps.put(APPLICATION_ID_CONFIG, groupId);
// add group id, client id with stream client id prefix, and group instance id
consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
final String groupInstanceId = (String) consumerProps.get(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG);
// Suffix each thread consumer with thread.id to enforce uniqueness of group.instance.id.
if (groupInstanceId != null) {
consumerProps.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId + "-" + threadIdx);
}
// add configs required for stream partition assignor
consumerProps.put(UPGRADE_FROM_CONFIG, getString(UPGRADE_FROM_CONFIG));
consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG));
consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG));
consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG));
consumerProps.put(ACCEPTABLE_RECOVERY_LAG_CONFIG, getLong(ACCEPTABLE_RECOVERY_LAG_CONFIG));
consumerProps.put(MAX_WARMUP_REPLICAS_CONFIG, getInt(MAX_WARMUP_REPLICAS_CONFIG));
consumerProps.put(PROBING_REBALANCE_INTERVAL_MS_CONFIG, getLong(PROBING_REBALANCE_INTERVAL_MS_CONFIG));
consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamsPartitionAssignor.class.getName());
consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG, getString(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG, getList(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG));
consumerProps.put(TASK_ASSIGNOR_CLASS_CONFIG, getString(TASK_ASSIGNOR_CLASS_CONFIG));
// disable auto topic creation
consumerProps.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false");
// verify that producer batch config is no larger than segment size, then add topic configs required for creating topics
final Map<String, Object> topicProps = originalsWithPrefix(TOPIC_PREFIX, false);
final Map<String, Object> producerProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames());
if (topicProps.containsKey(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)) &&
producerProps.containsKey(ProducerConfig.BATCH_SIZE_CONFIG)) {
final int segmentSize = Integer.parseInt(topicProps.get(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)).toString());
final int batchSize = Integer.parseInt(producerProps.get(ProducerConfig.BATCH_SIZE_CONFIG).toString());
if (segmentSize < batchSize) {
throw new IllegalArgumentException(String.format("Specified topic segment size %d is is smaller than the configured producer batch size %d, this will cause produced batch not able to be appended to the topic",
segmentSize,
batchSize));
}
}
consumerProps.putAll(topicProps);
return consumerProps;
} | @SuppressWarnings("deprecation")
@Test
public void shouldNotSetInternalThrowOnFetchStableOffsetUnsupportedConfigToFalseInConsumerForEosAlpha() {
props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, EXACTLY_ONCE);
final StreamsConfig streamsConfig = new StreamsConfig(props);
final Map<String, Object> consumerConfigs = streamsConfig.getMainConsumerConfigs(groupId, clientId, threadIdx);
assertThat(consumerConfigs.get("internal.throw.on.fetch.stable.offset.unsupported"), is(nullValue()));
} |
public EnumSet<RepositoryFilePermission> processCheckboxes() {
return processCheckboxes( false );
} | @Test
public void testProcessCheckboxesReadCheckedEnableAppropriateTrue() {
when( readCheckbox.isChecked() ).thenReturn( true );
when( writeCheckbox.isChecked() ).thenReturn( false );
when( deleteCheckbox.isChecked() ).thenReturn( false );
when( manageCheckbox.isChecked() ).thenReturn( false );
assertEquals( EnumSet.of( RepositoryFilePermission.READ ), permissionsCheckboxHandler.processCheckboxes( true ) );
verify( readCheckbox, times( 1 ) ).setDisabled( true );
verify( writeCheckbox, times( 1 ) ).setDisabled( false );
verify( deleteCheckbox, times( 1 ) ).setDisabled( true );
verify( manageCheckbox, times( 1 ) ).setDisabled( true );
} |
static Result coerceUserList(
final Collection<Expression> expressions,
final ExpressionTypeManager typeManager
) {
return coerceUserList(expressions, typeManager, Collections.emptyMap());
} | @Test
public void shouldCoerceStringNumericWithDecimalPointToDecimals() {
// Given:
final ImmutableList<Expression> expressions = ImmutableList.of(
new IntegerLiteral(10),
new StringLiteral("1.0")
);
// When:
final Result result = CoercionUtil.coerceUserList(expressions, typeManager);
// Then:
assertThat(result.commonType(), is(Optional.of(SqlTypes.decimal(11, 1))));
assertThat(result.expressions(), is(ImmutableList.of(
new DecimalLiteral(new BigDecimal("10.0")),
new DecimalLiteral(new BigDecimal("1.0"))
)));
} |
public String start(WorkflowInstance instance) {
return workflowExecutor.startWorkflow(
translator.translate(instance),
Collections.singletonMap(
Constants.WORKFLOW_SUMMARY_FIELD,
workflowHelper.createWorkflowSummaryFromInstance(instance)),
null,
null,
String.valueOf(System.currentTimeMillis()), // use event field to keep enqueue time
Collections.emptyMap());
} | @Test
public void testStart() {
WorkflowInstance instance = new WorkflowInstance();
instance.setWorkflowId("test-workflow");
instance.setWorkflowVersionId(1);
instance.setRuntimeWorkflow(mock(Workflow.class));
instance.setRuntimeDag(Collections.singletonMap("step1", new StepTransition()));
Map<String, Map<String, ParamDefinition>> stepRunParams =
Collections.singletonMap(
"stepid",
Collections.singletonMap("p1", ParamDefinition.buildParamDefinition("p1", "d1")));
instance.setStepRunParams(stepRunParams);
assertEquals("test-uuid", runner.start(instance));
verify(translator, times(1)).translate(instance);
verify(workflowExecutor, times(1))
.startWorkflow(any(), anyMap(), any(), any(), anyString(), anyMap());
} |
public static String simplyEnvNameIfOverLimit(String envName) {
if (StringUtils.isNotBlank(envName) && envName.length() > MAX_ENV_NAME_LENGTH) {
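            // truncate to MAX_ENV_NAME_LENGTH and append an MD5 digest so distinct long names do not collide after truncation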
return envName.substring(0, MAX_ENV_NAME_LENGTH) + MD5Utils.md5Hex(envName, "UTF-8");
}
return envName;
} | @Test
void testSimplyEnvNameNotOverLimit() {
String expect = "test";
assertEquals(expect, ParamUtil.simplyEnvNameIfOverLimit(expect));
} |
@GuardedBy("lock")
private boolean isLeader(ResourceManager<?> resourceManager) {
return running && this.leaderResourceManager == resourceManager;
} | @Test
void revokeLeadership_stopExistLeader() throws Exception {
final UUID leaderSessionId = UUID.randomUUID();
final CompletableFuture<UUID> terminateRmFuture = new CompletableFuture<>();
rmFactoryBuilder.setTerminateConsumer(terminateRmFuture::complete);
createAndStartResourceManager();
// grant leadership
leaderElection.isLeader(leaderSessionId).join();
// revoke leadership
leaderElection.notLeader();
// should terminate RM
assertThatFuture(terminateRmFuture).eventuallySucceeds().isSameAs(leaderSessionId);
} |
@Override
public void onMsg(TbContext ctx, TbMsg msg) throws TbNodeException {
ctx.tellNext(msg, checkMatches(msg) ? TbNodeConnectionType.TRUE : TbNodeConnectionType.FALSE);
} | @Test
void givenTypeCircle_whenOnMsg_thenTrue() throws TbNodeException {
// GIVEN
var config = new TbGpsGeofencingFilterNodeConfiguration().defaultConfiguration();
config.setPerimeterType(PerimeterType.CIRCLE);
node.init(ctx, new TbNodeConfiguration(JacksonUtil.valueToTree(config)));
DeviceId deviceId = new DeviceId(UUID.randomUUID());
TbMsgMetaData metadata = getMetadataForNewVersionCirclePerimeter();
TbMsg msg = getTbMsg(deviceId, metadata,
POINT_INSIDE_CIRCLE.getLatitude(), POINT_INSIDE_CIRCLE.getLongitude());
// WHEN
node.onMsg(ctx, msg);
// THEN
ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class);
verify(ctx, times(1)).tellNext(newMsgCaptor.capture(), eq(TbNodeConnectionType.TRUE));
verify(ctx, never()).tellFailure(any(), any());
TbMsg newMsg = newMsgCaptor.getValue();
assertThat(newMsg).isNotNull();
assertThat(newMsg).isSameAs(msg);
} |
@Override
public String getConfigDirectory() {
final String configDir =
flinkConfig
.getOptional(DeploymentOptionsInternal.CONF_DIR)
.orElse(flinkConfig.get(KubernetesConfigOptions.FLINK_CONF_DIR));
checkNotNull(configDir);
return configDir;
} | @Test
void getConfigDirectory() {
final String confDir = "/path/of/flink-conf";
flinkConfig.set(DeploymentOptionsInternal.CONF_DIR, confDir);
assertThat(testingKubernetesParameters.getConfigDirectory()).isEqualTo(confDir);
} |
public void setOwnerBits(Bits bits) {
mOwnerBits = bits;
} | @Test
public void setOwnerBits() {
Mode mode = new Mode((short) 0000);
mode.setOwnerBits(Mode.Bits.READ_EXECUTE);
assertEquals(Mode.Bits.READ_EXECUTE, mode.getOwnerBits());
mode.setOwnerBits(Mode.Bits.WRITE);
assertEquals(Mode.Bits.WRITE, mode.getOwnerBits());
mode.setOwnerBits(Mode.Bits.ALL);
assertEquals(Mode.Bits.ALL, mode.getOwnerBits());
} |
@Override
public BigDecimal getDecimal(final int columnIndex) {
return values.getDecimal(columnIndex - 1);
} | @Test
public void shouldGetDecimal() {
assertThat(row.getDecimal("f_decimal"), is(new BigDecimal("12.21")));
} |
public Optional<Object> evaluate(final Map<String, Object> columnPairsMap, final String outputColumn,
final String regexField) {
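        // the regex flag is stored in this row's values under regexField and switches matching between regex and plain equality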
boolean matching = true;
boolean isRegex =
regexField != null && columnValues.containsKey(regexField) && (boolean) columnValues.get(regexField);
for (Map.Entry<String, Object> columnPairEntry : columnPairsMap.entrySet()) {
Object value = columnValues.get(columnPairEntry.getKey());
matching = isRegex ? isRegexMatching(value.toString(), (String) columnPairEntry.getValue()) :
isMatching(value, columnPairEntry.getValue());
if (!matching) {
break;
}
}
return matching ? Optional.ofNullable(columnValues.get(outputColumn)) : Optional.empty();
} | @Test
void evaluateKeyFoundMatchingNoOutputColumnFound() {
KiePMMLRow kiePMMLRow = new KiePMMLRow(COLUMN_VALUES);
Optional<Object> retrieved = kiePMMLRow.evaluate(Collections.singletonMap("KEY-1", 1), "NOT-KEY", null);
assertThat(retrieved).isNotPresent();
} |
@Override
public boolean registerListener(Object listener) {
if (listener instanceof HazelcastInstanceAware aware) {
aware.setHazelcastInstance(node.hazelcastInstance);
}
if (listener instanceof ClusterVersionListener clusterVersionListener) {
clusterVersionListeners.add(clusterVersionListener);
            // on registration, invoke the listener method once so the version is properly initialized on the listener
clusterVersionListener.onClusterVersionChange(getClusterOrNodeVersion());
return true;
}
return false;
} | @Test
public void test_clusterVersionListener_invokedOnRegistration() {
final CountDownLatch latch = new CountDownLatch(1);
ClusterVersionListener listener = newVersion -> latch.countDown();
assertTrue(nodeExtension.registerListener(listener));
assertOpenEventually(latch);
} |
protected static boolean isSingleQuoted(String input) {
if (input == null || input.isBlank()) {
return false;
}
return input.matches("(^" + QUOTE_CHAR + "{1}([^" + QUOTE_CHAR + "]+)" + QUOTE_CHAR + "{1})");
} | @Test
public void testSingleQuoted2() {
assertTrue(isSingleQuoted("\"with space\""));
} |
@Override
public V get(final K key) {
Objects.requireNonNull(key, "key cannot be null");
try {
return maybeMeasureLatency(() -> outerValue(wrapped().get(keyBytes(key))), time, getSensor);
} catch (final ProcessorStateException e) {
final String message = String.format(e.getMessage(), key);
throw new ProcessorStateException(message, e);
}
} | @Test
public void shouldThrowNullPointerOnGetIfKeyIsNull() {
setUpWithoutContext();
assertThrows(NullPointerException.class, () -> metered.get(null));
} |
@Override
public void createLoginLog(LoginLogCreateReqDTO reqDTO) {
LoginLogDO loginLog = BeanUtils.toBean(reqDTO, LoginLogDO.class);
loginLogMapper.insert(loginLog);
} | @Test
public void testCreateLoginLog() {
LoginLogCreateReqDTO reqDTO = randomPojo(LoginLogCreateReqDTO.class);
        // invoke
loginLogService.createLoginLog(reqDTO);
        // assert
LoginLogDO loginLogDO = loginLogMapper.selectOne(null);
assertPojoEquals(reqDTO, loginLogDO);
} |
public Node chooseRandomWithStorageType(final String scope,
final Collection<Node> excludedNodes, StorageType type) {
netlock.readLock().lock();
try {
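      // a scope prefixed with "~" selects a random node from the whole tree excluding that scope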
if (scope.startsWith("~")) {
return chooseRandomWithStorageType(
NodeBase.ROOT, scope.substring(1), excludedNodes, type);
} else {
return chooseRandomWithStorageType(
scope, null, excludedNodes, type);
}
} finally {
netlock.readLock().unlock();
}
} | @Test
public void testChooseRandomWithStorageType() throws Exception {
Node n;
DatanodeDescriptor dd;
    // test that chooseRandom can return nodes of the desired storage type
    // without any excludes
Set<String> diskUnderL1 =
new HashSet<>(Arrays.asList("host2", "host4", "host5", "host6"));
Set<String> archiveUnderL1 = new HashSet<>(Arrays.asList("host1", "host3"));
Set<String> ramdiskUnderL1 = new HashSet<>(Arrays.asList("host7"));
Set<String> ssdUnderL1 = new HashSet<>(Arrays.asList("host8"));
Set<String> nvdimmUnderL1 = new HashSet<>(Arrays.asList("host9"));
for (int i = 0; i < 10; i++) {
n = CLUSTER.chooseRandomWithStorageType("/l1", null, null,
StorageType.DISK);
assertTrue(n instanceof DatanodeDescriptor);
dd = (DatanodeDescriptor) n;
assertTrue(diskUnderL1.contains(dd.getHostName()));
n = CLUSTER.chooseRandomWithStorageType("/l1", null, null,
StorageType.RAM_DISK);
assertTrue(n instanceof DatanodeDescriptor);
dd = (DatanodeDescriptor) n;
assertTrue(ramdiskUnderL1.contains(dd.getHostName()));
n = CLUSTER.chooseRandomWithStorageType("/l1", null, null,
StorageType.ARCHIVE);
assertTrue(n instanceof DatanodeDescriptor);
dd = (DatanodeDescriptor) n;
assertTrue(archiveUnderL1.contains(dd.getHostName()));
n = CLUSTER.chooseRandomWithStorageType("/l1", null, null,
StorageType.SSD);
assertTrue(n instanceof DatanodeDescriptor);
dd = (DatanodeDescriptor) n;
assertTrue(ssdUnderL1.contains(dd.getHostName()));
}
} |
static double estimatePixelCount(final Image image, final double widthOverHeight) {
if (image.getHeight() == HEIGHT_UNKNOWN) {
if (image.getWidth() == WIDTH_UNKNOWN) {
// images whose size is completely unknown will be in their own subgroups, so
// any one of them will do, hence returning the same value for all of them
return 0;
} else {
return image.getWidth() * image.getWidth() / widthOverHeight;
}
} else if (image.getWidth() == WIDTH_UNKNOWN) {
return image.getHeight() * image.getHeight() * widthOverHeight;
} else {
return image.getHeight() * image.getWidth();
}
} | @Test
public void testEstimatePixelCountWidthUnknown() {
assertEquals( 10000.0, estimatePixelCount(img(100, WIDTH_UNKNOWN), 1.0 ), 0.0);
assertEquals( 20000.0, estimatePixelCount(img(200, WIDTH_UNKNOWN), 0.5 ), 0.0);
assertEquals( 12.0, estimatePixelCount(img( 1, WIDTH_UNKNOWN), 12.0 ), 0.0);
assertEquals(230400.0, estimatePixelCount(img(360, WIDTH_UNKNOWN), 16.0/9.0), 0.0);
} |
@Override
public Optional<DispatchEvent> build(final DataChangedEvent event) {
if (event.getKey().startsWith(ComputeNode.getOnlineInstanceNodePath())) {
return createInstanceEvent(event);
}
return Optional.empty();
} | @Test
void assertComputeNodeOnline() {
Optional<DispatchEvent> actual = new ComputeNodeOnlineDispatchEventBuilder()
.build(new DataChangedEvent("/nodes/compute_nodes/online/proxy/foo_instance_id", "{attribute: 127.0.0.1@3307,version: 1}", Type.ADDED));
assertTrue(actual.isPresent());
InstanceOnlineEvent event = (InstanceOnlineEvent) actual.get();
assertThat(event.getInstanceMetaData().getId(), is("foo_instance_id"));
assertThat(event.getInstanceMetaData().getIp(), is("127.0.0.1"));
assertThat(event.getInstanceMetaData().getType(), is(InstanceType.PROXY));
assertThat(event.getInstanceMetaData().getVersion(), is("1"));
assertThat(event.getInstanceMetaData().getAttributes(), is("127.0.0.1@3307"));
} |
public SmppConfiguration getConfiguration() {
return configuration;
} | @Test
public void constructorSmppConfigurationShouldSetTheSmppConfiguration() {
SmppConfiguration configuration = new SmppConfiguration();
binding = new SmppBinding(configuration);
assertSame(configuration, binding.getConfiguration());
} |
public static List<FieldInfo> buildSourceSchemaEntity(final LogicalSchema schema) {
final List<FieldInfo> allFields = schema.columns().stream()
.map(EntityUtil::toFieldInfo)
.collect(Collectors.toList());
if (allFields.isEmpty()) {
throw new IllegalArgumentException("Root schema should contain columns: " + schema);
}
return allFields;
} | @Test
public void shouldBuildMultipleFieldsCorrectly() {
// Given:
final LogicalSchema schema = LogicalSchema.builder()
.valueColumn(ColumnName.of("field1"), SqlTypes.INTEGER)
.valueColumn(ColumnName.of("field2"), SqlTypes.BIGINT)
.build();
// When:
final List<FieldInfo> fields = EntityUtil.buildSourceSchemaEntity(schema);
// Then:
assertThat(fields, hasSize(2));
assertThat(fields.get(0).getName(), equalTo("field1"));
assertThat(fields.get(0).getSchema().getTypeName(), equalTo("INTEGER"));
assertThat(fields.get(1).getName(), equalTo("field2"));
assertThat(fields.get(1).getSchema().getTypeName(), equalTo("BIGINT"));
} |
@Override
public String format(LogRecord logRecord) {
Object[] arguments = new Object[6];
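        // argument slots: thread id, simple class name, method name, UTC time, message, optional stack trace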
arguments[0] = logRecord.getThreadID();
String fullClassName = logRecord.getSourceClassName();
int lastDot = fullClassName.lastIndexOf('.');
String className = fullClassName.substring(lastDot + 1);
arguments[1] = className;
arguments[2] = logRecord.getSourceMethodName();
arguments[3] = DateTimeFormatter.ISO_LOCAL_TIME.format(
LocalDateTime.ofInstant(Instant.ofEpochMilli(logRecord.getMillis()), ZoneOffset.UTC));
arguments[4] = logRecord.getMessage();
if (logRecord.getThrown() != null) {
Writer result = new StringWriter();
logRecord.getThrown().printStackTrace(new PrintWriter(result));
arguments[5] = result.toString();
} else {
arguments[5] = "";
}
return messageFormat.format(arguments);
} | @Test
public void format() {
BriefLogFormatter formatter = new BriefLogFormatter();
LogRecord record = new LogRecord(Level.INFO, "message");
record.setSourceClassName("org.example.Class");
record.setSourceMethodName("method");
record.setMillis(Instant.EPOCH.toEpochMilli());
record.setThreadID(123);
assertEquals("00:00:00 123 Class.method: message\n", formatter.format(record));
} |
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
if (!(request instanceof HttpServletRequest) || !(response instanceof HttpServletResponse)
|| monitoringDisabled || !instanceEnabled) {
            // if this is not an HTTP request or monitoring is disabled, just pass the request through
chain.doFilter(request, response);
return;
}
final HttpServletRequest httpRequest = (HttpServletRequest) request;
final HttpServletResponse httpResponse = (HttpServletResponse) response;
if (httpRequest.getRequestURI().equals(getMonitoringUrl(httpRequest))) {
doMonitoring(httpRequest, httpResponse);
return;
}
if (!httpCounter.isDisplayed() || isRequestExcluded((HttpServletRequest) request)) {
            // if this URL is excluded or the HTTP counter is disabled, this HTTP request is not monitored
chain.doFilter(request, response);
return;
}
doFilter(chain, httpRequest, httpResponse);
} | @Test
public void testDoFilter() throws ServletException, IOException {
// displayed-counters
setProperty(Parameter.DISPLAYED_COUNTERS, "sql");
try {
setUp();
HttpServletRequest request = createNiceMock(HttpServletRequest.class);
doFilter(request);
setProperty(Parameter.DISPLAYED_COUNTERS, "");
setUp();
request = createNiceMock(HttpServletRequest.class);
doFilter(request);
setProperty(Parameter.DISPLAYED_COUNTERS, "unknown");
try {
setUp();
request = createNiceMock(HttpServletRequest.class);
doFilter(request);
} catch (final IllegalArgumentException e) {
assertNotNull("ok", e);
}
} finally {
setProperty(Parameter.DISPLAYED_COUNTERS, null);
}
        // excluded URL
setProperty(Parameter.URL_EXCLUDE_PATTERN, ".*");
try {
setUp();
final HttpServletRequest request = createNiceMock(HttpServletRequest.class);
doFilter(request);
} finally {
setProperty(Parameter.URL_EXCLUDE_PATTERN, "");
}
// standard
setUp();
HttpServletRequest request = createNiceMock(HttpServletRequest.class);
doFilter(request);
// log
setUp();
setProperty(Parameter.LOG, TRUE);
try {
((Logger) org.slf4j.LoggerFactory.getLogger(FILTER_NAME)).setLevel(Level.WARN);
request = createNiceMock(HttpServletRequest.class);
doFilter(request);
((Logger) org.slf4j.LoggerFactory.getLogger(FILTER_NAME)).setLevel(Level.DEBUG);
request = createNiceMock(HttpServletRequest.class);
doFilter(request);
request = createNiceMock(HttpServletRequest.class);
expect(request.getHeader("X-Forwarded-For")).andReturn("me").anyTimes();
expect(request.getQueryString()).andReturn("param1=1").anyTimes();
doFilter(request);
} finally {
setProperty(Parameter.LOG, null);
}
// ajax
request = createNiceMock(HttpServletRequest.class);
expect(request.getHeader("X-Requested-With")).andReturn("XMLHttpRequest");
doFilter(request);
// spring mvc with @RequestMapping (read in CounterRequestContext.getHttpRequestName())
request = createNiceMock(HttpServletRequest.class);
expect(request
.getAttribute("org.springframework.web.servlet.HandlerMapping.bestMatchingPattern"))
.andReturn("/testspringmvc").anyTimes();
doFilter(request);
        // HTTP system error, with logging
setProperty(Parameter.LOG, TRUE);
try {
final String test = "test";
request = createNiceMock(HttpServletRequest.class);
doFilter(request, new UnknownError(test));
request = createNiceMock(HttpServletRequest.class);
doFilter(request, new IllegalStateException(test));
            // not possible:
// doFilter(createNiceMock(HttpServletRequest.class), new IOException(test));
// doFilter(createNiceMock(HttpServletRequest.class), new ServletException(test));
// doFilter(createNiceMock(HttpServletRequest.class), new Exception(test));
} finally {
setProperty(Parameter.LOG, null);
}
} |
public static int toUnix(String time, String pattern) {
LocalDateTime formatted = LocalDateTime.parse(time, DateTimeFormatter.ofPattern(pattern));
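        // interpret the parsed local time in the system default zone when converting to epoch seconds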
return (int) formatted.atZone(ZoneId.systemDefault()).toInstant().getEpochSecond();
} | @Test
public void testToUnix() {
int unixTime = DateKit.toUnix(date);
Assert.assertEquals(1505892470, unixTime);
unixTime = DateKit.toUnix("2017-09-09 11:22:33");
Assert.assertEquals(1504927353, unixTime);
unixTime = DateKit.toUnix("2017-09-09 11:22", "yyyy-MM-dd HH:mm");
Assert.assertEquals(1504927320, unixTime);
unixTime = DateKit.nowUnix();
Assert.assertNotNull(unixTime);
} |
@Override
public void validateDictDataList(String dictType, Collection<String> values) {
if (CollUtil.isEmpty(values)) {
return;
}
Map<String, DictDataDO> dictDataMap = CollectionUtils.convertMap(
dictDataMapper.selectByDictTypeAndValues(dictType, values), DictDataDO::getValue);
        // validate
values.forEach(value -> {
DictDataDO dictData = dictDataMap.get(value);
if (dictData == null) {
throw exception(DICT_DATA_NOT_EXISTS);
}
if (!CommonStatusEnum.ENABLE.getStatus().equals(dictData.getStatus())) {
throw exception(DICT_DATA_NOT_ENABLE, dictData.getLabel());
}
});
} | @Test
public void testValidateDictDataList_success() {
        // mock data
DictDataDO dictDataDO = randomDictDataDO().setStatus(CommonStatusEnum.ENABLE.getStatus());
dictDataMapper.insert(dictDataDO);
        // prepare parameters
String dictType = dictDataDO.getDictType();
List<String> values = singletonList(dictDataDO.getValue());
        // invoke; no assertion needed
dictDataService.validateDictDataList(dictType, values);
} |
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR
if (splittee == null || splitChar == null) {
return new String[0];
}
final String EMPTY_ELEMENT = "";
int spot;
final int splitLength = splitChar.length();
final String adjacentSplit = splitChar + splitChar;
final int adjacentSplitLength = adjacentSplit.length();
if (truncate) {
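            // collapse runs of adjacent delimiters, then strip one leading and one trailing delimiter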
while ((spot = splittee.indexOf(adjacentSplit)) != -1) {
splittee = splittee.substring(0, spot + splitLength)
+ splittee.substring(spot + adjacentSplitLength, splittee.length());
}
if (splittee.startsWith(splitChar)) {
splittee = splittee.substring(splitLength);
}
if (splittee.endsWith(splitChar)) { // Remove trailing splitter
splittee = splittee.substring(0, splittee.length() - splitLength);
}
}
List<String> returns = new ArrayList<>();
final int length = splittee.length(); // This is the new length
int start = 0;
spot = 0;
while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) {
if (spot > 0) {
returns.add(splittee.substring(start, spot));
} else {
returns.add(EMPTY_ELEMENT);
}
start = spot + splitLength;
}
if (start < length) {
returns.add(splittee.substring(start));
} else if (spot == length - splitLength) {// Found splitChar at end of line
returns.add(EMPTY_ELEMENT);
}
return returns.toArray(new String[returns.size()]);
} | @Test
public void testSplitSSSMultipleDelimCharsWithDefaultValue() {
// Multiple delimiters
assertThat(JOrphanUtils.split("a,b;c,,", ",;", "?"),
CoreMatchers.equalTo(new String[]{"a", "b", "c", "?", "?"}));
} |
@Override
public CompletableFuture<AckResult> ackMessage(ProxyContext ctx, ReceiptHandle handle, String messageId,
AckMessageRequestHeader requestHeader, long timeoutMillis) {
return this.mqClientAPIFactory.getClient().ackMessageAsync(
this.resolveBrokerAddrInReceiptHandle(ctx, handle),
requestHeader,
timeoutMillis
);
} | @Test
public void testAckMessageByInvalidBrokerNameHandle() throws Exception {
when(topicRouteService.getBrokerAddr(any(), anyString())).thenThrow(new MQClientException(ResponseCode.TOPIC_NOT_EXIST, ""));
try {
this.clusterMessageService.ackMessage(
ProxyContext.create(),
ReceiptHandle.builder()
.startOffset(0L)
.retrieveTime(System.currentTimeMillis())
.invisibleTime(3000)
.reviveQueueId(1)
.topicType(ReceiptHandle.NORMAL_TOPIC)
.brokerName("notExistBroker")
.queueId(0)
.offset(123)
.commitLogOffset(0L)
.build(),
MessageClientIDSetter.createUniqID(),
new AckMessageRequestHeader(),
3000);
fail();
} catch (Exception e) {
assertTrue(e instanceof ProxyException);
ProxyException proxyException = (ProxyException) e;
assertEquals(ProxyExceptionCode.INVALID_RECEIPT_HANDLE, proxyException.getCode());
}
} |
@JsonProperty("type")
public FSTType getFstType() {
return _fstType;
} | @Test
public void withDisabledTrue()
throws JsonProcessingException {
String confStr = "{\"disabled\": true}";
FstIndexConfig config = JsonUtils.stringToObject(confStr, FstIndexConfig.class);
assertTrue(config.isDisabled(), "Unexpected disabled");
assertNull(config.getFstType(), "Unexpected type");
} |
static String getSparkInternalAccumulatorKey(final String prestoKey)
{
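        // keys that already carry the spark internal accumulator prefix are returned from the operator-stats separator onwards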
if (prestoKey.contains(SPARK_INTERNAL_ACCUMULATOR_PREFIX)) {
int index = prestoKey.indexOf(PRESTO_NATIVE_OPERATOR_STATS_SEP);
return prestoKey.substring(index);
}
String[] prestoKeyParts = prestoKey.split("\\.");
int prestoKeyPartsLength = prestoKeyParts.length;
if (prestoKeyPartsLength < 2) {
log.debug("Fail to build spark internal key for %s format not supported", prestoKey);
return "";
}
String prestoNewKey = String.format("%1$s%2$s", prestoKeyParts[0], prestoKeyParts[prestoKeyPartsLength - 1]);
if (prestoNewKey.contains("_")) {
prestoNewKey = CaseUtils.toCamelCase(prestoKey, false, '_');
}
return String.format("%1$s%2$s%3$s", SPARK_INTERNAL_ACCUMULATOR_PREFIX,
PRESTO_NATIVE_OPERATOR_STATS_PREFIX, prestoNewKey);
} | @Test
public void getSparkInternalAccumulatorKeyInternalKeyTest()
{
String expected = "internal.metrics.appname.writerRejectedPackageRawBytes";
String prestoKey = "ShuffleWrite.root.internal.metrics.appname.writerRejectedPackageRawBytes";
String actual = getSparkInternalAccumulatorKey(prestoKey);
assertEquals(actual, expected);
} |
@Override
public void setSequence(long update) {
sequence.set(update);
} | @Test
public void testSetSequence() {
sequencer.setSequence(23);
assertEquals(23, sequencer.getSequence());
} |
@Override
@ManagedOperation(description = "Does the store contain the given key")
public boolean contains(String key) {
return cache.asMap().containsKey(key);
} | @Test
void testContains() {
assertFalse(repo.contains(key01));
// add key and check again
assertTrue(repo.add(key01));
assertTrue(repo.contains(key01));
} |
@Override
public V poll(long timeout, TimeUnit unit) throws InterruptedException {
return commandExecutor.getInterrupted(pollAsync(timeout, unit));
} | @Test
@Timeout(3)
public void testShortPoll() throws InterruptedException {
RBlockingQueue<Integer> queue = getQueue();
queue.poll(500, TimeUnit.MILLISECONDS);
queue.poll(10, TimeUnit.MICROSECONDS);
} |
@Override
public void startBundles(List<Bundle> bundles, boolean privileged) throws BundleException {
throw newException();
} | @Test
void require_that_startBundles_throws_exception() throws BundleException {
assertThrows(RuntimeException.class, () -> {
new DisableOsgiFramework().startBundles(null, true);
});
} |
public static int indexOf(CharSequence str, char searchChar) {
return indexOf(str, searchChar, 0);
} | @Test
public void indexOfTest2() {
int index = CharSequenceUtil.indexOf("abc123", '1', 0, 3);
assertEquals(-1, index);
index = CharSequenceUtil.indexOf("abc123", 'b', 0, 3);
assertEquals(1, index);
} |
@Override
public ScheduledFuture<?> scheduleAtFixedRate(
ProcessingTimeCallback callback, long initialDelay, long period) {
return scheduleRepeatedly(callback, initialDelay, period, false);
} | @Timeout(value = 10000, unit = TimeUnit.MILLISECONDS)
@Test
void testScheduleAtFixedRate() throws Exception {
final AtomicReference<Throwable> errorRef = new AtomicReference<>();
final long period = 10L;
final int countDown = 3;
final SystemProcessingTimeService timer = createSystemProcessingTimeService(errorRef);
final CountDownLatch countDownLatch = new CountDownLatch(countDown);
try {
timer.scheduleAtFixedRate(timestamp -> countDownLatch.countDown(), 0L, period);
countDownLatch.await();
assertThat(errorRef.get()).isNull();
} finally {
timer.shutdownService();
}
} |
@VisibleForTesting
static boolean isCompressed(String contentEncoding) {
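        // substring match handles combined headers such as "gzip, deflate"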
return contentEncoding.contains(HttpHeaderValues.GZIP.toString())
|| contentEncoding.contains(HttpHeaderValues.DEFLATE.toString())
|| contentEncoding.contains(HttpHeaderValues.BR.toString())
|| contentEncoding.contains(HttpHeaderValues.COMPRESS.toString());
} | @Test
void detectsGzipAmongOtherEncodings() {
assertTrue(HttpUtils.isCompressed("gzip, deflate"));
} |
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
try {
final FileAttributes stat;
if(file.isSymbolicLink()) {
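                // lstat reads the attributes of the link itself instead of following it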
stat = session.sftp().lstat(file.getAbsolute());
}
else {
stat = session.sftp().stat(file.getAbsolute());
}
switch(stat.getType()) {
case BLOCK_SPECIAL:
case CHAR_SPECIAL:
case FIFO_SPECIAL:
case SOCKET_SPECIAL:
case REGULAR:
case SYMLINK:
if(!file.getType().contains(Path.Type.file)) {
throw new NotfoundException(String.format("File %s is of type %s but expected %s",
file.getAbsolute(), stat.getType(), file.getType()));
}
break;
case DIRECTORY:
if(!file.getType().contains(Path.Type.directory)) {
throw new NotfoundException(String.format("File %s is of type %s but expected %s",
file.getAbsolute(), stat.getType(), file.getType()));
}
break;
}
return this.toAttributes(stat);
}
catch(IOException e) {
throw new SFTPExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
} | @Test
public void testFindSymbolicLink() throws Exception {
final Path file = new SFTPTouchFeature(session).touch(new Path(new SFTPHomeDirectoryService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final Path symlink = new Path(new SFTPHomeDirectoryService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new SFTPSymlinkFeature(session).symlink(symlink, file.getAbsolute());
final SFTPAttributesFinderFeature f = new SFTPAttributesFinderFeature(session);
final PathAttributes attributes = f.find(symlink);
assertNotNull(attributes);
} |
@Private
@VisibleForTesting
public static int parseMaximumHeapSizeMB(String javaOpts) {
// Find the last matching -Xmx following word boundaries
Matcher m = JAVA_OPTS_XMX_PATTERN.matcher(javaOpts);
if (m.matches()) {
long size = Long.parseLong(m.group(1));
if (size <= 0) {
return -1;
}
if (m.group(2).isEmpty()) {
// -Xmx specified in bytes
return (int) (size / (1024 * 1024));
}
char unit = m.group(2).charAt(0);
switch (unit) {
case 'g':
case 'G':
// -Xmx specified in GB
return (int) (size * 1024);
case 'm':
case 'M':
// -Xmx specified in MB
return (int) size;
case 'k':
case 'K':
// -Xmx specified in KB
return (int) (size / 1024);
}
}
// -Xmx not specified
return -1;
} | @Test
public void testParseMaximumHeapSizeMB() {
// happy cases
Assert.assertEquals(4096, JobConf.parseMaximumHeapSizeMB("-Xmx4294967296"));
Assert.assertEquals(4096, JobConf.parseMaximumHeapSizeMB("-Xmx4194304k"));
Assert.assertEquals(4096, JobConf.parseMaximumHeapSizeMB("-Xmx4096m"));
Assert.assertEquals(4096, JobConf.parseMaximumHeapSizeMB("-Xmx4g"));
// sad cases
Assert.assertEquals(-1, JobConf.parseMaximumHeapSizeMB("-Xmx4?"));
Assert.assertEquals(-1, JobConf.parseMaximumHeapSizeMB(""));
} |
@SneakyThrows({NoSuchProviderException.class, NoSuchAlgorithmException.class})
public static KeyPair generateRSAKeyPair() {
KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA", BouncyCastleProvider.PROVIDER_NAME);
keyPairGenerator.initialize(4096, new SecureRandom());
return keyPairGenerator.generateKeyPair();
} | @Test
void assertGenerateKeyPair() {
KeyPair actual = SSLUtils.generateRSAKeyPair();
assertThat(actual.getPrivate().getAlgorithm(), is("RSA"));
assertThat(actual.getPrivate().getFormat(), is("PKCS#8"));
assertThat(actual.getPublic().getAlgorithm(), is("RSA"));
assertThat(actual.getPublic().getFormat(), is("X.509"));
} |
public static String readFile(String path, String fileName) {
File file = openFile(path, fileName);
if (file.exists()) {
return readFile(file);
}
return null;
} | @Test
void testReadFile() {
assertNotNull(DiskUtils.readFile(testFile));
} |
public long parse(final String text) {
return parse(text, ZoneId.systemDefault());
} | @Test
public void shouldConvertToMillis() {
// Given
final String format = "yyyy-MM-dd HH";
final String timestamp = "1605-11-05 10";
// When
final long ts = new StringToTimestampParser(format).parse(timestamp);
// Then
assertThat(ts, is(
FIFTH_OF_NOVEMBER
.withHour(10)
.withZoneSameInstant(ZID)
.toInstant()
.toEpochMilli()));
} |
public RuntimeOptionsBuilder parse(Class<?> clazz) {
RuntimeOptionsBuilder args = new RuntimeOptionsBuilder();
for (Class<?> classWithOptions = clazz; hasSuperClass(
classWithOptions); classWithOptions = classWithOptions.getSuperclass()) {
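            // walk up the class hierarchy so options declared on superclasses are inherited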
CucumberOptions options = requireNonNull(optionsProvider).getOptions(classWithOptions);
if (options != null) {
addDryRun(options, args);
addMonochrome(options, args);
addTags(classWithOptions, options, args);
addPlugins(options, args);
addPublish(options, args);
addName(options, args);
addSnippets(options, args);
addGlue(options, args);
addFeatures(options, args);
addObjectFactory(options, args);
addUuidGenerator(options, args);
}
}
addDefaultFeaturePathIfNoFeaturePathIsSpecified(args, clazz);
addDefaultGlueIfNoOverridingGlueIsSpecified(args, clazz);
return args;
} | @Test
void cannot_create_with_glue_and_extra_glue() {
Executable testMethod = () -> parser().parse(ClassWithGlueAndExtraGlue.class).build();
CucumberException actualThrown = assertThrows(CucumberException.class, testMethod);
assertThat("Unexpected exception message", actualThrown.getMessage(),
is(equalTo("glue and extraGlue cannot be specified at the same time")));
} |
@Override
public FSDataOutputStream create(final Path f, final FsPermission permission,
final boolean overwrite, final int bufferSize, final short replication,
final long blockSize, final Progressable progress) throws IOException {
return super.create(fullPath(f), permission, overwrite, bufferSize,
replication, blockSize, progress);
} | @Test(timeout = 30000)
public void testGetAllStoragePolicy() throws Exception {
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI chrootUri = URI.create("mockfs://foo/a/b");
ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem())
.getRawFileSystem();
chrootFs.getAllStoragePolicies();
verify(mockFs).getAllStoragePolicies();
} |
@Override
public void serviceInit(Configuration conf) throws Exception {
initScheduler(conf);
super.serviceInit(conf);
// Initialize SchedulingMonitorManager
schedulingMonitorManager.initialize(rmContext, conf);
} | @Test(timeout = 30000)
public void testConfValidation() throws Exception {
FifoScheduler scheduler = new FifoScheduler();
Configuration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
try {
scheduler.serviceInit(conf);
fail("Exception is expected because the min memory allocation is"
+ " larger than the max memory allocation.");
} catch (YarnRuntimeException e) {
// Exception is expected.
assertTrue("The thrown exception is not the expected one.", e
.getMessage().startsWith("Invalid resource scheduler memory"));
}
} |
public ProjectList searchProjects(String gitlabUrl, String personalAccessToken, @Nullable String projectName,
@Nullable Integer pageNumber, @Nullable Integer pageSize) {
String url = format("%s/projects?archived=false&simple=true&membership=true&order_by=name&sort=asc&search=%s%s%s",
gitlabUrl,
projectName == null ? "" : urlEncode(projectName),
pageNumber == null ? "" : format("&page=%d", pageNumber),
pageSize == null ? "" : format("&per_page=%d", pageSize)
);
LOG.debug("get projects : [{}]", url);
Request request = new Request.Builder()
.addHeader(PRIVATE_TOKEN, personalAccessToken)
.url(url)
.get()
.build();
try (Response response = client.newCall(request).execute()) {
Headers headers = response.headers();
checkResponseIsSuccessful(response, "Could not get projects from GitLab instance");
List<Project> projectList = Project.parseJsonArray(response.body().string());
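            // GitLab returns pagination metadata in the X-Page, X-Per-Page and X-Total response headers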
int returnedPageNumber = parseAndGetIntegerHeader(headers.get("X-Page"));
int returnedPageSize = parseAndGetIntegerHeader(headers.get("X-Per-Page"));
String xtotal = headers.get("X-Total");
Integer totalProjects = Strings.isEmpty(xtotal) ? null : parseAndGetIntegerHeader(xtotal);
return new ProjectList(projectList, returnedPageNumber, returnedPageSize, totalProjects);
} catch (JsonSyntaxException e) {
throw new IllegalArgumentException("Could not parse GitLab answer to search projects. Got a non-json payload as result.");
} catch (IOException e) {
logException(url, e);
throw new IllegalStateException(e.getMessage(), e);
}
} | @Test
public void search_projects_fail_if_pagination_data_not_returned() {
MockResponse projects = new MockResponse()
.setResponseCode(200)
.setBody("[ ]");
server.enqueue(projects);
assertThatThrownBy(() -> underTest.searchProjects(gitlabUrl, "pat", "example", 1, 10))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Pagination data from GitLab response is missing");
} |
@Override
public int hashCode() {
return Objects.hashCode(data);
} | @Test
public void testEqualityWithMemberResponses() {
for (short version : ApiKeys.LEAVE_GROUP.allVersions()) {
List<MemberResponse> localResponses = version > 2 ? memberResponses : memberResponses.subList(0, 1);
LeaveGroupResponse primaryResponse = new LeaveGroupResponse(localResponses,
Errors.NONE,
throttleTimeMs,
version);
// The order of members should not alter result data.
Collections.reverse(localResponses);
LeaveGroupResponse reversedResponse = new LeaveGroupResponse(localResponses,
Errors.NONE,
throttleTimeMs,
version);
assertEquals(primaryResponse, primaryResponse);
assertEquals(primaryResponse, reversedResponse);
assertEquals(primaryResponse.hashCode(), reversedResponse.hashCode());
}
} |