focal_method | test_case
---|---
public ConfigResponse resolveConfig(GetConfigRequest req, ConfigResponseFactory responseFactory) {
long start = System.currentTimeMillis();
metricUpdater.incrementRequests();
ConfigKey<?> configKey = req.getConfigKey();
String defMd5 = req.getRequestDefMd5();
if (defMd5 == null || defMd5.isEmpty()) {
defMd5 = ConfigUtils.getDefMd5(req.getDefContent().asList());
}
ConfigCacheKey cacheKey = new ConfigCacheKey(configKey, defMd5);
log.log(Level.FINE, () -> TenantRepository.logPre(getId()) + ("Resolving config " + cacheKey));
ConfigResponse config;
if (useCache(req)) {
config = cache.computeIfAbsent(cacheKey, (ConfigCacheKey key) -> {
var response = createConfigResponse(configKey, req, responseFactory);
metricUpdater.setCacheConfigElems(cache.configElems());
metricUpdater.setCacheChecksumElems(cache.checkSumElems());
return response;
});
} else {
config = createConfigResponse(configKey, req, responseFactory);
}
metricUpdater.incrementProcTime(System.currentTimeMillis() - start);
return config;
} | @Test
public void require_that_configs_are_cached() {
ConfigResponse response = handler.resolveConfig(createRequest(ModelConfig.CONFIG_DEF_NAME, ModelConfig.CONFIG_DEF_NAMESPACE, ModelConfig.CONFIG_DEF_SCHEMA));
assertNotNull(response);
ConfigResponse cached_response = handler.resolveConfig(createRequest(ModelConfig.CONFIG_DEF_NAME, ModelConfig.CONFIG_DEF_NAMESPACE, ModelConfig.CONFIG_DEF_SCHEMA));
assertNotNull(cached_response);
assertSame(response, cached_response);
} |
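
The assertSame in the test above only holds because resolveConfig serves repeated lookups for an equal ConfigCacheKey from the cache, returning the identical ConfigResponse instance. A minimal sketch of that identity-caching pattern with computeIfAbsent (names are illustrative, not from the project):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Identity-caching resolver: two lookups with equal keys return the very
// same instance, which is exactly what assertSame verifies.
final class MemoizingResolver<K, V> {
    private final Map<K, V> cache = new ConcurrentHashMap<>();
    private final Function<K, V> loader;

    MemoizingResolver(Function<K, V> loader) {
        this.loader = loader;
    }

    V resolve(K key) {
        return cache.computeIfAbsent(key, loader);
    }
}
```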
@Override
public void updateCommentVisible(ProductCommentUpdateVisibleReqVO updateReqVO) {
// Validate that the comment exists
validateCommentExists(updateReqVO.getId());
// Update the visible state
productCommentMapper.updateById(new ProductCommentDO().setId(updateReqVO.getId())
.setVisible(updateReqVO.getVisible()));
} | @Test
public void testUpdateCommentVisible_success() {
// mock test data
ProductCommentDO productComment = randomPojo(ProductCommentDO.class, o -> {
o.setVisible(Boolean.TRUE);
});
productCommentMapper.insert(productComment);
Long productCommentId = productComment.getId();
ProductCommentUpdateVisibleReqVO updateReqVO = new ProductCommentUpdateVisibleReqVO();
updateReqVO.setId(productCommentId);
updateReqVO.setVisible(Boolean.FALSE);
productCommentService.updateCommentVisible(updateReqVO);
ProductCommentDO productCommentDO = productCommentMapper.selectById(productCommentId);
assertFalse(productCommentDO.getVisible());
} |
public String toMysqlColumnTypeString() {
return "unknown";
} | @Test
public void testMysqlColumnType() {
Object[][] testCases = new Object[][] {
{ScalarType.createType(PrimitiveType.BOOLEAN), "tinyint(1)"},
{ScalarType.createType(PrimitiveType.LARGEINT), "bigint(20) unsigned"},
{ScalarType.createDecimalV3NarrowestType(18, 4), "decimal(18, 4)"},
{new ArrayType(Type.INT), "array<int(11)>"},
{new MapType(Type.INT, Type.INT), "map<int(11),int(11)>"},
{new StructType(Lists.newArrayList(Type.INT)), "struct<col1 int(11)>"},
};
for (Object[] tc : testCases) {
Type type = (Type) tc[0];
String name = (String) tc[1];
Assert.assertEquals(name, type.toMysqlColumnTypeString());
}
} |
@Override
public void persistEphemeral(final String key, final String value) {
try {
if (isExisted(key)) {
client.delete().deletingChildrenIfNeeded().forPath(key);
}
client.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(key, value.getBytes(StandardCharsets.UTF_8));
// CHECKSTYLE:OFF
} catch (final Exception ex) {
// CHECKSTYLE:ON
ZookeeperExceptionHandler.handleException(ex);
}
} | @Test
void assertPersistEphemeralExist() throws Exception {
when(existsBuilder.forPath("/test/ephemeral")).thenReturn(new Stat());
when(protect.withMode(CreateMode.EPHEMERAL)).thenReturn(protect);
REPOSITORY.persistEphemeral("/test/ephemeral", "value4");
verify(backgroundVersionable).forPath("/test/ephemeral");
verify(protect).forPath("/test/ephemeral", "value4".getBytes(StandardCharsets.UTF_8));
} |
public KafkaFuture<Void> partitionResult(final TopicPartition partition) {
if (!partitions.contains(partition)) {
throw new IllegalArgumentException("Partition " + partition + " was not included in the original request");
}
final KafkaFutureImpl<Void> result = new KafkaFutureImpl<>();
this.future.whenComplete((topicPartitions, throwable) -> {
if (throwable != null) {
result.completeExceptionally(throwable);
} else if (!maybeCompleteExceptionally(topicPartitions, partition, result)) {
result.complete(null);
}
});
return result;
} | @Test
public void testPartitionMissingInRequestErrorConstructor() throws InterruptedException, ExecutionException {
DeleteConsumerGroupOffsetsResult partitionLevelErrorResult = createAndVerifyPartitionLevelError();
assertThrows(IllegalArgumentException.class, () -> partitionLevelErrorResult.partitionResult(new TopicPartition("invalid-topic", 0)));
} |
public static <T> PaginatedResponse<T> create(String listKey, PaginatedList<T> paginatedList) {
return new PaginatedResponse<>(listKey, paginatedList, null, null);
} | @Test
public void serialize() throws Exception {
final ImmutableList<String> values = ImmutableList.of("hello", "world");
final PaginatedList<String> paginatedList = new PaginatedList<>(values, values.size(), 1, 10);
final PaginatedResponse<String> response = PaginatedResponse.create("foo", paginatedList);
final DocumentContext ctx = JsonPath.parse(objectMapper.writeValueAsString(response));
final JsonPathAssert jsonPathAssert = JsonPathAssert.assertThat(ctx);
jsonPathAssert.jsonPathAsInteger("$.total").isEqualTo(2);
jsonPathAssert.jsonPathAsInteger("$.count").isEqualTo(2);
jsonPathAssert.jsonPathAsInteger("$.page").isEqualTo(1);
jsonPathAssert.jsonPathAsInteger("$.per_page").isEqualTo(10);
jsonPathAssert.jsonPathAsString("$.foo[0]").isEqualTo("hello");
jsonPathAssert.jsonPathAsString("$.foo[1]").isEqualTo("world");
assertThatThrownBy(() -> jsonPathAssert.jsonPathAsString("$.context")).isInstanceOf(PathNotFoundException.class);
} |
@Override
public int launch(AgentLaunchDescriptor descriptor) {
LogConfigurator logConfigurator = new LogConfigurator("agent-launcher-logback.xml");
return logConfigurator.runWithLogger(() -> doLaunch(descriptor));
} | @Test
@DisabledOnOs(OS.WINDOWS)
public void shouldDownload_AgentJar_IfTheCurrentJarIsStale() throws Exception {
TEST_AGENT_LAUNCHER.copyTo(AGENT_LAUNCHER_JAR);
File staleJar = randomFile(AGENT_BINARY_JAR);
long original = staleJar.length();
new AgentLauncherImpl().launch(launchDescriptor());
assertThat(staleJar.length(), not(original));
} |
public static void main(String[] args) throws IOException
{
File file = new File("src/main/resources/org/apache/pdfbox/examples/rendering/",
"custom-render-demo.pdf");
try (PDDocument doc = Loader.loadPDF(file))
{
PDFRenderer renderer = new MyPDFRenderer(doc);
BufferedImage image = renderer.renderImage(0);
ImageIO.write(image, "PNG", new File("target","custom-render.png"));
}
} | @Test
void testCustomPageDrawer() throws IOException
{
CustomPageDrawer.main(new String[]{});
BufferedImage bim = ImageIO.read(new File("target","custom-render.png"));
Assertions.assertNotNull(bim);
} |
@Override
public RemoteEnvironment createEnvironment(Environment environment, String workerId)
throws Exception {
Preconditions.checkState(
environment
.getUrn()
.equals(BeamUrns.getUrn(RunnerApi.StandardEnvironments.Environments.PROCESS)),
"The passed environment does not contain a ProcessPayload.");
final RunnerApi.ProcessPayload processPayload =
RunnerApi.ProcessPayload.parseFrom(environment.getPayload());
String executable = processPayload.getCommand();
String provisionEndpoint = provisioningServiceServer.getApiServiceDescriptor().getUrl();
String semiPersistDir = pipelineOptions.as(RemoteEnvironmentOptions.class).getSemiPersistDir();
ImmutableList.Builder<String> argsBuilder =
ImmutableList.<String>builder()
.add(String.format("--id=%s", workerId))
.add(String.format("--provision_endpoint=%s", provisionEndpoint));
if (semiPersistDir != null) {
argsBuilder.add(String.format("--semi_persist_dir=%s", semiPersistDir));
}
LOG.debug("Creating Process for worker ID {}", workerId);
// Wrap the blocking call to clientSource.get in case an exception is thrown.
InstructionRequestHandler instructionHandler = null;
try {
ProcessManager.RunningProcess process =
processManager.startProcess(
workerId, executable, argsBuilder.build(), processPayload.getEnvMap());
// Wait on a client from the gRPC server.
while (instructionHandler == null) {
try {
// If the process is not alive anymore, we abort.
process.isAliveOrThrow();
instructionHandler = clientSource.take(workerId, Duration.ofSeconds(5));
} catch (TimeoutException timeoutEx) {
LOG.info(
"Still waiting for startup of environment '{}' for worker id {}",
processPayload.getCommand(),
workerId);
} catch (InterruptedException interruptEx) {
Thread.currentThread().interrupt();
throw new RuntimeException(interruptEx);
}
}
} catch (Exception e) {
try {
processManager.stopProcess(workerId);
} catch (Exception processKillException) {
e.addSuppressed(processKillException);
}
throw e;
}
return ProcessEnvironment.create(processManager, environment, workerId, instructionHandler);
} | @Test
public void createsCorrectEnvironment() throws Exception {
RemoteEnvironment handle = factory.createEnvironment(ENVIRONMENT, "workerId");
assertThat(handle.getInstructionRequestHandler(), is(client));
assertThat(handle.getEnvironment(), equalTo(ENVIRONMENT));
Mockito.verify(processManager).startProcess(eq("workerId"), anyString(), anyList(), anyMap());
} |
@Override
protected ClickHouseLogCollectClient getLogConsumeClient() {
return LoggingClickHousePluginDataHandler.getClickHouseLogCollectClient();
} | @Test
public void testGetLogConsumeClient() {
LogConsumeClient logConsumeClient = new ClickHouseLogCollector().getLogConsumeClient();
Assertions.assertEquals(ClickHouseLogCollectClient.class, logConsumeClient.getClass());
} |
public StructType bind(StructType inputSchema) {
if (binding != null && binding.get().inputSchema == inputSchema) {
return binding.get().xschema;
}
Formula formula = expand(inputSchema);
Binding binding = new Binding();
binding.inputSchema = inputSchema;
List<Feature> features = Arrays.stream(formula.predictors)
.filter(predictor -> !(predictor instanceof Delete) && !(predictor instanceof Intercept))
.flatMap(predictor -> predictor.bind(inputSchema).stream())
.collect(Collectors.toList());
binding.x = features.toArray(new Feature[0]);
binding.xschema = DataTypes.struct(
features.stream()
.map(Feature::field)
.toArray(StructField[]::new)
);
if (response != null) {
try {
features.addAll(0, response.bind(inputSchema));
binding.yx = features.toArray(new Feature[0]);
binding.yxschema = DataTypes.struct(
features.stream()
.map(Feature::field)
.toArray(StructField[]::new)
);
} catch (IllegalArgumentException ex) {
logger.debug("The response variable {} doesn't exist in the schema {}", response, inputSchema);
}
}
this.binding = new ThreadLocal<>() {
protected synchronized Binding initialValue() {
return binding;
}
};
return binding.xschema;
} | @Test
public void testBind() {
System.out.println("bind");
Formula formula = Formula.of("revenue", dot(), cross("water", "sowing_density") , mul("humidity", "wind"), delete("wind"));
StructType inputSchema = DataTypes.struct(
new StructField("revenue", DataTypes.DoubleType, Measure.Currency),
new StructField("water", DataTypes.ByteType, new NominalScale("dry", "wet")),
new StructField("sowing_density", DataTypes.ByteType, new NominalScale("low", "high")),
new StructField("humidity", DataTypes.FloatType, Measure.Percent),
new StructField("wind", DataTypes.FloatType)
);
assertEquals("revenue ~ . + (water x sowing_density) + (humidity * wind) - wind", formula.toString());
System.out.println(formula.expand(inputSchema));
StructType outputSchema = formula.bind(inputSchema);
StructType schema = DataTypes.struct(
new StructField("humidity", DataTypes.FloatType, Measure.Percent),
new StructField("water", DataTypes.ByteType, new NominalScale("dry", "wet")),
new StructField("sowing_density", DataTypes.ByteType, new NominalScale("low", "high")),
new StructField("water:sowing_density", DataTypes.IntegerType, new NominalScale("dry:low", "dry:high", "wet:low", "wet:high")),
new StructField("humidity * wind", DataTypes.FloatType)
);
assertEquals(schema, outputSchema);
} |
@Override
public void updateDataSourceConfig(DataSourceConfigSaveReqVO updateReqVO) {
// Validate existence
validateDataSourceConfigExists(updateReqVO.getId());
DataSourceConfigDO updateObj = BeanUtils.toBean(updateReqVO, DataSourceConfigDO.class);
validateConnectionOK(updateObj);
// Update
dataSourceConfigMapper.updateById(updateObj);
} | @Test
public void testUpdateDataSourceConfig_success() {
try (MockedStatic<JdbcUtils> databaseUtilsMock = mockStatic(JdbcUtils.class)) {
// mock data
DataSourceConfigDO dbDataSourceConfig = randomPojo(DataSourceConfigDO.class);
dataSourceConfigMapper.insert(dbDataSourceConfig);// @Sql: insert an existing record first
// Prepare parameters
DataSourceConfigSaveReqVO reqVO = randomPojo(DataSourceConfigSaveReqVO.class, o -> {
o.setId(dbDataSourceConfig.getId()); // set the ID to update
});
// mock method
databaseUtilsMock.when(() -> JdbcUtils.isConnectionOK(eq(reqVO.getUrl()),
eq(reqVO.getUsername()), eq(reqVO.getPassword()))).thenReturn(true);
// Invoke
dataSourceConfigService.updateDataSourceConfig(reqVO);
// Verify the update is correct
DataSourceConfigDO dataSourceConfig = dataSourceConfigMapper.selectById(reqVO.getId()); // fetch the latest record
assertPojoEquals(reqVO, dataSourceConfig);
}
} |
@Override
public Map<Errors, Integer> errorCounts() {
if (data.errorCode() != Errors.NONE.code())
// Minor optimization since the top-level error applies to all partitions
return Collections.singletonMap(error(), data.partitionErrors().size() + 1);
Map<Errors, Integer> errors = errorCounts(data.partitionErrors().stream().map(p -> Errors.forCode(p.errorCode())));
updateErrorCounts(errors, Errors.forCode(data.errorCode())); // top level error
return errors;
} | @Test
public void testErrorCountsFromGetErrorResponse() {
List<StopReplicaTopicState> topicStates = new ArrayList<>();
topicStates.add(new StopReplicaTopicState()
.setTopicName("foo")
.setPartitionStates(Arrays.asList(
new StopReplicaPartitionState().setPartitionIndex(0),
new StopReplicaPartitionState().setPartitionIndex(1))));
for (short version : STOP_REPLICA.allVersions()) {
StopReplicaRequest request = new StopReplicaRequest.Builder(version,
15, 20, 0, false, topicStates).build(version);
StopReplicaResponse response = request
.getErrorResponse(0, Errors.CLUSTER_AUTHORIZATION_FAILED.exception());
assertEquals(Collections.singletonMap(Errors.CLUSTER_AUTHORIZATION_FAILED, 3),
response.errorCounts());
}
} |
@VisibleForTesting
List<Image> getCachedBaseImages()
throws IOException, CacheCorruptedException, BadContainerConfigurationFormatException,
LayerCountMismatchException, UnlistedPlatformInManifestListException,
PlatformNotFoundInBaseImageException {
ImageReference baseImage = buildContext.getBaseImageConfiguration().getImage();
Optional<ImageMetadataTemplate> metadata =
buildContext.getBaseImageLayersCache().retrieveMetadata(baseImage);
if (!metadata.isPresent()) {
return Collections.emptyList();
}
ManifestTemplate manifestList = metadata.get().getManifestList();
List<ManifestAndConfigTemplate> manifestsAndConfigs = metadata.get().getManifestsAndConfigs();
if (manifestList == null) {
Verify.verify(manifestsAndConfigs.size() == 1);
ManifestAndConfigTemplate manifestAndConfig = manifestsAndConfigs.get(0);
Optional<Image> cachedImage = getBaseImageIfAllLayersCached(manifestAndConfig, true);
if (!cachedImage.isPresent()) {
return Collections.emptyList();
}
return Collections.singletonList(cachedImage.get());
}
// Manifest list cached. Identify matching platforms and check if all of them are cached.
ImmutableList.Builder<Image> images = ImmutableList.builder();
for (Platform platform : buildContext.getContainerConfiguration().getPlatforms()) {
String manifestDigest =
lookUpPlatformSpecificImageManifest((ManifestListTemplate) manifestList, platform);
Optional<ManifestAndConfigTemplate> manifestAndConfigFound =
manifestsAndConfigs.stream()
.filter(entry -> manifestDigest.equals(entry.getManifestDigest()))
.findFirst();
if (!manifestAndConfigFound.isPresent()) {
return Collections.emptyList();
}
Optional<Image> cachedImage =
getBaseImageIfAllLayersCached(manifestAndConfigFound.get(), false);
if (!cachedImage.isPresent()) {
return Collections.emptyList();
}
images.add(cachedImage.get());
}
return images.build();
} | @Test
public void testGetCachedBaseImages_emptyCache()
throws InvalidImageReferenceException, IOException, CacheCorruptedException,
UnlistedPlatformInManifestListException, PlatformNotFoundInBaseImageException,
BadContainerConfigurationFormatException, LayerCountMismatchException {
ImageReference imageReference = ImageReference.parse("cat");
Mockito.when(buildContext.getBaseImageConfiguration())
.thenReturn(ImageConfiguration.builder(imageReference).build());
Mockito.when(cache.retrieveMetadata(imageReference)).thenReturn(Optional.empty());
Assert.assertEquals(Arrays.asList(), pullBaseImageStep.getCachedBaseImages());
} |
@Override
public void onStateElection(Job job, JobState newState) {
if (isNotFailed(newState) || isJobNotFoundException(newState) || isProblematicExceptionAndMustNotRetry(newState) || maxAmountOfRetriesReached(job))
return;
job.scheduleAt(now().plusSeconds(getSecondsToAdd(job)), String.format("Retry %d of %d", getFailureCount(job), getMaxNumberOfRetries(job)));
} | @Test
void skipsIfStateIsNotFailed() {
final Job job = anEnqueuedJob().build();
applyDefaultJobFilter(job);
int beforeVersion = job.getJobStates().size();
retryFilter.onStateElection(job, job.getJobState());
int afterVersion = job.getJobStates().size();
assertThat(afterVersion).isEqualTo(beforeVersion);
assertThat(job.getState()).isEqualTo(ENQUEUED);
} |
@Override
public void apply(IntentOperationContext<FlowObjectiveIntent> intentOperationContext) {
Objects.requireNonNull(intentOperationContext);
Optional<IntentData> toUninstall = intentOperationContext.toUninstall();
Optional<IntentData> toInstall = intentOperationContext.toInstall();
List<FlowObjectiveIntent> uninstallIntents = intentOperationContext.intentsToUninstall();
List<FlowObjectiveIntent> installIntents = intentOperationContext.intentsToInstall();
if (!toInstall.isPresent() && !toUninstall.isPresent()) {
intentInstallCoordinator.intentInstallSuccess(intentOperationContext);
return;
}
if (toUninstall.isPresent()) {
IntentData intentData = toUninstall.get();
trackerService.removeTrackedResources(intentData.key(), intentData.intent().resources());
uninstallIntents.forEach(installable ->
trackerService.removeTrackedResources(intentData.intent().key(),
installable.resources()));
}
if (toInstall.isPresent()) {
IntentData intentData = toInstall.get();
trackerService.addTrackedResources(intentData.key(), intentData.intent().resources());
installIntents.forEach(installable ->
trackerService.addTrackedResources(intentData.key(),
installable.resources()));
}
FlowObjectiveIntentInstallationContext intentInstallationContext =
new FlowObjectiveIntentInstallationContext(intentOperationContext);
uninstallIntents.stream()
.map(intent -> buildObjectiveContexts(intent, REMOVE))
.flatMap(Collection::stream)
.forEach(context -> {
context.intentInstallationContext(intentInstallationContext);
intentInstallationContext.addContext(context);
intentInstallationContext.addPendingContext(context);
});
installIntents.stream()
.map(intent -> buildObjectiveContexts(intent, ADD))
.flatMap(Collection::stream)
.forEach(context -> {
context.intentInstallationContext(intentInstallationContext);
intentInstallationContext.addContext(context);
intentInstallationContext.addNextPendingContext(context);
});
intentInstallationContext.apply();
} | @Test
public void testUninstallAndInstallIntent() {
List<Intent> intentsToUninstall = createFlowObjectiveIntents();
List<Intent> intentsToInstall = createAnotherFlowObjectiveIntents();
IntentData toInstall = new IntentData(createP2PIntent(),
IntentState.INSTALLING,
new WallClockTimestamp());
toInstall = IntentData.compiled(toInstall, intentsToInstall);
IntentData toUninstall = new IntentData(createP2PIntent(),
IntentState.INSTALLED,
new WallClockTimestamp());
toUninstall = IntentData.compiled(toUninstall, intentsToUninstall);
IntentOperationContext<FlowObjectiveIntent> operationContext;
IntentInstallationContext context = new IntentInstallationContext(toUninstall, toInstall);
operationContext = new IntentOperationContext(intentsToUninstall, intentsToInstall, context);
installer.apply(operationContext);
IntentOperationContext successContext = intentInstallCoordinator.successContext;
assertEquals(successContext, operationContext);
} |
public static GrpcDataWriter create(FileSystemContext context, WorkerNetAddress address,
long id, long length, RequestType type, OutStreamOptions options)
throws IOException {
long chunkSize = context.getClusterConf()
.getBytes(PropertyKey.USER_STREAMING_WRITER_CHUNK_SIZE_BYTES);
CloseableResource<BlockWorkerClient> grpcClient = context.acquireBlockWorkerClient(address);
try {
return new GrpcDataWriter(context, address, id, length, chunkSize, type, options, grpcClient);
} catch (Exception e) {
grpcClient.close();
throw e;
}
} | @Test(timeout = 1000 * 60)
public void writeFileUnknownLength() throws Exception {
long checksumActual;
Future<Long> checksumExpected;
long length = CHUNK_SIZE * 1024;
try (DataWriter writer = create(Long.MAX_VALUE)) {
checksumExpected = writeFile(writer, length, 10, length / 3);
checksumExpected.get();
checksumActual = verifyWriteRequests(mClient, 10, length / 3);
}
assertEquals(checksumExpected.get().longValue(), checksumActual);
} |
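
The try/catch in create above is the acquire-then-release-on-failure idiom: the block worker client is acquired first, and if the writer's constructor throws, the client is closed before the exception propagates so the resource does not leak. A generic sketch of the idiom (assumed names, not the Alluxio API):

```java
import java.util.function.Function;

final class Resources {
    // Build an object from an already-acquired resource; on failure, close the
    // resource and attach any close failure as a suppressed exception.
    static <R extends AutoCloseable, T> T buildOrClose(R resource, Function<R, T> ctor) throws Exception {
        try {
            return ctor.apply(resource);
        } catch (Exception e) {
            try {
                resource.close();
            } catch (Exception closeFailure) {
                e.addSuppressed(closeFailure);
            }
            throw e;
        }
    }
}
```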
static SerializableFunction<Double, Double> getResultUpdaterFunction(final RegressionModel.NormalizationMethod normalizationMethod) {
if (UNSUPPORTED_NORMALIZATION_METHODS.contains(normalizationMethod)) {
return null;
} else {
return getResultUpdaterSupportedFunction(normalizationMethod);
}
} | @Test
void getResultUpdaterUnsupportedFunction() {
UNSUPPORTED_NORMALIZATION_METHODS.forEach(normalizationMethod ->
assertThat(KiePMMLRegressionTableFactory.getResultUpdaterFunction(normalizationMethod)).isNull());
} |
@ScalarOperator(LESS_THAN)
@SqlType(StandardTypes.BOOLEAN)
public static boolean lessThan(@SqlType("unknown") boolean left, @SqlType("unknown") boolean right)
{
throw new AssertionError("value of unknown type should all be NULL");
} | @Test
public void testLessThan()
{
assertFunction("NULL < NULL", BOOLEAN, null);
} |
public static Map<String, URI> uploadOutputFiles(RunContext runContext, Path outputDir) throws IOException {
// upload output files
Map<String, URI> uploaded = new HashMap<>();
try (Stream<Path> walk = Files.walk(outputDir)) {
walk
.filter(Files::isRegularFile)
.filter(path -> !path.startsWith("."))
.forEach(throwConsumer(path -> {
String filename = outputDir.relativize(path).toString();
uploaded.put(
filename,
runContext.storage().putFile(path.toFile(), filename)
);
}));
}
return uploaded;
} | @Test
void uploadOutputFiles() throws IOException {
var runContext = runContextFactory.of();
Path path = Path.of("/tmp/unittest/file.txt");
if (!path.toFile().exists()) {
Files.createFile(path);
}
var outputFiles = ScriptService.uploadOutputFiles(runContext, Path.of("/tmp/unittest"));
assertThat(outputFiles, not(anEmptyMap()));
assertThat(outputFiles.get("file.txt"), is(URI.create("kestra:///file.txt")));
path.toFile().delete();
} |
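
A subtlety in the filter above: java.nio.file.Path.startsWith compares whole path components against another path, so path.startsWith(".") is not a check on the file name's first character. If skipping dot-files is the intent (an assumption, not a claim about the project's behavior), a name-based filter would look like this sketch:

```java
import java.nio.file.Path;

final class HiddenFiles {
    // Name-based hidden-file check: inspects the last path element's string
    // form instead of comparing path components via Path.startsWith(".").
    static boolean isHidden(Path path) {
        Path name = path.getFileName();
        return name != null && name.toString().startsWith(".");
    }
}
```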
@Description("Binomial cdf given numberOfTrials, successProbability, and a value")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double binomialCdf(
@SqlType(StandardTypes.INTEGER) long numberOfTrials,
@SqlType(StandardTypes.DOUBLE) double successProbability,
@SqlType(StandardTypes.INTEGER) long value)
{
checkCondition(successProbability >= 0 && successProbability <= 1, INVALID_FUNCTION_ARGUMENT, "successProbability must be in the interval [0, 1]");
checkCondition(numberOfTrials > 0, INVALID_FUNCTION_ARGUMENT, "numberOfTrials must be greater than 0");
BinomialDistribution distribution = new BinomialDistribution(null, (int) numberOfTrials, successProbability);
return distribution.cumulativeProbability((int) value);
} | @Test
public void testBinomialCdf()
{
assertFunction("binomial_cdf(5, 0.5, 5)", DOUBLE, 1.0);
assertFunction("binomial_cdf(5, 0.5, 0)", DOUBLE, 0.03125);
assertFunction("binomial_cdf(5, 0.5, 3)", DOUBLE, 0.8125);
assertFunction("binomial_cdf(20, 1.0, 0)", DOUBLE, 0.0);
assertInvalidFunction("binomial_cdf(5, -0.5, 3)", "successProbability must be in the interval [0, 1]");
assertInvalidFunction("binomial_cdf(5, 1.5, 3)", "successProbability must be in the interval [0, 1]");
assertInvalidFunction("binomial_cdf(-5, 0.5, 3)", "numberOfTrials must be greater than 0");
} |
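
The expected values in the test follow from the binomial CDF, P(X <= k) = sum_{i=0..k} C(n,i) p^i (1-p)^(n-i): for n = 5 and p = 0.5, P(X <= 0) = 1/32 = 0.03125 and P(X <= 3) = (1+5+10+10)/32 = 0.8125. A self-contained check of that arithmetic, independent of the engine's function registry:

```java
// Recompute the test's expected binomial CDF values from first principles.
public class BinomialCheck {
    // C(n, k) computed incrementally to avoid factorial overflow.
    static double choose(int n, int k) {
        double c = 1;
        for (int i = 1; i <= k; i++) {
            c = c * (n - k + i) / i;
        }
        return c;
    }

    static double cdf(int n, double p, int k) {
        double sum = 0;
        for (int i = 0; i <= k; i++) {
            sum += choose(n, i) * Math.pow(p, i) * Math.pow(1 - p, n - i);
        }
        return sum;
    }

    public static void main(String[] args) {
        System.out.println(cdf(5, 0.5, 0));  // 0.03125
        System.out.println(cdf(5, 0.5, 3));  // 0.8125
        System.out.println(cdf(5, 0.5, 5));  // 1.0
        System.out.println(cdf(20, 1.0, 0)); // 0.0
    }
}
```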
public static SqlToConnectTypeConverter sqlToConnectConverter() {
return SQL_TO_CONNECT_CONVERTER;
} | @Test
public void shouldConvertRegularStructFromSqlToConnect() {
// Given:
SqlStruct sqlStruct = SqlStruct.builder()
.field("foo", SqlPrimitiveType.of(SqlBaseType.STRING))
.field("bar", SqlPrimitiveType.of(SqlBaseType.BOOLEAN))
.field("baz", SqlPrimitiveType.of(SqlBaseType.INTEGER))
.build();
// When:
Schema connectSchema = sqlToConnectConverter().toConnectSchema(sqlStruct);
// Then:
assertThat(connectSchema.type(), is(Schema.Type.STRUCT));
assertNull(connectSchema.schema().name());
} |
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
final PlanNode source = getSource();
final SchemaKStream<?> schemaKStream = source.buildStream(buildContext);
final QueryContext.Stacker contextStacker = buildContext.buildNodeContext(getId().toString());
return schemaKStream.into(
ksqlTopic,
contextStacker,
getTimestampColumn()
);
} | @Test
public void shouldBuildOutputNodeForInsertIntoAvroFromNonAvro() {
// Given:
givenInsertIntoNode();
KeyFormat.nonWindowed(
FormatInfo.of(
FormatFactory.AVRO.name(),
ImmutableMap.of(ConnectProperties.FULL_SCHEMA_NAME, "key-name")
),
SerdeFeatures.of()
);
ValueFormat.of(
FormatInfo.of(
FormatFactory.AVRO.name(),
ImmutableMap.of(ConnectProperties.FULL_SCHEMA_NAME, "name")
),
SerdeFeatures.of()
);
// When:
outputNode.buildStream(planBuildContext);
// Then:
verify(sourceStream).into(eq(ksqlTopic), any(), any());
} |
public void updateAll() throws InterruptedException {
LOGGER.debug("DAILY UPDATE ALL");
var extensions = repositories.findAllPublicIds();
var extensionPublicIdsMap = extensions.stream()
.filter(e -> StringUtils.isNotEmpty(e.getPublicId()))
.collect(Collectors.toMap(e -> e.getId(), e -> e.getPublicId()));
var namespacePublicIdsMap = extensions.stream()
.map(e -> e.getNamespace())
.filter(n -> StringUtils.isNotEmpty(n.getPublicId()))
.collect(Collectors.toMap(n -> n.getId(), n -> n.getPublicId(), (id1, id2) -> id1));
var upstreamExtensionPublicIds = new HashMap<Long, String>();
var upstreamNamespacePublicIds = new HashMap<Long, String>();
for(var extension : extensions) {
if(BuiltInExtensionUtil.isBuiltIn(extension)) {
LOGGER.trace("SKIP BUILT-IN EXTENSION {}", NamingUtil.toExtensionId(extension));
continue;
}
LOGGER.trace("GET UPSTREAM PUBLIC ID: {} | {}", extension.getId(), NamingUtil.toExtensionId(extension));
var publicIds = service.getUpstreamPublicIds(extension);
if(upstreamExtensionPublicIds.get(extension.getId()) == null) {
LOGGER.trace("ADD EXTENSION PUBLIC ID: {} - {}", extension.getId(), publicIds.extension());
upstreamExtensionPublicIds.put(extension.getId(), publicIds.extension());
}
var namespace = extension.getNamespace();
if(upstreamNamespacePublicIds.get(namespace.getId()) == null) {
LOGGER.trace("ADD NAMESPACE PUBLIC ID: {} - {}", namespace.getId(), publicIds.namespace());
upstreamNamespacePublicIds.put(namespace.getId(), publicIds.namespace());
}
}
var changedExtensionPublicIds = getChangedPublicIds(upstreamExtensionPublicIds, extensionPublicIdsMap);
LOGGER.debug("UPSTREAM EXTENSIONS: {}", upstreamExtensionPublicIds.size());
LOGGER.debug("CHANGED EXTENSIONS: {}", changedExtensionPublicIds.size());
if(!changedExtensionPublicIds.isEmpty()) {
LOGGER.debug("CHANGED EXTENSION PUBLIC IDS");
for(var entry : changedExtensionPublicIds.entrySet()) {
LOGGER.debug("{}: {}", entry.getKey(), entry.getValue());
}
repositories.updateExtensionPublicIds(changedExtensionPublicIds);
}
var changedNamespacePublicIds = getChangedPublicIds(upstreamNamespacePublicIds, namespacePublicIdsMap);
LOGGER.debug("UPSTREAM NAMESPACES: {}", upstreamNamespacePublicIds.size());
LOGGER.debug("CHANGED NAMESPACES: {}", changedNamespacePublicIds.size());
if(!changedNamespacePublicIds.isEmpty()) {
LOGGER.debug("CHANGED NAMESPACE PUBLIC IDS");
for(var entry : changedNamespacePublicIds.entrySet()) {
LOGGER.debug("{}: {}", entry.getKey(), entry.getValue());
}
repositories.updateNamespacePublicIds(changedNamespacePublicIds);
}
} | @Test
public void testUpdateAllChange() throws InterruptedException {
var namespaceName1 = "foo";
var namespacePublicId1 = UUID.randomUUID().toString();
var extensionName1 = "bar";
var extensionPublicId1 = UUID.randomUUID().toString();
var namespace1 = new Namespace();
namespace1.setId(1L);
namespace1.setName(namespaceName1);
namespace1.setPublicId(namespacePublicId1);
var extension1 = new Extension();
extension1.setId(2L);
extension1.setName(extensionName1);
extension1.setNamespace(namespace1);
extension1.setPublicId(extensionPublicId1);
var namespaceName2 = "baz";
var namespacePublicId2 = UUID.randomUUID().toString();
var extensionName2 = "foobar";
var extensionPublicId2 = UUID.randomUUID().toString();
var namespace2 = new Namespace();
namespace2.setId(3L);
namespace2.setName(namespaceName2);
namespace2.setPublicId(namespacePublicId2);
var extension2 = new Extension();
extension2.setId(4L);
extension2.setName(extensionName2);
extension2.setPublicId(extensionPublicId2);
extension2.setNamespace(namespace2);
var namespaceName3 = "baz2";
var namespacePublicId3 = UUID.randomUUID().toString();
var extensionName3 = "foobar2";
var extensionPublicId3 = UUID.randomUUID().toString();
var namespace3 = new Namespace();
namespace3.setId(5L);
namespace3.setName(namespaceName3);
namespace3.setPublicId(namespacePublicId3);
var extension3 = new Extension();
extension3.setId(6L);
extension3.setName(extensionName3);
extension3.setPublicId(extensionPublicId3);
extension3.setNamespace(namespace3);
Mockito.when(idService.getUpstreamPublicIds(extension1)).thenReturn(new PublicIds(null, null));
Mockito.when(idService.getUpstreamPublicIds(extension2)).thenReturn(new PublicIds(namespacePublicId3, extensionPublicId3));
Mockito.when(idService.getUpstreamPublicIds(extension3)).thenReturn(new PublicIds(null, null));
Mockito.when(repositories.findAllPublicIds()).thenReturn(List.of(extension1, extension2, extension3));
var extensionPublicId = UUID.randomUUID().toString();
var namespacePublicId = UUID.randomUUID().toString();
Mockito.when(idService.getRandomPublicId()).thenReturn(extensionPublicId, namespacePublicId);
updateService.updateAll();
Mockito.verify(repositories, Mockito.times(1)).updateExtensionPublicIds(Map.of(
extension2.getId(), extensionPublicId3,
extension3.getId(), extensionPublicId
));
Mockito.verify(repositories, Mockito.times(1)).updateNamespacePublicIds(Map.of(
namespace2.getId(), namespacePublicId3,
namespace3.getId(), namespacePublicId
));
} |
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
ConfigServer[] configServers = getConfigServers();
int[] zookeeperIds = getConfigServerZookeeperIds();
if (configServers.length != zookeeperIds.length) {
throw new IllegalArgumentException(String.format("Number of provided config server hosts (%d) must be the " +
"same as number of provided config server zookeeper ids (%d)",
configServers.length, zookeeperIds.length));
}
String myhostname = HostName.getLocalhost();
// TODO: Server index should be in interval [1, 254] according to doc,
// however, we cannot change this id for an existing server
for (int i = 0; i < configServers.length; i++) {
if (zookeeperIds[i] < 0) {
throw new IllegalArgumentException(String.format("Zookeeper ids cannot be negative, was %d for %s",
zookeeperIds[i], configServers[i].hostName));
}
if (configServers[i].hostName.equals(myhostname)) {
builder.myid(zookeeperIds[i]);
}
builder.server(getZkServer(configServers[i], zookeeperIds[i]));
}
if (options.zookeeperClientPort().isPresent()) {
builder.clientPort(options.zookeeperClientPort().get());
}
if (options.hostedVespa().orElse(false)) {
builder.vespaTlsConfigFile(Defaults.getDefaults().underVespaHome("var/zookeeper/conf/tls.conf.json"));
}
boolean isHostedVespa = options.hostedVespa().orElse(false);
builder.dynamicReconfiguration(isHostedVespa);
builder.reconfigureEnsemble(!isHostedVespa);
builder.snapshotMethod(options.zooKeeperSnapshotMethod());
builder.juteMaxBuffer(options.zookeeperJuteMaxBuffer());
} | @Test
void zookeeperConfig_only_config_servers_set_hosted() {
TestOptions testOptions = createTestOptions(List.of("cfg1", "localhost", "cfg3"), List.of());
ZookeeperServerConfig config = getConfig(ZookeeperServerConfig.class, testOptions);
assertZookeeperServerProperty(config.server(), ZookeeperServerConfig.Server::hostname, "cfg1", "localhost", "cfg3");
assertZookeeperServerProperty(config.server(), ZookeeperServerConfig.Server::id, 0, 1, 2);
assertEquals(1, config.myid());
assertEquals("gz", config.snapshotMethod());
assertEquals("/opt/vespa/var/zookeeper/conf/tls.conf.json", config.vespaTlsConfigFile());
} |
public static <K> KStreamHolder<K> build(
final KStreamHolder<K> stream,
final StreamFilter<K> step,
final RuntimeBuildContext buildContext) {
return build(stream, step, buildContext, SqlPredicate::new);
} | @Test
public void shouldUseCorrectNameForProcessingLogger() {
// When:
step.build(planBuilder, planInfo);
// Then:
verify(buildContext).getProcessingLogger(queryContext);
} |
public static Field getDeclaredField(Class<?> clazz, String fieldName) throws SecurityException {
if (null == clazz || StrUtil.isBlank(fieldName)) {
return null;
}
try {
return clazz.getDeclaredField(fieldName);
} catch (NoSuchFieldException e) {
// e.printStackTrace();
}
return null;
} | @Test
public void getDeclaredField() {
Field noField = ClassUtil.getDeclaredField(TestSubClass.class, "noField");
assertNull(noField);
// Fields declared on the parent class cannot be obtained
Field field = ClassUtil.getDeclaredField(TestSubClass.class, "field");
assertNull(field);
Field subField = ClassUtil.getDeclaredField(TestSubClass.class, "subField");
assertNotNull(subField);
} |
@Deprecated
public static SegwitAddress fromBech32(@Nullable NetworkParameters params, String bech32)
throws AddressFormatException {
return (SegwitAddress) AddressParser.getLegacy(params).parseAddress(bech32);
} | @Test(expected = AddressFormatException.WrongNetwork.class)
public void fromBech32_wrongNetwork() {
SegwitAddress.fromBech32("bc1zw508d6qejxtdg4y5r3zarvaryvg6kdaj", TESTNET);
} |
public Map<MediaType, List<Parser>> findDuplicateParsers(ParseContext context) {
Map<MediaType, Parser> types = new HashMap<>();
Map<MediaType, List<Parser>> duplicates = new HashMap<>();
for (Parser parser : parsers) {
for (MediaType type : parser.getSupportedTypes(context)) {
MediaType canonicalType = registry.normalize(type);
if (types.containsKey(canonicalType)) {
List<Parser> list = duplicates.get(canonicalType);
if (list == null) {
list = new ArrayList<>();
list.add(types.get(canonicalType));
duplicates.put(canonicalType, list);
}
list.add(parser);
} else {
types.put(canonicalType, parser);
}
}
}
return duplicates;
} | @Test
@SuppressWarnings("serial")
public void testFindDuplicateParsers() {
Parser a = new EmptyParser() {
public Set<MediaType> getSupportedTypes(ParseContext context) {
return Collections.singleton(MediaType.TEXT_PLAIN);
}
};
Parser b = new EmptyParser() {
public Set<MediaType> getSupportedTypes(ParseContext context) {
return Collections.singleton(MediaType.TEXT_PLAIN);
}
};
Parser c = new EmptyParser() {
public Set<MediaType> getSupportedTypes(ParseContext context) {
return Collections.singleton(MediaType.OCTET_STREAM);
}
};
CompositeParser composite =
new CompositeParser(MediaTypeRegistry.getDefaultRegistry(), a, b, c);
Map<MediaType, List<Parser>> duplicates =
composite.findDuplicateParsers(new ParseContext());
assertEquals(1, duplicates.size());
List<Parser> parsers = duplicates.get(MediaType.TEXT_PLAIN);
assertNotNull(parsers);
assertEquals(2, parsers.size());
assertEquals(a, parsers.get(0));
assertEquals(b, parsers.get(1));
} |
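
findDuplicateParsers seeds the duplicates list lazily: the first parser for a type is remembered separately and only joins the list once a second parser claims the same type, which is why the returned list for text/plain starts with a. The same grouping can be sketched compactly with putIfAbsent and computeIfAbsent (a simplified single-key variant, not Tika's code):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

final class DuplicateFinder {
    // Group values that map to the same key; a key appears in the result only
    // when at least two values claim it, and the first-seen value leads the list.
    static <K, V> Map<K, List<V>> findDuplicates(List<V> values, Function<V, K> keyFn) {
        Map<K, V> first = new HashMap<>();
        Map<K, List<V>> duplicates = new HashMap<>();
        for (V value : values) {
            K key = keyFn.apply(value);
            V seen = first.putIfAbsent(key, value);
            if (seen != null) {
                duplicates.computeIfAbsent(key, k -> new ArrayList<>(List.of(seen))).add(value);
            }
        }
        return duplicates;
    }
}
```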
@Override
public void calculate(TradePriceCalculateReqBO param, TradePriceCalculateRespBO result) {
// 1.1 Check whether the point-giving feature is enabled
int givePointPerYuan = Optional.ofNullable(memberConfigApi.getConfig())
.filter(config -> BooleanUtil.isTrue(config.getPointTradeDeductEnable()))
.map(MemberConfigRespDTO::getPointTradeGivePoint)
.orElse(0);
if (givePointPerYuan <= 0) {
return;
}
// 1.2 Validate the payment amount
if (result.getPrice().getPayPrice() <= 0) {
return;
}
// 2.1 Calculate the points to give
int givePoint = MoneyUtils.calculateRatePriceFloor(result.getPrice().getPayPrice(), (double) givePointPerYuan);
// 2.2 Apportion the give points across the selected items
List<TradePriceCalculateRespBO.OrderItem> orderItems = filterList(result.getItems(), TradePriceCalculateRespBO.OrderItem::getSelected);
List<Integer> dividePoints = TradePriceCalculatorHelper.dividePrice(orderItems, givePoint);
// 3.2 Update each SKU's give points
for (int i = 0; i < orderItems.size(); i++) {
TradePriceCalculateRespBO.OrderItem orderItem = orderItems.get(i);
// The item may already grant its own points, so add them together here
orderItem.setGivePoint(orderItem.getGivePoint() + dividePoints.get(i));
}
// 3.3 Recount the order-level give points
TradePriceCalculatorHelper.recountAllGivePoint(result);
} | @Test
public void testCalculate() {
// Prepare parameters
TradePriceCalculateReqBO param = new TradePriceCalculateReqBO()
.setUserId(233L)
.setItems(asList(
new TradePriceCalculateReqBO.Item().setSkuId(10L).setCount(2).setSelected(true), // global points
new TradePriceCalculateReqBO.Item().setSkuId(20L).setCount(3).setSelected(true), // global points + SKU points
new TradePriceCalculateReqBO.Item().setSkuId(30L).setCount(4).setSelected(false), // global points, but not selected
new TradePriceCalculateReqBO.Item().setSkuId(40L).setCount(5).setSelected(false) // global points + SKU points, but not selected
));
TradePriceCalculateRespBO result = new TradePriceCalculateRespBO()
.setType(TradeOrderTypeEnum.NORMAL.getType())
.setPrice(new TradePriceCalculateRespBO.Price())
.setPromotions(new ArrayList<>())
.setItems(asList(
new TradePriceCalculateRespBO.OrderItem().setSkuId(10L).setCount(2).setSelected(true)
.setPrice(100).setSpuId(1L).setGivePoint(0),
new TradePriceCalculateRespBO.OrderItem().setSkuId(20L).setCount(3).setSelected(true)
.setPrice(50).setSpuId(2L).setGivePoint(100),
new TradePriceCalculateRespBO.OrderItem().setSkuId(30L).setCount(4).setSelected(false)
.setPrice(30).setSpuId(3L).setGivePoint(0),
new TradePriceCalculateRespBO.OrderItem().setSkuId(40L).setCount(5).setSelected(false)
.setPrice(60).setSpuId(1L).setGivePoint(100)
));
// Make sure prices are initialized
TradePriceCalculatorHelper.recountPayPrice(result.getItems());
TradePriceCalculatorHelper.recountAllPrice(result);
// mock method (point configuration info)
MemberConfigRespDTO memberConfig = randomPojo(MemberConfigRespDTO.class,
o -> o.setPointTradeDeductEnable(true) // enable the point feature
.setPointTradeGivePoint(100)); // how many points are given per 1 yuan
when(memberConfigApi.getConfig()).thenReturn(memberConfig);
// Invoke
tradePointGiveCalculator.calculate(param, result);
// Assert: price part
assertEquals(result.getGivePoint(), 2 * 100 + 3 * 50 + 100);
// Assert: SKU 1
TradePriceCalculateRespBO.OrderItem orderItem01 = result.getItems().get(0);
assertEquals(orderItem01.getSkuId(), 10L);
assertEquals(orderItem01.getCount(), 2);
assertEquals(orderItem01.getPrice(), 100);
assertEquals(orderItem01.getGivePoint(), 2 * 100); // global points
// Assert: SKU 2
TradePriceCalculateRespBO.OrderItem orderItem02 = result.getItems().get(1);
assertEquals(orderItem02.getSkuId(), 20L);
assertEquals(orderItem02.getCount(), 3);
assertEquals(orderItem02.getPrice(), 50);
assertEquals(orderItem02.getGivePoint(), 3 * 50 + 100); // global points + SKU points
// Assert: SKU 3
TradePriceCalculateRespBO.OrderItem orderItem03 = result.getItems().get(2);
assertEquals(orderItem03.getSkuId(), 30L);
assertEquals(orderItem03.getCount(), 4);
assertEquals(orderItem03.getPrice(), 30);
assertEquals(orderItem03.getGivePoint(), 0); // global points, but not selected
// Assert: SKU 4
TradePriceCalculateRespBO.OrderItem orderItem04 = result.getItems().get(3);
assertEquals(orderItem04.getSkuId(), 40L);
assertEquals(orderItem04.getCount(), 5);
assertEquals(orderItem04.getPrice(), 60);
assertEquals(orderItem04.getGivePoint(), 100); // global points + SKU points, but not selected
} |
@Override
public TransferStatus prepare(final Path file, final Local local, final TransferStatus parent, final ProgressListener progress) throws BackgroundException {
final TransferStatus status = super.prepare(file, local, parent, progress);
if(status.isSegmented()) {
for(TransferStatus segmentStatus : status.getSegments()) {
final Local segmentFile = segmentStatus.getRename().local;
if(segmentFile.exists()) {
if(log.isInfoEnabled()) {
log.info(String.format("Determine if part %s can be skipped", segmentStatus));
}
if(segmentFile.attributes().getSize() == segmentStatus.getLength()) {
segmentStatus.setComplete();
status.setLength(status.getLength() - segmentStatus.getLength());
status.setOffset(status.getOffset() + segmentStatus.getLength());
}
}
}
}
else {
if(download.offset(file)) {
if(local.isFile()) {
if(local.exists()) {
if(local.attributes().getSize() > 0) {
status.setAppend(true);
status.setLength(status.getLength() - local.attributes().getSize());
status.setOffset(status.getOffset() + local.attributes().getSize());
status.withRename((Local) null);
if(status.getLength() == 0L) {
status.setComplete();
}
}
}
}
}
}
return status;
} | @Test
public void testPrepareDirectoryExistsFalse() throws Exception {
final Host host = new Host(new TestProtocol());
final NullSession session = new NullTransferSession(host);
ResumeFilter f = new ResumeFilter(new DisabledDownloadSymlinkResolver(), session,
new DownloadFilterOptions(host), new DefaultDownloadFeature(session.getFeature(Read.class)) {
@Override
public boolean offset(final Path file) {
return true;
}
});
Path p = new Path("a", EnumSet.of(Path.Type.directory));
final NullLocal local = new NullLocal("a") {
@Override
public boolean exists() {
return false;
}
};
final TransferStatus status = f.prepare(p, local, new TransferStatus(), new DisabledProgressListener());
assertFalse(status.isAppend());
} |
@VisibleForTesting
static IssueCache.Issue toProto(IssueCache.Issue.Builder builder, DefaultIssue defaultIssue) {
builder.clear();
builder.setKey(defaultIssue.key());
builder.setRuleType(defaultIssue.type().getDbConstant());
ofNullable(defaultIssue.getCleanCodeAttribute()).ifPresent(value -> builder.setCleanCodeAttribute(value.name()));
ofNullable(defaultIssue.componentUuid()).ifPresent(builder::setComponentUuid);
builder.setComponentKey(defaultIssue.componentKey());
builder.setProjectUuid(defaultIssue.projectUuid());
builder.setProjectKey(defaultIssue.projectKey());
builder.setRuleKey(defaultIssue.ruleKey().toString());
ofNullable(defaultIssue.language()).ifPresent(builder::setLanguage);
ofNullable(defaultIssue.severity()).ifPresent(builder::setSeverity);
builder.setManualSeverity(defaultIssue.manualSeverity());
ofNullable(defaultIssue.message()).ifPresent(builder::setMessage);
ofNullable(defaultIssue.getMessageFormattings()).ifPresent(m -> builder.setMessageFormattings((DbIssues.MessageFormattings) m));
ofNullable(defaultIssue.line()).ifPresent(builder::setLine);
ofNullable(defaultIssue.gap()).ifPresent(builder::setGap);
ofNullable(defaultIssue.effort()).map(Duration::toMinutes).ifPresent(builder::setEffort);
builder.setStatus(defaultIssue.status());
ofNullable(defaultIssue.resolution()).ifPresent(builder::setResolution);
ofNullable(defaultIssue.assignee()).ifPresent(builder::setAssigneeUuid);
ofNullable(defaultIssue.assigneeLogin()).ifPresent(builder::setAssigneeLogin);
ofNullable(defaultIssue.checksum()).ifPresent(builder::setChecksum);
ofNullable(defaultIssue.authorLogin()).ifPresent(builder::setAuthorLogin);
defaultIssue.defaultIssueComments().forEach(c -> builder.addComments(toProtoComment(c)));
ofNullable(defaultIssue.tags()).ifPresent(t -> builder.setTags(String.join(TAGS_SEPARATOR, t)));
ofNullable(defaultIssue.codeVariants()).ifPresent(codeVariant -> builder.setCodeVariants(String.join(TAGS_SEPARATOR, codeVariant)));
ofNullable(defaultIssue.getLocations()).ifPresent(l -> builder.setLocations((DbIssues.Locations) l));
defaultIssue.getRuleDescriptionContextKey().ifPresent(builder::setRuleDescriptionContextKey);
builder.setIsFromExternalRuleEngine(defaultIssue.isFromExternalRuleEngine());
builder.setCreationDate(defaultIssue.creationDate().getTime());
ofNullable(defaultIssue.updateDate()).map(Date::getTime).ifPresent(builder::setUpdateDate);
ofNullable(defaultIssue.closeDate()).map(Date::getTime).ifPresent(builder::setCloseDate);
ofNullable(defaultIssue.currentChange()).ifPresent(c -> builder.setCurrentChanges(toProtoIssueChanges(c)));
builder.setIsNew(defaultIssue.isNew());
builder.setIsOnChangedLine(defaultIssue.isOnChangedLine());
builder.setIsPrioritizedRule(defaultIssue.isPrioritizedRule());
builder.setIsNewCodeReferenceIssue(defaultIssue.isNewCodeReferenceIssue());
builder.setIsCopied(defaultIssue.isCopied());
builder.setBeingClosed(defaultIssue.isBeingClosed());
builder.setOnDisabledRule(defaultIssue.isOnDisabledRule());
builder.setIsChanged(defaultIssue.isChanged());
builder.setSendNotifications(defaultIssue.mustSendNotifications());
ofNullable(defaultIssue.selectedAt()).ifPresent(builder::setSelectedAt);
builder.setQuickFixAvailable(defaultIssue.isQuickFixAvailable());
builder.setIsNoLongerNewCodeReferenceIssue(defaultIssue.isNoLongerNewCodeReferenceIssue());
defaultIssue.getAnticipatedTransitionUuid().ifPresent(builder::setAnticipatedTransitionUuid);
for (Map.Entry<SoftwareQuality, Severity> impact : defaultIssue.impacts().entrySet()) {
builder.addImpacts(IssueCache.Impact.newBuilder()
.setSoftwareQuality(impact.getKey().name())
.setSeverity(impact.getValue().name())
.build());
}
for (FieldDiffs fieldDiffs : defaultIssue.changes()) {
builder.addChanges(toProtoIssueChanges(fieldDiffs));
}
return builder.build();
} | @Test
public void toProto_whenRuleDescriptionContextKeyIsSet_shouldCopyToIssueProto() {
DefaultIssue defaultIssue = createDefaultIssueWithMandatoryFields();
defaultIssue.addImpact(SoftwareQuality.MAINTAINABILITY, Severity.HIGH);
defaultIssue.addImpact(SoftwareQuality.RELIABILITY, Severity.LOW);
IssueCache.Issue issue = ProtobufIssueDiskCache.toProto(IssueCache.Issue.newBuilder(), defaultIssue);
assertThat(issue.getImpactsList()).containsExactly(
toImpact(SoftwareQuality.MAINTAINABILITY, Severity.HIGH),
toImpact(SoftwareQuality.RELIABILITY, Severity.LOW)
);
} |
@Override
public int count(String term) {
MutableInt count = freq.get(term);
return count == null ? 0 : count.value;
} | @Test
public void testGetTermFrequency() {
System.out.println("getTermFrequency");
assertEquals(27, corpus.count("romantic"));
} |
public static int computeMaxLEPower2(int num) {
num |= (num >>> 1);
num |= (num >>> 2);
num |= (num >>> 4);
num |= (num >>> 8);
num |= (num >>> 16);
return num - (num >>> 1);
} | @Test
public void testComputeMaxLEPower2() {
Assert.assertEquals(0, Utils.computeMaxLEPower2(0));
for (int i = 1; i < 10000; i++) {
int out = Utils.computeMaxLEPower2(i);
// The number i belongs to the range [out, out*2).
Assert.assertTrue(out <= i);
Assert.assertTrue(out * 2 > i);
}
} |
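
computeMaxLEPower2 works by smearing the highest set bit into every lower position with the or-shift cascade, turning num into a mask of the form 0b0...011...1; subtracting the mask shifted right by one leaves only the top bit, i.e. the largest power of two that is <= num. A short trace for 37:

```java
// Trace for computeMaxLEPower2(37): 37 = 0b100101.
// Or-shifts fill all bits below the highest one:
//   0b100101 -> 0b110111 -> 0b111111 (= 63); further shifts change nothing.
// 63 - (63 >>> 1) = 63 - 31 = 32 = 0b100000, the largest power of two <= 37.
public class MaxPow2Trace {
    public static void main(String[] args) {
        int num = 37;
        num |= (num >>> 1);
        num |= (num >>> 2);
        num |= (num >>> 4);
        num |= (num >>> 8);
        num |= (num >>> 16);
        System.out.println(num - (num >>> 1)); // 32
    }
}
```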
@VisibleForTesting
public static int getNumericPrecision(DataType dataType) {
if (dataType.is(DataTypeFamily.EXACT_NUMERIC)) {
if (dataType.is(DataTypeRoot.TINYINT)) {
return 3;
} else if (dataType.is(DataTypeRoot.SMALLINT)) {
return 5;
} else if (dataType.is(DataTypeRoot.INTEGER)) {
return 10;
} else if (dataType.is(DataTypeRoot.BIGINT)) {
return 19;
} else if (dataType.is(DataTypeRoot.DECIMAL)) {
return ((DecimalType) dataType).getPrecision();
}
}
throw new IllegalArgumentException(
"Failed to get precision of non-exact decimal type " + dataType);
} | @Test
public void testGetNumericPrecision() {
Assertions.assertThat(SchemaUtils.getNumericPrecision(DataTypes.TINYINT())).isEqualTo(3);
Assertions.assertThat(SchemaUtils.getNumericPrecision(DataTypes.SMALLINT())).isEqualTo(5);
Assertions.assertThat(SchemaUtils.getNumericPrecision(DataTypes.INT())).isEqualTo(10);
Assertions.assertThat(SchemaUtils.getNumericPrecision(DataTypes.BIGINT())).isEqualTo(19);
Assertions.assertThat(SchemaUtils.getNumericPrecision(DataTypes.DECIMAL(10, 2)))
.isEqualTo(10);
Assertions.assertThat(SchemaUtils.getNumericPrecision(DataTypes.DECIMAL(17, 0)))
.isEqualTo(17);
Assertions.assertThatThrownBy(() -> SchemaUtils.getNumericPrecision(DataTypes.STRING()))
.isExactlyInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Failed to get precision of non-exact decimal type");
} |
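
The constants returned by getNumericPrecision are the decimal digit counts of each signed integer type's maximum value: 127 (3 digits), 32767 (5), 2147483647 (10), and 9223372036854775807 (19). A quick verification:

```java
// Digit counts of the signed integer maxima match the precision constants.
public class PrecisionCheck {
    public static void main(String[] args) {
        System.out.println(String.valueOf(Byte.MAX_VALUE).length());    // 3
        System.out.println(String.valueOf(Short.MAX_VALUE).length());   // 5
        System.out.println(String.valueOf(Integer.MAX_VALUE).length()); // 10
        System.out.println(String.valueOf(Long.MAX_VALUE).length());    // 19
    }
}
```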
@Override
@CacheEvict(cacheNames = RedisKeyConstants.NOTIFY_TEMPLATE,
allEntries = true) // allEntries evicts the whole cache, because the code field may change, making targeted eviction impractical
public void updateNotifyTemplate(NotifyTemplateSaveReqVO updateReqVO) {
// Validate existence
validateNotifyTemplateExists(updateReqVO.getId());
// Check that the notification template code is not duplicated
validateNotifyTemplateCodeDuplicate(updateReqVO.getId(), updateReqVO.getCode());
// Update
NotifyTemplateDO updateObj = BeanUtils.toBean(updateReqVO, NotifyTemplateDO.class);
updateObj.setParams(parseTemplateContentParams(updateObj.getContent()));
notifyTemplateMapper.updateById(updateObj);
} | @Test
public void testUpdateNotifyTemplate_success() {
// mock data
NotifyTemplateDO dbNotifyTemplate = randomPojo(NotifyTemplateDO.class);
notifyTemplateMapper.insert(dbNotifyTemplate);// @Sql: insert an existing record first
// Prepare parameters
NotifyTemplateSaveReqVO reqVO = randomPojo(NotifyTemplateSaveReqVO.class, o -> {
o.setId(dbNotifyTemplate.getId()); // set the ID to update
o.setStatus(randomCommonStatus());
});
// Invoke
notifyTemplateService.updateNotifyTemplate(reqVO);
// Verify the update is correct
NotifyTemplateDO notifyTemplate = notifyTemplateMapper.selectById(reqVO.getId()); // fetch the latest record
assertPojoEquals(reqVO, notifyTemplate);
} |
public static boolean isPrimitives(Class<?> cls) {
while (cls.isArray()) {
cls = cls.getComponentType();
}
return isPrimitive(cls);
} | @Test
void testIsPrimitives() {
assertTrue(ReflectUtils.isPrimitives(boolean[].class));
assertTrue(ReflectUtils.isPrimitives(byte.class));
assertFalse(ReflectUtils.isPrimitive(Map[].class));
} |
@Override
List<DiscoveryNode> resolveNodes() {
if (serviceName != null && !serviceName.isEmpty()) {
logger.fine("Using service name to discover nodes.");
return getSimpleDiscoveryNodes(client.endpointsByName(serviceName));
} else if (serviceLabel != null && !serviceLabel.isEmpty()) {
logger.fine("Using service label to discover nodes.");
return getSimpleDiscoveryNodes(client.endpointsByServiceLabel(serviceLabel, serviceLabelValue));
} else if (podLabel != null && !podLabel.isEmpty()) {
logger.fine("Using pod label to discover nodes.");
return getSimpleDiscoveryNodes(client.endpointsByPodLabel(podLabel, podLabelValue));
}
return getSimpleDiscoveryNodes(client.endpoints());
} | @Test
public void resolveWithServiceLabelWhenNodeWithServiceLabel() {
// given
List<Endpoint> endpoints = createEndpoints(2);
given(client.endpointsByServiceLabel(SERVICE_LABEL, SERVICE_LABEL_VALUE)).willReturn(endpoints);
KubernetesApiEndpointResolver sut = new KubernetesApiEndpointResolver(LOGGER, null, 0, SERVICE_LABEL, SERVICE_LABEL_VALUE,
null, null, null, client);
// when
List<DiscoveryNode> nodes = sut.resolveNodes();
// then
assertEquals(1, nodes.size());
assertEquals(2, nodes.get(0).getPrivateAddress().getPort());
} |
@SuppressWarnings("unchecked")
@Override
protected List<OUT> executeOnCollections(
List<IN1> inputData1,
List<IN2> inputData2,
RuntimeContext runtimeContext,
ExecutionConfig executionConfig)
throws Exception {
FlatJoinFunction<IN1, IN2, OUT> function = userFunction.getUserCodeObject();
FunctionUtils.setFunctionRuntimeContext(function, runtimeContext);
FunctionUtils.openFunction(function, DefaultOpenContext.INSTANCE);
TypeInformation<IN1> leftInformation = getOperatorInfo().getFirstInputType();
TypeInformation<IN2> rightInformation = getOperatorInfo().getSecondInputType();
TypeInformation<OUT> outInformation = getOperatorInfo().getOutputType();
TypeSerializer<IN1> leftSerializer =
leftInformation.createSerializer(executionConfig.getSerializerConfig());
TypeSerializer<IN2> rightSerializer =
rightInformation.createSerializer(executionConfig.getSerializerConfig());
TypeComparator<IN1> leftComparator;
TypeComparator<IN2> rightComparator;
if (leftInformation instanceof AtomicType) {
leftComparator =
((AtomicType<IN1>) leftInformation).createComparator(true, executionConfig);
} else if (leftInformation instanceof CompositeType) {
int[] keyPositions = getKeyColumns(0);
boolean[] orders = new boolean[keyPositions.length];
Arrays.fill(orders, true);
leftComparator =
((CompositeType<IN1>) leftInformation)
.createComparator(keyPositions, orders, 0, executionConfig);
} else {
throw new RuntimeException(
"Type information for left input of type "
+ leftInformation.getClass().getCanonicalName()
+ " is not supported. Could not generate a comparator.");
}
if (rightInformation instanceof AtomicType) {
rightComparator =
((AtomicType<IN2>) rightInformation).createComparator(true, executionConfig);
} else if (rightInformation instanceof CompositeType) {
int[] keyPositions = getKeyColumns(1);
boolean[] orders = new boolean[keyPositions.length];
Arrays.fill(orders, true);
rightComparator =
((CompositeType<IN2>) rightInformation)
.createComparator(keyPositions, orders, 0, executionConfig);
} else {
throw new RuntimeException(
"Type information for right input of type "
+ rightInformation.getClass().getCanonicalName()
+ " is not supported. Could not generate a comparator.");
}
TypePairComparator<IN1, IN2> pairComparator =
new GenericPairComparator<IN1, IN2>(leftComparator, rightComparator);
List<OUT> result = new ArrayList<OUT>();
Collector<OUT> collector =
new CopyingListCollector<OUT>(
result,
outInformation.createSerializer(executionConfig.getSerializerConfig()));
Map<Integer, List<IN2>> probeTable = new HashMap<Integer, List<IN2>>();
// Build hash table
for (IN2 element : inputData2) {
List<IN2> list = probeTable.get(rightComparator.hash(element));
if (list == null) {
list = new ArrayList<IN2>();
probeTable.put(rightComparator.hash(element), list);
}
list.add(element);
}
// Probing
for (IN1 left : inputData1) {
List<IN2> matchingHashes = probeTable.get(leftComparator.hash(left));
if (matchingHashes != null) {
pairComparator.setReference(left);
for (IN2 right : matchingHashes) {
if (pairComparator.equalToReference(right)) {
function.join(
leftSerializer.copy(left), rightSerializer.copy(right), collector);
}
}
}
}
FunctionUtils.closeFunction(function);
return result;
} | @Test
void testJoinRich() throws Exception {
final AtomicBoolean opened = new AtomicBoolean(false);
final AtomicBoolean closed = new AtomicBoolean(false);
final String taskName = "Test rich join function";
final RichFlatJoinFunction<String, String, Integer> joiner =
new RichFlatJoinFunction<String, String, Integer>() {
@Override
public void open(OpenContext openContext) {
opened.compareAndSet(false, true);
assertThat(getRuntimeContext().getTaskInfo().getIndexOfThisSubtask())
.isZero();
assertThat(getRuntimeContext().getTaskInfo().getNumberOfParallelSubtasks())
.isOne();
}
@Override
public void close() {
closed.compareAndSet(false, true);
}
@Override
public void join(String first, String second, Collector<Integer> out) {
out.collect(first.length());
out.collect(second.length());
}
};
InnerJoinOperatorBase<
String, String, Integer, RichFlatJoinFunction<String, String, Integer>>
base =
new InnerJoinOperatorBase<>(
joiner,
new BinaryOperatorInformation<>(
BasicTypeInfo.STRING_TYPE_INFO,
BasicTypeInfo.STRING_TYPE_INFO,
BasicTypeInfo.INT_TYPE_INFO),
new int[0],
new int[0],
taskName);
final List<String> inputData1 = new ArrayList<>(Arrays.asList("foo", "bar", "foobar"));
final List<String> inputData2 = new ArrayList<>(Arrays.asList("foobar", "foo"));
final List<Integer> expected = new ArrayList<>(Arrays.asList(3, 3, 6, 6));
final TaskInfo taskInfo = new TaskInfoImpl(taskName, 1, 0, 1, 0);
final HashMap<String, Accumulator<?, ?>> accumulatorMap = new HashMap<>();
final HashMap<String, Future<Path>> cpTasks = new HashMap<>();
ExecutionConfig executionConfig = new ExecutionConfig();
executionConfig.disableObjectReuse();
List<Integer> resultSafe =
base.executeOnCollections(
inputData1,
inputData2,
new RuntimeUDFContext(
taskInfo,
null,
executionConfig,
cpTasks,
accumulatorMap,
UnregisteredMetricsGroup.createOperatorMetricGroup()),
executionConfig);
executionConfig.enableObjectReuse();
List<Integer> resultRegular =
base.executeOnCollections(
inputData1,
inputData2,
new RuntimeUDFContext(
taskInfo,
null,
executionConfig,
cpTasks,
accumulatorMap,
UnregisteredMetricsGroup.createOperatorMetricGroup()),
executionConfig);
assertThat(resultSafe).isEqualTo(expected);
assertThat(resultRegular).isEqualTo(expected);
assertThat(opened).isTrue();
assertThat(closed).isTrue();
} |
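The collection-execution fragment above is a textbook build/probe hash join: the right input is bucketed by key hash, then each left element probes its bucket and an explicit equality check (the pair comparator) weeds out hash collisions. A minimal self-contained sketch of the same pattern, assuming plain key-extractor functions in place of Flink's comparator machinery (here HashMap's own equality check stands in for the explicit pair comparison):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public final class HashJoinSketch {

    // Build phase: bucket the right side by key; probe phase: match each left element.
    static <L, R, K> List<String> hashJoin(List<L> left, List<R> right,
                                           Function<L, K> leftKey, Function<R, K> rightKey) {
        Map<K, List<R>> probeTable = new HashMap<>();
        for (R r : right) {
            probeTable.computeIfAbsent(rightKey.apply(r), k -> new ArrayList<>()).add(r);
        }
        List<String> result = new ArrayList<>();
        for (L l : left) {
            List<R> matches = probeTable.get(leftKey.apply(l));
            if (matches != null) {
                for (R r : matches) {
                    result.add(l + "|" + r);
                }
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // Join on string length, mirroring the lengths exercised by testJoinRich above.
        System.out.println(hashJoin(
                List.of("foo", "bar", "foobar"), List.of("foobar", "foo"),
                String::length, String::length));
        // prints [foo|foo, bar|foo, foobar|foobar]
    }
}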
public static boolean validatePlugin(PluginLookup.PluginType type, Class<?> pluginClass) {
switch (type) {
case INPUT:
return containsAllMethods(inputMethods, pluginClass.getMethods());
case FILTER:
return containsAllMethods(filterMethods, pluginClass.getMethods());
case CODEC:
return containsAllMethods(codecMethods, pluginClass.getMethods());
case OUTPUT:
return containsAllMethods(outputMethods, pluginClass.getMethods());
default:
throw new IllegalStateException("Unknown plugin type for validation: " + type);
}
} | @Test
public void testValidOutputPlugin() {
Assert.assertTrue(PluginValidator.validatePlugin(PluginLookup.PluginType.OUTPUT, Stdout.class));
} |
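containsAllMethods and the per-type method lists are outside this excerpt; a plausible sketch of that style of duck-typing check, comparing required method names against a class's public methods (the names and the contract here are assumptions for illustration; the real validator may also compare parameter and return types):

import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.Set;
import java.util.stream.Collectors;

public final class MethodPresenceSketch {

    // True if the candidate class exposes a public method for every required name.
    static boolean containsAllMethods(Set<String> requiredNames, Method[] candidateMethods) {
        Set<String> present = Arrays.stream(candidateMethods)
                .map(Method::getName)
                .collect(Collectors.toSet());
        return present.containsAll(requiredNames);
    }

    public static void main(String[] args) {
        // Hypothetical contract: every plugin must expose toString() and hashCode().
        Set<String> required = Set.of("toString", "hashCode");
        System.out.println(containsAllMethods(required, Object.class.getMethods())); // true
    }
}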
public CreateStreamCommand createStreamCommand(final KsqlStructuredDataOutputNode outputNode) {
return new CreateStreamCommand(
outputNode.getSinkName().get(),
outputNode.getSchema(),
outputNode.getTimestampColumn(),
outputNode.getKsqlTopic().getKafkaTopicName(),
Formats.from(outputNode.getKsqlTopic()),
outputNode.getKsqlTopic().getKeyFormat().getWindowInfo(),
Optional.of(outputNode.getOrReplace()),
Optional.of(false)
);
} | @Test
public void shouldHandleValueAvroSchemaNameForStream() {
// Given:
givenCommandFactoriesWithMocks();
givenProperty("VALUE_FORMAT", new StringLiteral("Avro"));
givenProperty("value_avro_schema_full_name", new StringLiteral("full.schema.name"));
final CreateStream statement = new CreateStream(SOME_NAME, ONE_KEY_ONE_VALUE, false, true,
withProperties, false);
// When:
final CreateStreamCommand cmd = createSourceFactory.createStreamCommand(
statement,
ksqlConfig
);
// Then:
assertThat(
cmd.getFormats().getValueFormat(),
is(FormatInfo.of(AVRO.name(), ImmutableMap.of(ConnectProperties.FULL_SCHEMA_NAME, "full.schema.name"))));
} |
static MethodWrapper none() {
return new MethodWrapper(null, false);
} | @Test
public void testNone() {
MethodWrapper none = MethodWrapper.none();
assertThat(none.isPresent()).isFalse();
assertThat(none.getMethod()).isNull();
} |
public static <T extends TypedSPI> Optional<T> findService(final Class<T> serviceInterface, final Object type) {
return findService(serviceInterface, type, new Properties());
} | @Test
void assertFindServiceWithProperties() {
assertTrue(TypedSPILoader.findService(TypedSPIFixture.class, "TYPED.FIXTURE", new Properties()).isPresent());
} |
@Override
public void configure(Map<String, ?> configs) throws KafkaException {
if (sslEngineFactory != null) {
throw new IllegalStateException("SslFactory was already configured.");
}
this.endpointIdentification = (String) configs.get(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG);
// Copy the configs so that the client-auth override below does not mutate the caller's map.
Map<String, Object> nextConfigs = new HashMap<>(configs);
if (clientAuthConfigOverride != null) {
nextConfigs.put(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG, clientAuthConfigOverride);
}
SslEngineFactory builder = instantiateSslEngineFactory(nextConfigs);
if (keystoreVerifiableUsingTruststore) {
try {
SslEngineValidator.validate(builder, builder);
} catch (Exception e) {
throw new ConfigException("A client SSLEngine created with the provided settings " +
"can't connect to a server SSLEngine created with those settings.", e);
}
}
this.sslEngineFactory = builder;
} | @Test
public void testSslFactoryWithoutPasswordConfiguration() throws Exception {
File trustStoreFile = TestUtils.tempFile("truststore", ".jks");
Map<String, Object> serverSslConfig = sslConfigsBuilder(ConnectionMode.SERVER)
.createNewTrustStore(trustStoreFile)
.build();
// unset the password
serverSslConfig.remove(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG);
SslFactory sslFactory = new SslFactory(ConnectionMode.SERVER);
try {
sslFactory.configure(serverSslConfig);
} catch (Exception e) {
fail("An exception was thrown when configuring the truststore without a password: " + e);
}
} |
@Override
public ValidationFailure addFailure(String message, @Nullable String correctiveAction) {
ValidationFailure validationFailure = new ValidationFailure(message, correctiveAction);
failuresCollection.add(validationFailure);
return validationFailure;
} | @Test
public void addFailure() {
// arrange
FailureCollectorWrapper failureCollectorWrapper = new FailureCollectorWrapper();
// act
RuntimeException error = new RuntimeException("An error has occurred");
failureCollectorWrapper.addFailure(error.getMessage(), null);
// assert
assertThrows(ValidationException.class, () -> failureCollectorWrapper.getOrThrowException());
} |
@Override
public KsMaterializedQueryResult<WindowedRow> get(
final GenericKey key,
final int partition,
final Range<Instant> windowStart,
final Range<Instant> windowEnd,
final Optional<Position> position
) {
try {
final WindowRangeQuery<GenericKey, GenericRow> query = WindowRangeQuery.withKey(key);
StateQueryRequest<KeyValueIterator<Windowed<GenericKey>, GenericRow>> request =
inStore(stateStore.getStateStoreName()).withQuery(query);
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> result =
stateStore.getKafkaStreams().query(request);
final QueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> queryResult =
result.getPartitionResults().get(partition);
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
}
try (KeyValueIterator<Windowed<GenericKey>, GenericRow> it =
queryResult.getResult()) {
final Builder<WindowedRow> builder = ImmutableList.builder();
while (it.hasNext()) {
final KeyValue<Windowed<GenericKey>, GenericRow> next = it.next();
final Window wnd = next.key.window();
if (!windowStart.contains(wnd.startTime())) {
continue;
}
if (!windowEnd.contains(wnd.endTime())) {
continue;
}
final long rowTime = wnd.end();
final WindowedRow row = WindowedRow.of(
stateStore.schema(),
next.key,
next.value,
rowTime
);
builder.add(row);
}
return KsMaterializedQueryResult.rowIteratorWithPosition(
builder.build().iterator(), queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
} | @Test
public void shouldReturnMultipleSessions() {
// Given:
givenSingleSession(LOWER_INSTANT.minusMillis(1), LOWER_INSTANT.plusSeconds(1));
final Instant wend0 = LOWER_INSTANT;
givenSingleSession(LOWER_INSTANT, wend0);
final Instant wend1 = UPPER_INSTANT;
givenSingleSession(UPPER_INSTANT, wend1);
givenSingleSession(UPPER_INSTANT.plusMillis(1), UPPER_INSTANT.plusSeconds(1));
// When:
final KsMaterializedQueryResult<WindowedRow> result =
table.get(A_KEY, PARTITION, WINDOW_START_BOUNDS, WINDOW_END_BOUNDS);
// Then:
final Iterator<WindowedRow> rowIterator = result.getRowIterator();
assertThat(rowIterator.hasNext(), is(true));
final List<WindowedRow> resultList = Lists.newArrayList(rowIterator);
assertThat(resultList, contains(
WindowedRow.of(
SCHEMA,
sessionKey(LOWER_INSTANT, wend0),
A_VALUE,
wend0.toEpochMilli()
),
WindowedRow.of(
SCHEMA,
sessionKey(UPPER_INSTANT, wend1),
A_VALUE,
wend1.toEpochMilli()
)
));
assertThat(result.getPosition(), not(Optional.empty()));
assertThat(result.getPosition().get(), is(POSITION));
} |
@Override
public int run(String launcherVersion, String launcherMd5, ServerUrlGenerator urlGenerator, Map<String, String> env, Map<String, String> context) {
int exitValue = 0;
LOG.info("Agent launcher is version: {}", CurrentGoCDVersion.getInstance().fullVersion());
String[] command = new String[]{};
try {
AgentBootstrapperArgs bootstrapperArgs = AgentBootstrapperArgs.fromProperties(context);
ServerBinaryDownloader agentDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
agentDownloader.downloadIfNecessary(DownloadableFile.AGENT);
ServerBinaryDownloader pluginZipDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
pluginZipDownloader.downloadIfNecessary(DownloadableFile.AGENT_PLUGINS);
ServerBinaryDownloader tfsImplDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
tfsImplDownloader.downloadIfNecessary(DownloadableFile.TFS_IMPL);
command = agentInvocationCommand(agentDownloader.getMd5(), launcherMd5, pluginZipDownloader.getMd5(), tfsImplDownloader.getMd5(),
env, context, agentDownloader.getExtraProperties());
LOG.info("Launching Agent with command: {}", join(command, " "));
Process agent = invoke(command);
// The next lines prevent the child process from blocking on Windows
AgentOutputAppender agentOutputAppenderForStdErr = new AgentOutputAppender(GO_AGENT_STDERR_LOG);
AgentOutputAppender agentOutputAppenderForStdOut = new AgentOutputAppender(GO_AGENT_STDOUT_LOG);
if (new SystemEnvironment().consoleOutToStdout()) {
agentOutputAppenderForStdErr.writeTo(AgentOutputAppender.Outstream.STDERR);
agentOutputAppenderForStdOut.writeTo(AgentOutputAppender.Outstream.STDOUT);
}
agent.getOutputStream().close();
AgentConsoleLogThread stdErrThd = new AgentConsoleLogThread(agent.getErrorStream(), agentOutputAppenderForStdErr);
stdErrThd.start();
AgentConsoleLogThread stdOutThd = new AgentConsoleLogThread(agent.getInputStream(), agentOutputAppenderForStdOut);
stdOutThd.start();
Shutdown shutdownHook = new Shutdown(agent);
Runtime.getRuntime().addShutdownHook(shutdownHook);
try {
exitValue = agent.waitFor();
} catch (InterruptedException ie) {
LOG.error("Agent was interrupted. Terminating agent and respawning. {}", ie.toString());
agent.destroy();
} finally {
removeShutdownHook(shutdownHook);
stdErrThd.stopAndJoin();
stdOutThd.stopAndJoin();
}
} catch (Exception e) {
LOG.error("Exception while executing command: {} - {}", join(command, " "), e.toString());
exitValue = EXCEPTION_OCCURRED;
}
return exitValue;
} | @Test
public void shouldAddSSLConfigurationIfProvided() throws InterruptedException {
final List<String> cmd = new ArrayList<>();
String expectedAgentMd5 = TEST_AGENT.getMd5();
String expectedAgentPluginsMd5 = TEST_AGENT_PLUGINS.getMd5();
String expectedTfsMd5 = TEST_TFS_IMPL.getMd5();
Map<String, String> context = context();
context.put(AgentBootstrapperArgs.PRIVATE_KEY, "/path/to/private.key");
context.put(AgentBootstrapperArgs.PRIVATE_KEY_PASSPHRASE_FILE, "/path/to/private_key_passphrase.key");
context.put(AgentBootstrapperArgs.SSL_CERTIFICATE, "/path/to/ssl_certificate.pem");
AgentProcessParentImpl bootstrapper = createBootstrapper(cmd);
int returnCode = bootstrapper.run("launcher_version", "bar", getURLGenerator(), new HashMap<>(), context);
assertThat(returnCode, is(42));
assertThat(cmd.toArray(new String[]{}), equalTo(new String[]{
(getProperty("java.home") + FileSystems.getDefault().getSeparator() + "bin" + FileSystems.getDefault().getSeparator() + "java"),
"-Dagent.plugins.md5=" + expectedAgentPluginsMd5,
"-Dagent.binary.md5=" + expectedAgentMd5,
"-Dagent.launcher.md5=bar",
"-Dagent.tfs.md5=" + expectedTfsMd5,
"-Dagent.bootstrapper.version=UNKNOWN",
"-jar",
"agent.jar",
"-serverUrl",
"http://localhost:" + server.getPort() + "/go/",
"-sslVerificationMode",
"NONE",
"-rootCertFile",
new File("/path/to/cert.pem").getAbsolutePath(),
"-sslCertificateFile",
new File("/path/to/ssl_certificate.pem").getAbsolutePath(),
"-sslPrivateKeyFile",
new File("/path/to/private.key").getAbsolutePath(),
"-sslPrivateKeyPassphraseFile",
new File("/path/to/private_key_passphrase.key").getAbsolutePath()
}));
} |
@Override
public Network network(String netId) {
checkArgument(!Strings.isNullOrEmpty(netId), ERR_NULL_NETWORK_ID);
return osNetworkStore.network(netId);
} | @Test
public void testGetNetworkById() {
createBasicNetworks();
assertTrue("Network did not match", target.network(NETWORK_ID) != null);
assertTrue("Network did not match", target.network(UNKNOWN_ID) == null);
} |
@Override
public ByteBuf readBytes(int length) {
checkReadableBytes(length);
if (length == 0) {
return Unpooled.EMPTY_BUFFER;
}
ByteBuf buf = alloc().buffer(length, maxCapacity);
buf.writeBytes(this, readerIndex, length);
readerIndex += length;
return buf;
} | @Test
public void testReadBytesAfterRelease6() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().readBytes(new byte[8], 0, 1);
}
});
} |
public synchronized TopologyDescription describe() {
return internalTopologyBuilder.describe();
} | @Test
public void kGroupedStreamAnonymousMaterializedCountShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
builder.stream("input-topic")
.groupByKey()
.count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>with(null, Serdes.Long())
// set store type explicitly with default rocksDB
.withStoreType(Materialized.StoreType.ROCKS_DB));
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
" --> KSTREAM-AGGREGATE-0000000003\n" +
" Processor: KSTREAM-AGGREGATE-0000000003 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000002])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000000\n\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true));
} |
@CheckForNull
@Override
public Set<Path> branchChangedFiles(String targetBranchName, Path rootBaseDir) {
return Optional.ofNullable((branchChangedFilesWithFileMovementDetection(targetBranchName, rootBaseDir)))
.map(GitScmProvider::extractAbsoluteFilePaths)
.orElse(null);
} | @Test
public void branchChangedFiles_should_return_null_on_errors() throws GitAPIException {
DiffCommand diffCommand = mock(DiffCommand.class);
when(diffCommand.setShowNameAndStatusOnly(anyBoolean())).thenReturn(diffCommand);
when(diffCommand.setOldTree(any())).thenReturn(diffCommand);
when(diffCommand.setNewTree(any())).thenReturn(diffCommand);
when(diffCommand.call()).thenThrow(mock(GitAPIException.class));
Git git = mock(Git.class);
when(git.diff()).thenReturn(diffCommand);
GitScmProvider provider = new GitScmProvider(mockCommand(), analysisWarnings, gitIgnoreCommand, system2, documentationLinkGenerator) {
@Override
Git newGit(Repository repo) {
return git;
}
};
assertThat(provider.branchChangedFiles("master", worktree)).isNull();
verify(diffCommand).call();
} |
public static ShenyuAdminResult timeout(final String msg) {
return error(HttpStatus.REQUEST_TIMEOUT.value(), msg);
} | @Test
public void testTimeout() {
final ShenyuAdminResult result = ShenyuAdminResult.timeout("msg");
assertEquals(HttpStatus.REQUEST_TIMEOUT.value(), result.getCode().intValue());
assertEquals("msg", result.getMessage());
assertNull(result.getData());
assertEquals(3782806, result.hashCode());
assertEquals("ShenyuAdminResult{code=408, message='msg', data=null}", result.toString());
} |
@GetMapping("/hystrix")
public Object hystrixPluginFallback() {
return ShenyuResultWrap.error(ShenyuResultEnum.HYSTRIX_PLUGIN_FALLBACK, null);
} | @Test
public void hystrixPluginFallback() throws Exception {
final MockHttpServletResponse response = this.mockMvc.perform(MockMvcRequestBuilders.get("/fallback/hystrix"))
.andReturn().getResponse();
assertThat(response.getStatus()).isEqualTo(HttpStatus.OK.value());
} |
@Override
public void removeEndpoints(String uid) {
checkArgument(!Strings.isNullOrEmpty(uid), ERR_NULL_ENDPOINTS_UID);
synchronized (this) {
if (isEndpointsInUse(uid)) {
final String error = String.format(MSG_ENDPOINTS, uid, ERR_IN_USE);
throw new IllegalStateException(error);
}
Endpoints endpoints = k8sEndpointsStore.removeEndpoints(uid);
if (endpoints != null) {
log.info(String.format(MSG_ENDPOINTS,
endpoints.getMetadata().getName(), MSG_REMOVED));
}
}
} | @Test(expected = IllegalArgumentException.class)
public void testRemoveEndpointsWithNull() {
target.removeEndpoints(null);
} |
@Override
public Response setHeader(String name, String value) {
stream.response().setHeader(name, value);
return this;
} | @Test
public void set_header() {
underTest.setHeader("header", "value");
verify(response).setHeader("header", "value");
} |
@VisibleForTesting
protected ObjectMapper getMapper() {
return this.mapper;
} | @Test
public void testJacksonFeatureByConfigKey() {
// origin, should be FAIL_ON_UNKNOWN_PROPERTIES true and FAIL_ON_EMPTY_BEANS true
JacksonSerializer origin = new JacksonSerializer();
ObjectMapper originMapper = origin.getMapper();
// originally false
Assert.assertFalse(originMapper.isEnabled(SerializationFeature.WRAP_ROOT_VALUE));
Assert.assertFalse(originMapper.isEnabled(SerializationFeature.INDENT_OUTPUT));
// originally true
Assert.assertTrue(originMapper.isEnabled(SerializationFeature.FAIL_ON_EMPTY_BEANS));
Assert.assertTrue(originMapper.isEnabled(SerializationFeature.FAIL_ON_SELF_REFERENCES));
// originally false
Assert.assertFalse(originMapper.isEnabled(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES));
Assert.assertFalse(originMapper.isEnabled(DeserializationFeature.FAIL_ON_NUMBERS_FOR_ENUMS));
// originally true
Assert.assertTrue(originMapper.isEnabled(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES));
Assert.assertTrue(originMapper.isEnabled(DeserializationFeature.FAIL_ON_UNRESOLVED_OBJECT_IDS));
JacksonSerializer testSer = null;
try {
System.setProperty(JacksonConfigKeys.JACKSON_SER_FEATURE_ENABLE_LIST.getKey(),
SerializationFeature.WRAP_ROOT_VALUE + ","
+ SerializationFeature.INDENT_OUTPUT);
System.setProperty(JacksonConfigKeys.JACKSON_SER_FEATURE_DISABLE_LIST.getKey(),
SerializationFeature.FAIL_ON_EMPTY_BEANS.name() + ","
+ SerializationFeature.FAIL_ON_SELF_REFERENCES.name());
System.setProperty(JacksonConfigKeys.JACKSON_DES_FEATURE_ENABLE_LIST.getKey(),
DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES + ","
+ DeserializationFeature.FAIL_ON_NUMBERS_FOR_ENUMS);
System.setProperty(JacksonConfigKeys.JACKSON_DES_FEATURE_DISABLE_LIST.getKey(),
DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES.name() + ","
+ DeserializationFeature.FAIL_ON_UNRESOLVED_OBJECT_IDS.name());
// test ser after property set
testSer = new JacksonSerializer();
} finally {
System.clearProperty(JacksonConfigKeys.JACKSON_SER_FEATURE_ENABLE_LIST.getKey());
System.clearProperty(JacksonConfigKeys.JACKSON_SER_FEATURE_DISABLE_LIST.getKey());
System.clearProperty(JacksonConfigKeys.JACKSON_DES_FEATURE_ENABLE_LIST.getKey());
System.clearProperty(JacksonConfigKeys.JACKSON_DES_FEATURE_DISABLE_LIST.getKey());
}
ObjectMapper testMapper = testSer.getMapper();
// originally false, but enabled
Assert.assertTrue(testMapper.isEnabled(SerializationFeature.WRAP_ROOT_VALUE));
Assert.assertTrue(testMapper.isEnabled(SerializationFeature.INDENT_OUTPUT));
// originally true, but disabled
Assert.assertFalse(testMapper.isEnabled(SerializationFeature.FAIL_ON_EMPTY_BEANS));
Assert.assertFalse(testMapper.isEnabled(SerializationFeature.FAIL_ON_SELF_REFERENCES));
// originally false, but enabled
Assert.assertTrue(testMapper.isEnabled(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES));
Assert.assertTrue(testMapper.isEnabled(DeserializationFeature.FAIL_ON_NUMBERS_FOR_ENUMS));
// originally true, but disabled
Assert.assertFalse(testMapper.isEnabled(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES));
Assert.assertFalse(testMapper.isEnabled(DeserializationFeature.FAIL_ON_UNRESOLVED_OBJECT_IDS));
} |
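The test above drives ObjectMapper feature toggles through comma-separated system properties; presumably the serializer splits each list and flips the named feature on the mapper. A sketch of that parsing step under those assumptions (the handling and error policy here are illustrative, not JacksonSerializer's actual code):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;

public final class FeatureToggleSketch {

    // Enables every SerializationFeature named in a comma-separated list.
    static void enableSerFeatures(ObjectMapper mapper, String csv) {
        if (csv == null || csv.isBlank()) {
            return;
        }
        for (String name : csv.split(",")) {
            // valueOf throws on unknown names; a production parser might log and skip instead.
            mapper.enable(SerializationFeature.valueOf(name.trim()));
        }
    }

    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();
        enableSerFeatures(mapper, "WRAP_ROOT_VALUE, INDENT_OUTPUT");
        System.out.println(mapper.isEnabled(SerializationFeature.INDENT_OUTPUT)); // true
    }
}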
@Override
public boolean isEmpty() {
return size <= 0;
} | @Test
public void testIsEmpty() {
RangeSet rs = new RangeSet(0);
assertTrue(rs.isEmpty());
rs = new RangeSet(3);
assertFalse(rs.isEmpty());
} |
@Override
public void computeScanRangeAssignment() throws UserException {
if (locations.size() == 0) {
return;
}
long totalSize = computeTotalSize();
long avgNodeScanRangeBytes = totalSize / Math.max(workerProvider.getAllWorkers().size(), 1) + 1;
for (ComputeNode computeNode : workerProvider.getAllWorkers()) {
assignedScansPerComputeNode.put(computeNode, 0L);
reBalanceBytesPerComputeNode.put(computeNode, 0L);
hostToBackends.put(computeNode.getHost(), computeNode);
}
// schedule scan ranges to co-located backends.
// and put rest scan ranges into remote scan ranges.
List<TScanRangeLocations> remoteScanRangeLocations = Lists.newArrayList();
if (forceScheduleLocal) {
for (int i = 0; i < locations.size(); ++i) {
TScanRangeLocations scanRangeLocations = locations.get(i);
List<ComputeNode> backends = new ArrayList<>();
// select all backends that are co-located with this scan range.
for (final TScanRangeLocation location : scanRangeLocations.getLocations()) {
Collection<ComputeNode> servers = hostToBackends.get(location.getServer().getHostname());
if (servers == null || servers.isEmpty()) {
continue;
}
backends.addAll(servers);
}
ComputeNode node =
reBalanceScanRangeForComputeNode(backends, avgNodeScanRangeBytes, scanRangeLocations);
if (node == null) {
remoteScanRangeLocations.add(scanRangeLocations);
} else {
recordScanRangeAssignment(node, backends, scanRangeLocations);
}
}
} else {
remoteScanRangeLocations = locations;
}
if (remoteScanRangeLocations.isEmpty()) {
return;
}
// use consistent hashing to schedule remote scan ranges
HashRing hashRing = makeHashRing();
if (shuffleScanRange) {
Collections.shuffle(remoteScanRangeLocations);
}
// assign scan ranges.
for (int i = 0; i < remoteScanRangeLocations.size(); ++i) {
TScanRangeLocations scanRangeLocations = remoteScanRangeLocations.get(i);
List<ComputeNode> backends = hashRing.get(scanRangeLocations, kCandidateNumber);
ComputeNode node = reBalanceScanRangeForComputeNode(backends, avgNodeScanRangeBytes, scanRangeLocations);
if (node == null) {
throw new RuntimeException("Failed to find backend to execute");
}
recordScanRangeAssignment(node, backends, scanRangeLocations);
}
recordScanRangeStatistic();
} | @Test
public void testHdfsScanNodeForceScheduleLocal() throws Exception {
new Expectations() {
{
hdfsScanNode.getId();
result = scanNodeId;
hiveTable.getTableLocation();
result = "hdfs://dfs00/dataset/";
}
};
int scanRangeNumber = 100;
int scanRangeSize = 10000;
int hostNumber = 100;
// rewrite scan ranges locations to only 3 hosts.
// so with `forceScheduleLocal` only 3 nodes will get scan ranges.
int localHostNumber = 3;
List<TScanRangeLocations> locations = createScanRanges(scanRangeNumber, scanRangeSize);
for (TScanRangeLocations location : locations) {
List<TScanRangeLocation> servers = location.locations;
servers.clear();
for (int i = 0; i < localHostNumber; i++) {
TScanRangeLocation loc = new TScanRangeLocation();
loc.setServer(new TNetworkAddress(String.format(hostFormat, i), computeNodePort));
servers.add(loc);
}
}
FragmentScanRangeAssignment assignment = new FragmentScanRangeAssignment();
ImmutableMap<Long, ComputeNode> computeNodes = createComputeNodes(hostNumber);
DefaultWorkerProvider workerProvider = new DefaultWorkerProvider(
ImmutableMap.of(),
computeNodes,
ImmutableMap.of(),
computeNodes,
true
);
HDFSBackendSelector selector =
new HDFSBackendSelector(hdfsScanNode, locations, assignment, workerProvider, true, false);
selector.computeScanRangeAssignment();
Map<Long, Long> stats = computeWorkerIdToReadBytes(assignment, scanNodeId);
Assert.assertEquals(stats.size(), localHostNumber);
for (Map.Entry<Long, Long> entry : stats.entrySet()) {
System.out.printf("%s -> %d bytes\n", entry.getKey(), entry.getValue());
}
} |
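makeHashRing is not shown; the assignment loop only relies on hashRing.get(scanRange, k) returning k candidate nodes for a key. A minimal TreeMap-based consistent hash ring sketch showing how such candidates can be picked (the virtual-node count and the use of String.hashCode are illustrative; a real ring would use a stronger hash function):

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.TreeMap;

public final class HashRingSketch {
    private final TreeMap<Integer, String> ring = new TreeMap<>();

    HashRingSketch(List<String> nodes, int virtualNodes) {
        for (String node : nodes) {
            for (int i = 0; i < virtualNodes; i++) {
                ring.put((node + "#" + i).hashCode(), node); // virtual nodes smooth the load
            }
        }
    }

    // Returns up to n distinct nodes, walking clockwise from the key's ring position.
    List<String> get(String key, int n) {
        Set<String> picked = new LinkedHashSet<>();
        for (String node : ring.tailMap(key.hashCode()).values()) {
            picked.add(node);
            if (picked.size() == n) {
                return new ArrayList<>(picked);
            }
        }
        for (String node : ring.values()) { // wrap around the ring
            picked.add(node);
            if (picked.size() == n) {
                break;
            }
        }
        return new ArrayList<>(picked);
    }

    public static void main(String[] args) {
        HashRingSketch ring = new HashRingSketch(List.of("node-0", "node-1", "node-2"), 16);
        System.out.println(ring.get("scan-range-42", 2)); // two candidate nodes
    }
}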
@Override
public void queuePermissionSyncTask(String submitterUuid, String componentUuid, String projectUuid) {
findManagedProjectService()
.ifPresent(managedProjectService -> managedProjectService.queuePermissionSyncTask(submitterUuid, componentUuid, projectUuid));
} | @Test
public void queuePermissionSyncTask_whenManagedInstanceServices_shouldDelegatesToRightService() {
NeverManagedInstanceService neverManagedInstanceService = spy(new NeverManagedInstanceService());
AlwaysManagedInstanceService alwaysManagedInstanceService = spy(new AlwaysManagedInstanceService());
Set<ManagedInstanceService> delegates = Set.of(neverManagedInstanceService, alwaysManagedInstanceService);
DelegatingManagedServices managedInstanceService = new DelegatingManagedServices(delegates);
managedInstanceService.queuePermissionSyncTask("userUuid", "componentUuid", "projectUuid");
verify(neverManagedInstanceService, never()).queuePermissionSyncTask(anyString(), anyString(), anyString());
verify(alwaysManagedInstanceService).queuePermissionSyncTask("userUuid", "componentUuid", "projectUuid");
} |
public static List<Chunk> split(String s) {
int pos = s.indexOf(SLASH);
if (pos == -1) {
throw new RuntimeException("path did not start with or contain '/'");
}
List<Chunk> list = new ArrayList<>();
int startPos = 0;
int searchPos = 0;
boolean anyDepth = false;
while (pos != -1) {
if (pos == 0) {
startPos = 1;
searchPos = 1;
} else if (s.charAt(pos - 1) == '\\') {
s = s.substring(0, pos - 1) + s.substring(pos);
searchPos = pos;
} else {
String temp = s.substring(startPos, pos);
if (temp.isEmpty()) {
anyDepth = true;
} else {
list.add(new Chunk(anyDepth, temp));
anyDepth = false; // reset
}
startPos = pos + 1;
searchPos = startPos;
}
pos = s.indexOf(SLASH, searchPos);
}
if (startPos != s.length()) {
String temp = s.substring(startPos);
if (!temp.isEmpty()) {
list.add(new Chunk(anyDepth, temp));
}
}
return list;
} | @Test
void testIndex() {
List<PathSearch.Chunk> list = PathSearch.split("/hello[3]//world");
logger.debug("list: {}", list);
PathSearch.Chunk first = list.get(0);
assertFalse(first.anyDepth);
assertEquals("hello", first.controlType);
assertEquals(2, first.index);
PathSearch.Chunk second = list.get(1);
assertTrue(second.anyDepth);
assertEquals(-1, second.index);
assertEquals("world", second.controlType);
} |
@Override
public String pwd() {
try {
return client.printWorkingDirectory();
} catch (IOException e) {
throw new IORuntimeException(e);
}
} | @Test
@Disabled
public void isDirTest() throws Exception {
try (final Ftp ftp = new Ftp("127.0.0.1", 21)) {
Console.log(ftp.pwd());
ftp.isDir("/test");
Console.log(ftp.pwd());
}
} |
public List<QueryMetadata> sql(final String sql) {
return sql(sql, Collections.emptyMap());
} | @Test
public void shouldThrowIfFailedToInferSchema() {
// Given:
when(schemaInjector.inject(any()))
.thenThrow(new RuntimeException("Boom"));
// When:
final Exception e = assertThrows(
RuntimeException.class,
() -> ksqlContext.sql("Some SQL", SOME_PROPERTIES)
);
// Then:
assertThat(e.getMessage(), containsString("Boom"));
} |
public static UBinary create(Kind binaryOp, UExpression lhs, UExpression rhs) {
checkArgument(
OP_CODES.containsKey(binaryOp), "%s is not a supported binary operation", binaryOp);
return new AutoValue_UBinary(binaryOp, lhs, rhs);
} | @Test
public void signedRightShift() {
assertUnifiesAndInlines(
"4 >> 17", UBinary.create(Kind.RIGHT_SHIFT, ULiteral.intLit(4), ULiteral.intLit(17)));
} |
@Override
public int getOrder() {
return PluginEnum.JWT.getCode();
} | @Test
public void testGetOrder() {
final int result = jwtPluginUnderTest.getOrder();
Assertions.assertEquals(PluginEnum.JWT.getCode(), result);
} |
@Override
public synchronized boolean isTerminated() {
if (!shutdownStarted) {
return false;
}
for (ManagedChannel channel : usedChannels) {
    if (!channel.isTerminated()) {
        return false;
    }
}
for (ManagedChannel channel : channelCache) {
    if (!channel.isTerminated()) {
        return false;
    }
}
return true;
} | @Test
public void testIsTerminated() throws Exception {
ManagedChannel mockChannel = mock(ManagedChannel.class);
when(channelSupplier.get()).thenReturn(mockChannel);
IsolationChannel isolationChannel = IsolationChannel.create(channelSupplier);
when(mockChannel.shutdown()).thenReturn(mockChannel);
when(mockChannel.isTerminated()).thenReturn(false, true);
isolationChannel.shutdown();
assertFalse(isolationChannel.isTerminated());
assertTrue(isolationChannel.isTerminated());
verify(channelSupplier, times(1)).get();
verify(mockChannel, times(1)).shutdown();
verify(mockChannel, times(2)).isTerminated();
} |
public String anonymize(final ParseTree tree) {
return build(tree);
} | @Test
public void shouldAnonymizeJoinWithBeforeAndAfterAndGraceStatementsCorrectly() {
final String output = anon.anonymize("INSERT INTO OUTPUT SELECT col1, col2, col3"
+ " FROM SOURCE1 S1 JOIN SOURCE2 S2 "
+ "WITHIN (1 SECOND, 3 SECONDS) GRACE PERIOD 2 SECONDS ON col1.k=col2.k;");
Approvals.verify(output);
} |
@Override
public ResultSet executeQuery(String sql)
throws SQLException {
validateState();
try {
if (!DriverUtils.queryContainsLimitStatement(sql)) {
sql += " " + LIMIT_STATEMENT + " " + _maxRows;
}
String enabledSql = DriverUtils.enableQueryOptions(sql, _connection.getQueryOptions());
ResultSetGroup resultSetGroup = _session.execute(enabledSql);
if (resultSetGroup.getResultSetCount() == 0) {
_resultSet = PinotResultSet.empty();
return _resultSet;
}
_resultSet = new PinotResultSet(resultSetGroup.getResultSet(0));
return _resultSet;
} catch (PinotClientException e) {
throw new SQLException(String.format("Failed to execute query : %s", sql), e);
}
} | @Test
public void testPresetEnableNullHandling()
throws Exception {
Properties props = new Properties();
props.put(QueryOptionKey.ENABLE_NULL_HANDLING, "true");
PinotConnection pinotConnection =
new PinotConnection(props, "dummy", _dummyPinotClientTransport, "dummy", _dummyPinotControllerTransport);
Statement statement = pinotConnection.createStatement();
Assert.assertNotNull(statement);
String presetSql =
DriverUtils.createSetQueryOptionString(QueryOptionKey.ENABLE_NULL_HANDLING, true) + BASIC_TEST_QUERY;
statement.executeQuery(presetSql);
Assert.assertEquals(_dummyPinotClientTransport.getLastQuery().substring(0, presetSql.length()), presetSql);
} |
public static String getChecksum(String algorithm, File file) throws NoSuchAlgorithmException, IOException {
FileChecksums fileChecksums = CHECKSUM_CACHE.get(file);
if (fileChecksums == null) {
try (InputStream stream = Files.newInputStream(file.toPath())) {
final MessageDigest md5Digest = getMessageDigest(MD5);
final MessageDigest sha1Digest = getMessageDigest(SHA1);
final MessageDigest sha256Digest = getMessageDigest(SHA256);
final byte[] buffer = new byte[BUFFER_SIZE];
int read = stream.read(buffer, 0, BUFFER_SIZE);
while (read > -1) {
// update all checksums together instead of reading the file multiple times
md5Digest.update(buffer, 0, read);
sha1Digest.update(buffer, 0, read);
sha256Digest.update(buffer, 0, read);
read = stream.read(buffer, 0, BUFFER_SIZE);
}
fileChecksums = new FileChecksums(
getHex(md5Digest.digest()),
getHex(sha1Digest.digest()),
getHex(sha256Digest.digest())
);
CHECKSUM_CACHE.put(file, fileChecksums);
}
}
switch (algorithm.toUpperCase()) {
case MD5:
return fileChecksums.md5;
case SHA1:
return fileChecksums.sha1;
case SHA256:
return fileChecksums.sha256;
default:
throw new NoSuchAlgorithmException(algorithm);
}
} | @Test
public void testGetChecksum_NoSuchAlgorithm() throws Exception {
String algorithm = "some unknown algorithm";
File file = new File(this.getClass().getClassLoader().getResource("checkSumTest.file").getPath());
Exception exception = Assert.assertThrows(NoSuchAlgorithmException.class, () -> Checksum.getChecksum(algorithm, file));
assertTrue(exception.getMessage().contains("some unknown algorithm"));
} |
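getChecksum reads the file once, feeds the same buffer to three MessageDigest instances, and caches all three hex results. The same single-pass, multi-digest idea in isolation (a sketch assuming standard JDK algorithm names and Java 17+ for HexFormat):

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HexFormat;
import java.util.LinkedHashMap;
import java.util.Map;

public final class MultiDigestSketch {

    // Computes several digests over a single read of the file.
    static Map<String, String> digests(Path file, String... algorithms)
            throws IOException, NoSuchAlgorithmException {
        MessageDigest[] mds = new MessageDigest[algorithms.length];
        for (int i = 0; i < algorithms.length; i++) {
            mds[i] = MessageDigest.getInstance(algorithms[i]);
        }
        byte[] buffer = new byte[8192];
        try (InputStream in = Files.newInputStream(file)) {
            int read;
            while ((read = in.read(buffer)) != -1) {
                for (MessageDigest md : mds) {
                    md.update(buffer, 0, read); // one read feeds every digest
                }
            }
        }
        Map<String, String> out = new LinkedHashMap<>();
        for (int i = 0; i < algorithms.length; i++) {
            out.put(algorithms[i], HexFormat.of().formatHex(mds[i].digest()));
        }
        return out;
    }

    public static void main(String[] args) throws Exception {
        Path tmp = Files.createTempFile("checksum", ".bin");
        Files.writeString(tmp, "hello");
        System.out.println(digests(tmp, "MD5", "SHA-1", "SHA-256"));
    }
}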
public Node deserializeObject(JsonReader reader) {
Log.info("Deserializing JSON to Node.");
JsonObject jsonObject = reader.readObject();
return deserializeObject(jsonObject);
} | @Test
void testOperator() {
Expression expr = parseExpression("1+1");
String serialized = serialize(expr, false);
Node deserialized = deserializer.deserializeObject(Json.createReader(new StringReader(serialized)));
assertEqualsStringIgnoringEol("1 + 1", deserialized.toString());
assertEquals(expr.hashCode(), deserialized.hashCode());
} |
public void start() {
if (!enabled) {
logger.info(format("Diagnostics disabled. To enable add -D%s=true to the JVM arguments.", ENABLED.getName()));
return;
}
this.diagnosticsLog = outputType.newLog(this);
this.scheduler = new ScheduledThreadPoolExecutor(1, new DiagnosticSchedulerThreadFactory());
logger.info("Diagnostics started");
} | @Test
public void start_whenEnabled() throws Exception {
Diagnostics diagnostics = newDiagnostics(new Config().setProperty(Diagnostics.ENABLED.getName(), "true"));
diagnostics.start();
assertNotNull("DiagnosticsLogFile should not be null", diagnostics.diagnosticsLog);
} |
@Override
protected String buildApiPath(final Method method,
final String superPath,
@NonNull final ShenyuSofaClient shenyuSofaClient) {
final String contextPath = this.getContextPath();
return superPath.contains("*") ? pathJoin(contextPath, superPath.replace("*", ""), method.getName())
: pathJoin(contextPath, superPath, shenyuSofaClient.path());
} | @Test
public void testBuildApiPathSuperPathContainsStar() {
given(method.getName()).willReturn(METHOD_NAME);
String realApiPath = sofaServiceEventListener.buildApiPath(method, SUPER_PATH_CONTAINS_STAR, shenyuSofaClient);
String expectedApiPath = "/sofa/demo/buildURIRegisterDTO";
assertEquals(expectedApiPath, realApiPath);
} |
@Override
public SchemaTransform from(PubsubReadSchemaTransformConfiguration configuration) {
if (configuration.getSubscription() == null && configuration.getTopic() == null) {
throw new IllegalArgumentException(
"To read from Pubsub, a subscription name or a topic name must be provided");
}
if (configuration.getSubscription() != null && configuration.getTopic() != null) {
throw new IllegalArgumentException(
"To read from Pubsub, a subscription name or a topic name must be provided. Not both.");
}
if (!"RAW".equals(configuration.getFormat())) {
if ((Strings.isNullOrEmpty(configuration.getSchema())
&& !Strings.isNullOrEmpty(configuration.getFormat()))
|| (!Strings.isNullOrEmpty(configuration.getSchema())
&& Strings.isNullOrEmpty(configuration.getFormat()))) {
throw new IllegalArgumentException(
"A schema was provided without a data format (or viceversa). Please provide "
+ "both of these parameters to read from Pubsub, or if you would like to use the Pubsub schema service,"
+ " please leave both of these blank.");
}
}
Schema payloadSchema;
SerializableFunction<byte[], Row> payloadMapper;
String format =
configuration.getFormat() == null ? null : configuration.getFormat().toUpperCase();
if ("RAW".equals(format)) {
payloadSchema = Schema.of(Schema.Field.of("payload", Schema.FieldType.BYTES));
payloadMapper = input -> Row.withSchema(payloadSchema).addValue(input).build();
} else if ("JSON".equals(format)) {
payloadSchema = JsonUtils.beamSchemaFromJsonSchema(configuration.getSchema());
payloadMapper = JsonUtils.getJsonBytesToRowFunction(payloadSchema);
} else if ("AVRO".equals(format)) {
payloadSchema =
AvroUtils.toBeamSchema(
new org.apache.avro.Schema.Parser().parse(configuration.getSchema()));
payloadMapper = AvroUtils.getAvroBytesToRowFunction(payloadSchema);
} else {
throw new IllegalArgumentException(
String.format(
"Format %s not supported. Only supported formats are %s",
configuration.getFormat(), VALID_FORMATS_STR));
}
PubsubReadSchemaTransform transform =
new PubsubReadSchemaTransform(configuration, payloadSchema, payloadMapper);
if (configuration.getClientFactory() != null) {
transform.setClientFactory(configuration.getClientFactory());
}
if (configuration.getClock() != null) {
transform.setClock(configuration.getClock());
}
return transform;
} | @Test
public void testInvalidConfigBothTopicAndSubscription() {
PCollectionRowTuple begin = PCollectionRowTuple.empty(p);
assertThrows(
IllegalArgumentException.class,
() ->
begin.apply(
new PubsubReadSchemaTransformProvider()
.from(
PubsubReadSchemaTransformConfiguration.builder()
.setSchema(SCHEMA)
.setFormat("AVRO")
.setTopic(TOPIC)
.setSubscription(SUBSCRIPTION)
.build())));
p.run().waitUntilFinish();
} |
public static GlobalMemoryAccessor getDefaultGlobalMemoryAccessor() {
return STORAGE.get(PLATFORM_AWARE);
} | @Test
public void test_getMemoryAccessor_default() {
assertNotNull(GlobalMemoryAccessorRegistry.getDefaultGlobalMemoryAccessor());
} |
public static String defaultEmptyIfBlank(String str) {
return defaultIfBlank(str, EMPTY);
} | @Test
void testDefaultEmptyIfBlank() {
assertEquals("", StringUtils.defaultEmptyIfBlank(null));
assertEquals("", StringUtils.defaultEmptyIfBlank(""));
assertEquals("", StringUtils.defaultEmptyIfBlank(" "));
assertEquals("bat", StringUtils.defaultEmptyIfBlank("bat"));
} |
public boolean fileExists(String path) throws IOException, InvalidTokenException {
String url;
try {
url =
getUriBuilder()
.setPath(API_PATH_PREFIX + "/mounts/primary/files/info")
.setParameter("path", path)
.build()
.toString();
} catch (URISyntaxException e) {
throw new IllegalStateException("Could not produce url.", e);
}
Request.Builder requestBuilder = getRequestBuilder(url);
try (Response response = getResponse(requestBuilder)) {
int code = response.code();
if (code == 200) {
return true;
}
if (code == 404) {
return false;
}
throw new KoofrClientIOException(response);
}
} | @Test
public void testFileExists() throws Exception {
server.enqueue(new MockResponse().setResponseCode(200));
boolean exists = client.fileExists("/path/to/file");
assertTrue(exists);
assertEquals(1, server.getRequestCount());
final RecordedRequest recordedRequest = server.takeRequest();
assertEquals("GET", recordedRequest.getMethod());
assertEquals(
"/api/v2/mounts/primary/files/info?path=%2Fpath%2Fto%2Ffile", recordedRequest.getPath());
assertEquals("Bearer acc", recordedRequest.getHeader("Authorization"));
assertEquals("2.1", recordedRequest.getHeader("X-Koofr-Version"));
} |
public static String prettyJson(ObjectMapper mapper, String jsonString) {
try {
Object jsonObject = mapper.readValue(jsonString, Object.class);
return mapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject);
} catch (JsonParseException e) {
log.debug("JsonParseException caused by {}", e);
} catch (JsonMappingException e) {
log.debug("JsonMappingException caused by {}", e);
} catch (JsonProcessingException e) {
log.debug("JsonProcessingException caused by {}", e);
}
return null;
} | @Test
public void testPrettyJson() {
String string = prettyJson(new ObjectMapper(), "{\"json\":\"json\"}");
String prettyJsonString = "{\n \"json\" : \"json\"\n}";
assertEquals(prettyJsonString, string);
assertNull(prettyJson(new ObjectMapper(), "{\"json\":\"json\""));
assertNull(prettyJson(new ObjectMapper(), "{\"json\"\"json\"}"));
} |
@VisibleForTesting
@NonNull <T extends VFSConnectionDetails> FileName getConnectionRootProviderFileName(
@NonNull VFSConnectionFileNameTransformer<T> fileNameTransformer,
@NonNull T details )
throws KettleException {
// Example: "pvfs://my-connection"
ConnectionFileName rootPvfsFileName = getConnectionRootFileName( details );
// Example: "s3://root-path-bucket"
// Would fail if only "s3://"
return fileNameTransformer.toProviderFileName( rootPvfsFileName, details );
} | @Test( expected = IllegalArgumentException.class )
public void testGetConnectionRootProviderFileNameThrowsWhenConnectionNameIsEmpty() throws KettleException {
when( vfsConnectionDetails.getName() ).thenReturn( "" );
when( vfsConnectionDetails.getRootPath() ).thenReturn( "root/path" );
vfsConnectionManagerHelper
.getConnectionRootProviderFileName( vfsConnectionFileNameTransformer, vfsConnectionDetails );
} |
@Nullable
public byte[] getValue() {
return mValue;
} | @Test
public void setValue_UINT32_big() {
final MutableData data = new MutableData(new byte[4]);
data.setValue(0xF0000001L, Data.FORMAT_UINT32_LE, 0);
assertArrayEquals(new byte[] { 0x01, 0x00, 0x00, (byte) 0xF0 } , data.getValue());
} |
public static void ensureAllReadsConsumed(Pipeline pipeline) {
final Set<PCollection<?>> unconsumed = new HashSet<>();
pipeline.traverseTopologically(
new PipelineVisitor.Defaults() {
@Override
public void visitPrimitiveTransform(Node node) {
unconsumed.removeAll(node.getInputs().values());
}
@Override
public void visitValue(PValue value, Node producer) {
String urn = PTransformTranslation.urnForTransformOrNull(producer.getTransform());
if (PTransformTranslation.READ_TRANSFORM_URN.equals(urn)) {
unconsumed.add((PCollection<?>) value);
}
}
});
int i = 0;
for (PCollection<?> unconsumedPCollection : unconsumed) {
consume(unconsumedPCollection, i);
i++;
}
} | @Test
public void matcherProducesUnconsumedValueUnboundedRead() {
Unbounded<Long> transform = Read.from(CountingSource.unbounded());
pipeline.apply(transform);
UnconsumedReads.ensureAllReadsConsumed(pipeline);
validateConsumed();
} |
public LRUCacheEntry delete(final String namespace, final Bytes key) {
final NamedCache cache = getCache(namespace);
if (cache == null) {
return null;
}
final LRUCacheEntry entry;
synchronized (cache) {
final long oldSize = cache.sizeInBytes();
entry = cache.delete(key);
sizeInBytes.getAndAdd(cache.sizeInBytes() - oldSize);
}
return entry;
} | @Test
public void shouldNotBlowUpOnNonExistentNamespaceWhenDeleting() {
final ThreadCache cache = new ThreadCache(logContext, 10000L, new MockStreamsMetrics(new Metrics()));
assertNull(cache.delete(namespace, Bytes.wrap(new byte[]{1})));
} |
@Override
public long nextDelayDuration(int renewTimes) {
if (renewTimes < 0) {
renewTimes = 0;
}
int index = renewTimes;
if (index >= next.length) {
index = next.length - 1;
}
return next[index];
} | @Test
public void testNextDelayDuration() {
long value = this.retryPolicy.nextDelayDuration(times.getAndIncrement());
assertEquals(value, TimeUnit.MINUTES.toMillis(1));
value = this.retryPolicy.nextDelayDuration(times.getAndIncrement());
assertEquals(value, TimeUnit.MINUTES.toMillis(3));
value = this.retryPolicy.nextDelayDuration(times.getAndIncrement());
assertEquals(value, TimeUnit.MINUTES.toMillis(5));
value = this.retryPolicy.nextDelayDuration(times.getAndIncrement());
assertEquals(value, TimeUnit.MINUTES.toMillis(10));
value = this.retryPolicy.nextDelayDuration(times.getAndIncrement());
assertEquals(value, TimeUnit.MINUTES.toMillis(30));
value = this.retryPolicy.nextDelayDuration(times.getAndIncrement());
assertEquals(value, TimeUnit.HOURS.toMillis(1));
} |
@Override
public MapperResult getCapacityList4CorrectUsage(MapperContext context) {
String sql = "SELECT id, tenant_id FROM tenant_capacity WHERE id>? LIMIT ?";
return new MapperResult(sql, CollectionUtils.list(context.getWhereParameter(FieldConstant.ID),
context.getWhereParameter(FieldConstant.LIMIT_SIZE)));
} | @Test
void testGetCapacityList4CorrectUsage() {
Object id = 1;
Object limit = 10;
context.putWhereParameter(FieldConstant.ID, id);
context.putWhereParameter(FieldConstant.LIMIT_SIZE, limit);
MapperResult mapperResult = tenantCapacityMapperByMySql.getCapacityList4CorrectUsage(context);
assertEquals("SELECT id, tenant_id FROM tenant_capacity WHERE id>? LIMIT ?", mapperResult.getSql());
assertArrayEquals(new Object[] {id, limit}, mapperResult.getParamList().toArray());
} |
@Override
public boolean updatesAreDetected(final int type) {
return false;
} | @Test
void assertUpdatesAreDetected() {
assertFalse(metaData.updatesAreDetected(0));
} |
public boolean isShardingTable(final String logicTableName) {
return shardingTables.containsKey(logicTableName);
} | @Test
void assertIsNotShardingTable() {
assertFalse(createMaximumShardingRule().isShardingTable("other_table"));
} |
@Override
public Object parse(final String property, final Object value) {
if (property.equalsIgnoreCase(KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT)) {
validator.validate(property, value);
return value;
}
final ConfigItem configItem = resolver.resolve(property, true)
.orElseThrow(() -> new PropertyNotFoundException(property));
final Object parsedValue = configItem.parseValue(value);
validator.validate(configItem.getPropertyName(), parsedValue);
return parsedValue;
} | @Test
public void shouldCallValidatorForRunScriptConstant() {
// When:
parser.parse(KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT, "something2");
// Then:
verify(validator).validate(KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT, "something2");
} |
public NewIssuesNotification newNewIssuesNotification(Map<String, UserDto> assigneesByUuid) {
verifyAssigneesByUuid(assigneesByUuid);
return new NewIssuesNotification(new DetailsSupplierImpl(assigneesByUuid));
} | @Test
public void newNewIssuesNotification_DetailsSupplier_getRuleDefinitionByRuleKey_fails_with_NPE_if_ruleKey_is_null() {
NewIssuesNotification underTest = this.underTest.newNewIssuesNotification(emptyMap());
DetailsSupplier detailsSupplier = readDetailsSupplier(underTest);
assertThatThrownBy(() -> detailsSupplier.getRuleDefinitionByRuleKey(null))
.isInstanceOf(NullPointerException.class)
.hasMessage("ruleKey can't be null");
} |
@Udf(description = "Converts a string representation of a date in the given format"
+ " into the number of milliseconds since 1970-01-01 00:00:00 UTC/GMT."
+ " Single quotes in the timestamp format can be escaped with '',"
+ " for example: 'yyyy-MM-dd''T''HH:mm:ssX'."
+ " The system default time zone is used when no time zone is explicitly provided.")
public long stringToTimestamp(
@UdfParameter(
description = "The string representation of a date.") final String formattedTimestamp,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
// NB: We do not perform a null check here, preferring to throw an exception as
// there is no sentinel value for a "null" Date.
try {
final StringToTimestampParser timestampParser = parsers.get(formatPattern);
return timestampParser.parse(formattedTimestamp);
} catch (final ExecutionException | RuntimeException e) {
throw new KsqlFunctionException("Failed to parse timestamp '" + formattedTimestamp
+ "' with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
} | @Test
public void shouldThrowOnEmptyString() {
// When:
final KsqlFunctionException e = assertThrows(
KsqlFunctionException.class,
() -> udf.stringToTimestamp("", "yyyy-MM-dd'T'HH:mm:ss.SSS")
);
// Then:
assertThat(e.getMessage(), containsString("Text '' could not be parsed at index 0"));
} |
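The quoting rule the UDF documents (single quotes around literal text, '' for an escaped quote) is standard java.time pattern syntax, and the system default zone is applied when the pattern carries none. A small sketch of that parse-to-epoch-millis step, assuming a pattern without an explicit zone (patterns with a zone, like the X in the UDF's example, would parse to a ZonedDateTime instead):

import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

public final class StringToTimestampSketch {

    // Parses a formatted timestamp to epoch millis, using the system default zone.
    static long parse(String formatted, String pattern) {
        DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern);
        LocalDateTime ldt = LocalDateTime.parse(formatted, formatter);
        return ldt.atZone(ZoneId.systemDefault()).toInstant().toEpochMilli();
    }

    public static void main(String[] args) {
        // 'T' is a quoted literal inside the pattern, as in the UDF's documentation.
        System.out.println(parse("2024-01-15T10:30:00.000", "yyyy-MM-dd'T'HH:mm:ss.SSS"));
    }
}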
public void updateNextVisibleTime(String topic, String group, int queueId, long queueOffset, long popTime, long nextVisibleTime) {
String key = buildKey(topic, group);
ConcurrentHashMap<Integer/*queueId*/, OrderInfo> qs = table.get(key);
if (qs == null) {
log.warn("orderInfo of queueId is null. key: {}, queueOffset: {}, queueId: {}", key, queueOffset, queueId);
return;
}
OrderInfo orderInfo = qs.get(queueId);
if (orderInfo == null) {
log.warn("orderInfo is null, key: {}, queueOffset: {}, queueId: {}", key, queueOffset, queueId);
return;
}
if (popTime != orderInfo.popTime) {
log.warn("popTime is not equal to orderInfo saved. key: {}, queueOffset: {}, orderInfo: {}, popTime: {}", key, queueOffset, orderInfo, popTime);
return;
}
orderInfo.updateOffsetNextVisibleTime(queueOffset, nextVisibleTime);
updateLockFreeTimestamp(topic, group, queueId, orderInfo);
} | @Test
public void testUpdateNextVisibleTime() {
long invisibleTime = 3000;
StringBuilder orderInfoBuilder = new StringBuilder();
consumerOrderInfoManager.update(
null,
false,
TOPIC,
GROUP,
QUEUE_ID_0,
popTime,
1,
Lists.newArrayList(1L, 2L, 3L),
orderInfoBuilder
);
consumerOrderInfoManager.updateNextVisibleTime(TOPIC, GROUP, QUEUE_ID_0, 2L, popTime, System.currentTimeMillis() + invisibleTime);
assertEncodeAndDecode();
assertEquals(2, consumerOrderInfoManager.commitAndNext(TOPIC, GROUP, QUEUE_ID_0, 1L, popTime));
assertEncodeAndDecode();
assertEquals(2, consumerOrderInfoManager.commitAndNext(TOPIC, GROUP, QUEUE_ID_0, 3L, popTime));
assertEncodeAndDecode();
await().atMost(Duration.ofSeconds(invisibleTime + 1)).until(() -> !consumerOrderInfoManager.checkBlock(null, TOPIC, GROUP, QUEUE_ID_0, invisibleTime));
orderInfoBuilder = new StringBuilder();
consumerOrderInfoManager.update(
null,
false,
TOPIC,
GROUP,
QUEUE_ID_0,
popTime,
1,
Lists.newArrayList(2L, 3L, 4L),
orderInfoBuilder
);
consumerOrderInfoManager.updateNextVisibleTime(TOPIC, GROUP, QUEUE_ID_0, 2L, popTime, System.currentTimeMillis() + invisibleTime);
assertEncodeAndDecode();
assertEquals(2, consumerOrderInfoManager.commitAndNext(TOPIC, GROUP, QUEUE_ID_0, 3L, popTime));
assertEncodeAndDecode();
assertEquals(2, consumerOrderInfoManager.commitAndNext(TOPIC, GROUP, QUEUE_ID_0, 4L, popTime));
assertEncodeAndDecode();
assertTrue(consumerOrderInfoManager.checkBlock(null, TOPIC, GROUP, QUEUE_ID_0, invisibleTime));
assertEquals(5L, consumerOrderInfoManager.commitAndNext(TOPIC, GROUP, QUEUE_ID_0, 2L, popTime));
assertEncodeAndDecode();
assertFalse(consumerOrderInfoManager.checkBlock(null, TOPIC, GROUP, QUEUE_ID_0, invisibleTime));
} |
@Override
public ObjectNode encode(MappingEntry mappingEntry, CodecContext context) {
checkNotNull(mappingEntry, "Mapping entry cannot be null");
final ObjectNode result = context.mapper().createObjectNode()
.put(ID, Long.toString(mappingEntry.id().value()))
.put(DEVICE_ID, mappingEntry.deviceId().toString())
.put(STATE, mappingEntry.state().toString());
if (mappingEntry.key() != null) {
final JsonCodec<MappingKey> keyCodec =
context.codec(MappingKey.class);
result.set(KEY, keyCodec.encode(mappingEntry.key(), context));
}
if (mappingEntry.value() != null) {
final JsonCodec<MappingValue> valueCodec =
context.codec(MappingValue.class);
result.set(VALUE, valueCodec.encode(mappingEntry.value(), context));
}
return result;
} | @Test
public void testMappingEntryEncode() {
MappingAddress address = MappingAddresses.ipv4MappingAddress(IPV4_PREFIX);
MappingInstruction unicastWeight = MappingInstructions.unicastWeight(UNICAST_WEIGHT);
MappingInstruction unicastPriority = MappingInstructions.unicastPriority(UNICAST_PRIORITY);
MappingInstruction multicastWeight = MappingInstructions.multicastWeight(MULTICAST_WEIGHT);
MappingInstruction multicastPriority = MappingInstructions.multicastPriority(MULTICAST_PRIORITY);
MappingKey key = DefaultMappingKey.builder()
.withAddress(address)
.build();
MappingTreatment treatment = DefaultMappingTreatment.builder()
.add(unicastWeight)
.add(unicastPriority)
.add(multicastWeight)
.add(multicastPriority)
.withAddress(address)
.build();
MappingAction action = MappingActions.noAction();
MappingValue value = DefaultMappingValue.builder()
.add(treatment)
.withAction(action)
.build();
Mapping mapping = DefaultMapping.builder()
.withId(ID)
.forDevice(DEVICE_ID)
.withKey(key)
.withValue(value)
.build();
MappingEntry entry = new DefaultMappingEntry(mapping, STATE);
ObjectNode entryJson = entryCodec.encode(entry, context);
assertThat(entryJson, MappingEntryJsonMatcher.matchesMappingEntry(entry));
} |
public byte[] createFor(Account account, Device device, boolean includeE164) throws InvalidKeyException {
SenderCertificate.Certificate.Builder builder = SenderCertificate.Certificate.newBuilder()
.setSenderDevice(Math.toIntExact(device.getId()))
.setExpires(System.currentTimeMillis() + TimeUnit.DAYS.toMillis(expiresDays))
.setIdentityKey(ByteString.copyFrom(account.getIdentityKey(IdentityType.ACI).serialize()))
.setSigner(serverCertificate)
.setSenderUuid(account.getUuid().toString());
if (includeE164) {
builder.setSender(account.getNumber());
}
byte[] certificate = builder.build().toByteArray();
byte[] signature;
try {
signature = Curve.calculateSignature(privateKey, certificate);
} catch (org.signal.libsignal.protocol.InvalidKeyException e) {
throw new InvalidKeyException(e);
}
return SenderCertificate.newBuilder()
.setCertificate(ByteString.copyFrom(certificate))
.setSignature(ByteString.copyFrom(signature))
.build()
.toByteArray();
} | @Test
void testCreateFor() throws IOException, InvalidKeyException, org.signal.libsignal.protocol.InvalidKeyException {
final Account account = mock(Account.class);
final Device device = mock(Device.class);
final CertificateGenerator certificateGenerator = new CertificateGenerator(
Base64.getDecoder().decode(SIGNING_CERTIFICATE),
Curve.decodePrivatePoint(Base64.getDecoder().decode(SIGNING_KEY)), 1);
when(account.getIdentityKey(IdentityType.ACI)).thenReturn(IDENTITY_KEY);
when(account.getUuid()).thenReturn(UUID.randomUUID());
when(account.getNumber()).thenReturn("+18005551234");
when(device.getId()).thenReturn((byte) 4);
assertTrue(certificateGenerator.createFor(account, device, true).length > 0);
assertTrue(certificateGenerator.createFor(account, device, false).length > 0);
} |
public E pop() {
if (mSize == 0) {
throw new EmptyStackException();
}
return mElements.set(--mSize, null);
} | @Test
void testIllegalPop() throws Exception {
Assertions.assertThrows(EmptyStackException.class, () -> {
Stack<String> stack = new Stack<String>();
stack.pop();
});
} |
@SuppressWarnings("argument")
@VisibleForTesting
ProducerRecord<byte[], byte[]> transformOutput(Row row) {
row = castRow(row, row.getSchema(), schema);
String topic = Iterables.getOnlyElement(getTopics());
byte[] key = null;
byte[] payload;
List<Header> headers = ImmutableList.of();
Long timestampMillis = null;
if (schema.hasField(Schemas.MESSAGE_KEY_FIELD)) {
key = row.getBytes(Schemas.MESSAGE_KEY_FIELD);
}
if (schema.hasField(Schemas.EVENT_TIMESTAMP_FIELD)) {
ReadableDateTime time = row.getDateTime(Schemas.EVENT_TIMESTAMP_FIELD);
if (time != null) {
timestampMillis = time.getMillis();
}
}
if (schema.hasField(Schemas.HEADERS_FIELD)) {
Collection<Row> headerRows = checkArgumentNotNull(row.getArray(Schemas.HEADERS_FIELD));
ImmutableList.Builder<Header> headersBuilder = ImmutableList.builder();
headerRows.forEach(
entry -> {
String headerKey = checkArgumentNotNull(entry.getString(Schemas.HEADERS_KEY_FIELD));
Collection<byte[]> values =
checkArgumentNotNull(entry.getArray(Schemas.HEADERS_VALUES_FIELD));
values.forEach(value -> headersBuilder.add(new RecordHeader(headerKey, value)));
});
headers = headersBuilder.build();
}
if (payloadSerializer == null) {
payload = row.getBytes(Schemas.PAYLOAD_FIELD);
} else {
payload =
payloadSerializer.serialize(checkArgumentNotNull(row.getRow(Schemas.PAYLOAD_FIELD)));
}
return new ProducerRecord<>(topic, null, timestampMillis, key, payload, headers);
} | @Test
public void reorderRowToRecord() {
Schema schema =
Schema.builder()
.addField(Schemas.HEADERS_FIELD, Schemas.HEADERS_FIELD_TYPE)
.addByteArrayField(Schemas.PAYLOAD_FIELD)
.build();
Schema rowSchema =
Schema.builder()
.addByteArrayField(Schemas.PAYLOAD_FIELD)
.addField(Schemas.HEADERS_FIELD, Schemas.HEADERS_FIELD_TYPE)
.build();
NestedPayloadKafkaTable table = newTable(schema, Optional.empty());
Row row = Row.withSchema(rowSchema).attachValues("abc".getBytes(UTF_8), ImmutableList.of());
ProducerRecord<byte[], byte[]> output = table.transformOutput(row);
assertEquals("abc", new String(output.value(), UTF_8));
assertEquals(0, output.headers().toArray().length);
} |
public static Method getMostSpecificMethod(Method method, Class<?> targetClass) {
if (targetClass != null && targetClass != method.getDeclaringClass() && isOverridable(method, targetClass)) {
try {
if (Modifier.isPublic(method.getModifiers())) {
try {
return targetClass.getMethod(method.getName(), method.getParameterTypes());
} catch (NoSuchMethodException ex) {
return method;
}
} else {
return method;
}
} catch (SecurityException ex) {
// Security settings are disallowing reflective access; fall back to 'method' below.
}
}
return method;
} | @Test
public void testGetMostSpecificNotPublicMethod() throws NoSuchMethodException {
Method method = AbstractMap.class.getDeclaredMethod("clone");
Method specificMethod = ClassUtils.getMostSpecificMethod(method, HashMap.class);
assertNotEquals(HashMap.class.getDeclaredMethod("clone"), specificMethod);
} |
private String toStringDate() {
String retval;
if ( value == null ) {
return null;
}
SimpleDateFormat df = new SimpleDateFormat( "yyyy/MM/dd HH:mm:ss.SSS", Locale.US );
if ( isNull() || value.getDate() == null ) {
retval = Const.NULL_DATE;
} else {
retval = df.format( value.getDate() );
}
/*
* This code was removed as TYPE_VALUE_DATE does not know "length", so this could never be called anyway else {
* StringBuffer ret; if (isNull() || value.getDate()==null) ret=new StringBuffer(Const.NULL_DATE); else ret=new
* StringBuffer(df.format(value.getDate()).toString()); Const.rightPad(ret, getLength()<=10?10:getLength());
* retval=ret.toString(); }
*/
return retval;
} | @Test
public void testToStringDate() {
String result = null;
Value vs1 = new Value( "Name", Value.VALUE_TYPE_DATE );
result = vs1.toString( true );
assertEquals( "", result );
Value vs2 = new Value( "Name", Value.VALUE_TYPE_DATE );
vs2.setNull( true );
result = vs2.toString( true );
assertEquals( "", result );
SimpleDateFormat df = new SimpleDateFormat( "yyyy/MM/dd HH:mm:ss.SSS" );
Date dt = df.parse( "2006/03/01 17:01:02.005", new ParsePosition( 0 ) );
Value vs3 = new Value( "Name", Value.VALUE_TYPE_DATE );
vs3.setValue( dt );
result = vs3.toString( true );
assertEquals( "2006/03/01 17:01:02.005", result );
Value vs4 = new Value( "Name", Value.VALUE_TYPE_DATE );
vs4.setNull( true );
vs4.setLength( 2 );
result = vs4.toString( true );
assertEquals( "", result );
Value vs5 = new Value( "Name", Value.VALUE_TYPE_DATE );
vs3.setValue( dt ); // note: this re-sets vs3, not vs5; vs5 stays unset, which the "" assertion below relies on
vs5.setLength( 10 );
result = vs5.toString( true );
assertEquals( "", result );
} |
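A self-contained sketch of the formatting contract used by toStringDate: a fixed millisecond-precision pattern with Locale.US so the digits are stable across JVM default locales (the rendered instant still depends on the default time zone).

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;

public class DateFormatSketch {
  public static void main(String[] args) {
    SimpleDateFormat df = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss.SSS", Locale.US);
    // Epoch millis 0; the printed value shifts with the JVM's default zone.
    System.out.println(df.format(new Date(0L)));
  }
}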
@Override
public void doPut(HttpServletRequest req, HttpServletResponse resp) {
resp.setContentType(CONTENT_TYPE);
try (PrintWriter out = resp.getWriter()) {
out.println(msgPartOne + " Put " + msgPartTwo);
} catch (Exception e) {
LOGGER.error("Exception occurred during PUT request processing", e);
}
} | @Test
void testDoPut() throws Exception {
HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
HttpServletResponse mockResp = Mockito.mock(HttpServletResponse.class);
StringWriter stringWriter = new StringWriter();
PrintWriter printWriter = new PrintWriter(stringWriter);
when(mockResp.getWriter()).thenReturn(printWriter);
AppServlet curServlet = new AppServlet();
curServlet.doPut(mockReq, mockResp);
printWriter.flush();
assertTrue(stringWriter.toString().contains(msgPartOne + " Put " + msgPartTwo));
} |
@Override
public String generateSqlType(Dialect dialect) {
return switch (dialect.getId()) {
case MsSql.ID -> "VARBINARY(MAX)";
case Oracle.ID, H2.ID -> "BLOB";
case PostgreSql.ID -> "BYTEA";
default -> throw new IllegalArgumentException("Unsupported dialect id " + dialect.getId());
};
} | @Test
public void generateSqlType_throws_IAE_for_unknown_dialect() {
Dialect dialect = mock(Dialect.class);
when(dialect.getId()).thenReturn("AAA");
assertThatThrownBy(() -> underTest.generateSqlType(dialect))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Unsupported dialect id AAA");
} |
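A sketch of the happy path in the same mocking style as the test above, reusing the underTest fixture and the PostgreSql.ID constant from the switch; assumes Mockito and AssertJ are available, as the existing test implies.

@Test
public void generateSqlType_returns_bytea_for_postgres() {
  Dialect dialect = mock(Dialect.class);
  when(dialect.getId()).thenReturn(PostgreSql.ID);
  assertThat(underTest.generateSqlType(dialect)).isEqualTo("BYTEA");
}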
@Override
public boolean removeIf(IntPredicate filter) {
throw new UnsupportedOperationException("RangeSet is immutable");
} | @Test(expected = UnsupportedOperationException.class)
public void removeIfPrimitive() {
RangeSet rs = new RangeSet(4);
rs.removeIf((int i) -> i == 3);
} |
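A small sketch of the immutability contract the test asserts, using the same RangeSet constructor; mutators throw while the instance itself remains usable.

RangeSet rs = new RangeSet(4);
try {
    rs.removeIf(i -> i == 3); // any mutator is expected to throw
} catch (UnsupportedOperationException expected) {
    System.out.println("immutable as advertised: " + expected.getMessage());
}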
public static DataflowRunner fromOptions(PipelineOptions options) {
DataflowPipelineOptions dataflowOptions =
PipelineOptionsValidator.validate(DataflowPipelineOptions.class, options);
ArrayList<String> missing = new ArrayList<>();
if (dataflowOptions.getAppName() == null) {
missing.add("appName");
}
if (Strings.isNullOrEmpty(dataflowOptions.getRegion())
&& isServiceEndpoint(dataflowOptions.getDataflowEndpoint())) {
missing.add("region");
}
if (missing.size() > 0) {
throw new IllegalArgumentException(
"Missing required pipeline options: " + Joiner.on(',').join(missing));
}
validateWorkerSettings(
PipelineOptionsValidator.validate(DataflowPipelineWorkerPoolOptions.class, options));
PathValidator validator = dataflowOptions.getPathValidator();
String gcpTempLocation;
try {
gcpTempLocation = dataflowOptions.getGcpTempLocation();
} catch (Exception e) {
throw new IllegalArgumentException(
"DataflowRunner requires gcpTempLocation, "
+ "but failed to retrieve a value from PipelineOptions",
e);
}
validator.validateOutputFilePrefixSupported(gcpTempLocation);
String stagingLocation;
try {
stagingLocation = dataflowOptions.getStagingLocation();
} catch (Exception e) {
throw new IllegalArgumentException(
"DataflowRunner requires stagingLocation, "
+ "but failed to retrieve a value from PipelineOptions",
e);
}
validator.validateOutputFilePrefixSupported(stagingLocation);
if (!isNullOrEmpty(dataflowOptions.getSaveProfilesToGcs())) {
validator.validateOutputFilePrefixSupported(dataflowOptions.getSaveProfilesToGcs());
}
if (dataflowOptions.getFilesToStage() != null) {
// The user specifically requested these files, so fail now if they do not exist.
// (automatically detected classpath elements are permitted to not exist, so later
// staging will not fail on nonexistent files)
dataflowOptions.getFilesToStage().stream()
.forEach(
stagedFileSpec -> {
File localFile;
if (stagedFileSpec.contains("=")) {
String[] components = stagedFileSpec.split("=", 2);
localFile = new File(components[1]);
} else {
localFile = new File(stagedFileSpec);
}
if (!localFile.exists()) {
// should be FileNotFoundException, but for build-time backwards compatibility
// cannot add checked exception
throw new RuntimeException(
String.format("Non-existent files specified in filesToStage: %s", localFile));
}
});
} else {
dataflowOptions.setFilesToStage(
detectClassPathResourcesToStage(DataflowRunner.class.getClassLoader(), options));
if (dataflowOptions.getFilesToStage().isEmpty()) {
throw new IllegalArgumentException("No files to stage has been found.");
} else {
LOG.info(
"PipelineOptions.filesToStage was not specified. "
+ "Defaulting to files from the classpath: will stage {} files. "
+ "Enable logging at DEBUG level to see which files will be staged.",
dataflowOptions.getFilesToStage().size());
LOG.debug("Classpath elements: {}", dataflowOptions.getFilesToStage());
}
}
// Verify jobName according to service requirements, converting to lowercase if
// necessary.
String jobName = dataflowOptions.getJobName().toLowerCase();
checkArgument(
jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?"),
"JobName invalid; the name must consist of only the characters "
+ "[-a-z0-9], starting with a letter and ending with a letter "
+ "or number");
if (!jobName.equals(dataflowOptions.getJobName())) {
LOG.info(
"PipelineOptions.jobName did not match the service requirements. "
+ "Using {} instead of {}.",
jobName,
dataflowOptions.getJobName());
}
dataflowOptions.setJobName(jobName);
// Verify project
String project = dataflowOptions.getProject();
if (project.matches("[0-9]*")) {
throw new IllegalArgumentException(
"Project ID '"
+ project
+ "' invalid. Please make sure you specified the Project ID, not project number.");
} else if (!project.matches(PROJECT_ID_REGEXP)) {
throw new IllegalArgumentException(
"Project ID '"
+ project
+ "' invalid. Please make sure you specified the Project ID, not project"
+ " description.");
}
DataflowPipelineDebugOptions debugOptions =
dataflowOptions.as(DataflowPipelineDebugOptions.class);
// Verify the number of worker threads is a valid value
if (debugOptions.getNumberOfWorkerHarnessThreads() < 0) {
throw new IllegalArgumentException(
"Number of worker harness threads '"
+ debugOptions.getNumberOfWorkerHarnessThreads()
+ "' invalid. Please make sure the value is non-negative.");
}
// Verify that if recordJfrOnGcThrashing is set, the pipeline is at least on java 11
if (dataflowOptions.getRecordJfrOnGcThrashing()
&& Environments.getJavaVersion() == Environments.JavaVersion.java8) {
throw new IllegalArgumentException(
"recordJfrOnGcThrashing is only supported on java 9 and up.");
}
if (dataflowOptions.isStreaming() && dataflowOptions.getGcsUploadBufferSizeBytes() == null) {
dataflowOptions.setGcsUploadBufferSizeBytes(GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT);
}
// Adding the Java version to the SDK name for user's and support convenience.
String agentJavaVer = "(JRE 8 environment)";
if (Environments.getJavaVersion() != Environments.JavaVersion.java8) {
agentJavaVer =
String.format("(JRE %s environment)", Environments.getJavaVersion().specification());
}
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String userAgentName = dataflowRunnerInfo.getName();
Preconditions.checkArgument(
!userAgentName.equals(""), "Dataflow runner's `name` property cannot be empty.");
String userAgentVersion = dataflowRunnerInfo.getVersion();
Preconditions.checkArgument(
!userAgentVersion.equals(""), "Dataflow runner's `version` property cannot be empty.");
String userAgent =
String.format("%s/%s%s", userAgentName, userAgentVersion, agentJavaVer).replace(" ", "_");
dataflowOptions.setUserAgent(userAgent);
return new DataflowRunner(dataflowOptions);
} | @Test
public void testNonExistentProfileLocation() throws IOException {
DataflowPipelineOptions options = buildPipelineOptions();
options.setSaveProfilesToGcs(NON_EXISTENT_BUCKET);
thrown.expect(RuntimeException.class);
thrown.expectCause(instanceOf(FileNotFoundException.class));
thrown.expectMessage(
containsString("Output path does not exist or is not writeable: " + NON_EXISTENT_BUCKET));
DataflowRunner.fromOptions(options);
ArgumentCaptor<Job> jobCaptor = ArgumentCaptor.forClass(Job.class);
Mockito.verify(mockJobs).create(eq(PROJECT_ID), eq(REGION_ID), jobCaptor.capture());
assertValidJob(jobCaptor.getValue());
} |
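A standalone sketch of just the job-name normalization step from fromOptions: lowercase first, then check the service pattern; the sample name is illustrative.

public class JobNameSketch {
  public static void main(String[] args) {
    String jobName = "My-Pipeline-01".toLowerCase();
    // Must start with a letter, end with a letter or digit, and use only [-a-z0-9] in between.
    boolean valid = jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?");
    System.out.println(jobName + " valid=" + valid); // my-pipeline-01 valid=true
  }
}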
@ScalarOperator(SUBTRACT)
@SqlType(StandardTypes.INTEGER)
public static long subtract(@SqlType(StandardTypes.INTEGER) long left, @SqlType(StandardTypes.INTEGER) long right)
{
try {
return Math.subtractExact((int) left, (int) right);
}
catch (ArithmeticException e) {
throw new PrestoException(NUMERIC_VALUE_OUT_OF_RANGE, format("integer subtraction overflow: %s - %s", left, right), e);
}
} | @Test
public void testSubtract()
{
assertFunction("INTEGER'37' - INTEGER'37'", INTEGER, 0);
assertFunction("INTEGER'37' - INTEGER'17'", INTEGER, 37 - 17);
assertFunction("INTEGER'17' - INTEGER'37'", INTEGER, 17 - 37);
assertFunction("INTEGER'17' - INTEGER'17'", INTEGER, 0);
assertNumericOverflow(format("INTEGER'%s' - INTEGER'1'", Integer.MIN_VALUE), "integer subtraction overflow: -2147483648 - 1");
} |
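A self-contained illustration of the overflow guard above: Math.subtractExact behaves like plain subtraction until the 32-bit result cannot be represented, at which point it throws the ArithmeticException that subtract() rewraps.

public class SubtractExactSketch {
  public static void main(String[] args) {
    System.out.println(Math.subtractExact(17, 37)); // -20
    try {
      Math.subtractExact(Integer.MIN_VALUE, 1); // -2147483648 - 1 overflows int
    } catch (ArithmeticException e) {
      System.out.println("overflow: " + e.getMessage());
    }
  }
}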
@Override
protected ActivityState<TransportProtos.SessionInfoProto> updateState(UUID sessionId, ActivityState<TransportProtos.SessionInfoProto> state) {
SessionMetaData session = sessions.get(sessionId);
if (session == null) {
return null;
}
state.setMetadata(session.getSessionInfo());
var sessionInfo = state.getMetadata();
if (sessionInfo.getGwSessionIdMSB() == 0L || sessionInfo.getGwSessionIdLSB() == 0L) {
return state;
}
var gwSessionId = new UUID(sessionInfo.getGwSessionIdMSB(), sessionInfo.getGwSessionIdLSB());
SessionMetaData gwSession = sessions.get(gwSessionId);
if (gwSession == null || !gwSession.isOverwriteActivityTime()) {
return state;
}
long lastRecordedTime = state.getLastRecordedTime();
long gwLastRecordedTime = getLastRecordedTime(gwSessionId);
log.debug("Session with id: [{}] has gateway session with id: [{}] with overwrite activity time enabled. " +
"Updating last activity time. Session last recorded time: [{}], gateway session last recorded time: [{}].",
sessionId, gwSessionId, lastRecordedTime, gwLastRecordedTime);
state.setLastRecordedTime(Math.max(lastRecordedTime, gwLastRecordedTime));
return state;
} | @Test
void givenNoGwSessionId_whenUpdatingActivityState_thenShouldReturnSameInstanceWithUpdatedSessionInfo() {
// GIVEN
TransportProtos.SessionInfoProto sessionInfo = TransportProtos.SessionInfoProto.newBuilder()
.setSessionIdMSB(SESSION_ID.getMostSignificantBits())
.setSessionIdLSB(SESSION_ID.getLeastSignificantBits())
.build();
SessionMsgListener listenerMock = mock(SessionMsgListener.class);
sessions.put(SESSION_ID, new SessionMetaData(sessionInfo, TransportProtos.SessionType.ASYNC, listenerMock));
long lastRecordedTime = 123L;
ActivityState<TransportProtos.SessionInfoProto> state = new ActivityState<>();
state.setLastRecordedTime(lastRecordedTime);
state.setMetadata(TransportProtos.SessionInfoProto.getDefaultInstance());
when(transportServiceMock.updateState(SESSION_ID, state)).thenCallRealMethod();
// WHEN
ActivityState<TransportProtos.SessionInfoProto> updatedState = transportServiceMock.updateState(SESSION_ID, state);
// THEN
assertThat(updatedState).isSameAs(state);
assertThat(updatedState.getLastRecordedTime()).isEqualTo(lastRecordedTime);
assertThat(updatedState.getMetadata()).isEqualTo(sessionInfo);
} |
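A small sketch of the gateway-session key used above: the UUID travels as two longs in the protobuf and is reassembled losslessly with new UUID(msb, lsb). The zero/zero check in updateState works because an unset proto field defaults to 0.

import java.util.UUID;

public class GwSessionIdSketch {
  public static void main(String[] args) {
    UUID original = UUID.randomUUID();
    long msb = original.getMostSignificantBits();
    long lsb = original.getLeastSignificantBits();
    UUID roundTripped = new UUID(msb, lsb);
    System.out.println(original.equals(roundTripped)); // true: the split is lossless
  }
}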