focal_method | test_case |
---|---|
@Override
@NotNull
public List<PartitionStatistics> select(Collection<PartitionStatistics> statistics, Set<Long> excludeTables) {
long now = System.currentTimeMillis();
return statistics.stream()
.filter(p -> p.getNextCompactionTime() <= now)
.filter(p -> !excludeTables.contains(p.getPartition().getTableId()))
.filter(p -> isReadyForCompaction(p, now))
.collect(Collectors.toList());
} | @Test
public void testEmpty() {
List<PartitionStatistics> statisticsList = new ArrayList<>();
Assert.assertEquals(0, selector.select(statisticsList, new HashSet<Long>()).size());
} |
@Override
public void publish(ScannerReportWriter writer) {
List<Map.Entry<String, String>> properties = new ArrayList<>(cache.getAll().entrySet());
properties.add(constructScmInfo());
properties.add(constructCiInfo());
    // properties that are automatically included in the report so that
    // they can be included in webhook payloads
properties.addAll(config.getProperties().entrySet()
.stream()
.filter(e -> e.getKey().startsWith(CorePropertyDefinitions.SONAR_ANALYSIS))
.toList());
writer.writeContextProperties(properties
.stream()
.map(e -> ScannerReport.ContextProperty.newBuilder()
.setKey(e.getKey())
.setValue(e.getValue())
.build())
.toList());
} | @Test
public void publish_writes_properties_to_report() {
cache.put("foo1", "bar1");
cache.put("foo2", "bar2");
underTest.publish(writer);
List<ScannerReport.ContextProperty> expected = Arrays.asList(
newContextProperty("foo1", "bar1"),
newContextProperty("foo2", "bar2"),
newContextProperty("sonar.analysis.detectedscm", "undetected"),
newContextProperty("sonar.analysis.detectedci", "undetected"));
expectWritten(expected);
} |
@Override
public boolean hasConflict(ConcurrentOperation thisOperation, ConcurrentOperation otherOperation) {
// TODO : UUID's can clash even for insert/insert, handle that case.
Set<String> partitionBucketIdSetForFirstInstant = thisOperation
.getMutatedPartitionAndFileIds()
.stream()
.map(partitionAndFileId ->
BucketIdentifier.partitionBucketIdStr(partitionAndFileId.getLeft(), BucketIdentifier.bucketIdFromFileId(partitionAndFileId.getRight()))
).collect(Collectors.toSet());
Set<String> partitionBucketIdSetForSecondInstant = otherOperation
.getMutatedPartitionAndFileIds()
.stream()
.map(partitionAndFileId ->
BucketIdentifier.partitionBucketIdStr(partitionAndFileId.getLeft(), BucketIdentifier.bucketIdFromFileId(partitionAndFileId.getRight()))
).collect(Collectors.toSet());
Set<String> intersection = new HashSet<>(partitionBucketIdSetForFirstInstant);
intersection.retainAll(partitionBucketIdSetForSecondInstant);
if (!intersection.isEmpty()) {
LOG.info("Found conflicting writes between first operation = " + thisOperation
+ ", second operation = " + otherOperation + " , intersecting bucket ids " + intersection);
return true;
}
return false;
} | @Test
public void testConcurrentWritesWithDifferentPartition() throws Exception {
createCommit(metaClient.createNewInstantTime());
HoodieActiveTimeline timeline = metaClient.getActiveTimeline();
// consider commits before this are all successful
Option<HoodieInstant> lastSuccessfulInstant = timeline.getCommitsTimeline().filterCompletedInstants().lastInstant();
// writer 1 starts
String currentWriterInstant = metaClient.createNewInstantTime();
createInflightCommit(currentWriterInstant, HoodieTestDataGenerator.DEFAULT_SECOND_PARTITION_PATH);
// writer 2 starts and finishes
String newInstantTime = metaClient.createNewInstantTime();
createCommit(newInstantTime);
Option<HoodieInstant> currentInstant = Option.of(new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, currentWriterInstant));
SimpleConcurrentFileWritesConflictResolutionStrategy strategy = new BucketIndexConcurrentFileWritesConflictResolutionStrategy();
HoodieCommitMetadata currentMetadata = createCommitMetadata(currentWriterInstant, HoodieTestDataGenerator.DEFAULT_SECOND_PARTITION_PATH);
metaClient.reloadActiveTimeline();
List<HoodieInstant> candidateInstants = strategy.getCandidateInstants(metaClient, currentInstant.get(), lastSuccessfulInstant).collect(
Collectors.toList());
// there should be 1 candidate instant
Assertions.assertEquals(1, candidateInstants.size());
ConcurrentOperation thatCommitOperation = new ConcurrentOperation(candidateInstants.get(0), metaClient);
ConcurrentOperation thisCommitOperation = new ConcurrentOperation(currentInstant.get(), currentMetadata);
// there should be no conflict between writer 1 and writer 2
Assertions.assertFalse(strategy.hasConflict(thisCommitOperation, thatCommitOperation));
} |
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
// Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
// to Runner v2.
if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_runner_v2")) {
LOG.info(
"Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
+ " transforms or pipeline needed a transform upgrade.");
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
}
}
if (useUnifiedWorker(options)) {
if (hasExperiment(options, "disable_runner_v2")
|| hasExperiment(options, "disable_runner_v2_until_2023")
|| hasExperiment(options, "disable_prime_runner_v2")) {
throw new IllegalArgumentException(
"Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
}
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("use_runner_v2")) {
experiments.add("use_runner_v2");
}
if (!experiments.contains("use_unified_worker")) {
experiments.add("use_unified_worker");
}
if (!experiments.contains("beam_fn_api")) {
experiments.add("beam_fn_api");
}
if (!experiments.contains("use_portable_job_submission")) {
experiments.add("use_portable_job_submission");
}
options.setExperiments(ImmutableList.copyOf(experiments));
}
logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
logWarningIfBigqueryDLQUnused(pipeline);
if (shouldActAsStreaming(pipeline)) {
options.setStreaming(true);
if (useUnifiedWorker(options)) {
options.setEnableStreamingEngine(true);
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("enable_streaming_engine")) {
experiments.add("enable_streaming_engine");
}
if (!experiments.contains("enable_windmill_service")) {
experiments.add("enable_windmill_service");
}
}
}
if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
ProjectionPushdownOptimizer.optimize(pipeline);
}
LOG.info(
"Executing pipeline on the Dataflow Service, which will have billing implications "
+ "related to Google Compute Engine usage and other Google Cloud Services.");
DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);
// This incorrectly puns the worker harness container image (which implements v1beta3 API)
// with the SDK harness image (which implements Fn API).
//
// The same Environment is used in different and contradictory ways, depending on whether
// it is a v1 or v2 job submission.
RunnerApi.Environment defaultEnvironmentForDataflow =
Environments.createDockerEnvironment(workerHarnessContainerImageURL);
    // The SdkComponents for portable and non-portable job submission must be kept distinct. Both
// need the default environment.
SdkComponents portableComponents = SdkComponents.create();
portableComponents.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
RunnerApi.Pipeline portablePipelineProto =
PipelineTranslation.toProto(pipeline, portableComponents, false);
// Note that `stageArtifacts` has to be called before `resolveArtifact` because
// `resolveArtifact` updates local paths to staged paths in pipeline proto.
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
portablePipelineProto = resolveArtifacts(portablePipelineProto);
portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Portable pipeline proto:\n{}",
TextFormat.printer().printToString(portablePipelineProto));
}
// Stage the portable pipeline proto, retrieving the staged pipeline path, then update
// the options on the new job
// TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();
DataflowPackage stagedPipeline =
options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());
if (useUnifiedWorker(options)) {
LOG.info("Skipping v1 transform replacements since job will run on v2.");
} else {
// Now rewrite things to be as needed for v1 (mutates the pipeline)
// This way the job submitted is valid for v1 and v2, simultaneously
replaceV1Transforms(pipeline);
}
// Capture the SdkComponents for look up during step translations
SdkComponents dataflowV1Components = SdkComponents.create();
dataflowV1Components.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
// No need to perform transform upgrading for the Runner v1 proto.
RunnerApi.Pipeline dataflowV1PipelineProto =
PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Dataflow v1 pipeline proto:\n{}",
TextFormat.printer().printToString(dataflowV1PipelineProto));
}
// Set a unique client_request_id in the CreateJob request.
// This is used to ensure idempotence of job creation across retried
// attempts to create a job. Specifically, if the service returns a job with
// a different client_request_id, it means the returned one is a different
// job previously created with the same job name, and that the job creation
// has been effectively rejected. The SDK should return
// Error::Already_Exists to user in that case.
int randomNum = new Random().nextInt(9000) + 1000;
String requestId =
DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
.withZone(DateTimeZone.UTC)
.print(DateTimeUtils.currentTimeMillis())
+ "_"
+ randomNum;
JobSpecification jobSpecification =
translator.translate(
pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);
if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_staged_dataflow_worker_jar")) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("use_staged_dataflow_worker_jar")
.build());
}
}
Job newJob = jobSpecification.getJob();
try {
newJob
.getEnvironment()
.setSdkPipelineOptions(
MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
} catch (IOException e) {
throw new IllegalArgumentException(
"PipelineOptions specified failed to serialize to JSON.", e);
}
newJob.setClientRequestId(requestId);
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String version = dataflowRunnerInfo.getVersion();
checkState(
!"${pom.version}".equals(version),
"Unable to submit a job to the Dataflow service with unset version ${pom.version}");
LOG.info("Dataflow SDK version: {}", version);
newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
// The Dataflow Service may write to the temporary directory directly, so
    // it must be verified.
if (!isNullOrEmpty(options.getGcpTempLocation())) {
newJob
.getEnvironment()
.setTempStoragePrefix(
dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
}
newJob.getEnvironment().setDataset(options.getTempDatasetId());
if (options.getWorkerRegion() != null) {
newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
}
if (options.getWorkerZone() != null) {
newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
}
if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
} else if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
}
// Represent the minCpuPlatform pipeline option as an experiment, if not already present.
if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
List<String> minCpuFlags =
experiments.stream()
.filter(p -> p.startsWith("min_cpu_platform"))
.collect(Collectors.toList());
if (minCpuFlags.isEmpty()) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
.build());
} else {
LOG.warn(
"Flag min_cpu_platform is defined in both top level PipelineOption, "
+ "as well as under experiments. Proceed using {}.",
minCpuFlags.get(0));
}
}
newJob
.getEnvironment()
.setExperiments(
ImmutableList.copyOf(
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));
// Set the Docker container image that executes Dataflow worker harness, residing in Google
// Container Registry. Translator is guaranteed to create a worker pool prior to this point.
// For runner_v1, only worker_harness_container is set.
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
// value.
String containerImage = getContainerImageForJob(options);
for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
workerPool.setWorkerHarnessContainerImage(containerImage);
}
configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);
newJob.getEnvironment().setVersion(getEnvironmentVersion(options));
if (hooks != null) {
hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
}
// enable upload_graph when the graph is too large
byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
int jobGraphByteSize = jobGraphBytes.length;
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
&& !hasExperiment(options, "upload_graph")
&& !useUnifiedWorker(options)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
LOG.info(
"The job graph size ({} in bytes) is larger than {}. Automatically add "
+ "the upload_graph option to experiments.",
jobGraphByteSize,
CREATE_JOB_REQUEST_LIMIT_BYTES);
}
if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
while (experiments.remove("upload_graph")) {}
options.setExperiments(experiments);
LOG.warn(
"The upload_graph experiment was specified, but it does not apply "
+ "to runner v2 jobs. Option has been automatically removed.");
}
// Upload the job to GCS and remove the graph object from the API call. The graph
// will be downloaded from GCS by the service.
if (hasExperiment(options, "upload_graph")) {
DataflowPackage stagedGraph =
options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
newJob.getSteps().clear();
newJob.setStepsLocation(stagedGraph.getLocation());
}
if (!isNullOrEmpty(options.getDataflowJobFile())
|| !isNullOrEmpty(options.getTemplateLocation())) {
boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
if (isTemplate) {
checkArgument(
isNullOrEmpty(options.getDataflowJobFile()),
"--dataflowJobFile and --templateLocation are mutually exclusive.");
}
String fileLocation =
firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
checkArgument(
fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
"Location must be local or on Cloud Storage, got %s.",
fileLocation);
ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
try (PrintWriter printWriter =
new PrintWriter(
new BufferedWriter(
new OutputStreamWriter(
Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
UTF_8)))) {
printWriter.print(workSpecJson);
LOG.info("Printed job specification to {}", fileLocation);
} catch (IOException ex) {
String error = String.format("Cannot create output file at %s", fileLocation);
if (isTemplate) {
throw new RuntimeException(error, ex);
} else {
LOG.warn(error, ex);
}
}
if (isTemplate) {
LOG.info("Template successfully created.");
return new DataflowTemplateJob();
}
}
String jobIdToUpdate = null;
if (options.isUpdate()) {
jobIdToUpdate = getJobIdFromName(options.getJobName());
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setReplaceJobId(jobIdToUpdate);
}
if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
}
Job jobResult;
try {
jobResult = dataflowClient.createJob(newJob);
} catch (GoogleJsonResponseException e) {
String errorMessages = "Unexpected errors";
if (e.getDetails() != null) {
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
errorMessages =
"The size of the serialized JSON representation of the pipeline "
+ "exceeds the allowable limit. "
+ "For more information, please see the documentation on job submission:\n"
+ "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
} else {
errorMessages = e.getDetails().getMessage();
}
}
throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
} catch (IOException e) {
throw new RuntimeException("Failed to create a workflow job", e);
}
// Use a raw client for post-launch monitoring, as status calls may fail
// regularly and need not be retried automatically.
DataflowPipelineJob dataflowPipelineJob =
new DataflowPipelineJob(
DataflowClient.create(options),
jobResult.getId(),
options,
jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
portablePipelineProto);
// If the service returned client request id, the SDK needs to compare it
// with the original id generated in the request, if they are not the same
// (i.e., the returned job is not created by this request), throw
// DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
// depending on whether this is a reload or not.
if (jobResult.getClientRequestId() != null
&& !jobResult.getClientRequestId().isEmpty()
&& !jobResult.getClientRequestId().equals(requestId)) {
// If updating a job.
if (options.isUpdate()) {
throw new DataflowJobAlreadyUpdatedException(
dataflowPipelineJob,
String.format(
"The job named %s with id: %s has already been updated into job id: %s "
+ "and cannot be updated again.",
newJob.getName(), jobIdToUpdate, jobResult.getId()));
} else {
throw new DataflowJobAlreadyExistsException(
dataflowPipelineJob,
String.format(
"There is already an active job named %s with id: %s. If you want to submit a"
+ " second job, try again by setting a different name using --jobName.",
newJob.getName(), jobResult.getId()));
}
}
LOG.info(
"To access the Dataflow monitoring console, please navigate to {}",
MonitoringUtil.getJobMonitoringPageURL(
options.getProject(), options.getRegion(), jobResult.getId()));
LOG.info("Submitted job: {}", jobResult.getId());
LOG.info(
"To cancel the job using the 'gcloud' tool, run:\n> {}",
MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));
return dataflowPipelineJob;
} | @Test
public void testUpdateNonExistentPipeline() throws IOException {
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("Could not find running job named badjobname");
DataflowPipelineOptions options = buildPipelineOptions();
options.setUpdate(true);
options.setJobName("badJobName");
Pipeline p = buildDataflowPipeline(options);
p.run();
} |
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback);
return bean;
} | @Test
void beansWithMethodsAnnotatedWithRecurringAnnotationNoCronOrIntervalWillThrowException() {
// GIVEN
final RecurringJobPostProcessor recurringJobPostProcessor = getRecurringJobPostProcessor();
// WHEN & THEN
assertThatThrownBy(() -> recurringJobPostProcessor.postProcessAfterInitialization(new MyServiceWithRecurringJobWithoutCronAndInterval(), "not important"))
.isInstanceOf(IllegalArgumentException.class);
} |
@Override
public boolean databaseExists(SnowflakeIdentifier database) {
Preconditions.checkArgument(
database.type() == SnowflakeIdentifier.Type.DATABASE,
"databaseExists requires a DATABASE identifier, got '%s'",
database);
final String finalQuery = "SHOW SCHEMAS IN DATABASE IDENTIFIER(?) LIMIT 1";
List<SnowflakeIdentifier> schemas;
try {
schemas =
connectionPool.run(
conn ->
queryHarness.query(
conn, finalQuery, SCHEMA_RESULT_SET_HANDLER, database.databaseName()));
} catch (SQLException e) {
if (DATABASE_NOT_FOUND_ERROR_CODES.contains(e.getErrorCode())) {
return false;
}
throw new UncheckedSQLException(e, "Failed to check if database '%s' exists", database);
} catch (InterruptedException e) {
throw new UncheckedInterruptedException(
e, "Interrupted while checking if database '%s' exists", database);
}
return !schemas.isEmpty();
} | @Test
public void testDatabaseDoesntExist() throws SQLException {
when(mockResultSet.next())
.thenThrow(new SQLException("Database does not exist", "2000", 2003, null))
.thenThrow(
new SQLException(
"Database does not exist, or operation cannot be performed", "2000", 2043, null))
.thenThrow(
new SQLException("Database does not exist or not authorized", "2000", 2001, null));
// Error code 2003
assertThat(snowflakeClient.databaseExists(SnowflakeIdentifier.ofDatabase("DB_1"))).isFalse();
// Error code 2043
assertThat(snowflakeClient.databaseExists(SnowflakeIdentifier.ofDatabase("DB_1"))).isFalse();
// Error code 2001
assertThat(snowflakeClient.databaseExists(SnowflakeIdentifier.ofDatabase("DB_1"))).isFalse();
} |
public static BigDecimal ensureFit(final BigDecimal value, final Schema schema) {
return ensureFit(value, precision(schema), scale(schema));
} | @Test
public void shouldFailFitIfTruncationNecessary() {
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> ensureFit(new BigDecimal("1.23"), DECIMAL_SCHEMA)
);
// Then:
assertThat(e.getMessage(), containsString("Cannot fit decimal '1.23' into DECIMAL(2, 1) without rounding."));
} |
public TimelineWriteResponse putEntities(TimelineEntities entities,
UserGroupInformation callerUgi) throws IOException {
LOG.debug("putEntities(entities={}, callerUgi={})", entities, callerUgi);
TimelineWriteResponse response = null;
try {
boolean isStorageUp = checkRetryWithSleep();
if (isStorageUp) {
// synchronize on the writer object so that no other threads can
// flush the writer buffer concurrently and swallow any exception
      // caused by the timeline entities that are being put here.
synchronized (writer) {
response = writeTimelineEntities(entities, callerUgi);
flushBufferedTimelineEntities();
}
} else {
String msg = String.format("Failed to putEntities(" +
"entities=%s, callerUgi=%s) as Timeline Storage is Down",
entities, callerUgi);
throw new IOException(msg);
}
} catch (InterruptedException ex) {
String msg = String.format("Interrupted while retrying to putEntities(" +
"entities=%s, callerUgi=%s)", entities, callerUgi);
throw new IOException(msg);
}
return response;
} | @Test
void testPutEntityWithStorageDown() throws IOException {
TimelineWriter writer = mock(TimelineWriter.class);
TimelineHealth timelineHealth = new TimelineHealth(TimelineHealth.
TimelineHealthStatus.CONNECTION_FAILURE, "");
when(writer.getHealthStatus()).thenReturn(timelineHealth);
Configuration conf = new Configuration();
conf.setInt(YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES, 5);
conf.setLong(YarnConfiguration.TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS,
500L);
TimelineCollector collector = new TimelineCollectorForTest(writer);
collector.init(conf);
TimelineEntities entities = generateTestEntities(1, 1);
boolean exceptionCaught = false;
try {
collector.putEntities(entities, UserGroupInformation.
createRemoteUser("test-user"));
} catch (Exception e) {
if (e.getMessage().contains("Failed to putEntities")) {
exceptionCaught = true;
}
}
assertTrue(exceptionCaught, "TimelineCollector putEntity failed to " +
"handle storage down");
} |
@Operation(summary = "countQueueState", description = "COUNT_QUEUE_STATE_NOTES")
@GetMapping(value = "/queue-count")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUEUE_COUNT_ERROR)
public Result<Map<String, Integer>> countQueueState(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
Map<String, Integer> stringIntegerMap = dataAnalysisService.countQueueState(loginUser);
return Result.success(stringIntegerMap);
} | @Test
public void testCountQueueState() throws Exception {
MvcResult mvcResult = mockMvc.perform(get("/projects/analysis/queue-count")
.header("sessionId", sessionId))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
assertThat(result.getCode().intValue()).isEqualTo(Status.SUCCESS.getCode());
logger.info(mvcResult.getResponse().getContentAsString());
} |
public static ObjectInputDecoder createDecoder(Type type, TypeManager typeManager)
{
String base = type.getTypeSignature().getBase();
switch (base) {
case UnknownType.NAME:
return o -> o;
case BIGINT:
return o -> (Long) o;
case INTEGER:
return o -> ((Long) o).intValue();
case SMALLINT:
return o -> ((Long) o).shortValue();
case TINYINT:
return o -> ((Long) o).byteValue();
case BOOLEAN:
return o -> (Boolean) o;
case DATE:
return DateTimeUtils::createDate;
case DECIMAL:
if (Decimals.isShortDecimal(type)) {
final int scale = ((DecimalType) type).getScale();
return o -> HiveDecimal.create(BigInteger.valueOf((long) o), scale);
}
else if (Decimals.isLongDecimal(type)) {
final int scale = ((DecimalType) type).getScale();
return o -> HiveDecimal.create(Decimals.decodeUnscaledValue((Slice) o), scale);
}
break;
case REAL:
return o -> intBitsToFloat(((Number) o).intValue());
case DOUBLE:
return o -> ((Double) o);
case TIMESTAMP:
return o -> new Timestamp(((long) o));
case VARBINARY:
return o -> ((Slice) o).getBytes();
case VARCHAR:
return o -> ((Slice) o).toStringUtf8();
case CHAR:
return o -> ((Slice) o).toStringUtf8();
case ROW:
return RowObjectInputDecoder.create(((RowType) type), typeManager);
case ARRAY:
return ArrayObjectInputDecoder.create(((ArrayType) type), typeManager);
case MAP:
return MapObjectInputDecoder.create(((MapType) type), typeManager);
}
throw unsupportedType(type);
} | @Test
public void testDecimalObjectDecoders()
{
ObjectInputDecoder decoder;
// short decimal
decoder = createDecoder(createDecimalType(11, 10), typeManager);
assertTrue(decoder.decode(decimal("1.2345678910")) instanceof HiveDecimal);
// long decimal
decoder = createDecoder(createDecimalType(34, 33), typeManager);
assertTrue(decoder.decode(decimal("1.281734081274028174012432412423134")) instanceof HiveDecimal);
} |
public static int checkPositiveOrZero(int n, String name)
{
if (n < 0)
{
throw new IllegalArgumentException(name + ": " + n + " (expected: >= 0)");
}
return n;
} | @Test(expected = IllegalArgumentException.class)
public void checkPositiveOrZeroMustFailIfArgumentIsNegative()
{
RangeUtil.checkPositiveOrZero(-1, "var");
} |
@Override
public void storeRowInCache( DatabaseLookupMeta meta, RowMetaInterface lookupMeta, Object[] lookupRow,
Object[] add ) {
RowMetaAndData rowMetaAndData = new RowMetaAndData( lookupMeta, lookupRow );
if ( !map.containsKey( rowMetaAndData ) ) {
map.put( rowMetaAndData, add );
}
// DEinspanjer 2009-02-01: If you had previously set a cache size and then turned on load all, this
// method would throw out entries if the previous cache size wasn't big enough.
if ( !meta.isLoadingAllDataInCache() && meta.getCacheSize() > 0 && map.size() > meta.getCacheSize() ) {
map.remove( map.entrySet().iterator().next().getKey() );
}
} | @Test
public void storeRowInCacheTest() throws Exception {
DatabaseLookupData databaseLookupData = mock( DatabaseLookupData.class );
DatabaseLookupMeta databaseLookupMeta = mock( DatabaseLookupMeta.class );
DefaultCache defaultCache = new DefaultCache( databaseLookupData, 10 );
when( databaseLookupMeta.isLoadingAllDataInCache() ).thenReturn( true );
RowMeta rowMeta = new RowMeta();
//Several inserts with different key and different values
for ( int i = 1; i <= 10; ++i ) {
defaultCache.storeRowInCache( databaseLookupMeta, rowMeta, new Object[]{i}, new Object[]{ i * 100 } );
}
LinkedHashMap<RowMetaAndData, Object[]> map = (LinkedHashMap<RowMetaAndData, Object[]>) getInternalState( defaultCache, "map" );
//All inserted entries expected
assertEquals( 10, map.size() );
} |
@Override
public int read() throws IOException {
checkAndUseNewPos();
pos += 1;
readBytes.increment();
readOperations.increment();
return internalStream.read();
} | @Test
public void testReadOneByte() throws IOException {
String objectName = rule.randomObjectName();
rule.client()
.putObject(new PutObjectRequest(rule.bucket(), objectName, "0123456789".getBytes()));
try (EcsSeekableInputStream input =
new EcsSeekableInputStream(
rule.client(), new EcsURI(rule.bucket(), objectName), MetricsContext.nullMetrics())) {
assertThat(input.read()).as("The first byte should be 0 ").isEqualTo('0');
}
} |
public Analysis analyze(Statement statement)
{
return analyze(statement, false);
} | @Test
public void testIfInJoinClause()
{
analyze("SELECT * FROM (VALUES (1)) a (x) JOIN (VALUES (2)) b ON IF(a.x = 1, true, false)");
} |
@Override
public Collection<SchemaMetaData> load(final MetaDataLoaderMaterial material) throws SQLException {
Collection<TableMetaData> tableMetaDataList = new LinkedList<>();
try (Connection connection = material.getDataSource().getConnection()) {
Map<String, Collection<ColumnMetaData>> columnMetaDataMap = loadColumnMetaDataMap(connection, material.getActualTableNames());
Map<String, Collection<IndexMetaData>> indexMetaDataMap = columnMetaDataMap.isEmpty() ? Collections.emptyMap() : loadIndexMetaData(connection, columnMetaDataMap.keySet());
for (Entry<String, Collection<ColumnMetaData>> entry : columnMetaDataMap.entrySet()) {
Collection<IndexMetaData> indexMetaDataList = indexMetaDataMap.getOrDefault(entry.getKey(), Collections.emptyList());
tableMetaDataList.add(new TableMetaData(entry.getKey(), entry.getValue(), indexMetaDataList, Collections.emptyList()));
}
}
return Collections.singleton(new SchemaMetaData(material.getDefaultSchemaName(), tableMetaDataList));
} | @Test
void assertLoadWithTables() throws SQLException {
DataSource dataSource = mockDataSource();
ResultSet resultSet = mockTableMetaDataResultSet();
when(dataSource.getConnection().prepareStatement(
"SELECT TABLE_CATALOG, TABLE_NAME, COLUMN_NAME, DATA_TYPE, ORDINAL_POSITION, COALESCE(IS_VISIBLE, FALSE) IS_VISIBLE, IS_NULLABLE FROM INFORMATION_SCHEMA.COLUMNS"
+ " WHERE TABLE_CATALOG=? AND TABLE_SCHEMA=? AND UPPER(TABLE_NAME) IN ('TBL') ORDER BY ORDINAL_POSITION")
.executeQuery()).thenReturn(resultSet);
ResultSet indexResultSet = mockIndexMetaDataResultSet();
when(dataSource.getConnection().prepareStatement(
"SELECT TABLE_CATALOG, TABLE_NAME, INDEX_NAME, INDEX_TYPE_NAME FROM INFORMATION_SCHEMA.INDEXES WHERE TABLE_CATALOG=? AND TABLE_SCHEMA=? AND UPPER(TABLE_NAME) IN ('TBL')")
.executeQuery()).thenReturn(indexResultSet);
ResultSet primaryKeys = mockPrimaryKeysMetaDataResultSet();
when(dataSource.getConnection().prepareStatement(
"SELECT TABLE_NAME, INDEX_NAME FROM INFORMATION_SCHEMA.INDEXES WHERE TABLE_CATALOG=? AND TABLE_SCHEMA=? AND INDEX_TYPE_NAME = 'PRIMARY KEY' AND UPPER(TABLE_NAME) IN ('TBL')")
.executeQuery()).thenReturn(primaryKeys);
ResultSet generatedInfo = mockGeneratedInfoResultSet();
when(dataSource.getConnection().prepareStatement(
"SELECT C.TABLE_NAME TABLE_NAME, C.COLUMN_NAME COLUMN_NAME, COALESCE(I.IS_GENERATED, FALSE) IS_GENERATED FROM INFORMATION_SCHEMA.COLUMNS C"
+ " RIGHT JOIN INFORMATION_SCHEMA.INDEXES I ON C.TABLE_NAME=I.TABLE_NAME WHERE C.TABLE_CATALOG=? AND C.TABLE_SCHEMA=? AND C.TABLE_NAME IN ('tbl')")
.executeQuery()).thenReturn(generatedInfo);
assertTableMetaDataMap(getDialectTableMetaDataLoader().load(new MetaDataLoaderMaterial(Collections.singletonList("tbl"), dataSource, new H2DatabaseType(), "sharding_db")));
} |
void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception {
Http2HeadersSink sink = new Http2HeadersSink(
streamId, headers, maxHeaderListSize, validateHeaders);
// Check for dynamic table size updates, which must occur at the beginning:
// https://www.rfc-editor.org/rfc/rfc7541.html#section-4.2
decodeDynamicTableSizeUpdates(in);
decode(in, sink);
// Now that we've read all of our headers we can perform the validation steps. We must
// delay throwing until this point to prevent dynamic table corruption.
sink.finish();
} | @Test
public void testLiteralWithoutIndexingWithEmptyName() throws Http2Exception {
decode("000005" + hex("value"));
verify(mockHeaders, times(1)).add(EMPTY_STRING, of("value"));
} |
@Override
public void validate() {
if (pathPrefix == null || !pathPrefix.matches("\\w+")) {
throw new IllegalArgumentException("Path is incorrect");
}
} | @Test(expected = IllegalArgumentException.class)
public void shouldValidateStartingSlash() {
new RestRouteSource("/test").validate();
} |
public static boolean isCoreRequest(HeaderMap headers) {
return headers.contains(ORIGIN)
|| headers.contains(ACCESS_CONTROL_REQUEST_HEADERS)
|| headers.contains(ACCESS_CONTROL_REQUEST_METHOD);
} | @Test
public void testIsCoreRequest() {
HeaderMap headers = new HeaderMap();
assertThat(CorsUtil.isCoreRequest(headers), is(false));
headers = new HeaderMap();
headers.add(ORIGIN, "");
assertThat(CorsUtil.isCoreRequest(headers), is(true));
headers = new HeaderMap();
headers.add(ACCESS_CONTROL_REQUEST_HEADERS, "");
assertThat(CorsUtil.isCoreRequest(headers), is(true));
headers = new HeaderMap();
headers.add(ACCESS_CONTROL_REQUEST_METHOD, "");
assertThat(CorsUtil.isCoreRequest(headers), is(true));
} |
public static <T> Values<T> of(Iterable<T> elems) {
return new Values<>(elems, Optional.absent(), Optional.absent(), false);
} | @Test
@Category(NeedsRunner.class)
public void testCreateWithUnserializableElements() throws Exception {
List<UnserializableRecord> elements =
ImmutableList.of(
new UnserializableRecord("foo"),
new UnserializableRecord("bar"),
new UnserializableRecord("baz"));
Create.Values<UnserializableRecord> create =
Create.of(elements).withCoder(new UnserializableRecord.UnserializableRecordCoder());
PAssert.that(p.apply(create))
.containsInAnyOrder(
new UnserializableRecord("foo"),
new UnserializableRecord("bar"),
new UnserializableRecord("baz"));
p.run();
} |
public static Optional<String> getDatabaseNameByDatabasePath(final String databasePath) {
Pattern pattern = Pattern.compile(getShardingSphereDataNodePath() + "/([\\w\\-]+)?", Pattern.CASE_INSENSITIVE);
Matcher matcher = pattern.matcher(databasePath);
return matcher.find() ? Optional.of(matcher.group(1)) : Optional.empty();
} | @Test
void assertGetDatabaseNameByDatabasePathHappyPath() {
assertThat(ShardingSphereDataNode.getDatabaseNameByDatabasePath("/statistics/databases/db_name"), is(Optional.of("db_name")));
} |
public static String generateDatabaseId(String baseString) {
checkArgument(baseString.length() != 0, "baseString cannot be empty!");
String databaseId =
generateResourceId(
baseString,
ILLEGAL_DATABASE_CHARS,
REPLACE_DATABASE_CHAR,
MAX_DATABASE_ID_LENGTH,
DATABASE_TIME_FORMAT);
// replace hyphen with underscore, so there's no need for backticks
String trimmed = CharMatcher.is('_').trimTrailingFrom(databaseId);
checkArgument(
trimmed.length() > 0,
"Database id is empty after removing illegal characters and trailing underscores");
// if first char is not a letter, replace with a padding letter, so it doesn't
// violate spanner's database naming rules
char padding = generatePadding();
if (!Character.isLetter(trimmed.charAt(0))) {
trimmed = padding + trimmed.substring(1);
}
return trimmed;
} | @Test
public void testGenerateDatabaseIdShouldReplaceUpperCaseLettersWithLowerCase() {
String testBaseString = "TDa";
String actual = generateDatabaseId(testBaseString);
assertThat(actual).matches("tda_\\d{8}_\\d{6}_\\d{6}");
} |
@SuppressWarnings("unchecked")
public <T extends Expression> T rewrite(final T expression, final C context) {
return (T) rewriter.process(expression, context);
} | @Test
public void shouldRewriteComparisonExpression() {
// Given:
final ComparisonExpression parsed = parseExpression("1 < 2");
when(processor.apply(parsed.getLeft(), context)).thenReturn(expr1);
when(processor.apply(parsed.getRight(), context)).thenReturn(expr2);
// When:
final Expression rewritten = expressionRewriter.rewrite(parsed, context);
// Then:
assertThat(
rewritten,
equalTo(new ComparisonExpression(parsed.getLocation(), parsed.getType(), expr1, expr2))
);
} |
@Override
public GcsResourceId resolve(String other, ResolveOptions resolveOptions) {
checkState(
isDirectory(),
String.format("Expected the gcsPath is a directory, but had [%s].", gcsPath));
checkArgument(
resolveOptions.equals(StandardResolveOptions.RESOLVE_FILE)
|| resolveOptions.equals(StandardResolveOptions.RESOLVE_DIRECTORY),
String.format("ResolveOptions: [%s] is not supported.", resolveOptions));
if (resolveOptions.equals(StandardResolveOptions.RESOLVE_FILE)) {
checkArgument(
!other.endsWith("/"), "The resolved file: [%s] should not end with '/'.", other);
return fromGcsPath(gcsPath.resolve(other));
} else {
// StandardResolveOptions.RESOLVE_DIRECTORY
if (other.endsWith("/")) {
// other already contains the delimiter for gcs.
// It is not recommended for callers to set the delimiter.
// However, we consider it as a valid input.
return fromGcsPath(gcsPath.resolve(other));
} else {
return fromGcsPath(gcsPath.resolve(other + "/"));
}
}
} | @Test
public void testResolveInvalidInputs() {
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("The resolved file: [tmp/] should not end with '/'.");
toResourceIdentifier("gs://my_bucket/").resolve("tmp/", StandardResolveOptions.RESOLVE_FILE);
} |
@CanIgnoreReturnValue
public final Ordered containsAtLeast(
@Nullable Object firstExpected,
@Nullable Object secondExpected,
@Nullable Object @Nullable ... restOfExpected) {
return containsAtLeastElementsIn(accumulate(firstExpected, secondExpected, restOfExpected));
} | @Test
public void iterableContainsAtLeastInOrderWithFailure() {
expectFailureWhenTestingThat(asList(1, null, 3)).containsAtLeast(null, 1, 3).inOrder();
assertFailureKeys(
"required elements were all found, but order was wrong",
"expected order for required elements",
"but was");
assertFailureValue("expected order for required elements", "[null, 1, 3]");
assertFailureValue("but was", "[1, null, 3]");
} |
public List<InterfaceIpAddress> prefixes() {
if (!object.has(PREFIXES)) {
return null;
}
List<InterfaceIpAddress> ips = Lists.newArrayList();
ArrayNode prefixes = (ArrayNode) object.path(PREFIXES);
prefixes.forEach(i -> ips.add(InterfaceIpAddress.valueOf(i.asText())));
return ips;
} | @Test
public void testPrefixes() throws Exception {
assertThat(config.prefixes(), is(prefixes));
} |
@Override
public List<Application> getAllHandlers(final String scheme) {
final List<Application> applications = new ArrayList<>();
final NSArray urls = workspace.URLsForApplicationsToOpenURL(NSURL.URLWithString(String.format("%s:/", scheme)));
NSEnumerator enumerator = urls.objectEnumerator();
NSObject next;
while((next = enumerator.nextObject()) != null) {
final NSURL url = Rococoa.cast(next, NSURL.class);
final NSBundle bundle = NSBundle.bundleWithPath(url.path());
if(null == bundle) {
log.warn(String.format("Failure loading bundle for path %s", url.path()));
continue;
}
if(null == bundle.bundleIdentifier()) {
log.warn(String.format("Missing CFBundleIdentifier for bundle at path %s", url.path()));
continue;
}
final Application application = finder.getDescription(bundle.bundleIdentifier());
if(finder.isInstalled(application)) {
applications.add(application);
}
}
return applications;
} | @Test
public void testGetAllHandlers() {
assumeTrue(Factory.Platform.osversion.matches("12\\..*"));
final List<Application> list = new WorkspaceSchemeHandler(new LaunchServicesApplicationFinder()).getAllHandlers("http:/");
assertFalse(list.isEmpty());
for(Application application : list) {
assertNotNull(application.getIdentifier());
}
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
UserAccessToken that = (UserAccessToken) o;
return Objects.equals(token, that.token);
} | @Test
void equals_whenAnotherInstanceAndDifferentToken_shouldReturnFalse() {
UserAccessToken userAccessToken1 = new UserAccessToken("token1");
UserAccessToken userAccessToken2 = new UserAccessToken("token2");
assertThat(userAccessToken1.equals(userAccessToken2)).isFalse();
} |
static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
Thread
.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args,
LOG);
ApplicationHistoryServer appHistoryServer = null;
try {
appHistoryServer = new ApplicationHistoryServer();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(appHistoryServer),
SHUTDOWN_HOOK_PRIORITY);
YarnConfiguration conf = new YarnConfiguration();
new GenericOptionsParser(conf, args);
appHistoryServer.init(conf);
appHistoryServer.start();
} catch (Throwable t) {
LOG.error("Error starting ApplicationHistoryServer", t);
ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
}
return appHistoryServer;
} | @Test
@Timeout(60000)
void testLaunchWithArguments() throws Exception {
ExitUtil.disableSystemExit();
ApplicationHistoryServer historyServer = null;
try {
// Not able to modify the config of this test case,
// but others have been customized to avoid conflicts
String[] args = new String[2];
args[0] = "-D" + YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS + "=4000";
args[1] = "-D" + YarnConfiguration.TIMELINE_SERVICE_TTL_MS + "=200";
historyServer =
ApplicationHistoryServer.launchAppHistoryServer(args);
Configuration conf = historyServer.getConfig();
assertEquals("4000", conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
assertEquals("200", conf.get(YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
} catch (ExitUtil.ExitException e) {
assertEquals(0, e.status);
ExitUtil.resetFirstExitException();
fail();
} finally {
if (historyServer != null) {
historyServer.stop();
}
}
} |
public static LockManager defaultLockManager() {
return LOCK_MANAGER_DEFAULT;
} | @Test
public void testLoadDefaultLockManager() {
assertThat(LockManagers.defaultLockManager())
.isInstanceOf(LockManagers.InMemoryLockManager.class);
} |
@Override
public void emit(String emitKey, List<Metadata> metadataList, ParseContext parseContext) throws IOException, TikaEmitterException {
if (metadataList == null || metadataList.size() == 0) {
throw new TikaEmitterException("metadata list must not be null or of size 0");
}
try (UnsynchronizedByteArrayOutputStream bos = UnsynchronizedByteArrayOutputStream
.builder()
.get()) {
try (Writer writer = new OutputStreamWriter(bos, StandardCharsets.UTF_8)) {
JsonMetadataList.toJson(metadataList, writer);
} catch (IOException e) {
throw new TikaEmitterException("can't jsonify", e);
}
write(emitKey, new Metadata(), bos.toByteArray());
}
} | @Test
public void testBasic() throws Exception {
EmitterManager emitterManager = EmitterManager.load(getConfig("tika-config-gcs.xml"));
Emitter emitter = emitterManager.getEmitter("gcs");
List<Metadata> metadataList = new ArrayList<>();
Metadata m = new Metadata();
m.set("k1", "v1");
m.add("k1", "v2");
m.set("k2", "v3");
metadataList.add(m);
emitter.emit("something-or-other/test-out", metadataList, new ParseContext());
} |
@Nullable
public String ensureBuiltinRole(String roleName, String description, Set<String> expectedPermissions) {
Role previousRole = null;
try {
previousRole = roleService.load(roleName);
if (!previousRole.isReadOnly() || !expectedPermissions.equals(previousRole.getPermissions())) {
final String msg = "Invalid role '" + roleName + "', fixing it.";
LOG.debug(msg);
throw new IllegalArgumentException(msg); // jump to fix code
}
} catch (NotFoundException | IllegalArgumentException | NoSuchElementException ignored) {
LOG.info("{} role is missing or invalid, re-adding it as a built-in role.", roleName);
final RoleImpl fixedRole = new RoleImpl();
// copy the mongodb id over, in order to update the role instead of reading it
if (previousRole != null) {
fixedRole._id = previousRole.getId();
}
fixedRole.setReadOnly(true);
fixedRole.setName(roleName);
fixedRole.setDescription(description);
fixedRole.setPermissions(expectedPermissions);
try {
final Role savedRole = roleService.save(fixedRole);
return savedRole.getId();
} catch (DuplicateKeyException | ValidationException e) {
LOG.error("Unable to save fixed '" + roleName + "' role, please restart Graylog to fix this.", e);
}
}
if (previousRole == null) {
LOG.error("Unable to access fixed '" + roleName + "' role, please restart Graylog to fix this.");
return null;
}
return previousRole.getId();
} | @Test
public void ensureBuiltinRole() throws Exception {
final Role newRole = mock(Role.class);
when(newRole.getId()).thenReturn("new-id");
when(roleService.load("test-role")).thenThrow(NotFoundException.class);
when(roleService.save(any(Role.class))).thenReturn(newRole);
assertThat(migrationHelpers.ensureBuiltinRole("test-role", "description", ImmutableSet.of("a", "b")))
.isEqualTo("new-id");
final ArgumentCaptor<Role> roleArg = ArgumentCaptor.forClass(Role.class);
verify(roleService, times(1)).save(roleArg.capture());
assertThat(roleArg.getValue()).satisfies(role -> {
assertThat(role.getName()).describedAs("role name").isEqualTo("test-role");
assertThat(role.getDescription()).describedAs("role description").isEqualTo("description");
assertThat(role.isReadOnly()).describedAs("role is read-only").isTrue();
assertThat(role.getPermissions()).describedAs("role permissions").containsOnly("a", "b");
});
} |
@Override
public <T> T convert(DataTable dataTable, Type type) {
return convert(dataTable, type, false);
} | @Test
void convert_to_map_of_string_to_string__throws_exception__blank_space() {
DataTable table = parse("",
"| | -90.258056 |",
"| 37.618889 | -122.375 |",
"| 47.448889 | -122.309444 |",
"| 40.639722 | -73.778889 |");
CucumberDataTableException exception = assertThrows(
CucumberDataTableException.class,
() -> converter.convert(table, MAP_OF_STRING_TO_LIST_OF_DOUBLE));
assertThat(exception.getMessage(), is(format("" +
"Can't convert DataTable to Map<%s, %s>.\n" +
"There are more values then keys. " +
"The first header cell was left blank. " +
"You can add a value there",
typeName(String.class), LIST_OF_DOUBLE)));
} |
@Restricted(NoExternalUse.class)
public static int permissionsToMode(Set<PosixFilePermission> permissions) {
PosixFilePermission[] allPermissions = PosixFilePermission.values();
int result = 0;
for (PosixFilePermission allPermission : allPermissions) {
result <<= 1;
result |= permissions.contains(allPermission) ? 1 : 0;
}
return result;
} | @Test
public void testPermissionsToMode() {
assertEquals(0777, Util.permissionsToMode(PosixFilePermissions.fromString("rwxrwxrwx")));
assertEquals(0757, Util.permissionsToMode(PosixFilePermissions.fromString("rwxr-xrwx")));
assertEquals(0750, Util.permissionsToMode(PosixFilePermissions.fromString("rwxr-x---")));
assertEquals(0550, Util.permissionsToMode(PosixFilePermissions.fromString("r-xr-x---")));
assertEquals(0540, Util.permissionsToMode(PosixFilePermissions.fromString("r-xr-----")));
assertEquals(0140, Util.permissionsToMode(PosixFilePermissions.fromString("--xr-----")));
assertEquals(0142, Util.permissionsToMode(PosixFilePermissions.fromString("--xr---w-")));
assertEquals(0146, Util.permissionsToMode(PosixFilePermissions.fromString("--xr--rw-")));
assertEquals(0346, Util.permissionsToMode(PosixFilePermissions.fromString("-wxr--rw-")));
assertEquals(0000, Util.permissionsToMode(PosixFilePermissions.fromString("---------")));
} |
@Override
public void onThrowing(final TargetAdviceObject target, final TargetAdviceMethod method, final Object[] args, final Throwable throwable, final String pluginType) {
MetricsCollectorRegistry.<CounterMetricsCollector>get(config, pluginType).inc(getStatementType());
} | @Test
void assertWithStatement() {
StatementExecuteErrorsCountAdvice advice = new StatementExecuteErrorsCountAdvice();
advice.onThrowing(new TargetAdviceObjectFixture(), mock(TargetAdviceMethod.class), new Object[]{}, mock(IOException.class), "FIXTURE");
assertThat(MetricsCollectorRegistry.get(config, "FIXTURE").toString(), is("statement=1"));
} |
@Override
public Optional<InetAddress> getLocalInetAddress(Predicate<InetAddress> predicate) {
try {
return Collections.list(NetworkInterface.getNetworkInterfaces()).stream()
.flatMap(ni -> Collections.list(ni.getInetAddresses()).stream())
.filter(a -> a.getHostAddress() != null)
.filter(predicate)
.findFirst();
} catch (SocketException e) {
LOG.trace("getLocalInetAddress(Predicate<InetAddress>) failed", e);
throw new IllegalStateException("Can not retrieve network interfaces", e);
}
} | @Test
public void getLocalInetAddress_returns_empty_if_no_local_addresses_match() {
Optional<InetAddress> address = underTest.getLocalInetAddress(a -> false);
assertThat(address).isEmpty();
} |
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
if(containerService.isContainer(folder)) {
final S3BucketCreateService service = new S3BucketCreateService(session);
service.create(folder, StringUtils.isBlank(status.getRegion()) ?
new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getDefault().getIdentifier() : status.getRegion());
return folder;
}
else {
final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType());
type.add(Path.Type.placeholder);
return new S3TouchFeature(session, acl).withWriter(writer).touch(folder
.withType(type), status
// Add placeholder object
.withMime(MIMETYPE)
.withChecksum(writer.checksum(folder, status).compute(new NullInputStream(0L), status)));
}
} | @Test
public void testDirectoryDeleteWithVersioning() throws Exception {
final Path bucket = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
final Path parent = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(new Path(bucket,
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path test = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(new Path(parent,
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
assertNotNull(test.attributes().getVersionId());
assertTrue(test.isPlaceholder());
// Only placeholder is found in list output with no version id set
final Path placeholder = new S3ListService(session, acl).list(parent, new DisabledListProgressListener()).find(new SimplePathPredicate(test));
assertTrue(placeholder.isPlaceholder());
assertTrue(new S3FindFeature(session, acl).find(test));
assertTrue(new DefaultFindFeature(session).find(test));
// This will only cause a delete marker being added
new S3DefaultDeleteFeature(session).delete(Arrays.asList(new Path(test).withAttributes(PathAttributes.EMPTY), parent), new DisabledLoginCallback(), new Delete.DisabledCallback());
// Specific version is still found
assertTrue(new S3FindFeature(session, acl).find(test));
assertTrue(new DefaultFindFeature(session).find(test));
assertFalse(new S3FindFeature(session, acl).find(new Path(test).withAttributes(PathAttributes.EMPTY)));
assertFalse(new DefaultFindFeature(session).find(new Path(test).withAttributes(PathAttributes.EMPTY)));
} |
public JsonGetterContext getContext(String queryPath) {
JsonGetterContext context = internalCache.get(queryPath);
if (context != null) {
return context;
}
context = new JsonGetterContext(queryPath);
JsonGetterContext previousContextValue = internalCache.putIfAbsent(queryPath, context);
if (previousContextValue == null) {
cleanupIfNecessary(context);
return context;
} else {
return previousContextValue;
}
} | @Test
public void testGetReturnsTheSameObject() {
JsonGetterContextCache cache = new JsonGetterContextCache(4, 2);
JsonGetterContext contextA = cache.getContext("a");
JsonGetterContext contextB = cache.getContext("b");
JsonGetterContext contextC = cache.getContext("c");
JsonGetterContext contextD = cache.getContext("d");
assertSame(contextA, cache.getContext("a"));
assertSame(contextB, cache.getContext("b"));
assertSame(contextC, cache.getContext("c"));
assertSame(contextD, cache.getContext("d"));
} |
@Override
public Map<String, Object> encode(Object object) throws EncodeException {
if (object == null) {
return Collections.emptyMap();
}
try {
ObjectParamMetadata metadata = getMetadata(object.getClass());
Map<String, Object> propertyNameToValue = new HashMap<String, Object>();
for (PropertyDescriptor pd : metadata.objectProperties) {
Method method = pd.getReadMethod();
Object value = method.invoke(object);
if (value != null && value != object) {
Param alias = method.getAnnotation(Param.class);
String name = alias != null ? alias.value() : pd.getName();
propertyNameToValue.put(name, value);
}
}
return propertyNameToValue;
} catch (IllegalAccessException | IntrospectionException | InvocationTargetException e) {
throw new EncodeException("Failure encoding object into query map", e);
}
} | @Test
void defaultEncoder_normalClassWithValues() {
Map<String, Object> expected = new HashMap<>();
expected.put("foo", "fooz");
expected.put("bar", "barz");
expected.put("fooAppendBar", "foozbarz");
NormalObject normalObject = new NormalObject("fooz", "barz");
Map<String, Object> encodedMap = encoder.encode(normalObject);
assertThat(encodedMap).as("Unexpected encoded query map").isEqualTo(expected);
} |
public static Instant fromMillisOrIso8601(String time, String fieldName) {
try {
return Instant.ofEpochMilli(Long.parseLong(time));
} catch (NumberFormatException nfe) {
// TODO: copied from PluginConfigurationProcessor, find a way to share better
try {
DateTimeFormatter formatter =
new DateTimeFormatterBuilder()
.append(DateTimeFormatter.ISO_DATE_TIME)
.optionalStart()
.appendOffset("+HHmm", "+0000")
.optionalEnd()
.toFormatter();
return formatter.parse(time, Instant::from);
} catch (DateTimeParseException dtpe) {
throw new IllegalArgumentException(
fieldName
+ " must be a number of milliseconds since epoch or an ISO 8601 formatted date");
}
}
} | @Test
public void testFromMillisOrIso8601_failed() {
try {
Instants.fromMillisOrIso8601("bad-time", "testFieldName");
Assert.fail();
} catch (IllegalArgumentException iae) {
Assert.assertEquals(
"testFieldName must be a number of milliseconds since epoch or an ISO 8601 formatted date",
iae.getMessage());
}
} |
public static TriggerStateMachine stateMachineForTrigger(RunnerApi.Trigger trigger) {
switch (trigger.getTriggerCase()) {
case AFTER_ALL:
return AfterAllStateMachine.of(
stateMachinesForTriggers(trigger.getAfterAll().getSubtriggersList()));
case AFTER_ANY:
return AfterFirstStateMachine.of(
stateMachinesForTriggers(trigger.getAfterAny().getSubtriggersList()));
case AFTER_END_OF_WINDOW:
return stateMachineForAfterEndOfWindow(trigger.getAfterEndOfWindow());
case ELEMENT_COUNT:
return AfterPaneStateMachine.elementCountAtLeast(
trigger.getElementCount().getElementCount());
case AFTER_SYNCHRONIZED_PROCESSING_TIME:
return AfterSynchronizedProcessingTimeStateMachine.ofFirstElement();
case DEFAULT:
return DefaultTriggerStateMachine.of();
case NEVER:
return NeverStateMachine.ever();
case ALWAYS:
return ReshuffleTriggerStateMachine.create();
case OR_FINALLY:
return stateMachineForTrigger(trigger.getOrFinally().getMain())
.orFinally(stateMachineForTrigger(trigger.getOrFinally().getFinally()));
case REPEAT:
return RepeatedlyStateMachine.forever(
stateMachineForTrigger(trigger.getRepeat().getSubtrigger()));
case AFTER_EACH:
return AfterEachStateMachine.inOrder(
stateMachinesForTriggers(trigger.getAfterEach().getSubtriggersList()));
case AFTER_PROCESSING_TIME:
return stateMachineForAfterProcessingTime(trigger.getAfterProcessingTime());
case TRIGGER_NOT_SET:
throw new IllegalArgumentException(
String.format("Required field 'trigger' not set on %s", trigger));
default:
throw new IllegalArgumentException(String.format("Unknown trigger type %s", trigger));
}
} | @Test
public void testAfterWatermarkEarlyLateTranslation() {
RunnerApi.Trigger trigger =
RunnerApi.Trigger.newBuilder()
.setAfterEndOfWindow(
RunnerApi.Trigger.AfterEndOfWindow.newBuilder()
.setEarlyFirings(subtrigger1)
.setLateFirings(subtrigger2))
.build();
AfterWatermarkStateMachine.AfterWatermarkEarlyAndLate machine =
(AfterWatermarkStateMachine.AfterWatermarkEarlyAndLate)
TriggerStateMachines.stateMachineForTrigger(trigger);
assertThat(
machine,
equalTo(
AfterWatermarkStateMachine.pastEndOfWindow()
.withEarlyFirings(submachine1)
.withLateFirings(submachine2)));
} |
@Injection( name = "PARTITION_OVER_TABLES" )
public void metaSetPartitionOverTables( String value ) {
setPartitioningEnabled( "Y".equalsIgnoreCase( value ) );
} | @Test
public void metaSetPartitionOverTables() {
TableOutputMeta tableOutputMeta = new TableOutputMeta();
tableOutputMeta.metaSetPartitionOverTables( "Y" );
assertTrue( tableOutputMeta.isPartitioningEnabled() );
tableOutputMeta.metaSetPartitionOverTables( "N" );
assertFalse( tableOutputMeta.isPartitioningEnabled() );
tableOutputMeta.metaSetPartitionOverTables( "Ynot" );
assertFalse( tableOutputMeta.isPartitioningEnabled() );
} |
public Mono<Void> createConsumerAcl(KafkaCluster cluster, CreateConsumerAclDTO request) {
return adminClientService.get(cluster)
.flatMap(ac -> createAclsWithLogging(ac, createConsumerBindings(request)))
.then();
} | @Test
void createsConsumerDependantAclsWhenTopicsAndGroupsSpecifiedByPrefix() {
ArgumentCaptor<Collection<AclBinding>> createdCaptor = ArgumentCaptor.forClass(Collection.class);
when(adminClientMock.createAcls(createdCaptor.capture()))
.thenReturn(Mono.empty());
var principal = UUID.randomUUID().toString();
var host = UUID.randomUUID().toString();
aclsService.createConsumerAcl(
CLUSTER,
new CreateConsumerAclDTO()
.principal(principal)
.host(host)
.consumerGroupsPrefix("cgPref")
.topicsPrefix("topicPref")
).block();
//Read, Describe on topics, Read on consumerGroups
Collection<AclBinding> createdBindings = createdCaptor.getValue();
assertThat(createdBindings)
.hasSize(3)
.contains(new AclBinding(
new ResourcePattern(ResourceType.TOPIC, "topicPref", PatternType.PREFIXED),
new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)))
.contains(new AclBinding(
new ResourcePattern(ResourceType.TOPIC, "topicPref", PatternType.PREFIXED),
new AccessControlEntry(principal, host, AclOperation.DESCRIBE, AclPermissionType.ALLOW)))
.contains(new AclBinding(
new ResourcePattern(ResourceType.GROUP, "cgPref", PatternType.PREFIXED),
new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)));
} |
public WithoutJsonPath(JsonPath jsonPath) {
this.jsonPath = jsonPath;
} | @Test
public void shouldBeDescriptive() {
assertThat(withoutJsonPath("$.name"),
hasToString(equalTo("without json path \"$['name']\"")));
} |
public static <LeftT, RightT> ByBuilder<LeftT, RightT> of(
PCollection<LeftT> left, PCollection<RightT> right) {
return named(null).of(left, right);
} | @Test
public void testBuild_ImplicitName() {
final Pipeline pipeline = TestUtils.createTestPipeline();
final PCollection<String> left =
TestUtils.createMockDataset(pipeline, TypeDescriptors.strings());
final PCollection<String> right =
TestUtils.createMockDataset(pipeline, TypeDescriptors.strings());
final PCollection<KV<Integer, String>> joined =
Join.of(left, right)
.by(String::length, String::length)
.using(
(String l, String r, Collector<String> c) -> {
// no-op
})
.output();
final Join join = (Join) TestUtils.getProducer(joined);
assertFalse(join.getName().isPresent());
} |
public static MonitoringInfoMetricName named(String urn, Map<String, String> labels) {
return new MonitoringInfoMetricName(urn, labels);
} | @Test
public void testNullUrnThrows() {
HashMap<String, String> labels = new HashMap<String, String>();
thrown.expect(IllegalArgumentException.class);
MonitoringInfoMetricName.named(null, labels);
} |
public static Impl join(By clause) {
return new Impl(new JoinArguments(clause));
} | @Test
@Category(NeedsRunner.class)
public void testUnderspecifiedCoGroup() {
PCollection<Row> pc1 =
pipeline
.apply(
"Create1",
Create.of(Row.withSchema(CG_SCHEMA_1).addValues("user1", 1, "us").build()))
.setRowSchema(CG_SCHEMA_1);
PCollection<Row> pc2 =
pipeline
.apply(
"Create2",
Create.of(Row.withSchema(CG_SCHEMA_2).addValues("user1", 9, "us").build()))
.setRowSchema(CG_SCHEMA_2);
PCollection<Row> pc3 =
pipeline.apply(
"Create3", Create.of(Row.withSchema(CG_SCHEMA_3).addValues("user1", 17, "us").build()));
thrown.expect(IllegalArgumentException.class);
PCollectionTuple.of("pc1", pc1, "pc2", pc2, "pc3", pc3)
.apply(
"CoGroup",
CoGroup.join("pc1", By.fieldNames("user", "country"))
.join("pc2", By.fieldNames("user2", "country2")));
pipeline.run();
} |
@Override
public KeyValueIterator<Windowed<Bytes>, byte[]> backwardFetch(final Bytes key) {
return wrapped().backwardFetch(key);
} | @Test
public void shouldDelegateToUnderlyingStoreWhenBackwardFetching() {
store.backwardFetch(bytesKey);
verify(inner).backwardFetch(bytesKey);
} |
@Override
public List<String> detect(ClassLoader classLoader) {
List<File> classpathContents =
classGraph
.disableNestedJarScanning()
.addClassLoader(classLoader)
.scan(1)
.getClasspathFiles();
return classpathContents.stream().map(File::getAbsolutePath).collect(Collectors.toList());
} | @Test
public void shouldDetectJarFiles() throws Exception {
File jarFile = createTestTmpJarFile("test");
ClassLoader classLoader = new URLClassLoader(new URL[] {jarFile.toURI().toURL()});
ClasspathScanningResourcesDetector detector =
new ClasspathScanningResourcesDetector(new ClassGraph());
List<String> result = detector.detect(classLoader);
assertThat(result, hasItem(containsString(jarFile.getCanonicalPath())));
} |
public static ColumnIndex build(
PrimitiveType type,
BoundaryOrder boundaryOrder,
List<Boolean> nullPages,
List<Long> nullCounts,
List<ByteBuffer> minValues,
List<ByteBuffer> maxValues) {
return build(type, boundaryOrder, nullPages, nullCounts, minValues, maxValues, null, null);
} | @Test
public void testStaticBuildBinary() {
ColumnIndex columnIndex = ColumnIndexBuilder.build(
Types.required(BINARY).as(UTF8).named("test_binary_utf8"),
BoundaryOrder.ASCENDING,
asList(true, true, false, false, true, false, true, false),
asList(1l, 2l, 3l, 4l, 5l, 6l, 7l, 8l),
toBBList(
null,
null,
stringBinary("Beeblebrox"),
stringBinary("Dent"),
null,
stringBinary("Jeltz"),
null,
stringBinary("Slartibartfast")),
toBBList(
null,
null,
stringBinary("Dent"),
stringBinary("Dent"),
null,
stringBinary("Prefect"),
null,
stringBinary("Slartibartfast")));
assertEquals(BoundaryOrder.ASCENDING, columnIndex.getBoundaryOrder());
assertCorrectNullCounts(columnIndex, 1, 2, 3, 4, 5, 6, 7, 8);
assertCorrectNullPages(columnIndex, true, true, false, false, true, false, true, false);
assertCorrectValues(
columnIndex.getMaxValues(),
null,
null,
stringBinary("Dent"),
stringBinary("Dent"),
null,
stringBinary("Prefect"),
null,
stringBinary("Slartibartfast"));
assertCorrectValues(
columnIndex.getMinValues(),
null,
null,
stringBinary("Beeblebrox"),
stringBinary("Dent"),
null,
stringBinary("Jeltz"),
null,
stringBinary("Slartibartfast"));
} |
private String hash(String password, String salt) {
return new SimpleHash(HASH_ALGORITHM, password, salt).toString();
} | @Test
public void testHash() throws Exception {
assertThat(SHA1HashPasswordAlgorithm.hash("foobar")).isEqualTo("baae906e6bbb37ca5033600fcb4824c98b0430fb");
} |
static RLMQuotaManagerConfig fetchQuotaManagerConfig(RemoteLogManagerConfig rlmConfig) {
return new RLMQuotaManagerConfig(rlmConfig.remoteLogManagerFetchMaxBytesPerSecond(),
rlmConfig.remoteLogManagerFetchNumQuotaSamples(),
rlmConfig.remoteLogManagerFetchQuotaWindowSizeSeconds());
} | @Test
public void testFetchQuotaManagerConfig() {
Properties defaultProps = new Properties();
defaultProps.put("zookeeper.connect", kafka.utils.TestUtils.MockZkConnect());
appendRLMConfig(defaultProps);
KafkaConfig defaultRlmConfig = KafkaConfig.fromProps(defaultProps);
RLMQuotaManagerConfig defaultConfig = RemoteLogManager.fetchQuotaManagerConfig(defaultRlmConfig.remoteLogManagerConfig());
assertEquals(DEFAULT_REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND, defaultConfig.quotaBytesPerSecond());
assertEquals(DEFAULT_REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_NUM, defaultConfig.numQuotaSamples());
assertEquals(DEFAULT_REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_SIZE_SECONDS, defaultConfig.quotaWindowSizeSeconds());
Properties customProps = new Properties();
customProps.put("zookeeper.connect", kafka.utils.TestUtils.MockZkConnect());
customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND_PROP, 100);
customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_NUM_PROP, 31);
customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_SIZE_SECONDS_PROP, 1);
appendRLMConfig(customProps);
KafkaConfig rlmConfig = KafkaConfig.fromProps(customProps);
RLMQuotaManagerConfig rlmFetchQuotaManagerConfig = RemoteLogManager.fetchQuotaManagerConfig(rlmConfig.remoteLogManagerConfig());
assertEquals(100L, rlmFetchQuotaManagerConfig.quotaBytesPerSecond());
assertEquals(31, rlmFetchQuotaManagerConfig.numQuotaSamples());
assertEquals(1, rlmFetchQuotaManagerConfig.quotaWindowSizeSeconds());
} |
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(
TypeSerializerSnapshot<T> oldSerializerSnapshot) {
if (!(oldSerializerSnapshot instanceof KryoSerializerSnapshot)) {
return TypeSerializerSchemaCompatibility.incompatible();
}
KryoSerializerSnapshot<T> oldKryoSerializerSnapshot =
(KryoSerializerSnapshot<T>) oldSerializerSnapshot;
if (snapshotData.getTypeClass() != oldKryoSerializerSnapshot.snapshotData.getTypeClass()) {
return TypeSerializerSchemaCompatibility.incompatible();
}
return resolveSchemaCompatibility(oldKryoSerializerSnapshot);
} | @Test
void tryingToRestoreWithNonExistingClassShouldBeIncompatible() throws IOException {
TypeSerializerSnapshot<Animal> restoredSnapshot = kryoSnapshotWithMissingClass();
TypeSerializer<Animal> currentSerializer =
new KryoSerializer<>(Animal.class, new SerializerConfigImpl());
assertThat(
currentSerializer
.snapshotConfiguration()
.resolveSchemaCompatibility(restoredSnapshot))
.is(isIncompatible());
} |
public RuntimeOptionsBuilder parse(String... args) {
return parse(Arrays.asList(args));
} | @Test
void replaces_incompatible_intellij_idea_plugin() {
RuntimeOptions options = parser
.parse("--plugin", "org.jetbrains.plugins.cucumber.java.run.CucumberJvm3SMFormatter")
.build();
Plugins plugins = new Plugins(new PluginFactory(), options);
plugins.setEventBusOnEventListenerPlugins(new TimeServiceEventBus(Clock.systemUTC(), UUID::randomUUID));
assertThat(plugins.getPlugins(), not(hasItem(plugin("io.cucumber.core.plugin.PrettyPrinter"))));
} |
@Override
public DeleteTopicsResult deleteTopics(final TopicCollection topics,
final DeleteTopicsOptions options) {
if (topics instanceof TopicIdCollection)
return DeleteTopicsResult.ofTopicIds(handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options));
else if (topics instanceof TopicNameCollection)
return DeleteTopicsResult.ofTopicNames(handleDeleteTopicsUsingNames(((TopicNameCollection) topics).topicNames(), options));
else
throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for deleteTopics.");
} | @Test
public void testDeleteTopicsDontRetryThrottlingExceptionWhenDisabled() throws Exception {
try (AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(
expectDeleteTopicsRequestWithTopics("topic1", "topic2", "topic3"),
prepareDeleteTopicsResponse(1000,
deletableTopicResult("topic1", Errors.NONE),
deletableTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED),
deletableTopicResult("topic3", Errors.TOPIC_ALREADY_EXISTS)));
DeleteTopicsResult result = env.adminClient().deleteTopics(
asList("topic1", "topic2", "topic3"),
new DeleteTopicsOptions().retryOnQuotaViolation(false));
assertNull(result.topicNameValues().get("topic1").get());
ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(result.topicNameValues().get("topic2"),
ThrottlingQuotaExceededException.class);
assertEquals(1000, e.throttleTimeMs());
TestUtils.assertFutureError(result.topicNameValues().get("topic3"), TopicExistsException.class);
// With topic IDs
Uuid topicId1 = Uuid.randomUuid();
Uuid topicId2 = Uuid.randomUuid();
Uuid topicId3 = Uuid.randomUuid();
env.kafkaClient().prepareResponse(
expectDeleteTopicsRequestWithTopicIds(topicId1, topicId2, topicId3),
prepareDeleteTopicsResponse(1000,
deletableTopicResultWithId(topicId1, Errors.NONE),
deletableTopicResultWithId(topicId2, Errors.THROTTLING_QUOTA_EXCEEDED),
deletableTopicResultWithId(topicId3, Errors.UNKNOWN_TOPIC_ID)));
DeleteTopicsResult resultIds = env.adminClient().deleteTopics(
TopicCollection.ofTopicIds(asList(topicId1, topicId2, topicId3)),
new DeleteTopicsOptions().retryOnQuotaViolation(false));
assertNull(resultIds.topicIdValues().get(topicId1).get());
e = TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId2),
ThrottlingQuotaExceededException.class);
assertEquals(1000, e.throttleTimeMs());
TestUtils.assertFutureError(resultIds.topicIdValues().get(topicId3), UnknownTopicIdException.class);
}
} |
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
return payload.readInt8();
} | @Test
void assertRead() {
when(payload.readInt8()).thenReturn(1L);
MySQLLongLongBinlogProtocolValue actual = new MySQLLongLongBinlogProtocolValue();
assertThat(actual.read(columnDef, payload), is(1L));
} |
@Override
public Set<ConfigOption<?>> requiredOptions() {
Set<ConfigOption<?>> options = new HashSet<>();
options.add(HOSTNAME);
options.add(KEYSPACE);
options.add(TABLE_NAME);
return options;
} | @Test
public void testValidation() {
// validate illegal port
try {
Map<String, String> properties = getAllOptions();
properties.put("port", "123b");
createTableSource(properties);
fail("exception expected");
} catch (Throwable t) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
t, "Could not parse value '123b' for key 'port'.")
.isPresent());
}
// validate missing required
Factory factory = new VitessTableFactory();
for (ConfigOption<?> requiredOption : factory.requiredOptions()) {
Map<String, String> properties = getAllOptions();
properties.remove(requiredOption.key());
try {
createTableSource(properties);
fail("exception expected");
} catch (Throwable t) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(
t,
"Missing required options are:\n\n" + requiredOption.key())
.isPresent());
}
}
// validate unsupported option
try {
Map<String, String> properties = getAllOptions();
properties.put("unknown", "abc");
createTableSource(properties);
fail("exception expected");
} catch (Throwable t) {
assertTrue(
ExceptionUtils.findThrowableWithMessage(t, "Unsupported options:\n\nunknown")
.isPresent());
}
} |
public ProtocolBuilder name(String name) {
this.name = name;
return getThis();
} | @Test
void name() {
ProtocolBuilder builder = new ProtocolBuilder();
builder.name("name");
Assertions.assertEquals("name", builder.build().getName());
} |
@Override
public ByteBuf retainedSlice() {
throw reject();
} | @Test
void testRetainedSlice() {
ByteBuf buf = Unpooled.buffer(10);
int i = 0;
while (buf.isWritable()) {
buf.writeByte(i++);
}
ReplayingDecoderByteBuf buffer = new ReplayingDecoderByteBuf(buf);
ByteBuf slice = buffer.retainedSlice(0, 4);
assertEquals(2, slice.refCnt());
i = 0;
while (slice.isReadable()) {
assertEquals(i++, slice.readByte());
}
slice.release();
buf.release();
assertEquals(0, slice.refCnt());
assertEquals(0, buf.refCnt());
assertEquals(0, buffer.refCnt());
} |
public static Checksum create() {
return CHECKSUM_FACTORY.create();
} | @Test
public void testUpdate() {
final byte[] bytes = "Any String you want".getBytes();
final int len = bytes.length;
Checksum crc1 = Crc32C.create();
Checksum crc2 = Crc32C.create();
Checksum crc3 = Crc32C.create();
crc1.update(bytes, 0, len);
for (byte b : bytes)
crc2.update(b);
crc3.update(bytes, 0, len / 2);
crc3.update(bytes, len / 2, len - len / 2);
assertEquals(crc1.getValue(), crc2.getValue(), "Crc values should be the same");
assertEquals(crc1.getValue(), crc3.getValue(), "Crc values should be the same");
} |
public Map<String, String> getAllProperties()
{
ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
return builder.put(CONCURRENT_LIFESPANS_PER_TASK, String.valueOf(getConcurrentLifespansPerTask()))
.put(ENABLE_SERIALIZED_PAGE_CHECKSUM, String.valueOf(isEnableSerializedPageChecksum()))
.put(ENABLE_VELOX_EXPRESSION_LOGGING, String.valueOf(isEnableVeloxExpressionLogging()))
.put(ENABLE_VELOX_TASK_LOGGING, String.valueOf(isEnableVeloxTaskLogging()))
.put(HTTP_SERVER_HTTP_PORT, String.valueOf(getHttpServerPort()))
.put(HTTP_SERVER_REUSE_PORT, String.valueOf(isHttpServerReusePort()))
.put(HTTP_SERVER_BIND_TO_NODE_INTERNAL_ADDRESS_ONLY_ENABLED, String.valueOf(isHttpServerBindToNodeInternalAddressOnlyEnabled()))
.put(REGISTER_TEST_FUNCTIONS, String.valueOf(isRegisterTestFunctions()))
.put(HTTP_SERVER_HTTPS_PORT, String.valueOf(getHttpsServerPort()))
.put(HTTP_SERVER_HTTPS_ENABLED, String.valueOf(isEnableHttpsCommunication()))
.put(HTTPS_CIPHERS, String.valueOf(getHttpsCiphers()))
.put(HTTPS_CERT_PATH, String.valueOf(getHttpsCertPath()))
.put(HTTPS_KEY_PATH, String.valueOf(getHttpsKeyPath()))
.put(HTTP_SERVER_NUM_IO_THREADS_HW_MULTIPLIER, String.valueOf(getHttpServerNumIoThreadsHwMultiplier()))
.put(EXCHANGE_HTTP_CLIENT_NUM_IO_THREADS_HW_MULTIPLIER, String.valueOf(getExchangeHttpClientNumIoThreadsHwMultiplier()))
.put(ASYNC_DATA_CACHE_ENABLED, String.valueOf(getAsyncDataCacheEnabled()))
.put(ASYNC_CACHE_SSD_GB, String.valueOf(getAsyncCacheSsdGb()))
.put(CONNECTOR_NUM_IO_THREADS_HW_MULTIPLIER, String.valueOf(getConnectorNumIoThreadsHwMultiplier()))
.put(PRESTO_VERSION, getPrestoVersion())
.put(SHUTDOWN_ONSET_SEC, String.valueOf(getShutdownOnsetSec()))
.put(SYSTEM_MEMORY_GB, String.valueOf(getSystemMemoryGb()))
.put(QUERY_MEMORY_GB, String.valueOf(getQueryMemoryGb()))
.put(USE_MMAP_ALLOCATOR, String.valueOf(getUseMmapAllocator()))
.put(MEMORY_ARBITRATOR_KIND, String.valueOf(getMemoryArbitratorKind()))
.put(MEMORY_ARBITRATOR_CAPACITY_GB, String.valueOf(getMemoryArbitratorCapacityGb()))
.put(MEMORY_ARBITRATOR_RESERVED_CAPACITY_GB, String.valueOf(getMemoryArbitratorReservedCapacityGb()))
.put(MEMORY_POOL_INIT_CAPACITY, String.valueOf(getMemoryPoolInitCapacity()))
.put(MEMORY_POOL_RESERVED_CAPACITY, String.valueOf(getMemoryPoolReservedCapacity()))
.put(MEMORY_POOL_TRANSFER_CAPACITY, String.valueOf(getMemoryPoolTransferCapacity()))
.put(MEMORY_RECLAIM_WAIT_MS, String.valueOf(getMemoryReclaimWaitMs()))
.put(SPILLER_SPILL_PATH, String.valueOf(getSpillerSpillPath()))
.put(TASK_MAX_DRIVERS_PER_TASK, String.valueOf(getMaxDriversPerTask()))
.put(ENABLE_OLD_TASK_CLEANUP, String.valueOf(getOldTaskCleanupMs()))
.put(SHUFFLE_NAME, getShuffleName())
.put(HTTP_SERVER_ACCESS_LOGS, String.valueOf(isEnableHttpServerAccessLog()))
.put(CORE_ON_ALLOCATION_FAILURE_ENABLED, String.valueOf(isCoreOnAllocationFailureEnabled()))
.build();
} | @Test
public void testNativeExecutionVeloxConfig()
{
// Test defaults
assertRecordedDefaults(ConfigAssertions.recordDefaults(NativeExecutionVeloxConfig.class)
.setCodegenEnabled(false)
.setSpillEnabled(true)
.setAggregationSpillEnabled(true)
.setJoinSpillEnabled(true)
.setOrderBySpillEnabled(true)
.setMaxSpillBytes(500L << 30));
// Test explicit property mapping. Also makes sure properties returned by getAllProperties() cover the full property list.
NativeExecutionVeloxConfig expected = new NativeExecutionVeloxConfig()
.setCodegenEnabled(true)
.setSpillEnabled(false)
.setAggregationSpillEnabled(false)
.setJoinSpillEnabled(false)
.setOrderBySpillEnabled(false)
.setMaxSpillBytes(1L);
Map<String, String> properties = expected.getAllProperties();
assertFullMapping(properties, expected);
} |
@Override
public Result reconcile(Request request) {
client.fetch(Tag.class, request.name())
.ifPresent(tag -> {
if (ExtensionUtil.isDeleted(tag)) {
if (removeFinalizers(tag.getMetadata(), Set.of(FINALIZER_NAME))) {
client.update(tag);
}
return;
}
addFinalizers(tag.getMetadata(), Set.of(FINALIZER_NAME));
Map<String, String> annotations = MetadataUtil.nullSafeAnnotations(tag);
String newPattern = tagPermalinkPolicy.pattern();
annotations.put(Constant.PERMALINK_PATTERN_ANNO, newPattern);
String permalink = tagPermalinkPolicy.permalink(tag);
var status = tag.getStatusOrDefault();
status.setPermalink(permalink);
// Update the observed version.
status.setObservedVersion(tag.getMetadata().getVersion() + 1);
client.update(tag);
});
return Result.doNotRetry();
} | @Test
void reconcileDelete() {
Tag tag = tag();
tag.getMetadata().setDeletionTimestamp(Instant.now());
tag.getMetadata().setFinalizers(Set.of(TagReconciler.FINALIZER_NAME));
when(client.fetch(eq(Tag.class), eq("fake-tag")))
.thenReturn(Optional.of(tag));
ArgumentCaptor<Tag> captor = ArgumentCaptor.forClass(Tag.class);
tagReconciler.reconcile(new TagReconciler.Request("fake-tag"));
verify(client, times(1)).update(captor.capture());
verify(tagPermalinkPolicy, times(0)).permalink(any());
} |
@Override
public void transform(Message message, DataType fromType, DataType toType) {
final Map<String, Object> headers = message.getHeaders();
CloudEvent cloudEvent = CloudEvents.v1_0;
headers.putIfAbsent(CloudEvents.CAMEL_CLOUD_EVENT_ID, message.getExchange().getExchangeId());
headers.putIfAbsent(CloudEvent.CAMEL_CLOUD_EVENT_VERSION, cloudEvent.version());
headers.put(CloudEvents.CAMEL_CLOUD_EVENT_TYPE, "org.apache.camel.event.azure.storage.blob.getBlob");
if (message.getHeaders().containsKey(BlobConstants.E_TAG)) {
headers.put(CloudEvents.CAMEL_CLOUD_EVENT_SOURCE,
"azure.storage.blob." + message.getHeader(BlobConstants.E_TAG, String.class));
}
headers.put(CloudEvents.CAMEL_CLOUD_EVENT_SUBJECT, message.getHeader(BlobConstants.BLOB_NAME, String.class));
headers.put(CloudEvents.CAMEL_CLOUD_EVENT_TIME, cloudEvent.getEventTime(message.getExchange()));
headers.put(CloudEvents.CAMEL_CLOUD_EVENT_CONTENT_TYPE, CloudEvents.APPLICATION_OCTET_STREAM_MIME_TYPE);
} | @Test
void shouldMapToCloudEvent() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
exchange.getMessage().setHeader(BlobConstants.BLOB_NAME, "myBlob");
exchange.getMessage().setHeader(BlobConstants.E_TAG, "eTag");
exchange.getMessage().setBody(new ByteArrayInputStream("Test1".getBytes(StandardCharsets.UTF_8)));
transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);
Assertions.assertTrue(exchange.getMessage().hasHeaders());
Assertions.assertTrue(exchange.getMessage().getHeaders().containsKey(BlobConstants.BLOB_NAME));
assertEquals("org.apache.camel.event.azure.storage.blob.getBlob",
exchange.getMessage().getHeader(CloudEvent.CAMEL_CLOUD_EVENT_TYPE));
assertEquals("myBlob", exchange.getMessage().getHeader(CloudEvent.CAMEL_CLOUD_EVENT_SUBJECT));
assertEquals("azure.storage.blob.eTag", exchange.getMessage().getHeader(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE));
} |
@Deprecated
public static <T> Task<T> callable(final String name, final Callable<? extends T> callable) {
return Task.callable(name, () -> callable.call());
} | @Test
public void testDelayTaskCompleted() throws InterruptedException {
final Task<Integer> task = Task.callable(() -> 1234);
final Task<Integer> taskWithDelay = task.withDelay(1, TimeUnit.SECONDS);
getEngine().run(taskWithDelay);
taskWithDelay.await(200, TimeUnit.MILLISECONDS);
// Both tasks should not be completed yet, since the delay is currently still ongoing.
assertFalse(taskWithDelay.isDone());
assertFalse(task.isDone());
taskWithDelay.await(3, TimeUnit.SECONDS);
// Both tasks should now be completed.
assertTrue(taskWithDelay.isDone());
assertTrue(task.isDone());
// Both tasks should not have failed.
assertFalse(task.isFailed());
assertFalse(taskWithDelay.isFailed());
// The promise should be resolved and the underlying task's value should be cascaded to the top level task.
assertEquals(1234, taskWithDelay.get().intValue());
} |
@Override
public AbstractWALEvent decode(final ByteBuffer data, final BaseLogSequenceNumber logSequenceNumber) {
AbstractWALEvent result;
byte[] bytes = new byte[data.remaining()];
data.get(bytes);
String dataText = new String(bytes, StandardCharsets.UTF_8);
if (decodeWithTX) {
result = decodeDataWithTX(dataText);
} else {
result = decodeDataIgnoreTX(dataText);
}
result.setLogSequenceNumber(logSequenceNumber);
return result;
} | @Test
void assertDecodeWriteRowEventWithBoolean() {
MppTableData tableData = new MppTableData();
tableData.setTableName("public.test");
tableData.setOpType("INSERT");
tableData.setColumnsName(new String[]{"data"});
tableData.setColumnsType(new String[]{"boolean"});
tableData.setColumnsVal(new String[]{Boolean.TRUE.toString()});
ByteBuffer data = ByteBuffer.wrap(JsonUtils.toJsonString(tableData).getBytes());
WriteRowEvent actual = (WriteRowEvent) new MppdbDecodingPlugin(null, false, false).decode(data, logSequenceNumber);
assertThat(actual.getLogSequenceNumber(), is(logSequenceNumber));
assertThat(actual.getTableName(), is("test"));
Object byteaObj = actual.getAfterRow().get(0);
assertThat(byteaObj.toString(), is(Boolean.TRUE.toString()));
} |
@Override
public ScalarOperator visitBetweenPredicate(BetweenPredicateOperator predicate, Void context) {
return shuttleIfUpdate(predicate);
} | @Test
void visitBetweenPredicate() {
BetweenPredicateOperator operator = new BetweenPredicateOperator(true,
new ColumnRefOperator(1, INT, "id", true),
ConstantOperator.createInt(1), ConstantOperator.createInt(10));
{
ScalarOperator newOperator = shuttle.visitBetweenPredicate(operator, null);
assertEquals(operator, newOperator);
}
{
ScalarOperator newOperator = shuttle2.visitBetweenPredicate(operator, null);
assertEquals(operator, newOperator);
}
} |
@TargetApi(Build.VERSION_CODES.N)
public static File getVolumeDirectory(StorageVolume volume) {
try {
Field f = StorageVolume.class.getDeclaredField("mPath");
f.setAccessible(true);
return (File) f.get(volume);
} catch (Exception e) {
// This shouldn't fail, as mPath has been there in every version
throw new RuntimeException(e);
}
} | @Test
@Config(sdk = {P}) // min sdk is N
public void testGetVolumeDirectory() throws Exception {
StorageVolume mock = mock(StorageVolume.class);
Field f = StorageVolume.class.getDeclaredField("mPath");
f.setAccessible(true);
f.set(mock, new File("/storage/emulated/0"));
File result = Utils.getVolumeDirectory(mock);
assertNotNull(result);
assertEquals(new File("/storage/emulated/0"), result);
} |
public synchronized static void clear(){
fallbackProviderCache.clear();
} | @Test
public void clear() {
MyNullResponseFallBackProvider myNullResponseFallBackProvider = new MyNullResponseFallBackProvider();
ZuulBlockFallbackManager.registerProvider(myNullResponseFallBackProvider);
Assert.assertEquals(myNullResponseFallBackProvider.getRoute(), ROUTE);
ZuulBlockFallbackManager.clear();
Assert.assertEquals(ZuulBlockFallbackManager.getFallbackProvider(ROUTE).getRoute(), DEFAULT_ROUTE);
} |
@Udf
public Integer len(
@UdfParameter(description = "The input string") final String input) {
if (input == null) {
return null;
}
return input.length();
} | @Test
public void shouldReturnNullForNullInput() {
assertThat(udf.len((String) null), is(nullValue()));
assertThat(udf.len((ByteBuffer) null), is(nullValue()));
} |
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
gauges.put("total.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit() +
mxBean.getNonHeapMemoryUsage().getInit());
gauges.put("total.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed() +
mxBean.getNonHeapMemoryUsage().getUsed());
gauges.put("total.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax() == -1 ?
-1 : mxBean.getHeapMemoryUsage().getMax() + mxBean.getNonHeapMemoryUsage().getMax());
gauges.put("total.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted() +
mxBean.getNonHeapMemoryUsage().getCommitted());
gauges.put("heap.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit());
gauges.put("heap.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed());
gauges.put("heap.max", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getMax());
gauges.put("heap.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted());
gauges.put("heap.usage", new RatioGauge() {
@Override
protected Ratio getRatio() {
final MemoryUsage usage = mxBean.getHeapMemoryUsage();
return Ratio.of(usage.getUsed(), usage.getMax());
}
});
gauges.put("non-heap.init", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getInit());
gauges.put("non-heap.used", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getUsed());
gauges.put("non-heap.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax());
gauges.put("non-heap.committed", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getCommitted());
gauges.put("non-heap.usage", new RatioGauge() {
@Override
protected Ratio getRatio() {
final MemoryUsage usage = mxBean.getNonHeapMemoryUsage();
return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
}
});
for (final MemoryPoolMXBean pool : memoryPools) {
final String poolName = name("pools", WHITESPACE.matcher(pool.getName()).replaceAll("-"));
gauges.put(name(poolName, "usage"), new RatioGauge() {
@Override
protected Ratio getRatio() {
MemoryUsage usage = pool.getUsage();
return Ratio.of(usage.getUsed(),
usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
}
});
gauges.put(name(poolName, "max"), (Gauge<Long>) () -> pool.getUsage().getMax());
gauges.put(name(poolName, "used"), (Gauge<Long>) () -> pool.getUsage().getUsed());
gauges.put(name(poolName, "committed"), (Gauge<Long>) () -> pool.getUsage().getCommitted());
// Only register GC usage metrics if the memory pool supports usage statistics.
if (pool.getCollectionUsage() != null) {
gauges.put(name(poolName, "used-after-gc"), (Gauge<Long>) () ->
pool.getCollectionUsage().getUsed());
}
gauges.put(name(poolName, "init"), (Gauge<Long>) () -> pool.getUsage().getInit());
}
return Collections.unmodifiableMap(gauges);
} | @Test
public void hasAGaugeForNonHeapUsageWhenNonHeapMaxUndefined() {
when(nonHeap.getMax()).thenReturn(-1L);
final Gauge gauge = (Gauge) gauges.getMetrics().get("non-heap.usage");
assertThat(gauge.getValue())
.isEqualTo(3.0);
} |
@Override
public boolean isIn(String ipAddress) {
//is cache expired
//Uses Double Checked Locking using volatile
if (cacheExpiryTimeStamp >= 0 && cacheExpiryTimeStamp < System.currentTimeMillis()) {
synchronized(this) {
//check if cache expired again
if (cacheExpiryTimeStamp < System.currentTimeMillis()) {
reset();
}
}
}
return ipList.isIn(ipAddress);
} | @Test
public void testAddWithSleepForCacheTimeout() throws IOException, InterruptedException {
String[] ips = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips);
CacheableIPList cipl = new CacheableIPList(
new FileBasedIPList("ips.txt"),100);
assertFalse("10.113.221.222 is in the list",
cipl.isIn("10.113.221.222"));
assertFalse ("10.222.103.121 is in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
String[]ips2 = {"10.119.103.112", "10.221.102.0/23",
"10.222.0.0/16", "10.113.221.221", "10.113.221.222"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips2);
Thread.sleep(101);
assertTrue("10.113.221.222 is not in the list",
cipl.isIn("10.113.221.222"));
assertTrue ("10.222.103.121 is not in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
} |
@Override
public void deleteTag(Long id) {
// Validate that the tag exists
validateTagExists(id);
// Validate that no users are associated with the tag
validateTagHasUser(id);
// Delete the tag
memberTagMapper.deleteById(id);
} | @Test
public void testDeleteTag_success() {
// Mock data
MemberTagDO dbTag = randomPojo(MemberTagDO.class);
tagMapper.insert(dbTag);// @Sql: insert an existing record first
// Prepare parameters
Long id = dbTag.getId();
// Invoke
tagService.deleteTag(id);
// Verify the record no longer exists
assertNull(tagMapper.selectById(id));
} |
@Override
public List<Distribution.Method> getMethods(final Path container) {
return Collections.singletonList(Distribution.CUSTOM);
} | @Test
public void testGetMethods() {
assertEquals(Collections.singletonList(Distribution.CUSTOM),
new CustomOriginCloudFrontDistributionConfiguration(new Host(new TestProtocol()), new DefaultX509TrustManager(), new DefaultX509KeyManager()).getMethods(
new Path("/bbb", EnumSet.of(Path.Type.directory, Path.Type.volume))));
} |
public static boolean supportCodegenForJavaSerialization(Class<?> cls) {
// bean class can be static nested class, but can't be a non-static inner class
// If a class is a static class, the enclosing class must not be null.
// If enclosing class is null, it must not be a static class.
try {
return cls.getEnclosingClass() == null || Modifier.isStatic(cls.getModifiers());
} catch (Throwable t) {
throw new RuntimeException(t);
}
} | @Test
public void testSupport() {
assertTrue(CodegenSerializer.supportCodegenForJavaSerialization(Cyclic.class));
} |
public static RpcStatus getStatus(String service) {
return SERVICE_STATUS_MAP.computeIfAbsent(service, key -> new RpcStatus());
} | @Test
public void getStatus() {
RpcStatus rpcStatus1 = RpcStatus.getStatus(SERVICE);
Assertions.assertNotNull(rpcStatus1);
RpcStatus rpcStatus2 = RpcStatus.getStatus(SERVICE);
Assertions.assertEquals(rpcStatus1, rpcStatus2);
} |
public String anonymize(final ParseTree tree) {
return build(tree);
} | @Test
public void shouldWorkAsExpectedWhenPassedAParseTreeInsteadOfString() {
// Given:
final ParserRuleContext tree =
DefaultKsqlParser.getParseTree("DESCRIBE my_stream EXTENDED;");
// Then:
Assert.assertEquals("DESCRIBE stream1 EXTENDED;",
anon.anonymize(tree));
} |
static void checkForDuplicates(ClassLoader classLoader, ILogger logger, String resourceName) {
try {
List<URL> resources = Collections.list(classLoader.getResources(resourceName));
if (resources.size() > 1) {
String formattedResourceUrls = resources.stream().map(URL::toString).collect(Collectors.joining(", "));
logger.warning("WARNING: Classpath misconfiguration: found multiple " + resourceName
+ " resources: " + formattedResourceUrls);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
} | @Test
public void should_log_warning_when_duplicate_found() throws Exception {
URLClassLoader classLoader = classLoaderWithJars(dummyJarFile, duplicateJar(dummyJarFile));
DuplicatedResourcesScanner.checkForDuplicates(classLoader, logger, SOME_EXISTING_RESOURCE_FILE);
ArgumentCaptor<String> logCaptor = ArgumentCaptor.forClass(String.class);
verify(logger).warning(logCaptor.capture());
assertThat(logCaptor.getValue()).contains("WARNING: Classpath misconfiguration: found multiple " + SOME_EXISTING_RESOURCE_FILE);
} |
@Override
public void flush(ChannelHandlerContext ctx) throws Exception {
if (readInProgress) {
// If there is still a read in progress we are sure we will see a channelReadComplete(...) call. Thus
// we only need to flush if we reach the explicitFlushAfterFlushes limit.
if (++flushPendingCount == explicitFlushAfterFlushes) {
flushNow(ctx);
}
} else if (consolidateWhenNoReadInProgress) {
// Flush immediately if we reach the threshold, otherwise schedule
if (++flushPendingCount == explicitFlushAfterFlushes) {
flushNow(ctx);
} else {
scheduleFlush(ctx);
}
} else {
// Always flush directly
flushNow(ctx);
}
} | @Test
public void testFlushViaReadComplete() {
final AtomicInteger flushCount = new AtomicInteger();
EmbeddedChannel channel = newChannel(flushCount, false);
// Flush should go through as there is no read loop in progress.
channel.flush();
channel.runPendingTasks();
assertEquals(1, flushCount.get());
// Simulate read loop;
channel.pipeline().fireChannelRead(1L);
assertEquals(1, flushCount.get());
channel.pipeline().fireChannelRead(2L);
assertEquals(1, flushCount.get());
assertNull(channel.readOutbound());
channel.pipeline().fireChannelReadComplete();
assertEquals(2, flushCount.get());
// Now flush again as the read loop is complete.
channel.flush();
channel.runPendingTasks();
assertEquals(3, flushCount.get());
assertEquals(1L, (Long) channel.readOutbound());
assertEquals(2L, (Long) channel.readOutbound());
assertNull(channel.readOutbound());
assertFalse(channel.finish());
} |
public void createTopic(final String addr, final String defaultTopic, final TopicConfig topicConfig,
final long timeoutMillis)
throws RemotingException, MQBrokerException, InterruptedException, MQClientException {
Validators.checkTopicConfig(topicConfig);
CreateTopicRequestHeader requestHeader = new CreateTopicRequestHeader();
requestHeader.setTopic(topicConfig.getTopicName());
requestHeader.setDefaultTopic(defaultTopic);
requestHeader.setReadQueueNums(topicConfig.getReadQueueNums());
requestHeader.setWriteQueueNums(topicConfig.getWriteQueueNums());
requestHeader.setPerm(topicConfig.getPerm());
requestHeader.setTopicFilterType(topicConfig.getTopicFilterType().name());
requestHeader.setTopicSysFlag(topicConfig.getTopicSysFlag());
requestHeader.setOrder(topicConfig.isOrder());
requestHeader.setAttributes(AttributeParser.parseToString(topicConfig.getAttributes()));
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.UPDATE_AND_CREATE_TOPIC, requestHeader);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return;
}
default:
break;
}
throw new MQClientException(response.getCode(), response.getRemark());
} | @Test
public void testCreateTopic_Success() throws Exception {
doAnswer((Answer<RemotingCommand>) mock -> {
RemotingCommand request = mock.getArgument(1);
RemotingCommand response = RemotingCommand.createResponseCommand(null);
response.setCode(ResponseCode.SUCCESS);
response.setOpaque(request.getOpaque());
return response;
}).when(remotingClient).invokeSync(anyString(), any(RemotingCommand.class), anyLong());
mqClientAPI.createTopic(brokerAddr, topic, new TopicConfig(), 10000);
} |
public Tuple2<Long, Double> increase(String name, ImmutableMap<String, String> labels, Double value, long windowSize, long now) {
ID id = new ID(name, labels);
Queue<Tuple2<Long, Double>> window = windows.computeIfAbsent(id, unused -> new PriorityQueue<>());
synchronized (window) {
window.offer(Tuple.of(now, value));
long waterLevel = now - windowSize;
Tuple2<Long, Double> peek = window.peek();
if (peek._1 > waterLevel) {
return peek;
}
Tuple2<Long, Double> result = peek;
while (peek._1 < waterLevel) {
result = window.poll();
peek = window.element();
}
// Choose the closed slot to the expected timestamp
if (waterLevel - result._1 <= peek._1 - waterLevel) {
return result;
}
return peek;
}
} | @Test
public void testPT2M() {
double[] actuals = parameters().stream().mapToDouble(e -> {
Tuple2<Long, Double> increase = CounterWindow.INSTANCE.increase(
"test", ImmutableMap.<String, String>builder().build(), e._2,
Duration.parse("PT2M").getSeconds() * 1000, e._1
);
return e._2 - increase._2;
}).toArray();
Assertions.assertArrayEquals(new double[] {0, 1d, 2d, 3d, 4d, 0d, 1d, 2d}, actuals, 0.d);
} |
@Override
public void setResponseHeader(final String name, final String value)
{
final String headerName;
if (RestConstants.HEADER_ID.equals(name))
{
headerName = RestConstants.HEADER_ID;
}
else if (RestConstants.HEADER_RESTLI_ID.equals(name))
{
headerName = RestConstants.HEADER_RESTLI_ID;
}
else
{
headerName = null;
}
if (headerName != null)
{
throw new IllegalArgumentException("Illegal to set the \"" + headerName + "\" header. This header is reserved for the ID returned from create method on the resource.");
}
_responseHeaders.put(name, value);
} | @Test(expectedExceptions = IllegalArgumentException.class)
public void testSetIdHeader() throws RestLiSyntaxException
{
final ResourceContextImpl context = new ResourceContextImpl();
context.setResponseHeader(RestConstants.HEADER_ID, "foobar");
} |
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
final Calendar calendar = readDateTime(data, 0);
if (calendar == null) {
onInvalidDataReceived(device, data);
return;
}
onDateTimeReceived(device, calendar);
} | @Test
public void onInvalidDataReceived_dataTooShort() {
final DataReceivedCallback callback = new DateTimeDataCallback() {
@Override
public void onDateTimeReceived(@NonNull final BluetoothDevice device, @NonNull final Calendar calendar) {
assertEquals("Incorrect Date and Time reported as correct", 1, 2);
}
@Override
public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
assertNotEquals("Incorrect Date and Time size", 7, data.size());
}
};
final Data data = new Data(new byte[] {(byte) 0xE2, 0x07, 4, 17, 15, 51 });
callback.onDataReceived(null, data);
} |
public static ResourceCalculatorProcessTree getResourceCalculatorProcessTree(
String pid, Class<? extends ResourceCalculatorProcessTree> clazz, Configuration conf) {
if (clazz != null) {
try {
Constructor <? extends ResourceCalculatorProcessTree> c = clazz.getConstructor(String.class);
ResourceCalculatorProcessTree rctree = c.newInstance(pid);
rctree.setConf(conf);
rctree.initialize();
return rctree;
} catch(Exception e) {
throw new RuntimeException(e);
}
}
// No class given, try a os specific class
if (ProcfsBasedProcessTree.isAvailable()) {
return new ProcfsBasedProcessTree(pid);
}
if (WindowsBasedProcessTree.isAvailable()) {
return new WindowsBasedProcessTree(pid);
}
// Not supported on this system.
return null;
} | @Test
void testCreateInstance() {
ResourceCalculatorProcessTree tree;
tree = ResourceCalculatorProcessTree.getResourceCalculatorProcessTree("1", EmptyProcessTree.class, new Configuration());
assertNotNull(tree);
assertThat(tree, instanceOf(EmptyProcessTree.class));
} |
public HikariDataSource getDataSource() {
return ds;
} | @Test
@Ignore
public void testGetPostgresDataSource() {
DataSource ds = SingletonServiceFactory.getBean(PostgresDataSource.class).getDataSource();
assertNotNull(ds);
try(Connection connection = ds.getConnection()){
assertNotNull(connection);
} catch (SQLException e) {
e.printStackTrace();
}
} |
@Override
public String getName() {
return ANALYZER_NAME;
} | @Test
public void testGetAnalyzerName() {
String expected = "Libman Analyzer";
String actual = analyzer.getName();
assertEquals(expected, actual);
} |
@Override
public void run() {
// top-level command, do nothing
} | @Test
public void test_listSnapshots() {
// Given
// When
run("list-snapshots");
// Then
String actual = captureOut();
assertTrue("output should contain one line (the table header), but contains:\n" + actual,
actual.trim().indexOf('\n') < 0 && !actual.isEmpty());
} |
public static CompositeEvictionChecker newCompositeEvictionChecker(CompositionOperator compositionOperator,
EvictionChecker... evictionCheckers) {
Preconditions.isNotNull(compositionOperator, "composition");
Preconditions.isNotNull(evictionCheckers, "evictionCheckers");
if (evictionCheckers.length == 0) {
throw new IllegalArgumentException("EvictionCheckers cannot be empty!");
}
switch (compositionOperator) {
case AND:
return new CompositeEvictionCheckerWithAndComposition(evictionCheckers);
case OR:
return new CompositeEvictionCheckerWithOrComposition(evictionCheckers);
default:
throw new IllegalArgumentException("Invalid composition operator: " + compositionOperator);
}
} | @Test(expected = IllegalArgumentException.class)
public void compositionOperatorCannotBeNull() {
CompositeEvictionChecker.newCompositeEvictionChecker(
null,
mock(EvictionChecker.class),
mock(EvictionChecker.class));
} |
public DataPoint getLastDataPoint() {
return dataPoints.get(dataPoints.size() - 1);
} | @Test
public void testGetLastDataPoint() {
cm = new ChartModel(FOO, BAR);
long time = System.currentTimeMillis();
cm.addDataPoint(time)
.data(FOO, VALUES1[0])
.data(BAR, VALUES2[0]);
cm.addDataPoint(time + 1)
.data(FOO, VALUES1[1])
.data(BAR, VALUES2[1]);
assertEquals("Wrong result", VALUES1[1], cm.getLastDataPoint().get(FOO));
assertEquals("Wrong result", VALUES2[1], cm.getLastDataPoint().get(BAR));
} |
public static void main(String[] args) throws InterruptedException {
var app = new App();
try {
app.promiseUsage();
} finally {
app.stop();
}
} | @Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(null));
} |
@Override
public double sd() {
return Math.sqrt(k) * theta;
} | @Test
public void testSd() {
System.out.println("sd");
GammaDistribution instance = new GammaDistribution(3, 2.1);
instance.rand();
assertEquals(3.637307, instance.sd(), 1E-6);
} |
@Override
public int hashCode() {
return uniqueKey.hashCode();
} | @Test
void testEqualsHashCode() {
List<Pair<String>> list = new LinkedList<>();
list.add(new Pair<>("test", 1));
list.add(new Pair<>("test2", 1));
Chooser<String, String> chooser = new Chooser<>("test", list);
assertEquals("test".hashCode(), chooser.hashCode());
assertEquals(chooser, chooser);
assertNotEquals(null, chooser);
assertNotEquals("test", chooser);
Chooser<String, String> chooser1 = new Chooser<>(null, null);
assertNotEquals(chooser, chooser1);
assertNotEquals(chooser1, chooser);
Chooser<String, String> chooser2 = new Chooser<>("test", Collections.emptyList());
assertNotEquals(chooser, chooser2);
assertNotEquals(chooser2, chooser);
Chooser<String, String> chooser3 = new Chooser<>("test1", list);
assertNotEquals(chooser, chooser3);
Chooser<String, String> chooser4 = new Chooser<>("test", list);
assertEquals(chooser, chooser4);
} |
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
try {
// Metadata for the root folder is unsupported
if(file.isRoot()) {
// Retrieve the namespace ID for a users home folder and team root folder
final FullAccount account = new DbxUserUsersRequests(session.getClient()).getCurrentAccount();
if(log.isDebugEnabled()) {
log.debug(String.format("Set root namespace %s", account.getRootInfo().getRootNamespaceId()));
}
return new PathAttributes().withFileId(account.getRootInfo().getRootNamespaceId());
}
final Metadata metadata = new DbxUserFilesRequests(session.getClient(file)).getMetadata(containerService.getKey(file));
if(metadata instanceof FileMetadata) {
if(file.isDirectory()) {
throw new NotfoundException(file.getAbsolute());
}
}
if(metadata instanceof FolderMetadata) {
if(file.isFile()) {
throw new NotfoundException(file.getAbsolute());
}
}
return this.toAttributes(metadata);
}
catch(DbxException e) {
throw new DropboxExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
} | @Test
public void testFindFile() throws Exception {
final Path root = new DefaultHomeFinderService(session).find();
final Path folder = new DropboxDirectoryFeature(session).mkdir(new Path(root,
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), null);
final DropboxAttributesFinderFeature f = new DropboxAttributesFinderFeature(session);
assertEquals(-1L, f.find(folder).getModificationDate());
final Path file = new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new DropboxTouchFeature(session).touch(file, new TransferStatus());
final PathAttributes attr = f.find(file);
assertEquals("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", attr.getChecksum().hash);
assertNotEquals(-1L, attr.getModificationDate());
assertNotNull(attr.getVersionId());
new DropboxDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public int prepare(final Xid xid) throws XAException {
try {
return delegate.prepare(xid);
} catch (final XAException ex) {
throw mapXAException(ex);
}
} | @Test
void assertPrepare() throws XAException {
singleXAResource.prepare(xid);
verify(xaResource).prepare(xid);
} |
@Override
public FsCheckpointStateOutputStream createCheckpointStateOutputStream(
CheckpointedStateScope scope) throws IOException {
Path target = getTargetPath(scope);
int bufferSize = Math.max(writeBufferSize, fileStateThreshold);
// Whether the file system dynamically injects entropy into the file paths.
final boolean entropyInjecting = EntropyInjector.isEntropyInjecting(filesystem, target);
final boolean absolutePath = entropyInjecting || scope == CheckpointedStateScope.SHARED;
return new FsCheckpointStateOutputStream(
target, filesystem, bufferSize, fileStateThreshold, !absolutePath);
} | @Test
void testEntropyMakesExclusiveStateAbsolutePaths() throws IOException {
final FsStateBackendEntropyTest.TestEntropyAwareFs fs =
new FsStateBackendEntropyTest.TestEntropyAwareFs();
final FsCheckpointStreamFactory factory = createFactory(fs, 0);
final FsCheckpointStreamFactory.FsCheckpointStateOutputStream stream =
factory.createCheckpointStateOutputStream(CheckpointedStateScope.EXCLUSIVE);
stream.write(0);
final StreamStateHandle handle = stream.closeAndGetHandle();
assertThat(handle).isInstanceOf(FileStateHandle.class);
assertThat(handle).isNotInstanceOf(RelativeFileStateHandle.class);
assertPathsEqual(
exclusiveStateDir.resolve(fs.generateEntropy()),
((FileStateHandle) handle).getFilePath().getParent());
} |
public void tx(VoidFunc1<Session> func) throws SQLException {
try {
beginTransaction();
func.call(this);
commit();
} catch (Throwable e) {
quietRollback();
throw (e instanceof SQLException) ? (SQLException) e : new SQLException(e);
}
} | @Test
@Disabled
public void txTest() throws SQLException {
Session.create("test").tx(session -> session.update(Entity.create().set("age", 78), Entity.create("user").set("name", "unitTestUser")));
} |
public static IpAddress valueOf(int value) {
byte[] bytes =
ByteBuffer.allocate(INET_BYTE_LENGTH).putInt(value).array();
return new IpAddress(Version.INET, bytes);
} | @Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfShortArrayIPv6() {
IpAddress ipAddress;
byte[] value;
value = new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9};
ipAddress = IpAddress.valueOf(IpAddress.Version.INET6, value);
} |
@Override
@Async
public void onApplicationEvent(MockInvocationEvent event) {
log.debug("Received a MockInvocationEvent on {} - v{}", event.getServiceName(), event.getServiceVersion());
// Compute day string representation.
Calendar calendar = Calendar.getInstance();
calendar.setTime(event.getInvocationTimestamp());
// Computing keys based on invocation date.
int month = calendar.get(Calendar.MONTH) + 1;
String monthStr = (month < 10 ? "0" : "") + month;
int dayOfMonth = calendar.get(Calendar.DAY_OF_MONTH);
String dayOfMonthStr = (dayOfMonth < 10 ? "0" : "") + dayOfMonth;
String day = calendar.get(Calendar.YEAR) + monthStr + dayOfMonthStr;
String hourKey = String.valueOf(calendar.get(Calendar.HOUR_OF_DAY));
String minuteKey = String.valueOf((60 * calendar.get(Calendar.HOUR_OF_DAY)) + calendar.get(Calendar.MINUTE));
if (log.isDebugEnabled()) {
log.debug("hourKey for statistic is {}", hourKey);
log.debug("minuteKey for statistic is {}", minuteKey);
}
// First check if there's a statistic document for invocation day.
DailyStatistic statistic = null;
List<DailyStatistic> statistics = statisticsRepository.findByDayAndServiceNameAndServiceVersion(day,
event.getServiceName(), event.getServiceVersion());
if (!statistics.isEmpty()) {
statistic = statistics.get(0);
}
if (statistic == null) {
// No statistics yet...
log.debug("There's no statistics for {} yet. Create one.", day);
// Initialize a new 0 filled structure.
statistic = new DailyStatistic();
statistic.setDay(day);
statistic.setServiceName(event.getServiceName());
statistic.setServiceVersion(event.getServiceVersion());
statistic.setHourlyCount(initializeHourlyMap());
statistic.setMinuteCount(initializeMinuteMap());
// Now set first values before saving.
statistic.setDailyCount(1);
statistic.getHourlyCount().put(hourKey, 1);
statistic.getMinuteCount().put(minuteKey, 1);
statisticsRepository.save(statistic);
} else {
// Already a statistic document for this day, increment fields.
log.debug("Found an existing statistic document for {}", day);
statisticsRepository.incrementDailyStatistic(day, event.getServiceName(), event.getServiceVersion(), hourKey,
minuteKey);
}
log.debug("Processing of MockInvocationEvent done !");
} | @Test
void testOnApplicationEvent() {
Calendar today = Calendar.getInstance();
MockInvocationEvent event = new MockInvocationEvent(this, "TestService1", "1.0", "123456789", today.getTime(),
100);
// Fire event a first time.
feeder.onApplicationEvent(event);
SimpleDateFormat formater = new SimpleDateFormat("yyyyMMdd");
String day = formater.format(today.getTime());
DailyStatistic stat = statisticsRepository.findByDayAndServiceNameAndServiceVersion(day, "TestService1", "1.0")
.get(0);
assertNotNull(stat);
assertNotNull(stat.getId());
assertEquals(day, stat.getDay());
assertEquals("TestService1", stat.getServiceName());
assertEquals("1.0", stat.getServiceVersion());
assertEquals(1, stat.getDailyCount());
assertEquals(1, stat.getHourlyCount().get(String.valueOf(today.get(Calendar.HOUR_OF_DAY))));
// Fire event a second time.
feeder.onApplicationEvent(event);
stat = statisticsRepository.findByDayAndServiceNameAndServiceVersion(day, "TestService1", "1.0").get(0);
assertNotNull(stat);
assertNotNull(stat.getId());
assertEquals(day, stat.getDay());
assertEquals("TestService1", stat.getServiceName());
assertEquals("1.0", stat.getServiceVersion());
assertEquals(2, stat.getDailyCount());
assertEquals(2, stat.getHourlyCount().get(String.valueOf(today.get(Calendar.HOUR_OF_DAY))));
} |
public static CsvWriter getWriter(String filePath, Charset charset) {
return new CsvWriter(filePath, charset);
} | @Test
@Disabled
public void writeWrapTest(){
List<List<Object>> resultList=new ArrayList<>();
List<Object> list =new ArrayList<>();
list.add("\"name\"");
list.add("\"code\"");
resultList.add(list);
list =new ArrayList<>();
list.add("\"wang\"");
list.add(1);
resultList.add(list);
String path = FileUtil.isWindows() ? "d:/test/csvWrapTest.csv" : "~/test/csvWrapTest.csv";
final CsvWriter writer = CsvUtil.getWriter(path, CharsetUtil.CHARSET_UTF_8);
writer.write(resultList);
} |
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, RateLimiter rateLimiter,
String methodName) throws Throwable {
RateLimiterOperator<?> rateLimiterOperator = RateLimiterOperator.of(rateLimiter);
Object returnValue = proceedingJoinPoint.proceed();
return executeRxJava2Aspect(rateLimiterOperator, returnValue);
} | @Test
public void testReactorTypes() throws Throwable {
RateLimiter rateLimiter = RateLimiter.ofDefaults("test");
when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
assertThat(
rxJava2RateLimiterAspectExt.handle(proceedingJoinPoint, rateLimiter, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
assertThat(
rxJava2RateLimiterAspectExt.handle(proceedingJoinPoint, rateLimiter, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Completable.complete());
assertThat(
rxJava2RateLimiterAspectExt.handle(proceedingJoinPoint, rateLimiter, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Maybe.just("Test"));
assertThat(
rxJava2RateLimiterAspectExt.handle(proceedingJoinPoint, rateLimiter, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Observable.just("Test"));
assertThat(
rxJava2RateLimiterAspectExt.handle(proceedingJoinPoint, rateLimiter, "testMethod"))
.isNotNull();
} |
@Override
public S3ClientBuilder createBuilder(S3Options s3Options) {
return createBuilder(S3Client.builder(), s3Options);
} | @Test
public void testSetCredentialsProvider() {
AwsCredentialsProvider credentialsProvider = mock(AwsCredentialsProvider.class);
when(s3Options.getAwsCredentialsProvider()).thenReturn(credentialsProvider);
DefaultS3ClientBuilderFactory.createBuilder(builder, s3Options);
verify(builder).credentialsProvider(credentialsProvider);
verifyNoMoreInteractions(builder);
} |
@Override
public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer) {
return aggregate(initializer, Materialized.with(null, null));
} | @Test
public void shouldNotHaveNullInitializerOnAggregate() {
assertThrows(NullPointerException.class, () -> windowedCogroupedStream.aggregate(null));
} |