index (int64) | repo_id (string, 26-205 chars) | file_path (string, 51-246 chars) | content (string, 8-433k chars) | __index_level_0__ (int64, 0-10k) |
---|---|---|---|---|
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/config/WriterConfig.java |
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.config;
import static io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties.*;
import io.mantisrx.connector.iceberg.sink.config.SinkConfig;
import io.mantisrx.runtime.parameter.Parameters;
import org.apache.hadoop.conf.Configuration;
/**
* Config for controlling Iceberg Writer semantics.
*/
public class WriterConfig extends SinkConfig {
private final int writerRowGroupSize;
private final long writerFlushFrequencyBytes;
private final long writerFlushFrequencyMsec;
private final String writerFileFormat;
private final Configuration hadoopConfig;
/**
* Creates an instance from {@link Parameters} derived from the current Mantis Stage's {@code Context}.
*/
public WriterConfig(Parameters parameters, Configuration hadoopConfig) {
super(parameters);
this.writerRowGroupSize = (int) parameters.get(
WRITER_ROW_GROUP_SIZE, WRITER_ROW_GROUP_SIZE_DEFAULT);
this.writerFlushFrequencyBytes = Long.parseLong((String) parameters.get(
WRITER_FLUSH_FREQUENCY_BYTES, WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT));
this.writerFlushFrequencyMsec = Long.parseLong((String) parameters.get(
WRITER_FLUSH_FREQUENCY_MSEC, WRITER_FLUSH_FREQUENCY_MSEC_DEFAULT));
this.writerFileFormat = (String) parameters.get(
WRITER_FILE_FORMAT, WRITER_FILE_FORMAT_DEFAULT);
this.hadoopConfig = hadoopConfig;
}
/**
* Returns an int representing the maximum number of rows that should exist in a file.
*/
public int getWriterRowGroupSize() {
return writerRowGroupSize;
}
/**
* Returns a long representing flush frequency by size in Bytes.
*/
public long getWriterFlushFrequencyBytes() {
return writerFlushFrequencyBytes;
}
/**
* Returns a long representing flush frequency by time in milliseconds.
*/
public long getWriterFlushFrequencyMsec() {
return writerFlushFrequencyMsec;
}
/**
* Returns the file format for Iceberg writers.
*/
public String getWriterFileFormat() {
return writerFileFormat;
}
/**
* Returns a Hadoop configuration which has metadata for how and where to write files.
*/
public Configuration getHadoopConfig() {
return hadoopConfig;
}
}
| 1,200 |
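A hedged sketch of constructing the WriterConfig above from a Mantis stage's Context, modeled on how IcebergCommitterStage (further down in this dump) builds its config. The helper class and the assumption that a Hadoop Configuration is registered with the service locator are illustrative only:

import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.runtime.Context;
import org.apache.hadoop.conf.Configuration;

public class WriterConfigWiring {
    // Hypothetical helper: derive writer settings from the current stage's parameters.
    public static WriterConfig fromContext(Context context) {
        // Assumption: a Hadoop Configuration has been registered with the job's service locator.
        Configuration hadoopConfig = context.getServiceLocator().service(Configuration.class);
        return new WriterConfig(context.getParameters(), hadoopConfig);
    }
}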
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/partitioner/NoOpPartitioner.java |
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.partitioner;
import org.apache.iceberg.StructLike;
/**
* Partitioner to use for unpartitioned Iceberg tables.
*/
public class NoOpPartitioner implements Partitioner {
@Override
public StructLike partition(StructLike record) {
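// Unpartitioned tables have no partition key, so no partition is emitted for the record.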
return null;
}
}
| 1,201 |
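For contrast with NoOpPartitioner above, a rough sketch of what a partitioner for a partitioned table could look like. It assumes Iceberg's org.apache.iceberg.PartitionKey with a PartitionKey(PartitionSpec, Schema) constructor, a partition(StructLike) method, and copy(); the class name is made up and this is not the connector's actual implementation:

package io.mantisrx.connector.iceberg.sink.writer.partitioner;

import org.apache.iceberg.PartitionKey;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;

public class SpecPartitioner implements Partitioner {
    private final PartitionKey partitionKey;

    public SpecPartitioner(PartitionSpec spec, Schema schema) {
        this.partitionKey = new PartitionKey(spec, schema);
    }

    @Override
    public StructLike partition(StructLike record) {
        // Compute the partition values for this record from the table's partition spec.
        partitionKey.partition(record);
        // Return a copy so the reusable key can be safely mutated by the next record.
        return partitionKey.copy();
    }
}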
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/partitioner/Partitioner.java |
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.partitioner;
import org.apache.iceberg.StructLike;
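/**
* Maps a record to the partition it belongs to, for use by Iceberg writers.
*/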
public interface Partitioner {
StructLike partition(StructLike record);
}
| 1,202 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/partitioner/PartitionerFactory.java |
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.partitioner;
import org.apache.iceberg.Table;
public interface PartitionerFactory {
Partitioner getPartitioner(Table table);
}
| 1,203 |
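A minimal PartitionerFactory sketch under the assumption that unpartitioned tables should get the NoOpPartitioner above; the factory name and the handling of partitioned tables are illustrative only:

package io.mantisrx.connector.iceberg.sink.writer.partitioner;

import org.apache.iceberg.Table;

public class DefaultPartitionerFactory implements PartitionerFactory {
    @Override
    public Partitioner getPartitioner(Table table) {
        // Unpartitioned tables need no partition key computation.
        if (table.spec().isUnpartitioned()) {
            return new NoOpPartitioner();
        }
        // A real factory would return a spec-aware partitioner here.
        throw new UnsupportedOperationException("no partitioner configured for partitioned table " + table);
    }
}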
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/IcebergCommitter.java |
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.Table;
/**
* Commits {@link DataFile}s for Iceberg tables.
*
* This class uses Iceberg's Table API and only supports Table#append operations.
*/
public class IcebergCommitter {
private final Table table;
public IcebergCommitter(Table table) {
this.table = table;
}
/**
* Uses Iceberg's Table API to append DataFiles and commit metadata to Iceberg.
*
* @return the summary of the table's current snapshot, or an empty map if no snapshot exists.
*/
public Map<String, Object> commit(List<DataFile> dataFiles) {
AppendFiles tableAppender = table.newAppend();
dataFiles.forEach(tableAppender::appendFile);
tableAppender.commit();
return table.currentSnapshot() == null ? new HashMap<>() : new HashMap<>(table.currentSnapshot().summary());
}
}
| 1,204 |
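A short, hypothetical standalone use of IcebergCommitter above: load the table from a catalog (as IcebergCommitterStage does next) and append a batch of data files. The table identifier is a placeholder:

import java.util.List;
import java.util.Map;
import io.mantisrx.connector.iceberg.sink.committer.IcebergCommitter;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;

public class CommitExample {
    public static Map<String, Object> commitBatch(Catalog catalog, List<DataFile> dataFiles) {
        // Placeholder identifier; real catalog/database/table names come from the sink parameters.
        Table table = catalog.loadTable(TableIdentifier.of("prod", "events", "playback"));
        IcebergCommitter committer = new IcebergCommitter(table);
        // Appends all files in a single Iceberg snapshot and returns that snapshot's summary.
        return committer.commit(dataFiles);
    }
}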
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/IcebergCommitterStage.java |
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import io.mantisrx.connector.iceberg.sink.committer.config.CommitterConfig;
import io.mantisrx.connector.iceberg.sink.committer.config.CommitterProperties;
import io.mantisrx.connector.iceberg.sink.committer.metrics.CommitterMetrics;
import io.mantisrx.connector.iceberg.sink.config.SinkProperties;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.codec.JacksonCodecs;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Scheduler;
import rx.schedulers.Schedulers;
/**
* Processing stage which commits table metadata to Iceberg on a time interval.
*/
public class IcebergCommitterStage implements ScalarComputation<DataFile, Map<String, Object>> {
private static final Logger logger = LoggerFactory.getLogger(IcebergCommitterStage.class);
private Transformer transformer;
/**
* Returns a config for this stage which has encoding/decoding semantics and parameter definitions.
*/
public static ScalarToScalar.Config<DataFile, Map<String, Object>> config() {
return new ScalarToScalar.Config<DataFile, Map<String, Object>>()
.description("")
.codec(JacksonCodecs.mapStringObject())
.withParameters(parameters());
}
/**
* Returns a list of parameter definitions for this stage.
*/
public static List<ParameterDefinition<?>> parameters() {
return Arrays.asList(
new StringParameter().name(SinkProperties.SINK_CATALOG)
.description(SinkProperties.SINK_CATALOG_DESCRIPTION)
.validator(Validators.notNullOrEmpty())
.required()
.build(),
new StringParameter().name(SinkProperties.SINK_DATABASE)
.description(SinkProperties.SINK_DATABASE_DESCRIPTION)
.validator(Validators.notNullOrEmpty())
.required()
.build(),
new StringParameter().name(SinkProperties.SINK_TABLE)
.description(SinkProperties.SINK_TABLE_DESCRIPTION)
.validator(Validators.notNullOrEmpty())
.required()
.build(),
new StringParameter().name(CommitterProperties.COMMIT_FREQUENCY_MS)
.description(CommitterProperties.COMMIT_FREQUENCY_DESCRIPTION)
.validator(Validators.alwaysPass())
.defaultValue(CommitterProperties.COMMIT_FREQUENCY_MS_DEFAULT)
.build()
);
}
/**
* Use this to instantiate a new transformer from a given {@link Context}.
*/
public static Transformer newTransformer(Context context) {
CommitterConfig config = new CommitterConfig(context.getParameters());
CommitterMetrics metrics = new CommitterMetrics();
Catalog catalog = context.getServiceLocator().service(Catalog.class);
TableIdentifier id = TableIdentifier.of(config.getCatalog(), config.getDatabase(), config.getTable());
Table table = catalog.loadTable(id);
IcebergCommitter committer = new IcebergCommitter(table);
return new Transformer(config, metrics, committer, Schedulers.computation());
}
public IcebergCommitterStage() {
}
/**
* Uses the provided Mantis Context to inject configuration and creates an underlying table appender.
*
* This method depends on a Hadoop Configuration and Iceberg Catalog, both injected
* from the Context's service locator.
*
* Note that this method expects an Iceberg Table to have been previously created out-of-band,
* otherwise initialization will fail. Users should prefer to create tables
* out-of-band so they can be versioned alongside their schemas.
*/
@Override
public void init(Context context) {
transformer = newTransformer(context);
}
@Override
public Observable<Map<String, Object>> call(Context context, Observable<DataFile> dataFileObservable) {
return dataFileObservable.compose(transformer);
}
/**
* Reactive Transformer for committing metadata to Iceberg.
*
* Users may use this class independently of this Stage, for example, if they want to
* {@link Observable#compose(Observable.Transformer)} this transformer with a flow into
* an existing Stage. One benefit of this co-location is avoiding the extra network
* cost of worker-to-worker communication, at the expense of some debuggability.
*/
public static class Transformer implements Observable.Transformer<DataFile, Map<String, Object>> {
private final CommitterConfig config;
private final CommitterMetrics metrics;
private final IcebergCommitter committer;
private final Scheduler scheduler;
public Transformer(CommitterConfig config,
CommitterMetrics metrics,
IcebergCommitter committer,
Scheduler scheduler) {
this.config = config;
this.metrics = metrics;
this.committer = committer;
this.scheduler = scheduler;
}
/**
* Periodically commits DataFiles to Iceberg as a batch.
*/
@Override
public Observable<Map<String, Object>> call(Observable<DataFile> source) {
return source
.buffer(config.getCommitFrequencyMs(), TimeUnit.MILLISECONDS, scheduler)
.doOnNext(dataFiles -> metrics.increment(CommitterMetrics.INVOCATION_COUNT))
.filter(dataFiles -> !dataFiles.isEmpty())
.map(dataFiles -> {
try {
long start = scheduler.now();
Map<String, Object> summary = committer.commit(dataFiles);
long now = scheduler.now();
metrics.setGauge(CommitterMetrics.COMMIT_LATENCY_MSEC, now - start);
metrics.setGauge(CommitterMetrics.COMMIT_BATCH_SIZE, dataFiles.size());
return summary;
} catch (RuntimeException e) {
metrics.increment(CommitterMetrics.COMMIT_FAILURE_COUNT);
logger.error("error committing to Iceberg", e);
return new HashMap<String, Object>();
}
})
.filter(summary -> !summary.isEmpty())
.doOnNext(summary -> {
metrics.increment(CommitterMetrics.COMMIT_SUCCESS_COUNT);
logger.info("committed {}", summary);
});
}
}
}
| 1,205 |
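As the Transformer Javadoc above suggests, the committer can be composed into an existing DataFile flow instead of running as its own stage. A minimal sketch, assuming a Context and an Observable of DataFiles are already available:

import java.util.Map;
import io.mantisrx.connector.iceberg.sink.committer.IcebergCommitterStage;
import io.mantisrx.runtime.Context;
import org.apache.iceberg.DataFile;
import rx.Observable;

public class InlineCommit {
    public static Observable<Map<String, Object>> commitInline(Context context, Observable<DataFile> dataFiles) {
        // Reuses the stage's own wiring: config, metrics, committer, and the computation scheduler.
        return dataFiles.compose(IcebergCommitterStage.newTransformer(context));
    }
}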
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/metrics/CommitterMetrics.java |
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer.metrics;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
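/**
* Metrics for the Iceberg Committer: invocation and commit success/failure counters,
* plus gauges for commit latency and commit batch size.
*/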
public class CommitterMetrics {
public static final String INVOCATION_COUNT = "invocationCount";
private final Counter invocationCount;
public static final String COMMIT_SUCCESS_COUNT = "commitSuccessCount";
private final Counter commitSuccessCount;
public static final String COMMIT_FAILURE_COUNT = "commitFailureCount";
private final Counter commitFailureCount;
public static final String COMMIT_LATENCY_MSEC = "commitLatencyMsec";
private final Gauge commitLatencyMsec;
public static final String COMMIT_BATCH_SIZE = "commitBatchSize";
private final Gauge commitBatchSize;
public CommitterMetrics() {
Metrics metrics = new Metrics.Builder()
.name(CommitterMetrics.class.getCanonicalName())
.addCounter(INVOCATION_COUNT)
.addCounter(COMMIT_SUCCESS_COUNT)
.addCounter(COMMIT_FAILURE_COUNT)
.addGauge(COMMIT_LATENCY_MSEC)
.addGauge(COMMIT_BATCH_SIZE)
.build();
metrics = MetricsRegistry.getInstance().registerAndGet(metrics);
invocationCount = metrics.getCounter(INVOCATION_COUNT);
commitSuccessCount = metrics.getCounter(COMMIT_SUCCESS_COUNT);
commitFailureCount = metrics.getCounter(COMMIT_FAILURE_COUNT);
commitLatencyMsec = metrics.getGauge(COMMIT_LATENCY_MSEC);
commitBatchSize = metrics.getGauge(COMMIT_BATCH_SIZE);
}
public void setGauge(final String metric, final long value) {
switch (metric) {
case COMMIT_LATENCY_MSEC:
commitLatencyMsec.set(value);
break;
case COMMIT_BATCH_SIZE:
commitBatchSize.set(value);
break;
default:
break;
}
}
public void increment(final String metric) {
switch (metric) {
case INVOCATION_COUNT:
invocationCount.increment();
break;
case COMMIT_SUCCESS_COUNT:
commitSuccessCount.increment();
break;
case COMMIT_FAILURE_COUNT:
commitFailureCount.increment();
break;
default:
break;
}
}
public void increment(final String metric, final long value) {
switch (metric) {
case INVOCATION_COUNT:
invocationCount.increment(value);
break;
case COMMIT_SUCCESS_COUNT:
commitSuccessCount.increment(value);
break;
case COMMIT_FAILURE_COUNT:
commitFailureCount.increment(value);
break;
case COMMIT_BATCH_SIZE:
commitBatchSize.increment(value);
break;
default:
break;
}
}
}
| 1,206 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/config/CommitterProperties.java |
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer.config;
/**
* Property key names and default values for an Iceberg Committer.
*/
public class CommitterProperties {
private CommitterProperties() {
}
/**
* Iceberg committer frequency by time in milliseconds.
*/
public static final String COMMIT_FREQUENCY_MS = "commitFrequencyMs";
// TODO: Change to long.
public static final String COMMIT_FREQUENCY_MS_DEFAULT = "300000"; // 5 min
public static final String COMMIT_FREQUENCY_DESCRIPTION =
String.format("Iceberg Committer frequency by time in milliseconds (default: %s)",
COMMIT_FREQUENCY_MS_DEFAULT);
}
| 1,207 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/config/CommitterConfig.java |
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer.config;
import static io.mantisrx.connector.iceberg.sink.committer.config.CommitterProperties.COMMIT_FREQUENCY_MS;
import static io.mantisrx.connector.iceberg.sink.committer.config.CommitterProperties.COMMIT_FREQUENCY_MS_DEFAULT;
import io.mantisrx.connector.iceberg.sink.config.SinkConfig;
import io.mantisrx.runtime.parameter.Parameters;
/**
* Config for controlling Iceberg Committer semantics.
*/
public class CommitterConfig extends SinkConfig {
private final long commitFrequencyMs;
/**
* Creates an instance from {@link Parameters} derived from the current Mantis Stage's {@code Context}.
*/
public CommitterConfig(Parameters parameters) {
super(parameters);
this.commitFrequencyMs =
Long.parseLong((String) parameters.get(COMMIT_FREQUENCY_MS, COMMIT_FREQUENCY_MS_DEFAULT));
}
/**
* Returns a long representing Iceberg committer frequency by time (milliseconds).
*/
public long getCommitFrequencyMs() {
return commitFrequencyMs;
}
}
| 1,208 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/MantisSourceJobConnectorFactory.java |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
public class MantisSourceJobConnectorFactory {
public static MantisSourceJobConnector getConnector() {
return new MantisSourceJobConnector();
}
}
| 1,209 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/AbstractSourceJobSource.java |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import java.util.Optional;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.client.SinkConnectionsStatus;
import io.mantisrx.runtime.parameter.SinkParameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
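/**
* Base class for Sources that connect to Mantis source jobs with a query criterion.
*/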
public abstract class AbstractSourceJobSource extends AbstractJobSource {
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractSourceJobSource.class);
/**
* @deprecated use {@link #getSourceJob(String, String, String, int, Optional)}; the forPartition and totalPartitions params are not used and will be removed in the next release
*/
@Deprecated
public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId, int forPartition, int totalPartitions, int samplePerSec) {
LOGGER.info("Connecting to source job " + sourceJobName);
return getSourceJob(sourceJobName, criterion, clientId, samplePerSec, Optional.empty());
}
public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId, int samplePerSec, Optional<SinkParameters> sinkParamsO) {
LOGGER.info("Connecting to source job " + sourceJobName);
return getSourceJob(sourceJobName, criterion, clientId, samplePerSec, new MantisSourceJobConnector.NoOpSinkConnectionsStatusObserver(), sinkParamsO);
}
/**
* @deprecated use {@link #getSourceJob(String, String, String, int, Observer, Optional)}
*/
@Deprecated
public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId, int forPartition, int totalPartitions, int samplePerSec, Observer<SinkConnectionsStatus> sinkConnObs) {
LOGGER.info("Connecting to source job " + sourceJobName + " obs " + sinkConnObs);
boolean enableMetaMessages = false;
boolean enableCompressedBinaryInput = false;
return connectToQueryBasedJob(MantisSourceJobConnectorFactory.getConnector(), criterion, sourceJobName, clientId, samplePerSec, enableMetaMessages, enableCompressedBinaryInput, sinkConnObs, Optional.<SinkParameters>empty());
}
public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId,
int samplePerSec, Observer<SinkConnectionsStatus> sinkConnObs, Optional<SinkParameters> sinkParamsO) {
LOGGER.info("Connecting to source job " + sourceJobName + " obs " + sinkConnObs);
boolean enableMetaMessages = false;
return getSourceJob(sourceJobName, criterion, clientId, samplePerSec, enableMetaMessages, sinkConnObs, sinkParamsO);
}
public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId,
int samplePerSec, boolean enableMetaMessages, Observer<SinkConnectionsStatus> sinkConnObs, Optional<SinkParameters> sinkParamsO) {
LOGGER.info("Connecting to source job " + sourceJobName + " obs " + sinkConnObs);
boolean enableCompressedBinary = false;
return getSourceJob(sourceJobName, criterion, clientId, samplePerSec, enableMetaMessages, enableCompressedBinary, sinkConnObs, sinkParamsO);
}
public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId,
int samplePerSec, boolean enableMetaMessages, boolean enableCompressedBinaryInput, Observer<SinkConnectionsStatus> sinkConnObs, Optional<SinkParameters> sinkParamsO) {
LOGGER.info("Connecting to source job " + sourceJobName + " obs " + sinkConnObs);
return connectToQueryBasedJob(MantisSourceJobConnectorFactory.getConnector(), criterion, sourceJobName, clientId, samplePerSec, enableMetaMessages, enableCompressedBinaryInput, sinkConnObs, sinkParamsO);
}
private MantisSSEJob connectToQueryBasedJob(MantisSourceJobConnector connector, String criterion,
String jobName, String clientId, int samplePerSec, boolean enableMetaMessages, boolean enableCompressedBinaryInput,
Observer<SinkConnectionsStatus> sinkConnObs,
Optional<SinkParameters> sinkParamsO) {
LOGGER.info("Connecting to " + jobName);
if (criterion == null || criterion.isEmpty()) {
throw new RuntimeException("Criterion cannot be empty");
}
String subId = Integer.toString(criterion.hashCode());
SinkParameters defaultParams = getDefaultSinkParams(clientId, samplePerSec,
Optional.of(criterion), Optional.of(subId), enableMetaMessages, enableCompressedBinaryInput, 500);
return connector.connectToJob(jobName, sinkParamsO.orElse(defaultParams), sinkConnObs);
}
}
| 1,210 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/MantisSourceJobConnector.java |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import com.sampullara.cli.Args;
import com.sampullara.cli.Argument;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.client.SinkConnectionsStatus;
import io.mantisrx.client.examples.SubmitEphemeralJob;
import io.mantisrx.runtime.parameter.SinkParameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
import rx.Subscription;
/**
* Used to locate and connect to Mantis Source Jobs.
*/
public class MantisSourceJobConnector {
@Argument(alias = "p", description = "Specify a configuration file")
private static String propFile = "";
private static final Logger LOGGER = LoggerFactory.getLogger(MantisSourceJobConnector.class);
private final Properties props;
public static final String MANTIS_SOURCEJOB_CLIENT_ID_PARAM = "clientId";
public static final String MANTIS_SOURCEJOB_SUBSCRIPTION_ID = "subscriptionId";
public static final String MANTIS_SOURCEJOB_CLIENT_ID = "clientId";
public static final String MANTIS_SOURCEJOB_CRITERION = "criterion";
public static final String MANTIS_SOURCEJOB_NAME_PARAM = "sourceJobName";
public static final String MANTIS_SOURCEJOB_TARGET_KEY = "target";
public static final String MANTIS_SOURCEJOB_IS_BROADCAST_MODE = "isBroadcastMode";
public static final String MANTIS_SOURCEJOB_SAMPLE_PER_SEC_KEY = "sample";
public static final String MANTIS_ENABLE_PINGS = "enablePings";
public static final String MANTIS_ENABLE_META_MESSAGES = "enableMetaMessages";
public static final String MANTIS_META_MESSAGE_INTERVAL_SEC = "metaMessagesSec";
public static final String MANTIS_MQL_THREADING_PARAM = "mantis.mql.threading.enabled";
private static final String ZK_CONNECT_STRING = "mantis.zookeeper.connectString";
private static final String ZK_ROOT = "mantis.zookeeper.root";
private static final String ZK_LEADER_PATH = "mantis.zookeeper.leader.announcement.path";
public MantisSourceJobConnector(Properties props) {
this.props = props;
}
public MantisSourceJobConnector() {
props = new Properties();
final String defaultZkConnect = "127.0.0.1:2181";
final String defaultZkRoot = "/mantis/master";
final String defaultZkLeaderPath = "/leader";
String connectString;
String zookeeperRoot;
String zookeeperLeaderAnnouncementPath;
Map<String, String> env = System.getenv();
if (env == null || env.isEmpty()) {
connectString = defaultZkConnect;
zookeeperRoot = defaultZkRoot;
zookeeperLeaderAnnouncementPath = defaultZkLeaderPath;
} else {
connectString = env.getOrDefault(ZK_CONNECT_STRING, defaultZkConnect);
zookeeperRoot = env.getOrDefault(ZK_ROOT, defaultZkRoot);
zookeeperLeaderAnnouncementPath = env.getOrDefault(ZK_LEADER_PATH, defaultZkLeaderPath);
LOGGER.info("Mantis Zk settings read from ENV: connectString {} root {} path {}", env.get(ZK_CONNECT_STRING), env.get(ZK_ROOT), env.get(ZK_LEADER_PATH));
}
if (connectString != null && !connectString.isEmpty()
&& zookeeperRoot != null && !zookeeperRoot.isEmpty()
&& zookeeperLeaderAnnouncementPath != null && !zookeeperLeaderAnnouncementPath.isEmpty()) {
props.put(ZK_CONNECT_STRING, connectString);
props.put(ZK_ROOT, zookeeperRoot);
props.put(ZK_LEADER_PATH, zookeeperLeaderAnnouncementPath);
props.put("mantis.zookeeper.connectionTimeMs", "2000");
props.put("mantis.zookeeper.connection.retrySleepMs", "500");
props.put("mantis.zookeeper.connection.retryCount", "5");
} else {
throw new RuntimeException("Zookeeper properties not available!");
}
LOGGER.info("Mantis Zk settings used for Source Job connector: connectString {} root {} path {}", connectString, zookeeperRoot, zookeeperLeaderAnnouncementPath);
}
@Deprecated
public MantisSSEJob connecToJob(String jobName) {
return connectToJob(jobName, new SinkParameters.Builder().build(), new NoOpSinkConnectionsStatusObserver());
}
public MantisSSEJob connectToJob(String jobName, SinkParameters params) {
return connectToJob(jobName, params, new NoOpSinkConnectionsStatusObserver());
}
/**
* @deprecated forPartition and totalPartitions are not used internally; this API will be removed in the next release
*/
@Deprecated
public MantisSSEJob connectToJob(String jobName, SinkParameters params, int forPartition, int totalPartitions) {
return connectToJob(jobName, params, new NoOpSinkConnectionsStatusObserver());
}
/**
* @deprecated forPartition and totalPartitions are not used internally; this API will be removed in the next release
*/
@Deprecated
public MantisSSEJob connectToJob(String jobName, SinkParameters params, int forPartition, int totalPartitions, Observer<SinkConnectionsStatus> sinkObserver) {
return connectToJob(jobName, params, sinkObserver);
}
public MantisSSEJob connectToJob(
String jobName,
SinkParameters params,
Observer<SinkConnectionsStatus> sinkObserver) {
return new MantisSSEJob.Builder(props)
.name(jobName)
.sinkConnectionsStatusObserver(sinkObserver)
.onConnectionReset(throwable -> LOGGER.error("Reconnecting due to error: " + throwable.getMessage()))
.sinkParams(params)
.buildJobConnector();
}
static class NoOpSinkConnectionsStatusObserver implements Observer<SinkConnectionsStatus> {
@Override
public void onCompleted() {
LOGGER.warn("Got Completed on SinkConnectionStatus ");
}
@Override
public void onError(Throwable e) {
LOGGER.error("Got Error on SinkConnectionStatus ", e);
}
@Override
public void onNext(SinkConnectionsStatus t) {
LOGGER.info("Got Sink Connection Status update " + t);
}
}
public static void main(String[] args) {
try {
SinkParameters params = new SinkParameters.Builder()
.withParameter("subscriptionId", "id1")
.withParameter("criterion", "select * where true")
.build();
Args.parse(MantisSourceJobConnector.class, args);
final CountDownLatch latch = new CountDownLatch(20);
MantisSourceJobConnector sourceJobConnector = new MantisSourceJobConnector();
MantisSSEJob job = sourceJobConnector.connectToJob("TestSourceJob", params);
Subscription subscription = job.connectAndGetObservable()
.doOnNext(o -> {
LOGGER.info("Got event: data: " + o.getEventAsString());
latch.countDown();
})
.subscribe();
Subscription s2 = job.connectAndGetObservable()
.doOnNext(event -> {
LOGGER.info(" 2nd: Got event: data: " + event.getEventAsString());
latch.countDown();
})
.subscribe();
try {
boolean await = latch.await(300, TimeUnit.SECONDS);
if (await)
System.out.println("PASSED");
else
System.err.println("FAILED!");
} catch (InterruptedException e) {
e.printStackTrace();
}
subscription.unsubscribe();
System.out.println("Unsubscribed");
} catch (IllegalArgumentException e) {
Args.usage(SubmitEphemeralJob.class);
System.exit(1);
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
}
}
| 1,211 |
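A hedged sketch of constructing the connector above without environment variables, by passing the Zookeeper settings directly through the Properties constructor; the connect string and paths are placeholders:

import java.util.Properties;
import io.mantisrx.connector.job.core.MantisSourceJobConnector;

public class ConnectorWiring {
    public static MantisSourceJobConnector withExplicitZk() {
        Properties props = new Properties();
        // Keys mirror the constants read by the no-arg constructor; values are assumptions.
        props.put("mantis.zookeeper.connectString", "zk1:2181,zk2:2181,zk3:2181");
        props.put("mantis.zookeeper.root", "/mantis/master");
        props.put("mantis.zookeeper.leader.announcement.path", "/leader");
        props.put("mantis.zookeeper.connectionTimeMs", "2000");
        props.put("mantis.zookeeper.connection.retrySleepMs", "500");
        props.put("mantis.zookeeper.connection.retryCount", "5");
        return new MantisSourceJobConnector(props);
    }
}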
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/SinkConnectionStatusObserver.java |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import io.mantisrx.client.SinkConnectionsStatus;
import rx.Observer;
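/**
* Observer of sink connection status that also exposes aggregate counts of connected,
* total, and data-receiving sink servers.
*/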
public interface SinkConnectionStatusObserver extends Observer<SinkConnectionsStatus> {
long getConnectedServerCount();
long getTotalServerCount();
long getReceivingDataCount();
boolean isConnectedToAllSinks();
| 1,212 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/AbstractJobSource.java |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import java.io.UnsupportedEncodingException;
import java.util.Optional;
import com.mantisrx.common.utils.MantisSSEConstants;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.client.SinkConnectionsStatus;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.mantisrx.runtime.source.Source;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
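/**
* Base class for Sources that connect directly to another Mantis job's sink.
*/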
public abstract class AbstractJobSource implements Source<MantisServerSentEvent> {
private static final int DEFAULT_META_MSG_INTERVAL_MSEC = 500;
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractJobSource.class);
public SinkParameters getDefaultSinkParams(final String clientId,
final int samplePerSec,
final Optional<String> criterion,
final Optional<String> subscriptionId,
final boolean enableMetaMessages,
boolean enableCompressedBinaryInput, final long metaMessageInterval) {
SinkParameters.Builder defaultParamBuilder = new SinkParameters.Builder();
try {
defaultParamBuilder = defaultParamBuilder
.withParameter(MantisSourceJobConnector.MANTIS_SOURCEJOB_CLIENT_ID_PARAM, clientId)
.withParameter(MantisSSEConstants.ENABLE_PINGS, "true");
if (samplePerSec >= 1) {
defaultParamBuilder = defaultParamBuilder.withParameter("sample", Integer.toString(samplePerSec));
}
if (criterion.isPresent()) {
defaultParamBuilder =
defaultParamBuilder.withParameter(MantisSourceJobConnector.MANTIS_SOURCEJOB_CRITERION, criterion.get());
}
if (subscriptionId.isPresent()) {
defaultParamBuilder = defaultParamBuilder.withParameter(MantisSourceJobConnector.MANTIS_SOURCEJOB_SUBSCRIPTION_ID, subscriptionId.get());
}
if (enableMetaMessages) {
defaultParamBuilder = defaultParamBuilder.withParameter(MantisSSEConstants.ENABLE_META_MESSAGES, Boolean.toString(true));
defaultParamBuilder = defaultParamBuilder.withParameter(MantisSSEConstants.META_MESSAGES_SEC, Long.toString(metaMessageInterval));
}
if (enableCompressedBinaryInput) {
defaultParamBuilder = defaultParamBuilder.withParameter(MantisSSEConstants.MANTIS_ENABLE_COMPRESSION, Boolean.toString(true));
}
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
return defaultParamBuilder.build();
}
public MantisSSEJob getJob(String jobName, String clientId, int samplePerSec,
Observer<SinkConnectionsStatus> sinkConnObs, Optional<SinkParameters> sinkParamsO) {
LOGGER.info("Connecting to job " + jobName + " obs " + sinkConnObs);
boolean enableMetaMessages = false;
boolean enableCompressedBinaryInput = false;
MantisSourceJobConnector connector = MantisSourceJobConnectorFactory.getConnector();
SinkParameters defaultParams = getDefaultSinkParams(clientId,
samplePerSec, Optional.<String>empty(), Optional.<String>empty(), enableMetaMessages, enableCompressedBinaryInput, DEFAULT_META_MSG_INTERVAL_MSEC);
return connector.connectToJob(jobName, sinkParamsO.orElse(defaultParams), sinkConnObs);
}
}
| 1,213 |
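A minimal sketch of a concrete source built on AbstractJobSource above; the upstream job name and client id are placeholders, and the getParameters override simply declares no custom parameters:

package io.mantisrx.connector.job.core;

import java.util.Collections;
import java.util.List;
import java.util.Optional;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.source.Index;
import rx.Observable;

public class UpstreamJobSource extends AbstractJobSource {
    @Override
    public List<ParameterDefinition<?>> getParameters() {
        return Collections.emptyList();
    }

    @Override
    public Observable<Observable<MantisServerSentEvent>> call(Context context, Index index) {
        // A samplePerSec below 1 omits the sample parameter (see getDefaultSinkParams above).
        MantisSSEJob job = getJob("UpstreamJob", "my-client-id", 0,
                DefaultSinkConnectionStatusObserver.getInstance(), Optional.empty());
        return job.connectAndGet();
    }
}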
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/MultiSinkConnectionStatusObserver.java |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import java.util.Iterator;
import java.util.concurrent.ConcurrentHashMap;
import io.mantisrx.client.SinkConnectionsStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
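/**
* Aggregates connection status across multiple sink connections, keyed by source job name.
*/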
public class MultiSinkConnectionStatusObserver implements SinkConnectionStatusObserver {
private static final Logger LOGGER = LoggerFactory.getLogger(MultiSinkConnectionStatusObserver.class);
public static final MultiSinkConnectionStatusObserver INSTANCE = new MultiSinkConnectionStatusObserver();
private final ConcurrentHashMap<String, SinkConnectionStatusObserver> sinkObserverMap = new ConcurrentHashMap<>();
public void addSinkConnectionObserver(String name, SinkConnectionStatusObserver obs) {
sinkObserverMap.put(name, obs);
}
public void removeSinkConnectionObserver(String name) {
sinkObserverMap.remove(name);
}
public SinkConnectionStatusObserver getSinkConnectionObserver(String name) {
return sinkObserverMap.get(name);
}
// for testing
void removeAllSinkConnectionObservers() {
sinkObserverMap.clear();
}
/**
* Iterate through all member connectionObservers and sum up the connectedServer counts.
*/
@Override
public long getConnectedServerCount() {
if (sinkObserverMap.isEmpty()) {
LOGGER.warn("No connection observers registered!");
}
Iterator<SinkConnectionStatusObserver> it = sinkObserverMap.values().iterator();
int count = 0;
while (it.hasNext()) {
SinkConnectionStatusObserver ob = it.next();
count += ob.getConnectedServerCount();
}
LOGGER.info("Total connected server count" + count);
return count;
}
/**
* Iterate through all member connectionObservers and sum up the totalServer counts.
*/
@Override
public long getTotalServerCount() {
if (sinkObserverMap.isEmpty()) {
LOGGER.warn("No connection observers registered!");
}
Iterator<SinkConnectionStatusObserver> it = sinkObserverMap.values().iterator();
int count = 0;
while (it.hasNext()) {
SinkConnectionStatusObserver ob = it.next();
count += ob.getTotalServerCount();
}
LOGGER.info("Total server count" + count);
return count;
}
/**
* Iterate through all member connectionObservers and sum up the receiving data counts.
*/
@Override
public long getReceivingDataCount() {
if (sinkObserverMap.isEmpty()) {
LOGGER.warn("No connection observers registered!");
}
Iterator<SinkConnectionStatusObserver> it = sinkObserverMap.values().iterator();
int count = 0;
while (it.hasNext()) {
SinkConnectionStatusObserver ob = it.next();
count += ob.getReceivingDataCount();
}
LOGGER.info("Total receiving server count" + count);
return count;
}
/**
* Iterate through all member connectionObservers and return false if any of the constituent client connections
* are not complete.
*/
@Override
public boolean isConnectedToAllSinks() {
if (sinkObserverMap.isEmpty()) {
LOGGER.warn("No connection observers registered!");
}
Iterator<SinkConnectionStatusObserver> it = sinkObserverMap.values().iterator();
boolean connectedToAll = false;
while (it.hasNext()) {
SinkConnectionStatusObserver ob = it.next();
connectedToAll = ob.isConnectedToAllSinks();
if (!connectedToAll) {
LOGGER.warn("Not connected to sinks of all jobs");
break;
}
}
return connectedToAll;
}
@Override
public void onCompleted() {
// NO OP
}
@Override
public void onError(Throwable e) {
// NO OP
}
@Override
public void onNext(SinkConnectionsStatus t) {
// NO OP
}
}
| 1,214 |
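A short sketch of the registration pattern JobSource (below) uses: one non-singleton observer per upstream job is registered with the shared aggregate observer, which can then be queried for overall readiness. The job name is a placeholder:

package io.mantisrx.connector.job.core;

public class SinkHealthCheck {
    public static boolean registerAndCheck() {
        // One non-singleton observer per upstream source job.
        SinkConnectionStatusObserver perJobObserver = DefaultSinkConnectionStatusObserver.getInstance(false);
        MultiSinkConnectionStatusObserver.INSTANCE.addSinkConnectionObserver("RequestEventSourceJob", perJobObserver);
        // True only when every registered connection is connected and receiving data.
        return MultiSinkConnectionStatusObserver.INSTANCE.isConnectedToAllSinks();
    }
}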
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/DefaultSinkConnectionStatusObserver.java |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import java.util.concurrent.atomic.AtomicLong;
import io.mantisrx.client.SinkConnectionsStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
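/**
* Tracks connected, total, and data-receiving server counts for a single sink connection.
*/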
public class DefaultSinkConnectionStatusObserver implements SinkConnectionStatusObserver {
private static final Logger LOGGER = LoggerFactory.getLogger(DefaultSinkConnectionStatusObserver.class);
private static final SinkConnectionStatusObserver INSTANCE = new DefaultSinkConnectionStatusObserver();
private final AtomicLong numConnected = new AtomicLong();
private final AtomicLong total = new AtomicLong();
private final AtomicLong receivingData = new AtomicLong();
public static synchronized SinkConnectionStatusObserver getInstance(boolean singleton) {
if (singleton) {
return INSTANCE;
} else {
return new DefaultSinkConnectionStatusObserver();
}
}
public static SinkConnectionStatusObserver getInstance() {
boolean singleton = true;
return getInstance(singleton);
}
@Override
public void onCompleted() {
LOGGER.error("SinkConnectionStatusObserver completed!");
}
@Override
public void onError(Throwable e) {
LOGGER.error("Got Error", e);
}
@Override
public void onNext(SinkConnectionsStatus t) {
LOGGER.info("Got SinkConnectionStatus update " + t);
numConnected.set(t.getNumConnected());
total.set(t.getTotal());
receivingData.set(t.getRecevingDataFrom());
}
@Override
public long getConnectedServerCount() {
return numConnected.get();
}
@Override
public long getTotalServerCount() {
return total.get();
}
@Override
public long getReceivingDataCount() {
return receivingData.get();
}
@Override
public boolean isConnectedToAllSinks() {
if (receivingData.get() > 0
&& numConnected.get() > 0
&& total.get() > 0
&& ((numConnected.get() == total.get()) && (total.get() == receivingData.get()))) {
return true;
}
LOGGER.warn("NOT connected to all sinks "
+ " connected : " + numConnected.get()
+ " total " + total.get()
+ " receiving Data " + receivingData.get());
return false;
}
}
| 1,215 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/source/JobSource.java |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.source;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import com.google.common.collect.Lists;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.mantisrx.common.utils.MantisSSEConstants;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.connector.job.core.AbstractSourceJobSource;
import io.mantisrx.connector.job.core.DefaultSinkConnectionStatusObserver;
import io.mantisrx.connector.job.core.MantisSourceJobConnector;
import io.mantisrx.connector.job.core.MultiSinkConnectionStatusObserver;
import io.mantisrx.connector.job.core.SinkConnectionStatusObserver;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import io.vavr.Tuple;
import io.vavr.Tuple2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
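/**
* Source that connects to one or more Mantis source jobs described by the "target" job parameter.
*/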
public class JobSource extends AbstractSourceJobSource implements Source<MantisServerSentEvent> {
private static final Logger LOGGER = LoggerFactory.getLogger(JobSource.class);
private static JsonParser parser = new JsonParser();
protected List<TargetInfo> targets;
public JobSource(List<TargetInfo> targets) {
this.targets = targets;
}
// For backwards compatibility.
public JobSource() {
this(new ArrayList<>());
}
public JobSource(String targetInfoStr) {
this.targets = parseTargetInfo(targetInfoStr);
}
@Override
public List<ParameterDefinition<?>> getParameters() {
List<ParameterDefinition<?>> params = Lists.newArrayList();
params.add(new StringParameter()
.name(MantisSourceJobConnector.MANTIS_SOURCEJOB_TARGET_KEY)
.validator(Validators.notNullOrEmpty())
.defaultValue("{}")
.build());
return params;
}
@Override
public Observable<Observable<MantisServerSentEvent>> call(Context context, Index index) {
if (targets.isEmpty()) {
targets = parseInputParameters(context);
}
Observable<Observable<MantisServerSentEvent>> sourceObs = null;
int workerNo = context.getWorkerInfo().getWorkerNumber();
targets = enforceClientIdConsistency(targets, context.getJobId());
for (TargetInfo targetInfo : targets) {
MantisSSEJob job;
String sourceJobName = targetInfo.sourceJobName;
String criterion = targetInfo.criterion;
int samplePerSec = targetInfo.samplePerSec;
boolean enableMetaMessages = targetInfo.enableMetaMessages;
LOGGER.info("Processing job " + sourceJobName);
boolean singleton = false;
SinkConnectionStatusObserver obs = DefaultSinkConnectionStatusObserver.getInstance(singleton);
MultiSinkConnectionStatusObserver.INSTANCE.addSinkConnectionObserver(sourceJobName, obs);
String clientId = targetInfo.clientId;
if (targetInfo.isBroadcastMode) {
clientId = clientId + "_" + workerNo;
}
boolean enableCompressedBinary = targetInfo.enableCompressedBinary;
job = getSourceJob(sourceJobName, criterion, clientId, samplePerSec, enableMetaMessages, enableCompressedBinary, obs, Optional.<SinkParameters>empty());
if (sourceObs == null) {
sourceObs = job.connectAndGet();
} else {
if (job != null) {
Observable<Observable<MantisServerSentEvent>> clientObs = job.connectAndGet();
if (clientObs != null) {
sourceObs = sourceObs.mergeWith(clientObs);
} else {
LOGGER.error("Could not connect to job " + sourceJobName);
}
} else {
LOGGER.error("Could not connect to job " + sourceJobName);
}
}
}
return sourceObs;
}
public static class TargetInfo {
public String sourceJobName;
public String criterion;
public int samplePerSec;
public boolean isBroadcastMode;
public boolean enableMetaMessages;
public boolean enableCompressedBinary;
public String clientId;
public TargetInfo(String jobName,
String criterion,
String clientId,
int samplePerSec,
boolean isBroadcastMode,
boolean enableMetaMessages,
boolean enableCompressedBinary) {
this.sourceJobName = jobName;
this.criterion = criterion;
this.clientId = clientId;
this.samplePerSec = samplePerSec;
this.isBroadcastMode = isBroadcastMode;
this.enableMetaMessages = enableMetaMessages;
this.enableCompressedBinary = enableCompressedBinary;
}
}
protected static List<TargetInfo> parseInputParameters(Context ctx) {
String targetListStr = (String) ctx.getParameters()
.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_TARGET_KEY, "{}");
return parseTargetInfo(targetListStr);
}
protected static List<TargetInfo> parseTargetInfo(String targetListStr) {
List<TargetInfo> targetList = new ArrayList<TargetInfo>();
JsonObject requestObj = (JsonObject) parser.parse(targetListStr);
JsonArray arr = requestObj.get("targets").getAsJsonArray();
for (int i = 0; i < arr.size(); i++) {
int sample = -1;
boolean isBroadCastMode = false;
JsonObject srcObj = arr.get(i).getAsJsonObject();
String sName = srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_NAME_PARAM).getAsString();
String criterion = srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_CRITERION).getAsString();
String clientId = null;
if (srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_CLIENT_ID) != null) {
clientId = srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_CLIENT_ID).getAsString();
}
if (srcObj.get(MantisSSEConstants.SAMPLE) != null) {
sample = srcObj.get(MantisSSEConstants.SAMPLE).getAsInt();
}
if (srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_IS_BROADCAST_MODE) != null) {
isBroadCastMode =
srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_IS_BROADCAST_MODE).getAsBoolean();
}
boolean enableMetaMessages = false;
if (srcObj.get(MantisSSEConstants.ENABLE_META_MESSAGES) != null) {
enableMetaMessages = srcObj.get(MantisSSEConstants.ENABLE_META_MESSAGES).getAsBoolean();
}
boolean enableCompressedBinary = false;
if (srcObj.get(MantisSSEConstants.MANTIS_ENABLE_COMPRESSION) != null) {
enableCompressedBinary = true;
}
TargetInfo ti = new TargetInfo(
sName,
criterion,
clientId,
sample,
isBroadCastMode,
enableMetaMessages,
enableCompressedBinary);
targetList.add(ti);
LOGGER.info("sname: " + sName + " criterion: " + criterion + " isBroadcastMode " + isBroadCastMode);
}
return targetList;
}
public static class TargetInfoBuilder {
private String sourceJobName;
private String criterion;
private String clientId;
private int samplePerSec = -1;
private boolean isBroadcastMode = false;
private boolean enableMetaMessages = false;
private boolean enableCompressedBinary = false;
public TargetInfoBuilder() {
}
public TargetInfoBuilder withSourceJobName(String srcJobName) {
this.sourceJobName = srcJobName;
return this;
}
public TargetInfoBuilder withQuery(String query) {
this.criterion = query;
return this;
}
public TargetInfoBuilder withSamplePerSec(int samplePerSec) {
this.samplePerSec = samplePerSec;
return this;
}
public TargetInfoBuilder withBroadCastMode() {
this.isBroadcastMode = true;
return this;
}
public TargetInfoBuilder withMetaMessagesEnabled() {
this.enableMetaMessages = true;
return this;
}
public TargetInfoBuilder withBinaryCompressionEnabled() {
this.enableCompressedBinary = true;
return this;
}
public TargetInfoBuilder withClientId(String clientId) {
this.clientId = clientId;
return this;
}
public TargetInfo build() {
return new TargetInfo(
sourceJobName,
criterion,
clientId,
samplePerSec,
isBroadcastMode,
enableMetaMessages,
enableCompressedBinary);
}
}
/**
* Ensures that a list of TargetInfo contains a consistent set of (sourceJobName, clientId) pairs,
* disambiguating clientIds that would otherwise collide.
* TODO: Currently mutates the list, which isn't problematic here, but it would be prudent to clean this up.
*
* @param targets A List of TargetInfo for which to validate and correct clientId inconsistencies.
* @param defaultClientId The clientId assigned to any target that does not specify one.
*
* @return The original List modified to have consistent clientIds.
*/
public static List<TargetInfo> enforceClientIdConsistency(List<TargetInfo> targets, String defaultClientId) {
targets.sort(Comparator.comparing(t -> t.criterion));
HashSet<Tuple2<String, String>> connectionPairs = new HashSet<>(targets.size());
for (TargetInfo target : targets) {
if (target.clientId == null) {
target.clientId = defaultClientId;
}
Tuple2<String, String> connectionPair = Tuple.of(target.sourceJobName, target.clientId);
int attempts = 0;
while (connectionPairs.contains(connectionPair)) {
connectionPair = Tuple.of(target.sourceJobName, target.clientId + "_" + ++attempts);
}
target.clientId = connectionPair._2;
connectionPairs.add(connectionPair);
}
return targets;
}
}
| 1,216 |
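For reference, a hedged sketch of driving JobSource above either from the JSON form that parseTargetInfo expects or programmatically via TargetInfoBuilder; the job name, query, and client id are placeholders:

import java.util.Collections;
import io.mantisrx.connector.job.source.JobSource;

public class JobSourceWiring {
    // JSON form matching the keys read in parseTargetInfo (sample and isBroadcastMode are optional).
    static final String TARGETS_JSON =
            "{\"targets\":[{\"sourceJobName\":\"RequestEventSourceJob\","
                    + "\"criterion\":\"select * where true\",\"clientId\":\"my-consumer\"}]}";

    public static JobSource fromJson() {
        return new JobSource(TARGETS_JSON);
    }

    // Equivalent programmatic form using the builder.
    public static JobSource fromBuilder() {
        JobSource.TargetInfo target = new JobSource.TargetInfoBuilder()
                .withSourceJobName("RequestEventSourceJob")
                .withQuery("select * where true")
                .withClientId("my-consumer")
                .withSamplePerSec(100)
                .build();
        return new JobSource(Collections.singletonList(target));
    }
}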
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/sink/ServerSentEventsSink.java |
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.sink;
import java.util.List;
import java.util.Map;
import io.mantisrx.common.properties.MantisPropertiesService;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.sink.SelfDocumentingSink;
import io.mantisrx.runtime.sink.ServerSentEventRequestHandler;
import io.mantisrx.runtime.sink.predicate.Predicate;
import io.mantisrx.server.core.ServiceRegistry;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelOption;
import io.reactivex.mantis.network.push.PushServerSse;
import io.reactivex.mantis.network.push.PushServers;
import io.reactivex.mantis.network.push.Routers;
import io.reactivex.mantis.network.push.ServerConfig;
import mantis.io.reactivex.netty.RxNetty;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurators;
import mantis.io.reactivex.netty.protocol.http.server.HttpServer;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Func1;
import rx.functions.Func2;
import rx.subjects.BehaviorSubject;
public class ServerSentEventsSink<T> implements SelfDocumentingSink<T> {
private static final Logger LOG = LoggerFactory.getLogger(ServerSentEventsSink.class);
private final Func2<Map<String, List<String>>, Context, Void> subscribeProcessor;
private final BehaviorSubject<Integer> portObservable = BehaviorSubject.create();
private Func1<T, String> encoder;
private Func1<Throwable, String> errorEncoder;
private Predicate<T> predicate;
private Func2<Map<String, List<String>>, Context, Void> requestPreprocessor;
private Func2<Map<String, List<String>>, Context, Void> requestPostprocessor;
private int port = -1;
private MantisPropertiesService propService;
public ServerSentEventsSink(Func1<T, String> encoder) {
this(encoder, null, null);
}
ServerSentEventsSink(Func1<T, String> encoder,
Func1<Throwable, String> errorEncoder,
Predicate<T> predicate) {
if (errorEncoder == null) {
// default
errorEncoder = Throwable::getMessage;
}
this.encoder = encoder;
this.errorEncoder = errorEncoder;
this.predicate = predicate;
this.propService = ServiceRegistry.INSTANCE.getPropertiesService();
this.subscribeProcessor = null;
}
ServerSentEventsSink(Builder<T> builder) {
this.encoder = builder.encoder;
this.errorEncoder = builder.errorEncoder;
this.predicate = builder.predicate;
this.requestPreprocessor = builder.requestPreprocessor;
this.requestPostprocessor = builder.requestPostprocessor;
this.subscribeProcessor = builder.subscribeProcessor;
this.propService = ServiceRegistry.INSTANCE.getPropertiesService();
}
@Override
public Metadata metadata() {
StringBuilder description = new StringBuilder();
description.append("HTTP server streaming results using Server-sent events. The sink"
+ " supports optional subscription (GET) parameters to change the events emitted"
+ " by the stream. A sampling interval can be applied to the stream using"
+ " the GET parameter sample=numSeconds. This will limit the stream rate to"
+ " events-per-numSeconds.");
if (predicate != null && predicate.getDescription() != null) {
description.append(" Predicate description: ").append(predicate.getDescription());
}
return new Metadata.Builder()
.name("Server Sent Event Sink")
.description(description.toString())
.build();
}
private boolean runNewSseServerImpl(String jobName) {
        String newServerImplGlobal = propService.getStringValue("mantis.sse.newServerImpl", "true");
        String newServerImplPerJob = propService.getStringValue(jobName + ".mantis.sse.newServerImpl", "false");
        return Boolean.parseBoolean(newServerImplGlobal) || Boolean.parseBoolean(newServerImplPerJob);
}
private int numConsumerThreads() {
String consumerThreadsString = propService.getStringValue("mantis.sse.numConsumerThreads", "1");
return Integer.parseInt(consumerThreadsString);
}
private int maxChunkSize() {
String maxChunkSize = propService.getStringValue("mantis.sse.maxChunkSize", "1000");
return Integer.parseInt(maxChunkSize);
}
private int maxReadTime() {
        String maxReadTimeStr = propService.getStringValue("mantis.sse.maxReadTimeMSec", "250");
        return Integer.parseInt(maxReadTimeStr);
}
private int bufferCapacity() {
String bufferCapacityString = propService.getStringValue("mantis.sse.bufferCapacity", "25000");
return Integer.parseInt(bufferCapacityString);
}
private boolean useSpsc() {
String useSpsc = propService.getStringValue("mantis.sse.spsc", "false");
return Boolean.parseBoolean(useSpsc);
}
@Override
public void call(Context context, PortRequest portRequest, final Observable<T> observable) {
port = portRequest.getPort();
if (runNewSseServerImpl(context.getWorkerInfo().getJobName())) {
LOG.info("Serving modern HTTP SSE server sink on port: " + port);
String serverName = "SseSink";
ServerConfig.Builder<T> config = new ServerConfig.Builder<T>()
.name(serverName)
.groupRouter(Routers.roundRobinSse(serverName, encoder))
.port(port)
.metricsRegistry(context.getMetricsRegistry())
.maxChunkTimeMSec(maxReadTime())
.maxChunkSize(maxChunkSize())
.bufferCapacity(bufferCapacity())
.numQueueConsumers(numConsumerThreads())
.useSpscQueue(useSpsc())
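                    // note: maxChunkTimeMSec is assigned twice in this chain; assuming the builder
                    // setter overwrites, the batch-interval value below is the one that takes effect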
.maxChunkTimeMSec(getBatchInterval());
if (predicate != null) {
config.predicate(predicate.getPredicate());
}
PushServerSse<T, Context> server = PushServers.infiniteStreamSse(config.build(), observable,
requestPreprocessor, requestPostprocessor,
subscribeProcessor, context, true);
server.start();
} else {
LOG.info("Serving legacy HTTP SSE server sink on port: " + port);
int batchInterval = getBatchInterval();
HttpServer<ByteBuf, ServerSentEvent> server = RxNetty.newHttpServerBuilder(
port,
new ServerSentEventRequestHandler<>(
observable,
encoder,
errorEncoder,
predicate,
requestPreprocessor,
requestPostprocessor,
context,
batchInterval))
.pipelineConfigurator(PipelineConfigurators.<ByteBuf>serveSseConfigurator())
.channelOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 5 * 1024 * 1024)
.channelOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 1024 * 1024)
.build();
server.start();
}
portObservable.onNext(port);
}
private int getBatchInterval() {
//default flush interval
String flushIntervalMillisStr =
ServiceRegistry.INSTANCE.getPropertiesService()
.getStringValue("mantis.sse.batchInterval", "100");
LOG.info("Read fast property mantis.sse.batchInterval" + flushIntervalMillisStr);
return Integer.parseInt(flushIntervalMillisStr);
}
private int getHighWaterMark() {
String jobName = propService.getStringValue("JOB_NAME", "default");
int highWaterMark = 5 * 1024 * 1024;
String highWaterMarkStr = propService.getStringValue(
jobName + ".sse.highwater.mark",
Integer.toString(5 * 1024 * 1024));
LOG.info("Read fast property:" + jobName + ".sse.highwater.mark ->" + highWaterMarkStr);
try {
highWaterMark = Integer.parseInt(highWaterMarkStr);
} catch (Exception e) {
LOG.error("Error parsing string " + highWaterMarkStr + " exception " + e.getMessage());
}
return highWaterMark;
}
public int getServerPort() {
return port;
}
/**
* Notifies you when the mantis job is available to listen to, for use when you want to
* write unit or regressions tests with the local runner that verify the output.
*/
public Observable<Integer> portConnections() {
return portObservable;
}
public static class Builder<T> {
private Func1<T, String> encoder;
private Func2<Map<String, List<String>>, Context, Void> requestPreprocessor;
private Func2<Map<String, List<String>>, Context, Void> requestPostprocessor;
private Func1<Throwable, String> errorEncoder = Throwable::getMessage;
private Predicate<T> predicate;
private Func2<Map<String, List<String>>, Context, Void> subscribeProcessor;
public Builder<T> withEncoder(Func1<T, String> encoder) {
this.encoder = encoder;
return this;
}
public Builder<T> withErrorEncoder(Func1<Throwable, String> errorEncoder) {
this.errorEncoder = errorEncoder;
return this;
}
public Builder<T> withPredicate(Predicate<T> predicate) {
this.predicate = predicate;
return this;
}
public Builder<T> withRequestPreprocessor(Func2<Map<String, List<String>>, Context, Void> preProcessor) {
this.requestPreprocessor = preProcessor;
return this;
}
public Builder<T> withSubscribePreprocessor(
Func2<Map<String, List<String>>, Context, Void> subscribeProcessor) {
this.subscribeProcessor = subscribeProcessor;
return this;
}
public Builder<T> withRequestPostprocessor(Func2<Map<String, List<String>>, Context, Void> postProcessor) {
this.requestPostprocessor = postProcessor;
return this;
}
public ServerSentEventsSink<T> build() {
return new ServerSentEventsSink<>(this);
}
}
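    // Minimal usage sketch (assumes String events; not part of the original file):
    //
    //   ServerSentEventsSink<String> sink = new ServerSentEventsSink.Builder<String>()
    //           .withEncoder(event -> event)
    //           .build();
    //
    // The Mantis runtime invokes call(...) with the assigned port; portConnections() can then be
    // used in local-runner tests to learn when the SSE server is listening.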
}
| 1,217 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/ParameterTestUtils.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import io.mantisrx.runtime.parameter.Parameters;
public class ParameterTestUtils {
public static Parameters createParameters(Object... params) {
        Map<String, Object> paramsMap = new HashMap<>();
Set<String> requiredParams = new HashSet<>();
List<Object> paramsList = Arrays.asList(params);
Iterator<Object> iterator = paramsList.iterator();
while (iterator.hasNext()) {
Object token = iterator.next();
if (token instanceof String) {
                String paramKey = (String) token;
                if (iterator.hasNext()) {
                    Object pVal = iterator.next();
                    paramsMap.put(paramKey, pVal);
                    requiredParams.add(paramKey);
}
} else {
                throw new IllegalArgumentException("parameter key must be of type String, got key of type " + token.getClass());
}
}
return new Parameters(paramsMap, requiredParams, requiredParams);
}
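    // Usage sketch: arguments are alternating key/value pairs, mirroring how the tests in this
    // module build job parameters, e.g.
    //
    //   Parameters params = ParameterTestUtils.createParameters(
    //           KafkaSourceParameters.TOPIC, "testTopic",
    //           KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER, 2);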
}
| 1,218 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/KafkaSourceTest.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import static junit.framework.TestCase.fail;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import com.netflix.spectator.api.NoopRegistry;
import info.batey.kafka.unit.KafkaUnit;
import io.mantisrx.connector.kafka.KafkaAckable;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.WorkerInfo;
import io.mantisrx.runtime.parameter.Parameters;
import io.mantisrx.runtime.source.Index;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
// Test ignored until kafka-unit dependency for kafka v2.2.+ is released after merging PR https://github.com/chbatey/kafka-unit/pull/69
@Ignore
public class KafkaSourceTest {
private static final Logger LOGGER = LoggerFactory.getLogger(KafkaSourceTest.class);
private static final KafkaUnit kafkaServer = new KafkaUnit(5000, 9092);
private static final Random random = new Random(System.currentTimeMillis());
private static final AtomicInteger topicNum = new AtomicInteger(1);
@BeforeClass
public static void startup() {
kafkaServer.startup();
}
@AfterClass
public static void shutdown() {
kafkaServer.shutdown();
}
@Test
public void testKafkaSourceSingleConsumerReadsAllMessagesInOrderFromSinglePartition() throws InterruptedException {
String testTopic = "testTopic" + topicNum.incrementAndGet();
int numPartitions = 1;
kafkaServer.createTopic(testTopic, numPartitions);
int numMessages = 10;
for (int i = 0; i < numMessages; i++) {
ProducerRecord<String, String> keyedMessage = new ProducerRecord<>(testTopic, "{\"messageNum\":"+i+"}");
kafkaServer.sendMessages(keyedMessage);
}
KafkaSource kafkaSource = new KafkaSource(new NoopRegistry());
Context context = mock(Context.class);
Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.TOPIC, testTopic,
KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, "testKafkaConsumer-" + random.nextInt());
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
when(context.getWorkerInfo()).then((Answer<WorkerInfo>) invocation -> new WorkerInfo("testJobName", "testJobName-1", 1, 0, 1, MantisJobDurationType.Perpetual, "1.1.1.1"));
when(context.getJobId()).then((Answer<String>) invocation -> "testJobName-1");
Index index = new Index(0, 10);
Observable<Observable<KafkaAckable>> sourceObs = kafkaSource.call(context, index);
final CountDownLatch latch = new CountDownLatch(numMessages);
final AtomicInteger counter = new AtomicInteger(0);
sourceObs
.flatMap(kafkaAckableObs -> kafkaAckableObs)
.map(kafkaAckable -> {
Optional<Map<String, Object>> parsedEvent = kafkaAckable.getKafkaData().getParsedEvent();
assertTrue(parsedEvent.isPresent());
assertEquals(counter.getAndIncrement(), parsedEvent.get().get("messageNum"));
LOGGER.info("got message on topic {} consumer Id {}", parsedEvent.get(), kafkaAckable.getKafkaData().getMantisKafkaConsumerId());
kafkaAckable.ack();
latch.countDown();
return parsedEvent;
})
.subscribe();
assertTrue("timed out waiting to get all messages from Kafka", latch.await(10, TimeUnit.SECONDS));
kafkaServer.deleteTopic(testTopic);
}
@Test
public void testKafkaSourceSingleConsumerHandlesMessageParseFailures() throws InterruptedException {
String testTopic = "testTopic" + topicNum.incrementAndGet();
int numPartitions = 1;
kafkaServer.createTopic(testTopic, numPartitions);
int numMessages = 10;
for (int i = 0; i < numMessages; i++) {
ProducerRecord<String, String> keyedMessage = new ProducerRecord<>(testTopic, "{\"messageNum\":"+i+"}");
kafkaServer.sendMessages(keyedMessage);
ProducerRecord<String, String> invalidJsonMessage = new ProducerRecord<>(testTopic, "{\"messageNum:"+i+"}");
kafkaServer.sendMessages(invalidJsonMessage);
}
KafkaSource kafkaSource = new KafkaSource(new NoopRegistry());
Context context = mock(Context.class);
Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.TOPIC, testTopic,
KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, "testKafkaConsumer-" + random.nextInt());
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
when(context.getWorkerInfo()).then((Answer<WorkerInfo>) invocation -> new WorkerInfo("testJobName", "testJobName-1", 1, 0, 1, MantisJobDurationType.Perpetual, "1.1.1.1"));
when(context.getJobId()).then((Answer<String>) invocation -> "testJobName-1");
Index index = new Index(0, 10);
Observable<Observable<KafkaAckable>> sourceObs = kafkaSource.call(context, index);
final CountDownLatch latch = new CountDownLatch(numMessages);
final AtomicInteger counter = new AtomicInteger(0);
sourceObs
.flatMap(kafkaAckableObs -> kafkaAckableObs)
.map(kafkaAckable -> {
Optional<Map<String, Object>> parsedEvent = kafkaAckable.getKafkaData().getParsedEvent();
assertTrue(parsedEvent.isPresent());
assertEquals(counter.getAndIncrement(), parsedEvent.get().get("messageNum"));
LOGGER.info("got message on topic {} consumer Id {}", parsedEvent.get(), kafkaAckable.getKafkaData().getMantisKafkaConsumerId());
kafkaAckable.ack();
latch.countDown();
return parsedEvent;
})
.subscribe();
assertTrue("timed out waiting to get all messages from Kafka", latch.await(30, TimeUnit.SECONDS));
kafkaServer.deleteTopic(testTopic);
}
@Test
public void testKafkaSourceMultipleConsumersReadsAllMessagesFromMultiplePartitions() throws InterruptedException {
String testTopic = "testTopic" + topicNum.incrementAndGet();
int numPartitions = 2;
kafkaServer.createTopic(testTopic, numPartitions);
int numMessages = 10;
Set<Integer> outstandingMsgs = new ConcurrentSkipListSet<>();
for (int i = 0; i < numMessages; i++) {
ProducerRecord<String, String> keyedMessage = new ProducerRecord<>(testTopic, "{\"messageNum\":"+i+"}");
kafkaServer.sendMessages(keyedMessage);
outstandingMsgs.add(i);
}
KafkaSource kafkaSource = new KafkaSource(new NoopRegistry());
Context context = mock(Context.class);
Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER, 2,
KafkaSourceParameters.TOPIC, testTopic,
KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, "testKafkaConsumer-" + random.nextInt());
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
when(context.getWorkerInfo()).then((Answer<WorkerInfo>) invocation -> new WorkerInfo("testJobName", "testJobName-1", 1, 0, 1, MantisJobDurationType.Perpetual, "1.1.1.1"));
when(context.getJobId()).then((Answer<String>) invocation -> "testJobName-1");
Index index = new Index(0, 10);
Observable<Observable<KafkaAckable>> sourceObs = kafkaSource.call(context, index);
final CountDownLatch latch = new CountDownLatch(numMessages);
final Map<Integer, Integer> lastMessageNumByConsumerId = new ConcurrentHashMap<>();
sourceObs
.flatMap(kafkaAckableObs -> kafkaAckableObs)
.map(kafkaAckable -> {
Optional<Map<String, Object>> parsedEvent = kafkaAckable.getKafkaData().getParsedEvent();
assertTrue(parsedEvent.isPresent());
Integer messageNum = (Integer)parsedEvent.get().get("messageNum");
assertTrue(outstandingMsgs.contains(messageNum));
outstandingMsgs.remove(messageNum);
int mantisKafkaConsumerId = kafkaAckable.getKafkaData().getMantisKafkaConsumerId();
lastMessageNumByConsumerId.putIfAbsent(mantisKafkaConsumerId, -1);
// assert consumption of higher message numbers across consumer instances
assertTrue(messageNum > lastMessageNumByConsumerId.get(mantisKafkaConsumerId));
lastMessageNumByConsumerId.put(mantisKafkaConsumerId, messageNum);
LOGGER.info("got message on topic {} consumer id {}", parsedEvent.get(), mantisKafkaConsumerId);
kafkaAckable.ack();
latch.countDown();
return parsedEvent;
})
.doOnError(t -> {
LOGGER.error("caught unexpected exception", t);
fail("test failed due to unexpected error "+ t.getMessage());
})
.subscribe();
assertTrue("timed out waiting to get all messages from Kafka", latch.await(10, TimeUnit.SECONDS));
assertEquals(0, outstandingMsgs.size());
assertTrue(lastMessageNumByConsumerId.keySet().size() == 2);
lastMessageNumByConsumerId.keySet().forEach(consumerId -> {
assertTrue(lastMessageNumByConsumerId.get(consumerId) >= 0);
});
kafkaServer.deleteTopic(testTopic);
}
@Test
public void testKafkaSourceMultipleConsumersStaticPartitionAssignment() throws InterruptedException {
String testTopic = "testTopic" + topicNum.incrementAndGet();
int numConsumers = 3;
int numPartitions = 3;
kafkaServer.createTopic(testTopic, numPartitions);
int numMessages = 10;
Set<Integer> outstandingMsgs = new ConcurrentSkipListSet<>();
for (int i = 0; i < numMessages; i++) {
ProducerRecord<String, String> keyedMessage = new ProducerRecord<>(testTopic, "{\"messageNum\":"+i+"}");
kafkaServer.sendMessages(keyedMessage);
outstandingMsgs.add(i);
}
KafkaSource kafkaSource = new KafkaSource(new NoopRegistry());
Context context = mock(Context.class);
Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER, numConsumers,
KafkaSourceParameters.TOPIC, testTopic,
KafkaSourceParameters.ENABLE_STATIC_PARTITION_ASSIGN, true,
KafkaSourceParameters.TOPIC_PARTITION_COUNTS, testTopic + ":" + numPartitions,
KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, "testKafkaConsumer-" + random.nextInt());
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
when(context.getWorkerInfo()).then((Answer<WorkerInfo>) invocation -> new WorkerInfo("testJobName", "testJobName-1", 1, 0, 1, MantisJobDurationType.Perpetual, "1.1.1.1"));
when(context.getJobId()).then((Answer<String>) invocation -> "testJobName-1");
// Force all consumer instances to be created on same JVM by setting total number of workers for this job to 1
int totalNumWorkerForJob = 1;
Index index = new Index(0, totalNumWorkerForJob);
Observable<Observable<KafkaAckable>> sourceObs = kafkaSource.call(context, index);
final CountDownLatch latch = new CountDownLatch(numMessages);
final Map<Integer, Integer> lastMessageNumByConsumerId = new ConcurrentHashMap<>();
sourceObs
.flatMap(kafkaAckableObs -> kafkaAckableObs)
.map(kafkaAckable -> {
Optional<Map<String, Object>> parsedEvent = kafkaAckable.getKafkaData().getParsedEvent();
assertTrue(parsedEvent.isPresent());
Integer messageNum = (Integer)parsedEvent.get().get("messageNum");
assertTrue(outstandingMsgs.contains(messageNum));
outstandingMsgs.remove(messageNum);
int mantisKafkaConsumerId = kafkaAckable.getKafkaData().getMantisKafkaConsumerId();
lastMessageNumByConsumerId.putIfAbsent(mantisKafkaConsumerId, -1);
// assert consumption of higher message numbers across consumer instances
assertTrue(messageNum > lastMessageNumByConsumerId.get(mantisKafkaConsumerId));
lastMessageNumByConsumerId.put(mantisKafkaConsumerId, messageNum);
LOGGER.info("got message on topic {} consumer id {}", parsedEvent.get(), mantisKafkaConsumerId);
kafkaAckable.ack();
latch.countDown();
return parsedEvent;
})
.doOnError(t -> {
LOGGER.error("caught unexpected exception", t);
fail("test failed due to unexpected error "+ t.getMessage());
})
.subscribe();
assertTrue("timed out waiting to get all messages from Kafka", latch.await(10, TimeUnit.SECONDS));
assertEquals(0, outstandingMsgs.size());
assertTrue(lastMessageNumByConsumerId.keySet().size() == numConsumers);
lastMessageNumByConsumerId.keySet().forEach(consumerId -> {
assertTrue(lastMessageNumByConsumerId.get(consumerId) >= 0);
});
kafkaServer.deleteTopic(testTopic);
}
}
| 1,219 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/MantisKafkaConsumerConfigTest.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Arrays;
import java.util.Map;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.RangeAssignor;
import org.apache.kafka.common.metrics.JmxReporter;
import org.junit.Test;
import org.mockito.stubbing.Answer;
public class MantisKafkaConsumerConfigTest {
@Test
public void testDefaultConsumerConfig() {
Context context = mock(Context.class);
Parameters params = ParameterTestUtils.createParameters();
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
MantisKafkaConsumerConfig mantisKafkaConsumerConfig = new MantisKafkaConsumerConfig(context);
Map<String, Object> consumerProperties = mantisKafkaConsumerConfig.getConsumerProperties();
assertEquals(Boolean.valueOf(MantisKafkaConsumerConfig.DEFAULT_AUTO_COMMIT_ENABLED), consumerProperties.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_AUTO_COMMIT_INTERVAL_MS, consumerProperties.get(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_AUTO_OFFSET_RESET, consumerProperties.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_FETCH_MAX_WAIT_MS, consumerProperties.get(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_FETCH_MIN_BYTES, consumerProperties.get(ConsumerConfig.FETCH_MIN_BYTES_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_HEARTBEAT_INTERVAL_MS, consumerProperties.get(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_SESSION_TIMEOUT_MS, consumerProperties.get(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_KEY_DESERIALIZER, consumerProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_VALUE_DESERIALIZER, consumerProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_MAX_PARTITION_FETCH_BYTES, consumerProperties.get(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_RECEIVE_BUFFER_BYTES, consumerProperties.get(ConsumerConfig.RECEIVE_BUFFER_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_SEND_BUFFER_BYTES, consumerProperties.get(ConsumerConfig.SEND_BUFFER_CONFIG));
assertEquals(Arrays.asList(MantisKafkaConsumerConfig.DEFAULT_BOOTSTRAP_SERVERS_CONFIG), consumerProperties.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG));
assertEquals(Arrays.asList(JmxReporter.class.getName()), consumerProperties.get(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG));
assertEquals(Arrays.asList(RangeAssignor.class.getName()), consumerProperties.get(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG));
assertEquals(MantisKafkaConsumerConfig.getGroupId(), consumerProperties.get(ConsumerConfig.GROUP_ID_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_MAX_POLL_INTERVAL_MS, consumerProperties.get(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_MAX_POLL_RECORDS, consumerProperties.get(ConsumerConfig.MAX_POLL_RECORDS_CONFIG));
assertEquals(MantisKafkaConsumerConfig.DEFAULT_REQUEST_TIMEOUT_MS, consumerProperties.get(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG));
}
@Test
public void testJobParamOverrides() {
Context context = mock(Context.class);
String testTopic = "topic123";
String testConsumerGroupId = "testKafkaConsumer-1";
Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.TOPIC, testTopic,
KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, testConsumerGroupId,
KafkaSourceParameters.PREFIX + ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, 500);
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
MantisKafkaConsumerConfig mantisKafkaConsumerConfig = new MantisKafkaConsumerConfig(context);
Map<String, Object> consumerProperties = mantisKafkaConsumerConfig.getConsumerProperties();
// MantisKafkaConsumerConfig only affects Kafka's ConsumerConfig defined properties
assertFalse(ConsumerConfig.configNames().contains(KafkaSourceParameters.TOPIC));
assertFalse(consumerProperties.containsKey(KafkaSourceParameters.TOPIC));
assertEquals("earliest", consumerProperties.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG));
assertEquals(testConsumerGroupId, consumerProperties.get(ConsumerConfig.GROUP_ID_CONFIG));
assertEquals(500, consumerProperties.get(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG));
}
}
| 1,220 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/MantisKafkaSourceConfigTest.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.connector.kafka.source.serde.ParserType;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import org.junit.Test;
import org.mockito.stubbing.Answer;
public class MantisKafkaSourceConfigTest {
@Test
public void testDefaultConsumerConfig() {
Context context = mock(Context.class);
Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.TOPIC, "testTopic");
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
MantisKafkaSourceConfig mantisKafkaSourceConfig = new MantisKafkaSourceConfig(context);
assertEquals(MantisKafkaSourceConfig.DEFAULT_CONSUMER_POLL_TIMEOUT_MS, mantisKafkaSourceConfig.getConsumerPollTimeoutMs());
assertEquals(CheckpointStrategyOptions.NONE, mantisKafkaSourceConfig.getCheckpointStrategy());
assertEquals(MantisKafkaConsumerConfig.DEFAULT_CHECKPOINT_INTERVAL_MS, mantisKafkaSourceConfig.getCheckpointIntervalMs());
assertEquals(MantisKafkaSourceConfig.DEFAULT_MAX_BYTES_IN_PROCESSING, mantisKafkaSourceConfig.getMaxBytesInProcessing());
assertEquals(ParserType.SIMPLE_JSON.getPropName(), mantisKafkaSourceConfig.getMessageParserType());
assertEquals(MantisKafkaSourceConfig.DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER, mantisKafkaSourceConfig.getNumConsumerInstances());
assertEquals(MantisKafkaSourceConfig.DEFAULT_PARSE_MSG_IN_SOURCE, mantisKafkaSourceConfig.getParseMessageInSource());
assertEquals(MantisKafkaSourceConfig.DEFAULT_RETRY_CHECKPOINT_CHECK_DELAY_MS, mantisKafkaSourceConfig.getRetryCheckpointCheckDelayMs());
assertEquals(MantisKafkaSourceConfig.DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN, mantisKafkaSourceConfig.getStaticPartitionAssignmentEnabled());
assertEquals(Optional.empty(), mantisKafkaSourceConfig.getTopicPartitionCounts());
assertEquals(Arrays.asList("testTopic"), mantisKafkaSourceConfig.getTopics());
}
@Test
public void testJobParamOverrides() {
Context context = mock(Context.class);
String testTopic = "topic123";
int checkpointIntervalOverride = 100;
boolean staticPartitionAssignEnableOverride = true;
Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.TOPIC, testTopic,
KafkaSourceParameters.CHECKPOINT_INTERVAL_MS, checkpointIntervalOverride,
KafkaSourceParameters.ENABLE_STATIC_PARTITION_ASSIGN, staticPartitionAssignEnableOverride,
KafkaSourceParameters.TOPIC_PARTITION_COUNTS, testTopic+":1024");
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
MantisKafkaSourceConfig mantisKafkaSourceConfig = new MantisKafkaSourceConfig(context);
assertEquals(checkpointIntervalOverride, mantisKafkaSourceConfig.getCheckpointIntervalMs());
assertEquals(staticPartitionAssignEnableOverride, mantisKafkaSourceConfig.getStaticPartitionAssignmentEnabled());
Map<String, Integer> topicPartitionCounts = new HashMap<>();
topicPartitionCounts.put(testTopic, 1024);
assertEquals(Optional.ofNullable(topicPartitionCounts), mantisKafkaSourceConfig.getTopicPartitionCounts());
}
}
| 1,221 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/checkpoint
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/FileBasedOffsetCheckpointStrategyTest.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.time.Instant;
import java.util.Collections;
import java.util.Date;
import java.util.Optional;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import io.mantisrx.connector.kafka.source.serde.OffsetAndMetadataDeserializer;
import io.mantisrx.connector.kafka.source.serde.OffsetAndMetadataSerializer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;
public class FileBasedOffsetCheckpointStrategyTest {
private FileBasedOffsetCheckpointStrategy strategy = new FileBasedOffsetCheckpointStrategy();
@Test
public void testSaveAndLoadCheckpoint() {
final TopicPartition topicPartition = new TopicPartition("test-topic", 1);
strategy.init(Collections.singletonMap(FileBasedOffsetCheckpointStrategy.CHECKPOINT_DIR_PROP, FileBasedOffsetCheckpointStrategy.DEFAULT_CHECKPOINT_DIR));
final OffsetAndMetadata oam = new OffsetAndMetadata(100, Date.from(Instant.now()).toString());
strategy.persistCheckpoint(Collections.singletonMap(topicPartition, oam));
final Optional<OffsetAndMetadata> actual = strategy.loadCheckpoint(topicPartition);
assertEquals(true, actual.isPresent());
assertEquals(oam, actual.get());
}
@Test
public void testOffsetAndMetadataSerialization() {
OffsetAndMetadata expected = new OffsetAndMetadata(100, "tempmeta");
final SimpleModule module = new SimpleModule().addSerializer(OffsetAndMetadata.class, new OffsetAndMetadataSerializer())
.addDeserializer(OffsetAndMetadata.class, new OffsetAndMetadataDeserializer());
final ObjectMapper mapper = new ObjectMapper()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
mapper.registerModule(module);
try {
final String s = mapper.writeValueAsString(expected);
final OffsetAndMetadata actual = mapper.readValue(s, OffsetAndMetadata.class);
assertEquals(expected, actual);
        } catch (IOException e) {
            throw new AssertionError("OffsetAndMetadata serde round trip failed", e);
        }
}
}
| 1,222 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/assignor/StaticPartitionAssignorTest.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.assignor;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class StaticPartitionAssignorTest {
private static final Logger LOGGER = LoggerFactory.getLogger(StaticPartitionAssignorTest.class);
@Test
public void testStaticAssign1() {
Map<String, Integer> topicPartitionCounts = generateTopicPartitionCounts(15, 2);
LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
int totalNumConsumers = 20;
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
for (int i = 0; i < totalNumConsumers; i++) {
List<TopicPartition> assignedPartitions = partitionAssigner.assignPartitionsToConsumer(i, topicPartitionCounts, totalNumConsumers);
assertTrue(assignedPartitions.size() >= 1 && assignedPartitions.size() <= 2);
LOGGER.info("Consumer[{}] -> {}", i, assignedPartitions);
}
}
@Test
public void testStaticAssignMoreConsumersThanPartitions() {
Map<String, Integer> topicPartitionCounts = generateTopicPartitionCounts(15, 2);
LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
int totalNumConsumers = 40;
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
for (int i = 0; i < totalNumConsumers; i++) {
List<TopicPartition> assignedPartitions = partitionAssigner.assignPartitionsToConsumer(i, topicPartitionCounts, totalNumConsumers);
// assertTrue(assignedPartitions.size() >= 1 && assignedPartitions.size() <= 2);
LOGGER.info("Consumer[{}] -> {}", i, assignedPartitions);
}
}
private Map<String, Integer> generateTopicPartitionCounts(int numTopics, int partitionRange) {
Map<String, Integer> topicPartitionMap = new HashMap<>();
int partitionCnt = 1;
for (int i = 0; i < numTopics; i++) {
topicPartitionMap.put("topic_" + i, partitionCnt++);
if (partitionCnt == partitionRange + 1) {
partitionCnt = 1;
}
}
return topicPartitionMap;
}
@Test
public void testStaticAssign2() {
Map<String, Integer> topicPartitionCounts = new HashMap<>();
topicPartitionCounts.put("topic_0", 1400);
LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
int totalNumConsumers = 20;
Map<String, List<TopicPartition>> assignmentMap = new HashMap<>();
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
for (int i = 0; i < totalNumConsumers; i++) {
List<TopicPartition> assignedPartitions = partitionAssigner.assignPartitionsToConsumer(i, topicPartitionCounts, totalNumConsumers);
assertEquals(70, assignedPartitions.size());
assignmentMap.put("" + i, assignedPartitions);
LOGGER.info("Consumer[{}] -> {}", i, assignedPartitions);
}
}
@Test
public void testStaticAssign3() {
String tpList = "testtopic:1,testtopic2:7,testtopic3:1,testtopic4:46";
Map<String, Integer> tpMap = new HashMap<>();
String[] topicPartitionTuples = tpList.split(",");
for (int i = 0; i < topicPartitionTuples.length; i++) {
String[] topPart = topicPartitionTuples[i].split(":");
tpMap.put(topPart[0], Integer.valueOf(topPart[1]));
}
int totalNumConsumers = 12;
Map<String, List<TopicPartition>> assignmentMap = new HashMap<>();
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
for (int i = 0; i < totalNumConsumers; i++) {
List<TopicPartition> assignedPartitions = partitionAssigner.assignPartitionsToConsumer(i, tpMap, totalNumConsumers);
// assertEquals(70, assignedPartitions.size());
assignmentMap.put("" + i, assignedPartitions);
LOGGER.info("Consumer[{}] -> {}", i, assignedPartitions);
}
}
@Test
public void invalidConsumerIndexTest() {
Map<String, Integer> topicPartitionCounts = new HashMap<>();
topicPartitionCounts.put("topic_0", 1400);
LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
int totalNumConsumers = 20;
Map<String, List<TopicPartition>> assignmentMap = new HashMap<>();
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
try {
partitionAssigner.assignPartitionsToConsumer(-1, topicPartitionCounts, totalNumConsumers);
fail();
} catch (IllegalArgumentException e) {
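            // expected: negative consumer index is rejected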
}
try {
partitionAssigner.assignPartitionsToConsumer(100, topicPartitionCounts, totalNumConsumers);
fail();
} catch (IllegalArgumentException e) {
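            // expected: consumer index >= totalNumConsumers is rejected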
}
}
@Test
public void invalidTotalConsumersTest() {
Map<String, Integer> topicPartitionCounts = new HashMap<>();
topicPartitionCounts.put("topic_0", 1400);
LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
Map<String, List<TopicPartition>> assignmentMap = new HashMap<>();
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
try {
int totalNumConsumers = -1;
partitionAssigner.assignPartitionsToConsumer(1, topicPartitionCounts, totalNumConsumers);
fail();
} catch (IllegalArgumentException e) {
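            // expected: non-positive total consumer count is rejected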
}
}
@Test
public void invalidTopicPartitionMapTest() {
Map<String, Integer> topicPartitionCounts = null;
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
try {
partitionAssigner.assignPartitionsToConsumer(1, topicPartitionCounts, 20);
fail();
} catch (NullPointerException e) {
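            // expected: null topic-partition map is rejected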
}
}
}
| 1,223 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/sink/MantisKafkaProducerConfigTest.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Arrays;
import java.util.Map;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.metrics.JmxReporter;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.Test;
import org.mockito.stubbing.Answer;
public class MantisKafkaProducerConfigTest {
@Test
public void testDefaultKafkaProducerConfig() {
Context context = mock(Context.class);
Parameters params = ParameterTestUtils.createParameters();
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
        MantisKafkaProducerConfig mantisKafkaProducerConfig = new MantisKafkaProducerConfig(context);
Map<String, Object> producerProperties = mantisKafkaProducerConfig.getProducerProperties();
assertEquals(ByteArraySerializer.class, producerProperties.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG));
assertEquals(ByteArraySerializer.class, producerProperties.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG));
assertEquals(Arrays.asList(MantisKafkaProducerConfig.DEFAULT_BOOTSTRAP_SERVERS_CONFIG), producerProperties.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG));
assertEquals(Arrays.asList(JmxReporter.class.getName()), producerProperties.get(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG));
}
@Test
public void testJobParamOverrides() {
Context context = mock(Context.class);
Parameters params = ParameterTestUtils.createParameters(KafkaSinkJobParameters.PREFIX + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
        MantisKafkaProducerConfig mantisKafkaProducerConfig = new MantisKafkaProducerConfig(context);
Map<String, Object> producerProperties = mantisKafkaProducerConfig.getProducerProperties();
assertEquals(StringSerializer.class, producerProperties.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG));
assertEquals(ByteArraySerializer.class, producerProperties.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG));
}
}
| 1,224 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/sink/KafkaSinkTest.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;
import com.netflix.spectator.api.NoopRegistry;
import info.batey.kafka.unit.KafkaUnit;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.WorkerInfo;
import io.mantisrx.runtime.parameter.Parameters;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.PortRequest;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
// Test ignored until kafka-unit dependency for kafka v2.2.+ is released after merging PR https://github.com/chbatey/kafka-unit/pull/69
@Ignore
public class KafkaSinkTest {
    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaSinkTest.class);
private static final KafkaUnit kafkaServer = new KafkaUnit(5000, 9092);
private static final Random random = new Random(System.currentTimeMillis());
private static final AtomicInteger topicNum = new AtomicInteger();
@BeforeClass
public static void startup() {
kafkaServer.startup();
}
@AfterClass
public static void shutdown() {
kafkaServer.shutdown();
}
@Test
public void testKafkaSink() throws InterruptedException {
String testTopic = "testTopic" + topicNum.incrementAndGet();
int numPartitions = 1;
kafkaServer.createTopic(testTopic, numPartitions);
int numMessages = 10;
KafkaSink<String> kafkaSink = new KafkaSink<>(new NoopRegistry(), s -> s.getBytes());
Context context = mock(Context.class);
Parameters params = ParameterTestUtils.createParameters(KafkaSinkJobParameters.TOPIC, testTopic);
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
when(context.getWorkerInfo()).then((Answer<WorkerInfo>) invocation -> new WorkerInfo("testJobName", "testJobName-1", 1, 0, 1, MantisJobDurationType.Perpetual, "1.1.1.1"));
when(context.getJobId()).then((Answer<String>) invocation -> "testJobName-1");
kafkaSink.call(context, mock(PortRequest.class), Observable.range(0, numMessages).map(x -> String.valueOf(x)));
List<String> messages = kafkaServer.readAllMessages(testTopic);
LOGGER.info("got {}", messages);
assertEquals(numMessages, messages.size());
for (int i = 0; i < numMessages; i++) {
assertEquals(i, Integer.parseInt(messages.get(i)));
}
kafkaServer.deleteTopic(testTopic);
}
}
| 1,225 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/KafkaDataNotification.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
public class KafkaDataNotification {
public enum Kind {
ACK,
NACK,
ERR
}
public static KafkaDataNotification ack(KafkaData event, long elapsedMillis) {
return new KafkaDataNotification(event, Kind.ACK, null, elapsedMillis);
}
public static KafkaDataNotification nack(KafkaData event, long elapsedMillis) {
return new KafkaDataNotification(event, Kind.NACK, null, elapsedMillis);
}
public static KafkaDataNotification error(KafkaData request, Throwable t, long elapsedMillis) {
return new KafkaDataNotification(request, Kind.ERR, t, elapsedMillis);
}
private final KafkaData value;
private final Kind kind;
private final Throwable error;
private long elapsedMillis;
protected KafkaDataNotification(KafkaData value, Kind kind, Throwable error, long elapsedMillis) {
this.value = value;
this.kind = kind;
this.error = error;
this.elapsedMillis = elapsedMillis;
}
public Throwable getError() {
return error;
}
public boolean hasError() {
return error != null;
}
    /**
     * @return the kind of this notification (ACK, NACK or ERR)
     */
public Kind getKind() {
return kind;
}
public KafkaData getValue() {
return value;
}
public boolean hasValue() {
return value != null;
}
public boolean isError() {
return kind.equals(Kind.ERR);
}
public boolean isSuccess() {
return kind.equals(Kind.ACK);
}
    /**
     * Time it took to execute the operation for which this notification was generated.
     *
     * @return elapsed time in milliseconds
     */
public long getElapsed() {
return elapsedMillis;
}
}
| 1,226 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/KafkaAckable.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
import java.util.concurrent.TimeUnit;
/**
* Ackable used to wrap the data read from kafka to allow providing feedback to the source when the payload is consumed.
*/
public class KafkaAckable {
private final Subject<KafkaDataNotification, KafkaDataNotification> subject;
private final KafkaData kafkaData;
private final long createTimeNano = System.nanoTime();
public KafkaAckable(KafkaData data, SerializedSubject<KafkaDataNotification, KafkaDataNotification> ackSubject) {
this.kafkaData = data;
this.subject = ackSubject;
}
public KafkaAckable(KafkaData data, Subject<KafkaDataNotification, KafkaDataNotification> ackSubject) {
this.kafkaData = data;
this.subject = ackSubject;
}
public void ack() {
KafkaDataNotification n = KafkaDataNotification.ack(getKafkaData(),
TimeUnit.MILLISECONDS.convert(System.nanoTime() - createTimeNano, TimeUnit.NANOSECONDS));
subject.onNext(n);
}
/**
* NACK indicating that the message was not processed and should be
* returned to the source.
*
*/
public void nack() {
KafkaDataNotification n = KafkaDataNotification.nack(getKafkaData(),
TimeUnit.MILLISECONDS.convert(System.nanoTime() - createTimeNano, TimeUnit.NANOSECONDS));
subject.onNext(n);
}
    /**
     * There was an error processing the message. Depending on the implementation
     * of the source, the message may be:
     * 1. Dropped
     * 2. Replayed
     * 3. Posted to a poison queue
     *
     * @param t the error encountered while processing the message
     */
public void error(Throwable t) {
KafkaDataNotification n = KafkaDataNotification.error(getKafkaData(), t,
TimeUnit.MILLISECONDS.convert(System.nanoTime() - createTimeNano, TimeUnit.NANOSECONDS));
subject.onNext(n);
}
/**
     * @return the wrapped KafkaData payload being acknowledged
*/
public KafkaData getKafkaData() {
return kafkaData;
}
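    // Typical consumer-side flow (see KafkaSourceTest in this module): process getKafkaData(),
    // then call ack() on success, nack() to hand the message back, or error(t) on failure so the
    // source can apply its drop/replay/poison-queue policy.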
}
| 1,227 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/KafkaData.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
import java.util.Arrays;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
public class KafkaData {
private final String topic;
private final int partition;
private final long offset;
private final byte[] rawBytes;
/* parsedEvent is present if the raw bytes were already decoded */
private volatile Optional<Map<String, Object>> parsedEvent;
private final Optional<String> key;
private final String streamId;
private int mantisKafkaConsumerId;
public KafkaData(String topic,
int partition,
long offset,
byte[] rawBytes,
Optional<Map<String, Object>> parsedEvent,
Optional<String> key,
int mantisKafkaConsumerId) {
this.topic = topic;
this.partition = partition;
this.offset = offset;
this.rawBytes = rawBytes;
this.parsedEvent = parsedEvent;
this.key = key;
this.mantisKafkaConsumerId = mantisKafkaConsumerId;
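        // streamId uniquely identifies the source topic-partition, e.g. "myTopic-3"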
this.streamId = new StringBuilder(topic).append('-').append(partition).toString();
}
public KafkaData(ConsumerRecord<String, byte[]> m,
Optional<Map<String, Object>> parsedEvent,
Optional<String> key,
int mantisKafkaConsumerId) {
this(m.topic(), m.partition(), m.offset(), m.value(), parsedEvent, key, mantisKafkaConsumerId);
}
public String getTopic() {
return topic;
}
public int getPartition() {
return partition;
}
public long getOffset() {
return offset;
}
public byte[] getRawBytes() {
return rawBytes;
}
public int getMantisKafkaConsumerId() {
return mantisKafkaConsumerId;
}
public String getStreamId() {
return this.streamId;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
KafkaData kafkaData = (KafkaData) o;
return partition == kafkaData.partition &&
offset == kafkaData.offset &&
mantisKafkaConsumerId == kafkaData.mantisKafkaConsumerId &&
topic.equals(kafkaData.topic) &&
Arrays.equals(rawBytes, kafkaData.rawBytes) &&
parsedEvent.equals(kafkaData.parsedEvent) &&
key.equals(kafkaData.key);
}
@Override
public int hashCode() {
int result = Objects.hash(topic, partition, offset, parsedEvent, key, mantisKafkaConsumerId);
result = 31 * result + Arrays.hashCode(rawBytes);
return result;
}
public Optional<Map<String, Object>> getParsedEvent() {
return parsedEvent;
}
public void setParsedEvent(final Map<String, Object> parsedEvent) {
this.parsedEvent = Optional.ofNullable(parsedEvent);
}
public Optional<String> getKey() {
return key;
}
@Override
public String toString() {
return "KafkaData{" +
"topic='" + topic + '\'' +
", partition=" + partition +
", offset=" + offset +
", rawBytes=" + Arrays.toString(rawBytes) +
", parsedEvent=" + parsedEvent +
", key=" + key +
'}';
}
}
| 1,228 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/KafkaSourceParameters.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
public class KafkaSourceParameters {
public static final String PREFIX = "kafka.source.consumer.";
public static final String CHECKPOINT_STRATEGY = "checkpointStrategy";
public static final String CONSUMER_POLL_TIMEOUT_MS = "consumerPollTimeoutMs";
public static final String NUM_KAFKA_CONSUMER_PER_WORKER = "numKafkaConsumerPerWorker";
public static final String TOPIC = PREFIX + "topic";
public static final String MAX_BYTES_IN_PROCESSING = "maxBytesInProcessing";
public static final String PARSER_TYPE = "messageParserType";
public static final String PARSE_MSG_IN_SOURCE = "parseMessageInKafkaConsumerThread";
public static final String RETRY_CHECKPOINT_CHECK_DELAY_MS = "retryCheckpointCheckDelayMs";
public static final String CHECKPOINT_INTERVAL_MS = "checkpointIntervalMs";
// Enable static partition assignment, this disables Kafka's default consumer group management
public static final String ENABLE_STATIC_PARTITION_ASSIGN = "enableStaticPartitionAssign";
// Number of partitions per topic, used only when Static Partition assignment is enabled
public static final String TOPIC_PARTITION_COUNTS = "numPartitionsPerTopic";
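// Note: of the constants above, only TOPIC is resolved against PREFIX (yielding
// "kafka.source.consumer.topic"); the remaining constants are used as bare parameter
// names (e.g. "checkpointStrategy"), while PREFIX itself is reserved for passing raw
// Kafka ConsumerConfig keys through as job parameters.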
}
| 1,229 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/KafkaSource.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import com.netflix.spectator.api.Registry;
import io.mantisrx.connector.kafka.source.metrics.ConsumerMetrics;
import io.mantisrx.connector.kafka.source.serde.ParseException;
import io.mantisrx.connector.kafka.source.serde.Parser;
import io.mantisrx.connector.kafka.source.serde.ParserType;
import io.mantisrx.connector.kafka.KafkaData;
import io.mantisrx.connector.kafka.KafkaDataNotification;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategy;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
import io.mantisrx.connector.kafka.source.checkpoint.trigger.CheckpointTrigger;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.KafkaAckable;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.BooleanParameter;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.record.InvalidRecordException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.observables.SyncOnSubscribe;
import rx.schedulers.Schedulers;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.CONSUMER_RECORD_OVERHEAD_BYTES;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_MAX_BYTES_IN_PROCESSING;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_PARSE_MSG_IN_SOURCE;
/**
 * Mantis Kafka Source wraps a Kafka v2.2+ consumer with back-pressure semantics: the consumer polls data from Kafka
 * only as fast as the data is processed and ack'ed by the processing stage of the Mantis Job.
 * <p>
 * The {@value KafkaSourceParameters#NUM_KAFKA_CONSUMER_PER_WORKER} Job param decides the number of Kafka consumer instances spawned on each Mantis worker.
 * Each Kafka consumer instance runs in its own thread and polls data from Kafka as part of the same consumer group.
 */
public class KafkaSource implements Source<KafkaAckable> {
private static final Logger LOGGER = LoggerFactory.getLogger(KafkaSource.class);
private final AtomicBoolean done = new AtomicBoolean();
private final Map<Integer, MantisKafkaConsumer<?>> idToConsumerMap = new HashMap<>();
private final Registry registry;
private final SerializedSubject<KafkaDataNotification, KafkaDataNotification> ackSubject =
new SerializedSubject<>(PublishSubject.create());
public KafkaSource(final Registry registry) {
this.registry = registry;
}
private Observable<MantisKafkaConsumer<?>> createConsumers(final Context context,
final MantisKafkaSourceConfig kafkaSourceConfig,
final int totalNumWorkers) {
final List<MantisKafkaConsumer<?>> consumers = new ArrayList<>();
for (int i = 0; i < kafkaSourceConfig.getNumConsumerInstances(); i++) {
final int consumerIndex = context.getWorkerInfo().getWorkerIndex() + (totalNumWorkers * i);
MantisKafkaConsumer<?> mantisKafkaConsumer = new MantisKafkaConsumer.Builder()
.withKafkaSourceConfig(kafkaSourceConfig)
.withTotalNumConsumersForJob(totalNumWorkers * kafkaSourceConfig.getNumConsumerInstances())
.withContext(context)
.withConsumerIndex(consumerIndex)
.withRegistry(registry)
.build();
idToConsumerMap.put(mantisKafkaConsumer.getConsumerId(), mantisKafkaConsumer);
LOGGER.info("created consumer {}", mantisKafkaConsumer);
consumers.add(mantisKafkaConsumer);
}
return Observable.from(consumers);
}
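/*
 * Worked example of the consumerIndex scheme above (numbers are illustrative only):
 * with totalNumWorkers = 3 and numKafkaConsumerPerWorker = 2, worker index 0 creates
 * consumers with consumerIndex 0 and 3, worker 1 creates 1 and 4, and worker 2 creates
 * 2 and 5, so all 6 consumers in the job carry distinct indexes for static assignment.
 */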
private int getPayloadSize(ConsumerRecord<String, byte[]> record) {
return record.value().length + CONSUMER_RECORD_OVERHEAD_BYTES;
}
/**
 * Creates an observable with back-pressure semantics from the consumer records fetched by the given consumer.
 *
 * @param mantisKafkaConsumer wrapper around the non-thread-safe KafkaConsumer
 * @param kafkaSourceConfig configuration for the Mantis Kafka Source
 */
private Observable<KafkaAckable> createBackPressuredConsumerObs(final MantisKafkaConsumer<?> mantisKafkaConsumer,
final MantisKafkaSourceConfig kafkaSourceConfig) {
CheckpointStrategy checkpointStrategy = mantisKafkaConsumer.getStrategy();
final CheckpointTrigger trigger = mantisKafkaConsumer.getTrigger();
final ConsumerMetrics consumerMetrics = mantisKafkaConsumer.getConsumerMetrics();
final TopicPartitionStateManager partitionStateManager = mantisKafkaConsumer.getPartitionStateManager();
int mantisKafkaConsumerId = mantisKafkaConsumer.getConsumerId();
SyncOnSubscribe<Iterator<ConsumerRecord<String, byte[]>>, KafkaAckable> syncOnSubscribe = SyncOnSubscribe.createStateful(
() -> {
final ConsumerRecords<String, byte[]> records = mantisKafkaConsumer.poll(kafkaSourceConfig.getConsumerPollTimeoutMs());
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("topic listing for consumer {}", mantisKafkaConsumer.listTopics());
}
LOGGER.info("consumer subscribed to topic-partitions {}", mantisKafkaConsumer.assignment());
return records.iterator();
},
(consumerRecordIterator, observer) -> {
Iterator<ConsumerRecord<String, byte[]>> it = consumerRecordIterator;
final Set<TopicPartition> partitions = mantisKafkaConsumer.assignment();
if (trigger.shouldCheckpoint()) {
long startTime = System.currentTimeMillis();
final Map<TopicPartition, OffsetAndMetadata> checkpoint =
partitionStateManager.createCheckpoint(partitions);
checkpointStrategy.persistCheckpoint(checkpoint);
long now = System.currentTimeMillis();
consumerMetrics.recordCheckpointDelay(now - startTime);
consumerMetrics.incrementCommitCount();
trigger.reset();
}
if (!done.get()) {
try {
if (!consumerRecordIterator.hasNext()) {
final ConsumerRecords<String, byte[]> consumerRecords =
mantisKafkaConsumer.poll(kafkaSourceConfig.getConsumerPollTimeoutMs());
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("poll returned {} records", consumerRecords.count());
}
it = consumerRecords.iterator();
}
if (it.hasNext()) {
final ConsumerRecord<String, byte[]> m = it.next();
final TopicPartition topicPartition = new TopicPartition(m.topic(), m.partition());
consumerMetrics.incrementInCount();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("updating read offset to " + m.offset() + " read " + m.value());
}
if (m.value() != null) {
try {
trigger.update(getPayloadSize(m));
if (kafkaSourceConfig.getParseMessageInSource()) {
final Parser parser = ParserType.parser(kafkaSourceConfig.getMessageParserType()).getParser();
if (parser.canParse(m.value())) {
final Map<String, Object> parsedKafkaValue = parser.parseMessage(m.value());
final KafkaData kafkaData = new KafkaData(m, Optional.ofNullable(parsedKafkaValue), Optional.ofNullable(m.key()), mantisKafkaConsumerId);
final KafkaAckable ackable = new KafkaAckable(kafkaData, ackSubject);
// record offset consumed in TopicPartitionStateManager before onNext to avoid race condition with Ack being processed before the consume is recorded
partitionStateManager.recordMessageRead(topicPartition, m.offset());
consumerMetrics.recordReadOffset(topicPartition, m.offset());
observer.onNext(ackable);
} else {
consumerMetrics.incrementParseFailureCount();
}
} else {
final KafkaData kafkaData = new KafkaData(m, Optional.empty(), Optional.ofNullable(m.key()), mantisKafkaConsumerId);
final KafkaAckable ackable = new KafkaAckable(kafkaData, ackSubject);
// record offset consumed in TopicPartitionStateManager before onNext to avoid race condition with Ack being processed before the consume is recorded
partitionStateManager.recordMessageRead(topicPartition, m.offset());
consumerMetrics.recordReadOffset(topicPartition, m.offset());
observer.onNext(ackable);
}
} catch (ParseException pe) {
consumerMetrics.incrementErrorCount();
LOGGER.warn("failed to parse {}:{} message {}", m.topic(), m.partition(), m.value(), pe);
}
} else {
consumerMetrics.incrementKafkaMessageValueNullCount();
}
} else {
consumerMetrics.incrementWaitForDataCount();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Reached head of partition, waiting for more data");
}
TimeUnit.MILLISECONDS.sleep(200);
}
} catch (TimeoutException toe) {
consumerMetrics.incrementWaitForDataCount();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Reached head of partition waiting for more data");
}
} catch (OffsetOutOfRangeException oore) {
LOGGER.warn("offsets out of range " + oore.partitions() + " will seek to beginning", oore);
final Set<TopicPartition> topicPartitionSet = oore.partitions();
for (TopicPartition tp : topicPartitionSet) {
LOGGER.info("partition {} consumer position {}", tp, mantisKafkaConsumer.position(tp));
}
mantisKafkaConsumer.seekToBeginning(oore.partitions().toArray(new TopicPartition[oore.partitions().size()]));
} catch (InvalidRecordException ire) {
consumerMetrics.incrementErrorCount();
LOGGER.warn("iterator error with invalid message. message will be dropped " + ire.getMessage());
} catch (KafkaException e) {
consumerMetrics.incrementErrorCount();
LOGGER.warn("Other Kafka exception, message will be dropped. " + e.getMessage());
} catch (InterruptedException ie) {
LOGGER.error("consumer interrupted", ie);
Thread.currentThread().interrupt();
} catch (Exception e) {
consumerMetrics.incrementErrorCount();
LOGGER.warn("caught exception", e);
}
} else {
mantisKafkaConsumer.close();
}
return it;
},
consumerRecordIterator -> {
LOGGER.info("closing Kafka consumer on unsubscribe" + mantisKafkaConsumer.toString());
mantisKafkaConsumer.close();
});
return Observable.create(syncOnSubscribe)
.subscribeOn(Schedulers.newThread())
.doOnUnsubscribe(() -> LOGGER.info("consumer {} stopped due to unsubscribe", mantisKafkaConsumerId))
.doOnError((t) -> {
LOGGER.error("consumer {} stopped due to error", mantisKafkaConsumerId, t);
consumerMetrics.incrementErrorCount();
})
.doOnTerminate(() -> LOGGER.info("consumer {} terminated", mantisKafkaConsumerId));
}
@Override
public Observable<Observable<KafkaAckable>> call(Context context, Index index) {
final int totalNumWorkers = index.getTotalNumWorkers();
MantisKafkaSourceConfig mantisKafkaSourceConfig = new MantisKafkaSourceConfig(context);
startAckProcessor();
return Observable.create((Observable.OnSubscribe<Observable<KafkaAckable>>) child -> {
final Observable<MantisKafkaConsumer<?>> consumers =
createConsumers(context, mantisKafkaSourceConfig, totalNumWorkers);
consumers.subscribe(consumer -> {
final Observable<KafkaAckable> mantisKafkaAckableObs =
createBackPressuredConsumerObs(consumer, mantisKafkaSourceConfig);
child.onNext(mantisKafkaAckableObs);
});
})
.doOnUnsubscribe(() -> {
LOGGER.info("unsubscribed");
done.set(true);
}).doOnSubscribe(() -> {
LOGGER.info("subscribed");
done.set(false);
});
}
private void processAckNotification(final KafkaDataNotification notification) {
final KafkaData kafkaData = notification.getValue();
final TopicPartition topicPartition = new TopicPartition(kafkaData.getTopic(), kafkaData.getPartition());
MantisKafkaConsumer<?> mantisKafkaConsumer = idToConsumerMap.get(kafkaData.getMantisKafkaConsumerId());
if (mantisKafkaConsumer != null) {
mantisKafkaConsumer.getPartitionStateManager().recordMessageAck(topicPartition, kafkaData.getOffset());
if (!notification.isSuccess()) {
// TODO provide a hook for the user to add handling for messages that could not be processed
LOGGER.debug("Got negative acknowledgement {}", notification);
}
mantisKafkaConsumer.getConsumerMetrics().incrementProcessedCount();
} else {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("got Ack for consumer id {} not in idToConsumerMap (topic {})", kafkaData.getMantisKafkaConsumerId(), kafkaData.getTopic());
}
}
}
private void startAckProcessor() {
LOGGER.info("Acknowledgement processor started");
ackSubject.subscribe((KafkaDataNotification notification) -> processAckNotification(notification));
}
@Override
public List<ParameterDefinition<?>> getParameters() {
final List<ParameterDefinition<?>> params = new ArrayList<>();
params.add(new StringParameter()
.name(KafkaSourceParameters.TOPIC)
.description("Kafka topic to connect to")
.validator(Validators.notNullOrEmpty())
.required()
.build());
// Optional parameters
params.add(new StringParameter()
.name(KafkaSourceParameters.CHECKPOINT_STRATEGY)
.description("checkpoint strategy one of " + CheckpointStrategyOptions.values() + " (ensure enable.auto.commit param is set to false when enabling this)")
.defaultValue(CheckpointStrategyOptions.NONE)
.validator(Validators.alwaysPass())
.build());
params.add(new IntParameter()
.name(KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER)
.description("No. of Kafka consumer instances per Mantis worker")
.validator(Validators.range(1, 16))
.defaultValue(DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER)
.build());
params.add(new IntParameter()
.name(KafkaSourceParameters.MAX_BYTES_IN_PROCESSING)
.description("The maximum amount of data per-consumer awaiting acks to trigger an offsets commit. " +
"These commits are in addition to any commits triggered by commitIntervalMs timer")
.defaultValue(DEFAULT_MAX_BYTES_IN_PROCESSING)
.validator(Validators.range(1, Integer.MAX_VALUE))
.build());
params.add(new IntParameter()
.name(KafkaSourceParameters.CONSUMER_POLL_TIMEOUT_MS)
.validator(Validators.range(100, 10_000))
.defaultValue(250)
.build());
params.add(new StringParameter()
.name(KafkaSourceParameters.PARSER_TYPE)
.validator(Validators.notNullOrEmpty())
.defaultValue(ParserType.SIMPLE_JSON.getPropName())
.build());
params.add(new BooleanParameter()
.name(KafkaSourceParameters.PARSE_MSG_IN_SOURCE)
.validator(Validators.alwaysPass())
.defaultValue(DEFAULT_PARSE_MSG_IN_SOURCE)
.build());
params.add(new BooleanParameter()
.name(KafkaSourceParameters.ENABLE_STATIC_PARTITION_ASSIGN)
.validator(Validators.alwaysPass())
.defaultValue(DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN)
.description("Disable Kafka's default consumer group management and statically assign partitions to job workers. When enabling static partition assignments, disable auto-scaling and set the numPartitionsPerTopic job parameter")
.build());
params.add(new StringParameter()
.name(KafkaSourceParameters.TOPIC_PARTITION_COUNTS)
.validator(Validators.alwaysPass())
.defaultValue("")
.description("Configures number of partitions on a kafka topic when static partition assignment is enabled. Format <topic1>:<numPartitions Topic1>,<topic2>:<numPartitions Topic2> Example: nf_errors_log:9,clevent:450")
.build());
params.addAll(MantisKafkaConsumerConfig.getJobParameterDefinitions());
return params;
}
}
| 1,230 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/KafkaConsumerRebalanceListener.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import java.util.Collection;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategy;
public class KafkaConsumerRebalanceListener<S> implements ConsumerRebalanceListener {
private static final Logger LOGGER = LoggerFactory.getLogger(KafkaConsumerRebalanceListener.class);
private final KafkaConsumer<?, ?> consumer;
private final TopicPartitionStateManager partitionStateManager;
private final CheckpointStrategy<S> checkpointStrategy;
public KafkaConsumerRebalanceListener(final KafkaConsumer<?, ?> consumer,
final TopicPartitionStateManager partitionStateManager,
final CheckpointStrategy<S> checkpointStrategy) {
this.consumer = consumer;
this.partitionStateManager = partitionStateManager;
this.checkpointStrategy = checkpointStrategy;
}
@Override
public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
// When partitions are revoked, clear all partition state. We don't checkpoint here because waiting for all
// outstanding acks before committing the offsets/state to the data store could block indefinitely if processing is slow.
LOGGER.info("partitions revoked, resetting partition state: {}", partitions.toString());
partitions.stream().forEach(tp -> partitionStateManager.resetCounters(tp));
}
/**
 * Assumes that onPartitionsRevoked is always called before onPartitionsAssigned.
 */
@Override
public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
LOGGER.info("new partitions assigned: {}", partitions.toString());
try {
for (TopicPartition tp : partitions) {
Optional<S> checkpointState = checkpointStrategy.loadCheckpoint(tp);
checkpointState
.filter(x -> x instanceof OffsetAndMetadata)
.map(OffsetAndMetadata.class::cast)
.ifPresent(oam -> {
long offset = oam.offset();
LOGGER.info("seeking consumer to checkpoint'ed offset {} for partition {} on assignment", offset, tp);
try {
consumer.seek(tp, offset);
} catch (Exception e) {
LOGGER.error("caught exception seeking consumer to offset {} on topic partition {}", offset, tp, e);
}
});
}
} catch (Exception e) {
LOGGER.error("caught exception on partition assignment {}", partitions, e);
}
}
}
| 1,231 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/TopicPartitionStateManager.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import io.netty.util.internal.ConcurrentSet;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;
public class TopicPartitionStateManager {
private static final Logger LOGGER = LoggerFactory.getLogger(TopicPartitionStateManager.class);
// Delay in ms between retries while waiting for all in-flight messages to be acked before a checkpoint is created (defaults to 20ms)
private final int checkpointReadyCheckDelayMs;
private final Counter waitingForAckCount;
public TopicPartitionStateManager(Registry registry, String kafkaClientId, int checkpointReadyCheckDelayMs) {
this.checkpointReadyCheckDelayMs = checkpointReadyCheckDelayMs;
this.waitingForAckCount = registry.counter("waitingOnAck", "client-id", kafkaClientId);
}
public static final long DEFAULT_LAST_READ_OFFSET = 0;
private class State {
private final AtomicLong lastReadOffset = new AtomicLong(DEFAULT_LAST_READ_OFFSET);
private final ConcurrentSet<Long> unAckedOffsets = new ConcurrentSet<>();
}
private final ConcurrentMap<TopicPartition, State> partitionState = new ConcurrentHashMap<>();
/**
* Track the message with this offset as read from Kafka but waiting on acknowledgement from the processing stage.
*
* @param tp TopicPartition the message was read from
* @param offset kafka offset for the message
*/
public void recordMessageRead(final TopicPartition tp, final long offset) {
// add to set
if (!partitionState.containsKey(tp)) {
partitionState.putIfAbsent(tp, new State());
}
partitionState.get(tp).unAckedOffsets.add(offset);
partitionState.get(tp).lastReadOffset.set(offset);
}
/**
* Records that the message identified by this offset has been processed and ack'ed by the processing stage.
*
* @param tp TopicPartition the message was read from
* @param offset kafka offset for the message
*/
public void recordMessageAck(final TopicPartition tp, final long offset) {
// remove from set
if (!partitionState.containsKey(tp)) {
return;
}
partitionState.get(tp).unAckedOffsets.remove(offset);
}
/**
* Get last read offset from this topic partition.
*
* @param tp TopicPartition
*
* @return last offset read from the given TopicPartition
*/
public Optional<Long> getLastOffset(final TopicPartition tp) {
if (!partitionState.containsKey(tp)) {
return Optional.empty();
}
return Optional.of(partitionState.get(tp).lastReadOffset.get());
}
private boolean allMessagesAcked(final TopicPartition tp) {
if (!partitionState.containsKey(tp)) {
// no messages, no acks needed
return true;
}
return partitionState.get(tp).unAckedOffsets.size() == 0;
}
public Map<TopicPartition, OffsetAndMetadata> createCheckpoint(final Collection<TopicPartition> partitions) {
if (partitionState.isEmpty()) {
return Collections.emptyMap();
}
final Map<TopicPartition, OffsetAndMetadata> checkpoint = new HashMap<>(partitions.size());
for (TopicPartition tp : partitions) {
while (!allMessagesAcked(tp)) {
try {
waitingForAckCount.increment();
Thread.sleep(checkpointReadyCheckDelayMs);
} catch (InterruptedException e) {
LOGGER.info("thread interrupted when creating checkpoint for {}", tp);
Thread.currentThread().interrupt();
throw new RuntimeException("thread interrupted when creating checkpoint", e);
}
}
final State pState = partitionState.get(tp);
final Optional<Long> lastOffset = Optional.ofNullable(pState != null ? pState.lastReadOffset.get() : null);
if (lastOffset.isPresent() && lastOffset.get() != DEFAULT_LAST_READ_OFFSET) {
checkpoint.put(tp, new OffsetAndMetadata(lastOffset.get() + 1, String.valueOf(System.currentTimeMillis())));
}
}
return checkpoint;
}
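/*
 * Lifecycle sketch using this class's own API (the manager/topic names are illustrative):
 * offsets are first recorded as read, then acked; createCheckpoint blocks until every
 * recorded offset of a partition is acked and proposes lastReadOffset + 1 for commit.
 *
 *   TopicPartition tp = new TopicPartition("clevent", 0);
 *   manager.recordMessageRead(tp, 41L);
 *   manager.recordMessageRead(tp, 42L);
 *   manager.recordMessageAck(tp, 41L);
 *   manager.recordMessageAck(tp, 42L);
 *   // returns {clevent-0 -> OffsetAndMetadata(offset=43)}
 *   Map<TopicPartition, OffsetAndMetadata> cp = manager.createCheckpoint(Collections.singleton(tp));
 */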
/* reset partition counters */
public void resetCounters(final TopicPartition tp) {
if (!partitionState.containsKey(tp)) {
return;
}
partitionState.get(tp).unAckedOffsets.clear();
partitionState.get(tp).lastReadOffset.set(DEFAULT_LAST_READ_OFFSET);
}
/* reset all counters */
public void resetCounters() {
LOGGER.info("resetting all counters");
if (partitionState.isEmpty()) {
return;
}
partitionState.values().stream().forEach(state -> {
state.unAckedOffsets.clear();
state.lastReadOffset.set(DEFAULT_LAST_READ_OFFSET);
});
}
}
| 1,232 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/MantisKafkaConsumer.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.connector.kafka.source.assignor.StaticPartitionAssignor;
import io.mantisrx.connector.kafka.source.assignor.StaticPartitionAssignorImpl;
import io.mantisrx.connector.kafka.source.metrics.ConsumerMetrics;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyFactory;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
import io.mantisrx.connector.kafka.source.checkpoint.trigger.CheckpointTrigger;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategy;
import io.mantisrx.connector.kafka.source.checkpoint.trigger.CheckpointTriggerFactory;
import io.mantisrx.runtime.Context;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Subscription;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
public class MantisKafkaConsumer<S> {
private static final Logger LOGGER = LoggerFactory.getLogger(MantisKafkaConsumer.class);
private final int consumerId;
private final KafkaConsumer<String, byte[]> consumer;
private final CheckpointStrategy<S> strategy;
private final CheckpointTrigger trigger;
private final ConsumerMetrics consumerMetrics;
private final TopicPartitionStateManager partitionStateManager;
private final AtomicLong pollTimestamp = new AtomicLong(System.currentTimeMillis());
private final AtomicLong pollReturnedDataTimestamp = new AtomicLong(System.currentTimeMillis());
private volatile Subscription metricSubscription = null;
public MantisKafkaConsumer(final int consumerId,
final KafkaConsumer<String, byte[]> consumer,
final TopicPartitionStateManager partitionStateManager,
final CheckpointStrategy<S> strategy,
final CheckpointTrigger trigger,
final ConsumerMetrics metrics) {
this.consumerId = consumerId;
this.consumerMetrics = metrics;
this.consumer = consumer;
this.partitionStateManager = partitionStateManager;
this.strategy = strategy;
this.trigger = trigger;
setupMetricPublish();
}
private void setupMetricPublish() {
if (metricSubscription == null) {
this.metricSubscription = Observable.interval(1, TimeUnit.SECONDS).subscribe((tick) -> {
consumerMetrics.recordTimeSinceLastPollMs(timeSinceLastPollMs());
consumerMetrics.recordTimeSinceLastPollWithDataMs(timeSinceLastPollWithDataMs());
});
}
}
public int getConsumerId() {
return consumerId;
}
public KafkaConsumer<String, byte[]> getConsumer() {
return consumer;
}
public CheckpointStrategy<S> getStrategy() {
return strategy;
}
public CheckpointTrigger getTrigger() {
return trigger;
}
public TopicPartitionStateManager getPartitionStateManager() {
return partitionStateManager;
}
public long timeSinceLastPollMs() {
return (System.currentTimeMillis() - pollTimestamp.get());
}
public long timeSinceLastPollWithDataMs() {
return (System.currentTimeMillis() - pollReturnedDataTimestamp.get());
}
public ConsumerMetrics getConsumerMetrics() {
return consumerMetrics;
}
public void close() {
if (metricSubscription != null && !metricSubscription.isUnsubscribed()) {
metricSubscription.unsubscribe();
}
if (trigger.isActive()) {
final Set<TopicPartition> partitions = consumer.assignment();
LOGGER.warn("clearing partition state when closing consumer {}, partitions {}", this.toString(), partitions.toString());
partitions.stream().forEach(tp -> partitionStateManager.resetCounters(tp));
consumer.close();
trigger.shutdown();
}
}
/**
* {@link KafkaConsumer#poll(Duration)}
*/
public ConsumerRecords<String, byte[]> poll(final long consumerPollTimeoutMs) {
final long now = System.currentTimeMillis();
pollTimestamp.set(now);
final ConsumerRecords<String, byte[]> consumerRecords = consumer.poll(Duration.ofMillis(consumerPollTimeoutMs));
if (consumerRecords.count() > 0) {
pollReturnedDataTimestamp.set(now);
}
return consumerRecords;
}
/**
* {@link KafkaConsumer#assignment()}
*/
public Set<TopicPartition> assignment() {
return consumer.assignment();
}
/**
* {@link KafkaConsumer#listTopics()}
*/
public Map<String, List<PartitionInfo>> listTopics() {
return consumer.listTopics();
}
/**
* {@link KafkaConsumer#position(TopicPartition)}
*/
public long position(TopicPartition partition) {
return consumer.position(partition);
}
/**
* {@link KafkaConsumer#seekToBeginning(Collection)}
*/
public void seekToBeginning(TopicPartition... partitions) {
consumer.seekToBeginning(Arrays.asList(partitions));
}
/**
* {@link KafkaConsumer#pause(Collection)}
*/
public void pause(TopicPartition... partitions) {
LOGGER.debug("pausing {} partitions", partitions.length);
consumer.pause(Arrays.asList(partitions));
consumerMetrics.incrementPausePartitionCount();
}
/**
* {@link KafkaConsumer#resume(Collection)}
*/
public void resume(TopicPartition... partitions) {
try {
LOGGER.debug("resuming {} partitions", partitions.length);
consumer.resume(Arrays.asList(partitions));
consumerMetrics.incrementResumePartitionCount();
} catch (IllegalStateException e) {
LOGGER.warn("resuming partitions failed", e);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
MantisKafkaConsumer that = (MantisKafkaConsumer) o;
return consumerId == that.consumerId &&
consumer.equals(that.consumer) &&
strategy.equals(that.strategy) &&
trigger.equals(that.trigger) &&
consumerMetrics.equals(that.consumerMetrics) &&
partitionStateManager.equals(that.partitionStateManager);
}
@Override
public int hashCode() {
return Objects.hash(consumerId, consumer, strategy, trigger, consumerMetrics, partitionStateManager);
}
@Override
public String toString() {
return "MantisKafkaConsumer{" +
"consumerId=" + consumerId +
", consumer=" + consumer +
", strategy=" + strategy +
", trigger=" + trigger +
'}';
}
static class Builder {
private Context context;
private int consumerIndex;
private int totalNumConsumersForJob;
private Registry registry;
private MantisKafkaSourceConfig kafkaSourceConfig;
private static final AtomicInteger consumerId = new AtomicInteger(0);
private final StaticPartitionAssignor staticPartitionAssignor = new StaticPartitionAssignorImpl();
public Builder withContext(Context context) {
this.context = context;
return this;
}
public Builder withKafkaSourceConfig(MantisKafkaSourceConfig kafkaSourceConfig) {
this.kafkaSourceConfig = kafkaSourceConfig;
return this;
}
public Builder withConsumerIndex(int consumerIndex) {
this.consumerIndex = consumerIndex;
return this;
}
public Builder withTotalNumConsumersForJob(int totalNumConsumersForJob) {
this.totalNumConsumersForJob = totalNumConsumersForJob;
return this;
}
public Builder withRegistry(Registry registry) {
this.registry = registry;
return this;
}
private void doStaticPartitionAssignment(final KafkaConsumer<String, byte[]> consumer,
final ConsumerRebalanceListener rebalanceListener,
final int consumerIndex,
final int totalNumConsumers,
final Map<String, Integer> topicPartitionCounts,
final Registry registry) {
if (totalNumConsumers <= 0) {
LOGGER.error("total num consumers {} is invalid", totalNumConsumers);
context.completeAndExit();
return;
}
if (consumerIndex < 0 || consumerIndex >= totalNumConsumers) {
LOGGER.error("consumerIndex {} is invalid (numConsumers: {})", consumerIndex, totalNumConsumers);
context.completeAndExit();
return;
}
final List<TopicPartition> topicPartitions = staticPartitionAssignor.assignPartitionsToConsumer(consumerIndex, topicPartitionCounts, totalNumConsumers);
if (topicPartitions.isEmpty()) {
LOGGER.error("topic partitions to assign list is empty");
throw new RuntimeException("static partition assignment is enabled and no topic partitions were assigned, please check numPartitionsPerTopic job param is set correctly and the job has num(kafka consumer) <= num(partition)");
} else {
LOGGER.info("Statically assigned topic partitions(): {}", topicPartitions);
topicPartitions.forEach(tp ->
registry.gauge("staticPartitionAssigned",
"topic", tp.topic(), "partition", String.valueOf(tp.partition())).set(1.0));
consumer.assign(topicPartitions);
// reuse onPartitionsAssigned() so the consumer can seek to checkpoint'ed offset from offset store
rebalanceListener.onPartitionsAssigned(topicPartitions);
}
}
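/*
 * Illustrative example (the actual split is decided by StaticPartitionAssignorImpl, not
 * by this comment): with totalNumConsumers = 2 and numPartitionsPerTopic "clevent:4",
 * each consumer is statically assigned a disjoint subset of the 4 clevent partitions,
 * e.g. consumer 0 -> {0, 1} and consumer 1 -> {2, 3}, and then seeks to any
 * checkpointed offsets via onPartitionsAssigned().
 */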
public MantisKafkaConsumer<?> build() {
Preconditions.checkNotNull(context, "context");
Preconditions.checkNotNull(kafkaSourceConfig, "kafkaSourceConfig");
Preconditions.checkNotNull(registry, "registry");
Preconditions.checkArg(consumerIndex >= 0, "consumerIndex must be greater than or equal to 0");
Preconditions.checkArg(totalNumConsumersForJob > 0, "total number of consumers for job must be greater than 0");
final int kafkaConsumerId = consumerId.incrementAndGet();
Map<String, Object> consumerProps = kafkaSourceConfig.getConsumerConfig().getConsumerProperties();
final String clientId = String.format("%s-%d-%d", context.getJobId(), context.getWorkerInfo().getWorkerNumber(), kafkaConsumerId);
consumerProps.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
// hard-coding key to String type and value to byte[]
final KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps);
final TopicPartitionStateManager partitionStateManager = new TopicPartitionStateManager(registry, clientId, kafkaSourceConfig.getRetryCheckpointCheckDelayMs());
final ConsumerMetrics metrics = new ConsumerMetrics(registry, kafkaConsumerId, context);
final CheckpointStrategy<?> strategy = CheckpointStrategyFactory.getNewInstance(context, consumer, kafkaSourceConfig.getCheckpointStrategy(), metrics);
if (kafkaSourceConfig.getStaticPartitionAssignmentEnabled()) {
final KafkaConsumerRebalanceListener kafkaConsumerRebalanceListener = new KafkaConsumerRebalanceListener(consumer, partitionStateManager, strategy);
kafkaSourceConfig.getTopicPartitionCounts().ifPresent(topicPartitionCounts -> {
doStaticPartitionAssignment(consumer, kafkaConsumerRebalanceListener, consumerIndex, totalNumConsumersForJob, topicPartitionCounts, registry);
});
} else {
if (kafkaSourceConfig.getCheckpointStrategy() != CheckpointStrategyOptions.NONE) {
consumer.subscribe(kafkaSourceConfig.getTopics(),
new KafkaConsumerRebalanceListener(consumer, partitionStateManager, strategy));
} else {
consumer.subscribe(kafkaSourceConfig.getTopics());
}
}
final CheckpointTrigger trigger = CheckpointTriggerFactory.getNewInstance(kafkaSourceConfig);
return new MantisKafkaConsumer<>(kafkaConsumerId, consumer, partitionStateManager, strategy, trigger, metrics);
}
}
}
| 1,233 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/MantisKafkaSourceConfig.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import com.google.common.base.Splitter;
import io.mantisrx.connector.kafka.source.serde.ParserType;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class MantisKafkaSourceConfig {
private static final Logger LOGGER = LoggerFactory.getLogger(MantisKafkaSourceConfig.class);
public static final int DEFAULT_CONSUMER_POLL_TIMEOUT_MS = 100;
public static final int DEFAULT_RETRY_CHECKPOINT_CHECK_DELAY_MS = 20;
public static final boolean DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN = false;
public static final int CONSUMER_RECORD_OVERHEAD_BYTES = 100;
public static final int DEFAULT_MAX_BYTES_IN_PROCESSING = 128_000_000;
public static final int DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER = 1;
public static final boolean DEFAULT_PARSE_MSG_IN_SOURCE = true;
private final List<String> topics;
private final int numConsumerInstances;
private final int consumerPollTimeoutMs;
private final int maxBytesInProcessing;
private final String messageParserType;
private final String checkpointStrategy;
private final Boolean parseMessageInSource;
private final int retryCheckpointCheckDelayMs;
private final int checkpointIntervalMs;
private final Boolean staticPartitionAssignmentEnabled;
private final Optional<Map<String, Integer>> topicPartitionCounts;
private final MantisKafkaConsumerConfig consumerConfig;
public MantisKafkaSourceConfig(Context context) {
final Parameters parameters = context.getParameters();
final String topicStr = (String) parameters.get(KafkaSourceParameters.TOPIC);
this.topics = Splitter.on(',').trimResults().omitEmptyStrings().splitToList(topicStr);
this.numConsumerInstances = (int) parameters.get(KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER, DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER);
this.consumerPollTimeoutMs = (int) parameters.get(KafkaSourceParameters.CONSUMER_POLL_TIMEOUT_MS, DEFAULT_CONSUMER_POLL_TIMEOUT_MS);
this.maxBytesInProcessing = (int) parameters.get(KafkaSourceParameters.MAX_BYTES_IN_PROCESSING, DEFAULT_MAX_BYTES_IN_PROCESSING);
this.messageParserType = (String) parameters.get(KafkaSourceParameters.PARSER_TYPE, ParserType.SIMPLE_JSON.getPropName());
this.checkpointStrategy = (String) parameters.get(KafkaSourceParameters.CHECKPOINT_STRATEGY, CheckpointStrategyOptions.NONE);
this.parseMessageInSource = (boolean) parameters.get(KafkaSourceParameters.PARSE_MSG_IN_SOURCE, DEFAULT_PARSE_MSG_IN_SOURCE);
this.retryCheckpointCheckDelayMs = (int) parameters.get(KafkaSourceParameters.RETRY_CHECKPOINT_CHECK_DELAY_MS, DEFAULT_RETRY_CHECKPOINT_CHECK_DELAY_MS);
this.checkpointIntervalMs = (int) parameters.get(KafkaSourceParameters.CHECKPOINT_INTERVAL_MS, MantisKafkaConsumerConfig.DEFAULT_CHECKPOINT_INTERVAL_MS);
this.staticPartitionAssignmentEnabled = (boolean) parameters.get(KafkaSourceParameters.ENABLE_STATIC_PARTITION_ASSIGN, DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN);
if (staticPartitionAssignmentEnabled) {
final String topicPartitionsStr = (String) parameters.get(KafkaSourceParameters.TOPIC_PARTITION_COUNTS, "");
this.topicPartitionCounts = Optional.ofNullable(getTopicPartitionCounts(topicPartitionsStr, topics));
} else {
this.topicPartitionCounts = Optional.empty();
}
consumerConfig = new MantisKafkaConsumerConfig(context);
LOGGER.info("checkpointStrategy: {} numConsumerInstances: {} topics: {} consumerPollTimeoutMs: {} retryCheckpointCheckDelayMs {} consumer config: {}",
checkpointStrategy, numConsumerInstances, topics, consumerPollTimeoutMs, retryCheckpointCheckDelayMs, consumerConfig.values().toString());
}
private Map<String, Integer> getTopicPartitionCounts(String topicPartitionsStr, List<String> topicList) {
final List<String> topicPartitionCountList = Splitter.on(',').trimResults().omitEmptyStrings().splitToList(topicPartitionsStr);
final Map<String, Integer> topicPartitionCounts = new HashMap<>();
// parse topic partition counts only if Static partition assignment is enabled
for (String tp : topicPartitionCountList) {
final String[] topicPartitionCount = tp.split(":");
if (topicPartitionCount.length == 2) {
final String topic = topicPartitionCount[0];
if (topicList.contains(topic)) {
topicPartitionCounts.put(topic, Integer.parseInt(topicPartitionCount[1]));
} else {
final String errorMsg = String.format("topic %s specified in Job Parameter '%s' does not match topics specified for Job Parameter '%s'",
topic, KafkaSourceParameters.TOPIC_PARTITION_COUNTS, KafkaSourceParameters.TOPIC);
LOGGER.error(errorMsg);
throw new RuntimeException(errorMsg);
}
} else {
final String errorMsg = String.format("failed to parse topic partition count string %s", tp);
LOGGER.error(errorMsg);
throw new RuntimeException(errorMsg);
}
}
// validate all topics have partition counts specified
final Set<String> partitionCountTopics = topicPartitionCounts.keySet();
if (!partitionCountTopics.containsAll(topicList) ||
!topicList.containsAll(partitionCountTopics)) {
final String errorMsg = String.format("topics '%s' specified for Job Parameter '%s' don't match topics '%s' specified for Job Parameter '%s'",
partitionCountTopics, KafkaSourceParameters.TOPIC_PARTITION_COUNTS, topicList, KafkaSourceParameters.TOPIC);
LOGGER.error(errorMsg);
throw new RuntimeException(errorMsg);
}
LOGGER.info("enableStaticPartitionAssignment: {} [ topic partition counts: {} ]", staticPartitionAssignmentEnabled, topicPartitionCounts);
return topicPartitionCounts;
}
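/*
 * Example of the expected parameter formats (values are illustrative): with the topic
 * parameter set to "nf_errors_log,clevent" and numPartitionsPerTopic set to
 * "nf_errors_log:9,clevent:450", this parser yields {nf_errors_log=9, clevent=450}.
 * A topic present in one parameter but missing from the other fails fast with a
 * RuntimeException at job startup.
 */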
public List<String> getTopics() {
return topics;
}
public int getNumConsumerInstances() {
return numConsumerInstances;
}
public int getConsumerPollTimeoutMs() {
return consumerPollTimeoutMs;
}
public int getMaxBytesInProcessing() {
return maxBytesInProcessing;
}
public String getMessageParserType() {
return messageParserType;
}
public String getCheckpointStrategy() {
return checkpointStrategy;
}
public Boolean getParseMessageInSource() {
return parseMessageInSource;
}
public int getRetryCheckpointCheckDelayMs() {
return retryCheckpointCheckDelayMs;
}
public int getCheckpointIntervalMs() {
return checkpointIntervalMs;
}
public Boolean getStaticPartitionAssignmentEnabled() {
return staticPartitionAssignmentEnabled;
}
public Optional<Map<String, Integer>> getTopicPartitionCounts() {
return topicPartitionCounts;
}
public MantisKafkaConsumerConfig getConsumerConfig() {
return consumerConfig;
}
}
| 1,234 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/MantisKafkaConsumerConfig.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.Parameters;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.RangeAssignor;
import org.apache.kafka.common.metrics.JmxReporter;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utility Class for handling Kafka ConsumerConfig defaults and Job parameter overrides
*/
public class MantisKafkaConsumerConfig extends ConsumerConfig {
private static final Logger LOGGER = LoggerFactory.getLogger(MantisKafkaConsumerConfig.class);
public MantisKafkaConsumerConfig(Map<String, Object> props,
Context context) {
super(applyJobParamOverrides(context, props));
}
public MantisKafkaConsumerConfig(Context context) {
this(defaultProps(), context);
}
public static final String DEFAULT_AUTO_OFFSET_RESET = "latest";
public static final String DEFAULT_AUTO_COMMIT_ENABLED = "false";
public static final String DEFAULT_BOOTSTRAP_SERVERS_CONFIG = "localhost:9092";
public static final int DEFAULT_AUTO_COMMIT_INTERVAL_MS = 5000;
public static final int DEFAULT_HEARTBEAT_INTERVAL_MS = 3000;
public static final int DEFAULT_SESSION_TIMEOUT_MS = 10_000;
public static final int DEFAULT_FETCH_MIN_BYTES = 1024;
public static final int DEFAULT_FETCH_MAX_WAIT_MS = 100;
public static final int DEFAULT_REQUEST_TIMEOUT_MS = 40000;
public static final int DEFAULT_CHECKPOINT_INTERVAL_MS = 5_000;
public static final int DEFAULT_MAX_POLL_INTERVAL_MS = 300_000;
public static final int DEFAULT_MAX_POLL_RECORDS = 500;
public static final int DEFAULT_MAX_PARTITION_FETCH_BYTES = 10_000_000;
public static final int DEFAULT_RECEIVE_BUFFER_BYTES = 32768;
public static final int DEFAULT_SEND_BUFFER_BYTES = 131072;
public static final Class<StringDeserializer> DEFAULT_KEY_DESERIALIZER = StringDeserializer.class;
public static final Class<ByteArrayDeserializer> DEFAULT_VALUE_DESERIALIZER = ByteArrayDeserializer.class;
public static Map<String, Object> defaultProps() {
final Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, DEFAULT_AUTO_COMMIT_ENABLED);
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_AUTO_COMMIT_INTERVAL_MS));
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, DEFAULT_AUTO_OFFSET_RESET);
props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, String.valueOf(DEFAULT_FETCH_MAX_WAIT_MS));
props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, String.valueOf(DEFAULT_FETCH_MIN_BYTES));
props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_HEARTBEAT_INTERVAL_MS));
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, String.valueOf(DEFAULT_SESSION_TIMEOUT_MS));
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, DEFAULT_KEY_DESERIALIZER);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, DEFAULT_VALUE_DESERIALIZER);
props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, String.valueOf(DEFAULT_MAX_PARTITION_FETCH_BYTES));
props.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, String.valueOf(DEFAULT_RECEIVE_BUFFER_BYTES));
props.put(ConsumerConfig.SEND_BUFFER_CONFIG, String.valueOf(DEFAULT_SEND_BUFFER_BYTES));
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, DEFAULT_BOOTSTRAP_SERVERS_CONFIG);
props.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, JmxReporter.class.getName());
props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(DEFAULT_REQUEST_TIMEOUT_MS));
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, String.valueOf(DEFAULT_MAX_POLL_RECORDS));
props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_MAX_POLL_INTERVAL_MS));
props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RangeAssignor.class.getName());
return props;
}
/**
 * Gets the Kafka consumer group ID to use by default.
 *
 * @return default consumer group ID derived from the Mantis Job Id when running in the cloud, otherwise a fallback local consumer group ID
 */
@VisibleForTesting
static String getGroupId() {
String jobId = System.getenv("JOB_ID");
if (jobId != null && !jobId.isEmpty()) {
LOGGER.info("default consumer groupId to {} if not overridden by job param", "mantis-kafka-source-" + jobId);
return "mantis-kafka-source-" + jobId;
}
return "mantis-kafka-source-fallback-consumer-id";
}
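/*
 * Example (hypothetical job id): with the JOB_ID environment variable set to
 * "MyKafkaSourceJob-1", the default consumer group becomes
 * "mantis-kafka-source-MyKafkaSourceJob-1"; without JOB_ID it falls back to
 * "mantis-kafka-source-fallback-consumer-id".
 */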
private static Map<String, Object> applyJobParamOverrides(Context context, Map<String, Object> parsedValues) {
final Parameters parameters = context.getParameters();
if (!parsedValues.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
// set consumerGroupId if not already set
final String consumerGroupId = (String) parameters.get(KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, getGroupId());
parsedValues.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
}
for (String key : configNames()) {
Object value = parameters.get(KafkaSourceParameters.PREFIX + key, null);
if (value != null) {
LOGGER.info("job param override for key {} -> {}", key, value);
parsedValues.put(key, value);
}
}
return parsedValues;
}
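/*
 * Override example (illustrative value): submitting the job with the parameter
 * "kafka.source.consumer.auto.offset.reset" set to "earliest" makes the loop above
 * place auto.offset.reset=earliest into the consumer properties, replacing the
 * DEFAULT_AUTO_OFFSET_RESET of "latest".
 */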
/**
 * Exposes all Kafka consumer configs as Job Parameters so that consumer settings can be overridden at Job submit time.
 *
 * @return parameter definitions for every Kafka consumer config key, each prefixed with {@link KafkaSourceParameters#PREFIX}
 */
public static List<ParameterDefinition<?>> getJobParameterDefinitions() {
List<ParameterDefinition<?>> params = new ArrayList<>();
Map<String, Object> defaultProps = defaultProps();
for (String key : configNames()) {
ParameterDefinition.Builder<String> builder = new StringParameter()
.name(KafkaSourceParameters.PREFIX + key)
.validator(Validators.alwaysPass())
.description(KafkaSourceParameters.PREFIX + key);
if (defaultProps.containsKey(key)) {
Object value = defaultProps.get(key);
if (value instanceof Class) {
builder = builder.defaultValue(((Class) value).getCanonicalName());
} else {
builder = builder.defaultValue((String) value);
}
}
params.add(builder.build());
}
return params;
}
public String getConsumerConfigStr() {
return values().toString();
}
public Map<String, Object> getConsumerProperties() {
return values().entrySet().stream()
.filter(x -> x.getKey() != null && x.getValue() != null)
.collect(Collectors.toMap(x -> x.getKey(),
x -> (Object) x.getValue()));
}
}
| 1,235 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/metrics/ConsumerMetrics.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.metrics;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Tag;
import io.mantisrx.runtime.Context;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
public class ConsumerMetrics {
private static final String METRICS_PREFIX = "MantisKafkaConsumer_";
private static final String METRIC_KAFKA_IN_COUNT = "kafkaInCount";
private static final String METRIC_KAFKA_PROCESSED_COUNT = "kafkaProcessedCount";
private static final String METRIC_KAFKA_ERROR_COUNT = "kafkaErrorCount";
private static final String METRIC_KAFKA_WAIT_FOR_DATA_COUNT = "kafkaWaitForDataCount";
private static final String METRIC_KAFKA_COMMIT_COUNT = "kafkaCommitCount";
private static final String METRIC_CHECKPOINT_DELAY = "checkpointDelay";
private static final String METRIC_PARSE_FAILURE_COUNT = "parseFailureCount";
private static final String METRIC_KAFKA_MSG_VALUE_NULL_COUNT = "kafkaMessageValueNull";
private static final String METRIC_TIME_SINCE_LAST_POLL_MS = "timeSinceLastPollMs";
private static final String METRIC_TIME_SINCE_LAST_POLL_WITH_DATA_MS = "timeSinceLastPollWithDataMs";
private static final String METRIC_KAFKA_PAUSE_PARTITIONS = "kafkaPausePartitions";
private static final String METRIC_KAFKA_RESUME_PARTITIONS = "kafkaResumePartitions";
private final Registry registry;
private final List<Tag> commonTags;
private final Counter kafkaInCount;
private final Counter kafkaProcessedCount;
private final Counter kafkaErrorCount;
private final Counter kafkaWaitForDataCount;
private final Counter kafkaCommitCount;
private final Counter parseFailureCount;
private final Counter kafkaPausePartitions;
private final Counter kafkaResumePartitions;
private final Counter kafkaMsgValueNullCount;
private final Gauge checkpointDelay;
private final Gauge timeSinceLastPollMs;
private final Gauge timeSinceLastPollWithDataMs;
private final ConcurrentMap<TopicPartition, Gauge> committedOffsets = new ConcurrentHashMap<>();
private final ConcurrentMap<TopicPartition, Gauge> readOffsets = new ConcurrentHashMap<>();
public ConsumerMetrics(final Registry registry, final int consumerId, final Context context) {
this.registry = registry;
this.commonTags = createCommonTags(context, consumerId);
this.kafkaErrorCount = registry.counter(createId(METRIC_KAFKA_ERROR_COUNT));
this.kafkaInCount = registry.counter(createId(METRIC_KAFKA_IN_COUNT));
this.kafkaProcessedCount = registry.counter(createId(METRIC_KAFKA_PROCESSED_COUNT));
this.kafkaWaitForDataCount = registry.counter(createId(METRIC_KAFKA_WAIT_FOR_DATA_COUNT));
this.kafkaCommitCount = registry.counter(createId(METRIC_KAFKA_COMMIT_COUNT));
this.checkpointDelay = registry.gauge(createId(METRIC_CHECKPOINT_DELAY));
this.timeSinceLastPollMs = registry.gauge(createId(METRIC_TIME_SINCE_LAST_POLL_MS));
this.timeSinceLastPollWithDataMs = registry.gauge(createId(METRIC_TIME_SINCE_LAST_POLL_WITH_DATA_MS));
this.parseFailureCount = registry.counter(createId(METRIC_PARSE_FAILURE_COUNT));
this.kafkaPausePartitions = registry.counter(createId(METRIC_KAFKA_PAUSE_PARTITIONS));
this.kafkaResumePartitions = registry.counter(createId(METRIC_KAFKA_RESUME_PARTITIONS));
this.kafkaMsgValueNullCount = registry.counter(createId(METRIC_KAFKA_MSG_VALUE_NULL_COUNT));
}
private List<Tag> createCommonTags(final Context context, final int consumerId) {
return Arrays.asList(Tag.of("mantisWorkerNum", Integer.toString(context.getWorkerInfo().getWorkerNumber())),
Tag.of("mantisWorkerIndex", Integer.toString(context.getWorkerInfo().getWorkerIndex())),
Tag.of("mantisJobName", context.getWorkerInfo().getJobClusterName()),
Tag.of("mantisJobId", context.getJobId()),
Tag.of("consumerId", String.valueOf(consumerId)));
}
private Id createId(final String metricName) {
return registry.createId(METRICS_PREFIX + metricName, commonTags);
}
public void recordCheckpointDelay(final long value) {
checkpointDelay.set(value);
}
public void recordTimeSinceLastPollMs(long value) {
timeSinceLastPollMs.set(value);
}
public void recordTimeSinceLastPollWithDataMs(long value) {
timeSinceLastPollWithDataMs.set(value);
}
public void incrementInCount() {
kafkaInCount.increment();
}
public void incrementProcessedCount() {
kafkaProcessedCount.increment();
}
public void incrementErrorCount() {
kafkaErrorCount.increment();
}
public void incrementWaitForDataCount() {
kafkaWaitForDataCount.increment();
}
public void incrementCommitCount() {
kafkaCommitCount.increment();
}
public void incrementParseFailureCount() {
parseFailureCount.increment();
}
public void incrementPausePartitionCount() {
kafkaPausePartitions.increment();
}
public void incrementResumePartitionCount() {
kafkaResumePartitions.increment();
}
public void incrementKafkaMessageValueNullCount() {
kafkaMsgValueNullCount.increment();
}
public void recordCommittedOffset(final Map<TopicPartition, OffsetAndMetadata> checkpoint) {
for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : checkpoint.entrySet()) {
final TopicPartition tp = entry.getKey();
if (!committedOffsets.containsKey(tp)) {
ArrayList<Tag> tags = new ArrayList<>(commonTags);
tags.add(Tag.of("topic", tp.topic()));
tags.add(Tag.of("partition", String.valueOf(tp.partition())));
Gauge gauge = registry.gauge("committedOffsets", tags);
committedOffsets.putIfAbsent(tp, gauge);
}
committedOffsets.get(tp).set(entry.getValue().offset());
}
}
public void recordReadOffset(final TopicPartition tp, final long offset) {
if (!readOffsets.containsKey(tp)) {
ArrayList<Tag> tags = new ArrayList<>(commonTags);
tags.add(Tag.of("topic", tp.topic()));
tags.add(Tag.of("partition", String.valueOf(tp.partition())));
Gauge gauge = registry.gauge("minReadOffsets", tags);
readOffsets.putIfAbsent(tp, gauge);
}
readOffsets.get(tp).set(offset);
}
}
| 1,236 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/trigger/CheckpointTrigger.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.trigger;
public interface CheckpointTrigger {
/**
* Indicates whether a checkpoint should be taken now.
*
* @return true if offsets should be checkpointed now, false otherwise
*/
boolean shouldCheckpoint();
/**
* update internal state based on the provided count (typically the current message size).
*
* @param count amount to add to the accumulated count
*/
void update(int count);
/**
* hook to reset all internal state after a checkpoint is persisted.
*/
void reset();
/**
* true indicates the trigger is in active and valid state.
*/
boolean isActive();
/**
* cleanup resources.
*/
void shutdown();
}
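// Hedged usage sketch (editorial addition, not part of the original source): illustrates the call
// pattern implied by the trigger contract above. The trigger implementation and the idea of a
// batch size in bytes are assumptions made for illustration.
class CheckpointTriggerUsageExample {
    static void afterBatch(CheckpointTrigger trigger, int batchSizeBytes) {
        if (!trigger.isActive()) {
            return; // trigger has been shut down, nothing to do
        }
        // account for the data just processed
        trigger.update(batchSizeBytes);
        if (trigger.shouldCheckpoint()) {
            // persist offsets via a CheckpointStrategy here, then clear the trigger state
            trigger.reset();
        }
    }
}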
| 1,237 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/trigger/CheckpointingDisabledTrigger.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.trigger;
import java.util.concurrent.atomic.AtomicBoolean;
public class CheckpointingDisabledTrigger implements CheckpointTrigger {
private final AtomicBoolean isActive;
public CheckpointingDisabledTrigger() {
this.isActive = new AtomicBoolean(true);
}
@Override
public boolean shouldCheckpoint() {
return false;
}
@Override
public void update(final int count) {
// do nothing
}
@Override
public void reset() {
// do nothing
}
@Override
public boolean isActive() {
return isActive.get();
}
@Override
public void shutdown() {
if (isActive()) {
isActive.compareAndSet(true, false);
}
}
}
| 1,238 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/trigger/CheckpointTriggerFactory.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.trigger;
import io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
public final class CheckpointTriggerFactory {
private CheckpointTriggerFactory() { }
/**
* Factory method to create an instance of {@link CheckpointTrigger}.
* @param kafkaSourceConfig mantis kafka source configuration
* @return {@link CheckpointTrigger} instance based on config
*/
public static CheckpointTrigger getNewInstance(final MantisKafkaSourceConfig kafkaSourceConfig) {
switch (kafkaSourceConfig.getCheckpointStrategy()) {
case CheckpointStrategyOptions.OFFSETS_ONLY_DEFAULT:
case CheckpointStrategyOptions.FILE_BASED_OFFSET_CHECKPOINTING:
return new CountingCheckpointTrigger(kafkaSourceConfig.getMaxBytesInProcessing(), kafkaSourceConfig.getCheckpointIntervalMs());
case CheckpointStrategyOptions.NONE:
default:
return new CheckpointingDisabledTrigger();
}
}
}
| 1,239 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/trigger/CountingCheckpointTrigger.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.trigger;
import rx.Observable;
import rx.Subscription;
import rx.functions.Action1;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Time and count based CheckpointTrigger that triggers a checkpoint either when the accumulated count exceeds the threshold or when the time
* since the last checkpoint exceeds the configured checkpoint trigger interval.
*/
public class CountingCheckpointTrigger implements CheckpointTrigger {
private final int threshold;
private final AtomicInteger counter;
private final AtomicBoolean checkpoint = new AtomicBoolean(false);
private final AtomicBoolean isActive;
private final Subscription checkpointOffsetsTimer;
public CountingCheckpointTrigger(final int threshold, final int triggerIntervalMs) {
this.threshold = threshold;
this.counter = new AtomicInteger(0);
this.isActive = new AtomicBoolean(true);
checkpointOffsetsTimer = Observable.interval(triggerIntervalMs, TimeUnit.MILLISECONDS).subscribe(new Action1<Long>() {
@Override
public void call(Long aLong) {
checkpoint.set(true);
}
});
}
@Override
public boolean shouldCheckpoint() {
return (counter.get() > threshold) || checkpoint.get();
}
@Override
public void update(final int count) {
counter.addAndGet(count);
}
@Override
public void reset() {
counter.set(0);
checkpoint.set(false);
}
@Override
public boolean isActive() {
return isActive.get();
}
@Override
public void shutdown() {
if (isActive()) {
checkpointOffsetsTimer.unsubscribe();
reset();
isActive.compareAndSet(true, false);
}
}
}
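// Hedged usage sketch (editorial addition, not part of the original source): demonstrates the
// count-based condition described in the class javadoc. The 10 MB threshold and 30 second timer
// interval are illustrative values.
class CountingCheckpointTriggerExample {
    public static void main(String[] args) {
        CountingCheckpointTrigger trigger = new CountingCheckpointTrigger(10 * 1024 * 1024, 30_000);
        trigger.update(11 * 1024 * 1024);               // accumulated count now exceeds the threshold
        System.out.println(trigger.shouldCheckpoint()); // true: the count condition fired
        trigger.reset();                                // clear state after offsets are persisted
        System.out.println(trigger.shouldCheckpoint()); // false until the threshold or timer fires again
        trigger.shutdown();
    }
}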
| 1,240 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/FileBasedOffsetCheckpointStrategy.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import com.google.common.base.Strings;
import io.mantisrx.connector.kafka.source.serde.OffsetAndMetadataDeserializer;
import io.mantisrx.connector.kafka.source.serde.OffsetAndMetadataSerializer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mantisrx.runtime.Context;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicReference;
/**
* DO NOT USE IN PRODUCTION. This strategy is created only for unit test purposes and demonstrates using an alternative
* storage backend for committing topic partition offsets.
*/
public class FileBasedOffsetCheckpointStrategy implements CheckpointStrategy<OffsetAndMetadata> {
private static final Logger LOGGER = LoggerFactory.getLogger(FileBasedOffsetCheckpointStrategy.class);
private static final ObjectMapper MAPPER = new ObjectMapper()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
static {
MAPPER.registerModule(new Jdk8Module());
SimpleModule offsetAndMetadataModule = new SimpleModule();
offsetAndMetadataModule.addSerializer(OffsetAndMetadata.class, new OffsetAndMetadataSerializer());
offsetAndMetadataModule.addDeserializer(OffsetAndMetadata.class, new OffsetAndMetadataDeserializer());
MAPPER.registerModule(offsetAndMetadataModule);
}
public static final String DEFAULT_CHECKPOINT_DIR = "/tmp/FileBasedOffsetCheckpointStrategy";
public static final String CHECKPOINT_DIR_PROP = "checkpointDirectory";
private final AtomicReference<String> checkpointDir = new AtomicReference<>(null);
private String filePath(final TopicPartition tp) {
return checkpointDir.get() + "/" + tp.topic().concat("-").concat(String.valueOf(tp.partition()));
}
@Override
public void init(final Context context) {
String checkptDir = (String) context.getParameters().get(CHECKPOINT_DIR_PROP);
checkpointDir.compareAndSet(null, checkptDir);
createDirectoryIfDoesNotExist(checkpointDir.get());
}
@Override
public boolean persistCheckpoint(Map<TopicPartition, OffsetAndMetadata> checkpoint) {
for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : checkpoint.entrySet()) {
final TopicPartition tp = entry.getKey();
final Path filePath = Paths.get(filePath(tp));
try {
if (Files.notExists(filePath)) {
LOGGER.info("file {} does not exist, creating one", filePath);
Files.createFile(filePath);
}
Files.write(filePath, Collections.singletonList(MAPPER.writeValueAsString(entry.getValue())));
} catch (IOException e) {
LOGGER.error("error writing checkpoint {} to file {}", entry.getValue(), filePath, e);
throw new RuntimeException(e);
}
}
return true;
}
@Override
public Optional<OffsetAndMetadata> loadCheckpoint(TopicPartition tp) {
try {
final List<String> lines = Files.readAllLines(Paths.get(filePath(tp)));
if (!lines.isEmpty()) {
final String checkpointString = lines.get(0);
LOGGER.info("read from file {}", checkpointString);
return Optional.ofNullable(MAPPER.readValue(checkpointString, OffsetAndMetadata.class));
}
} catch (IOException e) {
LOGGER.error("error loading checkpoint from file {}", filePath(tp), e);
}
return Optional.empty();
}
@Override
public void init(Map<String, String> properties) {
if (!properties.containsKey(CHECKPOINT_DIR_PROP) || Strings.isNullOrEmpty(properties.get(CHECKPOINT_DIR_PROP))) {
throw new IllegalArgumentException("missing required property " + CHECKPOINT_DIR_PROP);
}
String checkptDir = properties.get(CHECKPOINT_DIR_PROP);
checkpointDir.compareAndSet(null, checkptDir);
createDirectoryIfDoesNotExist(checkpointDir.get());
}
private void createDirectoryIfDoesNotExist(String dir) {
if (Files.notExists(Paths.get(dir))) {
LOGGER.info("file {} does not exist, creating one", dir);
try {
Files.createDirectory(Paths.get(dir));
} catch (IOException e) {
LOGGER.error("failed to create checkpoint directory {}", dir);
throw new RuntimeException(e);
}
}
}
@Override
public Map<TopicPartition, Optional<OffsetAndMetadata>> loadCheckpoints(
List<TopicPartition> tpList) {
Map<TopicPartition, Optional<OffsetAndMetadata>> tpChkMap = new HashMap<>();
for (TopicPartition tp : tpList) {
tpChkMap.put(tp, loadCheckpoint(tp));
}
return tpChkMap;
}
@Override
public String type() {
return CheckpointStrategyOptions.FILE_BASED_OFFSET_CHECKPOINTING;
}
}
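// Hedged usage sketch (editorial addition, not part of the original source): exercises the
// file-backed strategy the way a unit test might, using the default /tmp checkpoint directory.
// The topic name "request-events", partition 0 and offset 42 are illustrative values.
class FileBasedOffsetCheckpointStrategyExample {
    public static void main(String[] args) {
        FileBasedOffsetCheckpointStrategy strategy = new FileBasedOffsetCheckpointStrategy();
        Map<String, String> props = new HashMap<>();
        props.put(FileBasedOffsetCheckpointStrategy.CHECKPOINT_DIR_PROP,
                  FileBasedOffsetCheckpointStrategy.DEFAULT_CHECKPOINT_DIR);
        strategy.init(props);
        TopicPartition tp = new TopicPartition("request-events", 0);
        // write the offset to a per-topic-partition file, then read it back
        strategy.persistCheckpoint(Collections.singletonMap(tp, new OffsetAndMetadata(42L, "")));
        Optional<OffsetAndMetadata> restored = strategy.loadCheckpoint(tp);
        restored.ifPresent(oam -> System.out.println("restored offset " + oam.offset()));
    }
}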
| 1,241 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/NoopCheckpointStrategy.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import org.apache.kafka.common.TopicPartition;
import io.mantisrx.runtime.Context;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
public class NoopCheckpointStrategy implements CheckpointStrategy<Void> {
@Override
public void init(Map<String, String> properties) {
}
@Override
public boolean persistCheckpoint(Map<TopicPartition, Void> checkpoint) {
return true;
}
@Override
public Optional<Void> loadCheckpoint(TopicPartition tp) {
return Optional.empty();
}
@Override
public void init(Context context) {
// no-op
}
@Override
public Map<TopicPartition, Optional<Void>> loadCheckpoints(
List<TopicPartition> tpList) {
return Collections.emptyMap();
}
@Override
public String type() {
return CheckpointStrategyOptions.NONE;
}
}
| 1,242 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/CheckpointStrategyOptions.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
public final class CheckpointStrategyOptions {
/**
* Leverages Kafka for committing offsets.
*/
public static final String OFFSETS_ONLY_DEFAULT = "offsetsOnlyDefaultKafka";
/**
* Sample strategy for storing Offsets outside Kafka to a File based storage, this is only used for Unit testing.
*/
public static final String FILE_BASED_OFFSET_CHECKPOINTING = "fileBasedOffsetCheckpointing";
/**
* Default CheckpointStrategy that disables committing offsets. Note this forgoes at-least-once semantics, as
* offsets are no longer committed and so cannot be resumed from after a worker/process failure.
*/
public static final String NONE = "disableCheckpointing";
private CheckpointStrategyOptions() {
}
public static String values() {
return OFFSETS_ONLY_DEFAULT + ", " + FILE_BASED_OFFSET_CHECKPOINTING + ", " + NONE;
}
}
| 1,243 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/KafkaOffsetCheckpointStrategy.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import io.mantisrx.connector.kafka.source.metrics.ConsumerMetrics;
import org.apache.kafka.clients.consumer.InvalidOffsetException;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mantisrx.runtime.Context;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
/**
* Leverages the default Kafka facilities to commit offsets to Kafka using {@link KafkaConsumer#commitSync(Map) commitSync(Map)}.
*/
public class KafkaOffsetCheckpointStrategy implements CheckpointStrategy<OffsetAndMetadata> {
private static Logger logger = LoggerFactory.getLogger(KafkaOffsetCheckpointStrategy.class);
private final KafkaConsumer<?, ?> consumer;
private final ConsumerMetrics consumerMetrics;
public KafkaOffsetCheckpointStrategy(KafkaConsumer<?, ?> consumer, ConsumerMetrics metrics) {
this.consumer = consumer;
this.consumerMetrics = metrics;
}
@Override
public void init(Map<String, String> properties) {
}
@Override
public boolean persistCheckpoint(final Map<TopicPartition, OffsetAndMetadata> checkpoint) {
if (!checkpoint.isEmpty()) {
try {
logger.debug("committing offsets {}", checkpoint.toString());
consumer.commitSync(checkpoint);
consumerMetrics.recordCommittedOffset(checkpoint);
} catch (InvalidOffsetException ioe) {
logger.warn("failed to commit offsets " + checkpoint.toString() + " will seek to beginning", ioe);
final Set<TopicPartition> topicPartitionSet = ioe.partitions();
for (TopicPartition tp : topicPartitionSet) {
logger.info("partition " + tp.toString() + " consumer position " + consumer.position(tp));
}
consumer.seekToBeginning(ioe.partitions());
} catch (KafkaException cfe) {
// should not be retried
logger.warn("unrecoverable exception on commit offsets " + checkpoint.toString(), cfe);
return false;
}
}
return true;
}
@Override
public Optional<OffsetAndMetadata> loadCheckpoint(TopicPartition tp) {
logger.trace("rely on default kafka protocol to seek to last committed offset");
return Optional.empty();
}
@Override
public void init(Context context) {
// no-op
}
@Override
public Map<TopicPartition, Optional<OffsetAndMetadata>> loadCheckpoints(List<TopicPartition> tpList) {
Map<TopicPartition, Optional<OffsetAndMetadata>> mp = new HashMap<>();
for (TopicPartition tp : tpList) {
mp.put(tp, loadCheckpoint(tp));
}
return mp;
}
@Override
public String type() {
return CheckpointStrategyOptions.OFFSETS_ONLY_DEFAULT;
}
}
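// Hedged usage sketch (editorial addition, not part of the original source): shows the commit call
// pattern after a poll/process cycle. The consumer, metrics and the per-partition offsets that were
// fully processed are assumed to be supplied by the surrounding source implementation.
class KafkaOffsetCheckpointStrategyExample {
    static void commitAfterBatch(KafkaConsumer<?, ?> consumer,
                                 ConsumerMetrics metrics,
                                 Map<TopicPartition, OffsetAndMetadata> processedOffsets) {
        KafkaOffsetCheckpointStrategy strategy = new KafkaOffsetCheckpointStrategy(consumer, metrics);
        // commitSync the offsets; a false return signals an unrecoverable KafkaException was logged
        boolean committed = strategy.persistCheckpoint(processedOffsets);
        System.out.println("offsets committed: " + committed);
    }
}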
| 1,244 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/CheckpointStrategyFactory.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import io.mantisrx.connector.kafka.source.metrics.ConsumerMetrics;
import io.mantisrx.runtime.Context;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public final class CheckpointStrategyFactory {
private CheckpointStrategyFactory() { }
private static final Logger LOGGER = LoggerFactory.getLogger(CheckpointStrategyFactory.class);
/**
* Factory method to create an instance of {@link CheckpointStrategy}.
* @param context Mantis runtime context
* @param consumer Kafka consumer
* @param strategy checkpoint strategy string
* @param metrics consumer metrics
* @return instance of {@link CheckpointStrategy}
*/
public static CheckpointStrategy<?> getNewInstance(final Context context,
final KafkaConsumer<?, ?> consumer,
final String strategy,
final ConsumerMetrics metrics) {
switch (strategy) {
case CheckpointStrategyOptions.OFFSETS_ONLY_DEFAULT:
final KafkaOffsetCheckpointStrategy cs = new KafkaOffsetCheckpointStrategy(consumer, metrics);
cs.init(context);
return cs;
case CheckpointStrategyOptions.FILE_BASED_OFFSET_CHECKPOINTING:
final FileBasedOffsetCheckpointStrategy fcs = new FileBasedOffsetCheckpointStrategy();
LOGGER.info("initializing file checkpoint strategy");
fcs.init(context);
return fcs;
case CheckpointStrategyOptions.NONE:
default:
return new NoopCheckpointStrategy();
}
}
}
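// Hedged usage sketch (editorial addition, not part of the original source): shows how a source
// worker might obtain a strategy for a given option; the context, consumer and metrics are assumed
// to come from the surrounding Mantis Kafka source implementation.
class CheckpointStrategyFactoryExample {
    static CheckpointStrategy<?> strategyFor(Context context,
                                             KafkaConsumer<?, ?> consumer,
                                             ConsumerMetrics metrics) {
        // the strategy string normally comes from the source config; a constant is used here
        return CheckpointStrategyFactory.getNewInstance(
                context, consumer, CheckpointStrategyOptions.OFFSETS_ONLY_DEFAULT, metrics);
    }
}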
| 1,245 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/CheckpointStrategy.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import org.apache.kafka.common.TopicPartition;
import io.mantisrx.runtime.Context;
import java.util.List;
import java.util.Map;
import java.util.Optional;
public interface CheckpointStrategy<S> {
/**
* initialization when creating the strategy, using the Mantis runtime context.
*/
void init(Context context);
/**
* initialization when creating the strategy, using explicitly provided properties.
*/
void init(Map<String, String> initParams);
/**
* persist checkpoint state by TopicPartition.
*
* @param checkpoint map of per-partition checkpoint state to persist
* @return true on persist success, false otherwise
*/
boolean persistCheckpoint(Map<TopicPartition, S> checkpoint);
/**
* return the persisted checkpoint state for topic-partition (if exists).
*
* @param tp topic-partition
*
* @return CheckpointState if persisted, else empty Optional
*/
Optional<S> loadCheckpoint(TopicPartition tp);
/**
* Bulk API to load checkpoints.
*
* @param tpList list of TopicPartitions to load checkpoint state for
* @return map from each TopicPartition to its persisted checkpoint state, or an empty Optional if none exists
*/
Map<TopicPartition, Optional<S>> loadCheckpoints(List<TopicPartition> tpList);
/**
* Get checkpoint strategy type, one of {@link CheckpointStrategyOptions}
* @return {@link CheckpointStrategyOptions checkpointStrategy} implemented
*/
String type();
}
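// Hedged usage sketch (editorial addition, not part of the original source): shows the
// load-then-persist lifecycle implied by the contract above, for an arbitrary checkpoint state
// type S supplied by a concrete strategy.
class CheckpointStrategyUsageExample {
    static <S> void restoreAndPersist(CheckpointStrategy<S> strategy,
                                      List<TopicPartition> assignedPartitions,
                                      Map<TopicPartition, S> latestState) {
        // on start-up: look up any previously persisted state for the assigned partitions
        Map<TopicPartition, Optional<S>> restored = strategy.loadCheckpoints(assignedPartitions);
        restored.forEach((tp, state) ->
                state.ifPresent(s -> System.out.println("resuming " + tp + " from " + s)));
        // periodically: persist the newest per-partition state
        strategy.persistCheckpoint(latestState);
    }
}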
| 1,246 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/assignor/StaticPartitionAssignorImpl.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.assignor;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* Invoked during initialization of the KafkaSource if static partitioning is enabled.
*/
public class StaticPartitionAssignorImpl implements StaticPartitionAssignor {
private static final Logger LOGGER = LoggerFactory.getLogger(StaticPartitionAssignorImpl.class);
/**
* Does a simple round-robin assignment of each topic/partition combination across the list of consumers.
* Returns only the assignments for the current consumer.
*
* @param consumerIndex current worker's consumer index
* @param topicPartitionCounts map of topic -> number of partitions
* @param totalNumConsumers total number of consumers
*
* @return list of TopicPartitions assigned to the consumer at consumerIndex
*/
@Override
public List<TopicPartition> assignPartitionsToConsumer(int consumerIndex, Map<String, Integer> topicPartitionCounts, int totalNumConsumers) {
Objects.requireNonNull(topicPartitionCounts, "TopicPartitionCount Map cannot be null");
if (consumerIndex < 0) {
throw new IllegalArgumentException("Consumer Index cannot be negative " + consumerIndex);
}
if (totalNumConsumers < 0) {
throw new IllegalArgumentException("Total Number of consumers cannot be negative " + totalNumConsumers);
}
if (consumerIndex >= totalNumConsumers) {
throw new IllegalArgumentException("Consumer Index " + consumerIndex + " cannot be greater than or equal to Total Number of consumers " + totalNumConsumers);
}
List<TopicPartition> topicPartitions = new ArrayList<>();
int currConsumer = 0;
for (Map.Entry<String, Integer> topicPartitionCount : topicPartitionCounts.entrySet()) {
final String topic = topicPartitionCount.getKey();
final Integer numPartitions = topicPartitionCount.getValue();
if (numPartitions <= 0) {
LOGGER.warn("Number of partitions is " + numPartitions + " for Topic " + topic + " skipping");
continue;
}
for (int i = 0; i < numPartitions; i++) {
if (currConsumer == totalNumConsumers) {
currConsumer = 0;
}
if (currConsumer == consumerIndex) {
topicPartitions.add(new TopicPartition(topic, i));
}
currConsumer++;
}
}
return topicPartitions;
}
}
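// Hedged usage sketch (editorial addition, not part of the original source): two topics with 3 and
// 2 partitions spread round-robin across 2 consumers; consumer index 0 receives topicA-0, topicA-2
// and topicB-1. The topic names are illustrative values.
class StaticPartitionAssignorExample {
    public static void main(String[] args) {
        Map<String, Integer> topicPartitionCounts = new java.util.LinkedHashMap<>();
        topicPartitionCounts.put("topicA", 3);
        topicPartitionCounts.put("topicB", 2);
        List<TopicPartition> assigned =
                new StaticPartitionAssignorImpl().assignPartitionsToConsumer(0, topicPartitionCounts, 2);
        // LinkedHashMap keeps the iteration order deterministic for this illustration
        assigned.forEach(tp -> System.out.println(tp));
    }
}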
| 1,247 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/assignor/StaticPartitionAssignor.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.assignor;
import org.apache.kafka.common.TopicPartition;
import java.util.List;
import java.util.Map;
public interface StaticPartitionAssignor {
List<TopicPartition> assignPartitionsToConsumer(int consumerIndex,
Map<String, Integer> topicPartitionCounts,
int totalNumConsumers);
}
| 1,248 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/MapDeserializerBase.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import org.apache.kafka.common.serialization.Deserializer;
import java.util.Map;
public abstract class MapDeserializerBase implements Parser, Deserializer<Map<String, Object>> {
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
}
@Override
public Map<String, Object> deserialize(String topic, byte[] data) {
if (data == null)
return null;
else if (canParse(data))
return parseMessage(data);
else throw new UnsupportedOperationException("Message cannot be deserialized with parser");
}
@Override
public void close() {
}
}
| 1,249 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/Parser.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
public interface Parser {
/**
* Determine if the payload byte array is parsable.
*
* @param message
*
* @return boolean indicating whether the payload is parsable
*/
boolean canParse(byte[] message);
/**
* parse a payload byte array into a map.
*
* @param message
*
* @return map
*
* @throws ParseException
*/
Map<String, Object> parseMessage(byte[] message) throws ParseException;
/**
* Returns a partial, human-readable rendering of the payload for logging. The default
* implementation decodes the payload as UTF-8 and truncates it to 128 characters; implementations
* may override this, for example to fall back to Base64 when the payload is not valid text.
*
* @param payload raw message payload
*
* @return string message suitable for logging
*/
default String getPartialPayLoadForLogging(byte[] payload) {
String msg = new String(payload, StandardCharsets.UTF_8);
return msg.length() <= 128 ? msg : msg.substring(0, 127);
}
}
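// Hedged usage sketch (editorial addition, not part of the original source): runs a raw JSON
// payload through the parse contract above using the SimpleJsonDeserializer from this package.
// The payload contents are illustrative values.
class ParserUsageExample {
    public static void main(String[] args) {
        byte[] payload = "{\"country\":\"US\",\"status\":200}".getBytes(StandardCharsets.UTF_8);
        Parser parser = ParserType.SIMPLE_JSON.getParser();
        if (parser.canParse(payload)) {
            Map<String, Object> event = parser.parseMessage(payload);
            System.out.println(event.get("country") + " -> " + event.get("status"));
        }
    }
}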
| 1,250 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/OffsetAndMetadataDeserializer.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonDeserializer;
import com.fasterxml.jackson.databind.JsonNode;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import java.io.IOException;
public class OffsetAndMetadataDeserializer extends JsonDeserializer<OffsetAndMetadata> {
@Override
public OffsetAndMetadata deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
final JsonNode node = p.getCodec().readTree(p);
final long offset = node.get("offset").longValue();
final String metadata = node.get("metadata").textValue();
return new OffsetAndMetadata(offset, metadata);
}
}
| 1,251 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/ParseException.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
public class ParseException extends RuntimeException {
/**
* generated serial version id
*/
private static final long serialVersionUID = 7066656417880807188L;
public ParseException(String message) {
super(message);
}
public ParseException(Throwable cause) {
super(cause);
}
public ParseException(String message, Throwable cause) {
super(message, cause);
}
}
| 1,252 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/SimpleJsonDeserializer.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
public class SimpleJsonDeserializer extends MapDeserializerBase {
private final static Logger LOGGER = LoggerFactory.getLogger(SimpleJsonDeserializer.class);
private final ObjectMapper jsonMapper = new ObjectMapper();
private final com.fasterxml.jackson.core.type.TypeReference<Map<String, Object>> typeRef =
new com.fasterxml.jackson.core.type.TypeReference<Map<String, Object>>() {};
@Override
public boolean canParse(byte[] message) {
// there is no easy way to pre-determine whether the JSON is valid without actually parsing it (unlike a chaski format message),
// so we always assume the message can be parsed and move on to the deserialization phase
return true;
}
@Override
public Map<String, Object> parseMessage(byte[] message) throws ParseException {
Map<String, Object> result;
try {
result = jsonMapper.readValue(message, typeRef);
} catch (Exception ex) {
LOGGER.error("Json parser failed to parse message! PAYLOAD:" + getPartialPayLoadForLogging(message), ex);
throw new ParseException("Json not able to parse raw message", ex);
}
return result;
}
}
| 1,253 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/ParserType.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
/**
* Parser types supported for Kafka message payloads.
*/
public enum ParserType {
SIMPLE_JSON("simplejson", new SimpleJsonDeserializer());
private String propName;
private Parser parser;
ParserType(String propName, Parser parserInstance) {
this.propName = propName;
this.parser = parserInstance;
}
public String getPropName() {
return propName;
}
public Parser getParser() {
return parser;
}
public boolean equalsName(String otherName) {
return (otherName != null) && propName.equals(otherName);
}
@Override
public String toString() {
return this.propName;
}
public static ParserType parser(String parserType) {
if ("simplejson".equals(parserType)) {
return SIMPLE_JSON;
} else {
throw new IllegalArgumentException("Invalid parser type");
}
}
}
| 1,254 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/serde/OffsetAndMetadataSerializer.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.serde;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.SerializerProvider;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import java.io.IOException;
public class OffsetAndMetadataSerializer extends JsonSerializer<OffsetAndMetadata> {
@Override
public void serialize(OffsetAndMetadata oam, JsonGenerator gen, SerializerProvider serializers) throws IOException {
gen.writeStartObject();
gen.writeNumberField("offset", oam.offset());
gen.writeStringField("metadata", oam.metadata());
gen.writeEndObject();
}
}
| 1,255 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/sink/KafkaSinkJobParameters.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
public class KafkaSinkJobParameters {
public static final String PREFIX = "kafka.sink.producer.";
public static final String TOPIC = PREFIX + "topic";
}
| 1,256 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/sink/KafkaSink.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import com.netflix.spectator.api.Registry;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.Parameters;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.sink.SelfDocumentingSink;
import rx.Observable;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
public class KafkaSink<T> implements SelfDocumentingSink<T> {
private static final Logger logger = LoggerFactory.getLogger(KafkaSink.class);
private final Func1<T, byte[]> encoder;
private final Registry registry;
private final AtomicReference<KafkaProducer<byte[], byte[]>> kafkaProducerAtomicRef = new AtomicReference<>(null);
KafkaSink(Registry registry, Func1<T, byte[]> encoder) {
this.encoder = encoder;
this.registry = registry;
}
@Override
public void call(Context context, PortRequest ignore, Observable<T> dataO) {
if (kafkaProducerAtomicRef.get() == null) {
MantisKafkaProducerConfig mantisKafkaProducerConfig = new MantisKafkaProducerConfig(context);
Map<String, Object> producerProperties = mantisKafkaProducerConfig.getProducerProperties();
KafkaProducer<byte[], byte[]> kafkaProducer = new KafkaProducer<>(producerProperties);
kafkaProducerAtomicRef.compareAndSet(null, kafkaProducer);
logger.info("Kafka Producer initialized");
}
KafkaProducer<byte[], byte[]> kafkaProducer = kafkaProducerAtomicRef.get();
Parameters parameters = context.getParameters();
String topic = (String)parameters.get(KafkaSinkJobParameters.TOPIC);
dataO.map(encoder::call)
.flatMap((dataBytes) ->
Observable.from(kafkaProducer.send(new ProducerRecord<>(topic, dataBytes)))
.subscribeOn(Schedulers.io()))
.subscribe();
}
@Override
public List<ParameterDefinition<?>> getParameters() {
final List<ParameterDefinition<?>> params = new ArrayList<>();
params.add(new StringParameter()
.name(KafkaSinkJobParameters.TOPIC)
.description("Kafka topic to write to")
.validator(Validators.notNullOrEmpty())
.required()
.build());
params.addAll(MantisKafkaProducerConfig.getJobParameterDefinitions());
return params;
}
@Override
public Metadata metadata() {
StringBuilder description = new StringBuilder();
description.append("Writes the output of the job into the configured Kafka topic");
return new Metadata.Builder()
.name("Mantis Kafka Sink")
.description(description.toString())
.build();
}
}
| 1,257 |
0 |
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka
|
Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/sink/MantisKafkaProducerConfig.java
|
/*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import org.apache.kafka.clients.producer.ProducerConfig;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import org.apache.kafka.common.metrics.JmxReporter;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class MantisKafkaProducerConfig extends ProducerConfig {
private static final Logger LOGGER = LoggerFactory.getLogger(MantisKafkaProducerConfig.class);
public static final String DEFAULT_BOOTSTRAP_SERVERS_CONFIG = "localhost:9092";
public static final String DEFAULT_ACKS_CONFIG = "all";
public static final int DEFAULT_RETRIES_CONFIG = 1;
public MantisKafkaProducerConfig(Map<String, Object> props,
Context context) {
super(applyJobParamOverrides(context, props));
}
public MantisKafkaProducerConfig(Context context) {
this(defaultProps(), context);
}
@Override
protected Map<String, Object> postProcessParsedConfig(Map<String, Object> parsedValues) {
return super.postProcessParsedConfig(parsedValues);
}
public static Map<String, Object> defaultProps() {
final Map<String, Object> props = new HashMap<>();
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, DEFAULT_BOOTSTRAP_SERVERS_CONFIG);
props.put(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, JmxReporter.class.getName());
props.put(ProducerConfig.ACKS_CONFIG, DEFAULT_ACKS_CONFIG);
props.put(ProducerConfig.RETRIES_CONFIG, DEFAULT_RETRIES_CONFIG);
return props;
}
private static Map<String, Object> applyJobParamOverrides(Context context, Map<String, Object> parsedValues) {
final Parameters parameters = context.getParameters();
Map<String, Object> defaultProps = defaultProps();
for (String key : configNames()) {
Object value = parameters.get(KafkaSinkJobParameters.PREFIX + key, null);
if (value != null) {
LOGGER.info("job param override for key {} -> {}", key, value);
parsedValues.put(key, value);
}
}
final String bootstrapBrokers = (String) parameters.get(KafkaSinkJobParameters.PREFIX + ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, defaultProps.get(BOOTSTRAP_SERVERS_CONFIG));
parsedValues.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
final String clientId = (String) parameters.get(KafkaSinkJobParameters.PREFIX + ProducerConfig.CLIENT_ID_CONFIG, context.getJobId());
parsedValues.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
return parsedValues;
}
public Map<String, Object> getProducerProperties() {
return values().entrySet().stream()
.filter(x -> x.getKey() != null && x.getValue() != null)
.collect(Collectors.toMap(x -> x.getKey(),
x -> (Object) x.getValue()));
}
/**
* Helper method to expose all Kafka Producer configs as Job Parameters, allowing Kafka producer config settings to be overridden at Job submit time.
*
* @return list of parameter definitions, one per Kafka producer config key
*/
public static List<ParameterDefinition<?>> getJobParameterDefinitions() {
List<ParameterDefinition<?>> params = new ArrayList<>();
Map<String, Object> defaultProps = defaultProps();
for (String key : configNames()) {
ParameterDefinition.Builder<String> builder = new StringParameter()
.name(KafkaSinkJobParameters.PREFIX + key)
.validator(Validators.alwaysPass())
.description(KafkaSinkJobParameters.PREFIX + key);
if (defaultProps.containsKey(key)) {
Object value = defaultProps.get(key);
if (value instanceof Class) {
builder = builder.defaultValue(((Class) value).getCanonicalName());
} else {
builder = builder.defaultValue((String) value);
}
}
params.add(builder.build());
}
return params;
}
}
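// Hedged usage sketch (editorial addition, not part of the original source): prints the built-in
// defaults that every producer parameter starts from before job-parameter overrides are applied.
class MantisKafkaProducerConfigExample {
    public static void main(String[] args) {
        Map<String, Object> defaults = MantisKafkaProducerConfig.defaultProps();
        // e.g. bootstrap.servers=localhost:9092, acks=all, retries=1
        defaults.forEach((key, value) -> System.out.println(key + " = " + value));
    }
}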
| 1,258 |
0 |
Create_ds/porting-advisor-for-graviton/sample-projects
|
Create_ds/porting-advisor-for-graviton/sample-projects/java-samples/main.java
|
class Main {
public static void main(String[] args) {
System.out.println("Hello World");
}
}
| 1,259 |
0 |
Create_ds/porting-advisor-for-graviton/sample-projects
|
Create_ds/porting-advisor-for-graviton/sample-projects/java-samples/submain.java
|
class Submain {
public static void main(String[] args) {
System.out.println("Hello World 2");
}
}
| 1,260 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/test/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/test/java/com/amazonaws/labs/GravitonReadyAssessor/ConfigFileTest.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
import com.fasterxml.jackson.core.JsonProcessingException;
import org.junit.Test;
import static org.junit.Assert.*;
import org.osgi.framework.Version;
import org.osgi.framework.VersionRange;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Date;
public class ConfigFileTest {
@Test
public void shouldLoadConfigFile() {
try {
URL url = new URL("file:src/test/files/config.json");
Config c = Config.fromURL(url);
System.out.println(c);
} catch (IOException e) {
fail(e.toString());
}
}
@Test
public void shouldPrintJSON() {
try {
Config c = generateConfig();
System.out.println(c.toJson());
} catch(JsonProcessingException e) {
fail(e.toString());
}
}
@Test
public void shouldSerializeDeserialize() {
try {
Config c1 = generateConfig();
String json = c1.toJson();
Config c2 = Config.fromJson(json);
assertEquals(c1, c2);
} catch(JsonProcessingException e) {
fail(e.toString());
}
}
@Test
public void versionInRange() {
Config config = generateConfig();
assert(config.getClassInfos().size() == 1);
ClassInfo info = config.getClassInfos().get(0);
// TODO
return;
}
private Config generateConfig() {
try {
ClassInfo i = ClassInfo.builder()
.implementationTitle("ImplementationTitle")
.implementationVendor("ImplementationVendor")
.implementationVersionRange(
new VersionRange(
VersionRange.LEFT_CLOSED, new Version(1, 0, 0),
new Version(2, 0, 0), VersionRange.RIGHT_OPEN)
)
.specificationTitle("SpecificationTitle")
.specificationVendor("SpecificationVendor")
.specificationVersionRange(
new VersionRange(
VersionRange.LEFT_CLOSED, new Version(1, 0, 0),
new Version(2, 0, 0), VersionRange.RIGHT_OPEN)
)
.description("Description goes here")
.status("OK")
.url(new URL("http://example.com"))
.lastUpdated(new Date())
.build();
return Config.builder()
.classInfo(i)
.build();
} catch (MalformedURLException e) {
fail(e.toString());
return null;
}
}
}
| 1,261 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/test/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/test/java/com/amazonaws/labs/GravitonReadyAssessor/AppTest.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
/**
* Unit test for simple App.
*/
public class AppTest
{
/**
* Rigorous Test :-)
*/
@Test
public void shouldAnswerWithTrue()
{
assertTrue( true );
}
}
| 1,262 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs/GravitonReadyAssessor/NativeCodeManifest.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
import lombok.Getter;
import lombok.NonNull;
import java.io.IOException;
import java.util.Arrays;
import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
import java.util.List;
import java.util.stream.Collectors;
/**
* <p>A native code bundle JAR manifest entry.</p>
*
* <p>JAR files have
* <a href="https://docs.oracle.com/javase/tutorial/deployment/jar/manifestindex.html">manifests</a>
 * in them, which contain various metadata. These metadata are known as
* <a href="https://docs.oracle.com/javase/tutorial/deployment/jar/secman.html">attributes</a>. Some
* JAR files have a <code>Bundle-NativeCode</code> attribute in them that indicates where native code
* can be found. The format of this attribute's value is defined by the OSGI Framework and is
* documented <a href="http://docs.osgi.org/specification/osgi.core/7.0.0/framework.module.html#framework.module-loading.native.code.libraries">here</a>.</p>
*/
public class NativeCodeManifest {
final static String BundleNativeCode = "Bundle-NativeCode";
@Getter
private List<NativeCodeManifestRecord> records;
/**
* Constructs a NativeCodeManifest from a JarFile object.
* @param jarFile the JarFile
* @return the NativeCodeManifest
* @throws IOException
*/
public static NativeCodeManifest fromJarFile(@NonNull JarFile jarFile) throws IOException {
Manifest manifest = jarFile.getManifest();
Attributes attrs = manifest.getMainAttributes();
String bundleNativeCode = attrs.getValue(BundleNativeCode);
if (bundleNativeCode == null) return null;
return fromString(bundleNativeCode);
}
/**
 * Constructs a NativeCodeManifest from the raw value of a Bundle-NativeCode attribute.
* @param attributeValue the value of the Bundle-NativeCode Manifest attribute
* @return the NativeCodeManifest
*/
private static NativeCodeManifest fromString(@NonNull String attributeValue) {
NativeCodeManifest manifest = new NativeCodeManifest();
// Records are separated by `,`
manifest.records = Arrays.stream(attributeValue.split(","))
.map(String::trim)
.map(NativeCodeManifestRecord::fromString)
.collect(Collectors.toUnmodifiableList());
return manifest;
}
}
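/*
 * A minimal usage sketch for NativeCodeManifest, assuming a hypothetical bundle
 * JAR at /tmp/native-bundle.jar that may carry an OSGi Bundle-NativeCode attribute.
 */
class NativeCodeManifestUsageSketch {
    public static void main(String[] args) throws IOException {
        JarFile jar = new JarFile("/tmp/native-bundle.jar"); // hypothetical path
        NativeCodeManifest manifest = NativeCodeManifest.fromJarFile(jar);
        if (manifest == null) {
            System.out.println("No Bundle-NativeCode attribute present");
        } else {
            // Each record names a library path plus the OS/processor it targets.
            manifest.getRecords().forEach(System.out::println);
        }
    }
}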
| 1,263 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs/GravitonReadyAssessor/JarNativeInfo.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.zip.ZipException;
import static java.nio.file.StandardOpenOption.APPEND;
import lombok.*;
/**
* JarNativeInfo contains information about native libraries inside a JAR file.
*/
public class JarNativeInfo {
private final static String[] IGNORED_PREFIXES = {
"jdk.internal"
};
/**
* The actual on-disk path to the JAR file, which may be a temporary file if the JAR
* was embedded in another JAR.
*/
@Getter
@NonNull
private Path realJarPath;
/**
* Only used for embedded JARs. Points to the path inside the JAR.
*/
@Getter
private Path nominalJarPath;
/**
* Shared libraries
*/
@Getter
private final List<String> sharedLibs = new ArrayList<>();
/**
* Native methods
*/
@Getter
private final List<Method> nativeMethods = new ArrayList<>();
/**
* Native information associated with embedded JARs
*/
@Getter
private final List<JarNativeInfo> children = new ArrayList<>();
final Logger log = SimpleLogger.getLogger();
static ConcurrentHashMap<Path, ClassLoader> cache = new ConcurrentHashMap<>();
/**
* Builds a JarNativeInfo object
* @param jarPath the path to the JAR file
* @throws IOException
*/
public JarNativeInfo(@NonNull Path jarPath) throws IOException {
this(jarPath, null);
}
/**
* Builds a JarNativeInfo object
* @param realJarPath the path to the JAR file on disk
* @param nominalPath for embedded JARs, the path in the enclosing JAR file where this JAR is located
* @throws IOException
*/
public JarNativeInfo(@NonNull Path realJarPath, Path nominalPath) throws IOException {
this.realJarPath = realJarPath;
this.nominalJarPath = nominalPath;
if (nominalPath == null) {
log.info("🛃 Checking JAR " + realJarPath);
} else {
log.info("🛃 Checking embedded JAR " + nominalPath.toString());
}
try {
@Cleanup JarFile jarFile = new JarFile(realJarPath.toFile());
final Enumeration<JarEntry> entries = jarFile.entries();
while (entries.hasMoreElements()) {
final JarEntry entry = entries.nextElement();
final String entryName = entry.getName();
if (entry.isDirectory()) continue;
if (entryName.endsWith(".jar")) {
// Embedded JAR file
// Extract the JAR file to a temporary location
@Cleanup InputStream is = jarFile.getInputStream(entry);
Path tmpJarPath = Files.createTempFile(null, null);
tmpJarPath.toFile().deleteOnExit();
@Cleanup OutputStream os = Files.newOutputStream(tmpJarPath, APPEND);
is.transferTo(os);
// Process the embedded JAR recursively
JarNativeInfo nativeInfo = new JarNativeInfo(tmpJarPath, Path.of(entryName));
children.add(nativeInfo);
} else if (entryName.endsWith(".class")) {
String className = entryName
.substring(0, entry.getName().length() - ".class".length())
.replace('/', '.');
// Skip JDK internal classes
if (Arrays.stream(IGNORED_PREFIXES).anyMatch(className::startsWith))
continue;
// Load the class and find its native methods
Class<?> c = loadClass(className, realJarPath);
if (c != null) {
try {
nativeMethods.addAll(findNativeMethods(c));
} catch (NoClassDefFoundError ignored) {
}
}
}
}
// No need to proceed if there aren't any native methods.
if (nativeMethods.isEmpty()) return;
JarChecker scanner;
// First try to find the shared libraries by scanning the JAR manifest
scanner = new JarManifestScanner(jarFile);
sharedLibs.addAll(scanner.getSharedLibraryPaths());
// Then try to find shared libraries by examining the JAR table of contents
scanner = new JarFileScanner(jarFile);
sharedLibs.addAll(scanner.getSharedLibraryPaths());
} catch (ZipException e) {
// Treat empty JAR files as though they have no methods at all.
if (e.getMessage().equals("zip file is empty")) {
return;
}
throw e;
}
}
public boolean hasNativeMethods() {
return !nativeMethods.isEmpty();
}
private List<Method> findNativeMethods(@NonNull Class<?> c) {
log.fine("🧐 Getting native methods for class " + c.getName());
return Stream.of(c.getDeclaredMethods())
.peek(m -> log.finer("Checking method " + m.getName()))
.filter(m -> Modifier.isNative(m.getModifiers()))
.collect(Collectors.toList());
}
private Class<?> loadClass(@NonNull String name, @NonNull Path jarPath) {
ClassLoader cl;
Class<?> cls = null;
try {
cl = cache.computeIfAbsent(jarPath, k -> {
try {
URL[] urls = {new URL("jar:file:" + k + "!/")};
return new URLClassLoader(urls);
} catch (MalformedURLException e) {
e.printStackTrace();
return null;
}
});
assert cl != null;
cls = cl.loadClass(name);
} catch (ClassNotFoundException e) {
e.printStackTrace();
} catch (NoClassDefFoundError|IllegalAccessError ignored) {
}
return cls;
}
}
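/*
 * A minimal usage sketch for JarNativeInfo, assuming a hypothetical JAR at
 * /tmp/example.jar; it prints the native methods and shared libraries found in
 * the JAR and, recursively, in any embedded JARs.
 */
class JarNativeInfoUsageSketch {
    public static void main(String[] args) throws IOException {
        printSummary(new JarNativeInfo(Path.of("/tmp/example.jar"))); // hypothetical path
    }
    private static void printSummary(JarNativeInfo info) {
        if (info.hasNativeMethods()) {
            System.out.println(info.getRealJarPath() + ": " + info.getNativeMethods().size()
                    + " native method(s), shared libs " + info.getSharedLibs());
        }
        // Embedded JARs are scanned recursively and exposed as children.
        info.getChildren().forEach(JarNativeInfoUsageSketch::printSummary);
    }
}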
| 1,264 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs/GravitonReadyAssessor/Command.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
import java.io.IOException;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.*;
import java.lang.reflect.Method;
import java.util.logging.*;
import java.util.stream.Collectors;
import java.util.concurrent.Callable;
import lombok.Getter;
import lombok.NonNull;
import picocli.CommandLine;
import picocli.CommandLine.Option;
import picocli.CommandLine.Parameters;
@CommandLine.Command(name = "Arm64LinuxJarChecker",
description = "Checks JAR/WAR files for compatibility with Arm64 CPU architecture on Linux",
mixinStandardHelpOptions = true,
exitCodeListHeading = "Exit Codes:%n",
exitCodeList = {
"0: Successful execution, no problems found",
"3: Found native classes but no Arm64/Linux shared libraries in JARs"
})
final public class Command implements Callable<Integer> {
@Parameters(description = "Files or directories in which JARs are located (default: current working directory)")
private final List<String> searchPaths = new ArrayList<>();
@Option(names = {"-v", "--verbose"}, description = "Run verbosely")
private boolean verbose;
@Override
public Integer call() throws IOException {
int exitCode = 0;
Logger log = SimpleLogger.getLogger();
if (verbose) {
SimpleLogger.setLevel(Level.ALL);
}
final class JarSearcher extends SimpleFileVisitor<Path> {
private final PathMatcher jarFileMatcher = FileSystems.getDefault().getPathMatcher("regex:.*\\.(jar|war)$");
@Getter
private final List<Path> nativeJarFiles = new ArrayList<>();
private final Map<Path, List<String>> nativeLibraryFiles = new HashMap<>();
private final Map<Path, List<Method>> nativeMethods = new HashMap<>();
private void processNativeInfo(@NonNull JarNativeInfo info) {
if (info.hasNativeMethods()) {
nativeJarFiles.add(info.getNominalJarPath());
nativeLibraryFiles.put(info.getNominalJarPath(), info.getSharedLibs());
nativeMethods.put(info.getNominalJarPath(), info.getNativeMethods());
}
for (JarNativeInfo childInfo : info.getChildren()) {
processNativeInfo(childInfo);
}
}
@Override
public FileVisitResult visitFile(@NonNull Path path, @NonNull BasicFileAttributes attrs) throws IOException {
if (!jarFileMatcher.matches(path))
return FileVisitResult.CONTINUE;
processNativeInfo(new JarNativeInfo(path));
return FileVisitResult.CONTINUE;
}
public List<String> getNativeLibraries(Path path) {
return nativeLibraryFiles.get(path);
}
public List<Method> getNativeMethods(Path path) {
return nativeMethods.get(path);
}
public boolean hasNativeJars() {
return !nativeJarFiles.isEmpty();
}
public boolean hasNativeLibraries(Path path) {
return !nativeLibraryFiles.get(path).isEmpty();
}
}
log.info("🟢 Starting search for native classes in JAR files");
if (searchPaths.isEmpty()) {
searchPaths.add("");
}
// Search JARs and classes
JarSearcher finder = new JarSearcher();
for (String searchPath : searchPaths) {
Files.walkFileTree(
Paths.get(searchPath), // start with current working directory
finder);
}
final List<Path> nativeJars = finder.getNativeJarFiles();
if (!finder.hasNativeJars()) {
log.info("🎉 No native methods found in scanned JAR files. These should work on any supported CPU architecture.");
System.exit(0);
}
for (Path jarPath : nativeJars) {
log.info("ℹ️ JAR with native methods: " + jarPath);
log.fine("ℹ️ Methods: " + finder.getNativeMethods(jarPath)
.stream()
.map(m -> String.format("%s::%s", m.getDeclaringClass().getName(), m.getName()))
.distinct()
.collect(Collectors.joining(", ")));
if (finder.hasNativeLibraries(jarPath)) {
log.info("✅ Native libraries: " +
finder.getNativeLibraries(jarPath)
.stream()
.distinct()
.collect(Collectors.joining(", ")));
} else {
log.info("🚨 JAR " + jarPath + " has native methods but no libraries found for aarch64/Linux");
log.info("Native methods: " + finder.getNativeMethods(jarPath).stream().distinct().map(Method::toString).collect(Collectors.joining(", ")));
exitCode = 3;
}
}
if (exitCode == 0) {
log.info(String.format("%n🎉 JAR files scanned and native libraries appear to be all there. You're good to go!"));
} else {
log.info(String.format("%n🚨 Found JAR files with native methods but no Linux/arm64 support."));
}
return exitCode;
}
public static void main(String... args) {
int exitCode = new CommandLine(new Command()).execute(args);
System.exit(exitCode);
}
}
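/*
 * Illustrative invocation sketch, assuming the tool and its dependencies are on
 * the classpath (the classpath and the /opt/app/lib search path are hypothetical):
 *
 *   java -cp <classpath> com.amazonaws.labs.GravitonReadyAssessor.Command -v /opt/app/lib
 *
 * Exit code 0 means no problems were found; exit code 3 means JARs with native
 * methods were found without matching aarch64/Linux shared libraries.
 */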
| 1,265 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs/GravitonReadyAssessor/JarCheckerInterface.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
import java.io.IOException;
import java.util.List;
public interface JarCheckerInterface {
List<String> getSharedLibraryPaths() throws IOException;
}
| 1,266 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs/GravitonReadyAssessor/JarChecker.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
public abstract class JarChecker implements JarCheckerInterface {
}
| 1,267 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs/GravitonReadyAssessor/JarFileScanner.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.logging.Logger;
/**
* A JAR file scanner that locates native code libraries via simple
* path-matching. If the file ends in .so and has the strings "aarch64"
* and "linux", it is considered a match.
*/
@RequiredArgsConstructor
public class JarFileScanner extends JarChecker {
Logger logger = SimpleLogger.getLogger();
@NonNull
private JarFile jarFile;
/**
* Return a list of path names corresponding to shared library files
* in the JAR file.
*
* @return list of shared library pathnames
* @throws IOException
*/
public List<String> getSharedLibraryPaths() throws IOException {
final List<String> sharedLibraryPaths = new ArrayList<>();
final Enumeration<JarEntry> entries = jarFile.entries();
while (entries.hasMoreElements()) {
final JarEntry entry = entries.nextElement();
final String entryName = entry.getName();
if (!entry.isDirectory() &&
entryName.endsWith(".so") &&
entryName.toLowerCase().contains("aarch64") &&
entryName.toLowerCase().contains("linux"))
sharedLibraryPaths.add(entryName);
}
return sharedLibraryPaths;
}
}
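/*
 * Worked examples of the matching heuristic above (hypothetical entry names):
 *
 *   native/linux-aarch64/libzstd.so      -> match (ends in .so, contains "aarch64" and "linux")
 *   native/linux-x86_64/libzstd.so       -> no match (no "aarch64")
 *   native/darwin-aarch64/libzstd.dylib  -> no match (does not end in ".so")
 */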
| 1,268 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs/GravitonReadyAssessor/ClassInfo.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
import java.net.URL;
import java.util.Date;
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.fasterxml.jackson.databind.ser.std.ToStringSerializer;
import lombok.Builder;
import lombok.Data;
import lombok.extern.jackson.Jacksonized;
import org.osgi.framework.VersionRange;
@Data
@Builder
@Jacksonized
public class ClassInfo {
private String implementationTitle;
private String implementationVendor;
@JsonSerialize(using = ToStringSerializer.class)
private VersionRange implementationVersionRange;
private String specificationTitle;
private String specificationVendor;
@JsonSerialize(using = ToStringSerializer.class)
private VersionRange specificationVersionRange;
private String status;
private String description;
private URL url;
@JsonFormat(shape = JsonFormat.Shape.STRING)
private Date lastUpdated;
}
| 1,269 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs/GravitonReadyAssessor/Config.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.Builder;
import lombok.Data;
import lombok.Singular;
import lombok.extern.jackson.Jacksonized;
import java.io.IOException;
import java.net.URL;
import java.util.List;
@Data
@Builder
@Jacksonized
public class Config {
@JsonProperty("classes")
@Singular public List<ClassInfo> classInfos;
public static Config fromURL(URL url) throws IOException {
ObjectMapper mapper = new ObjectMapper();
return mapper.readerFor(Config.class).readValue(url);
}
public static Config fromJson(String s) throws JsonProcessingException, JsonMappingException {
ObjectMapper mapper = new ObjectMapper();
return mapper.readerFor(Config.class).readValue(s);
}
public String toJson() throws JsonProcessingException {
ObjectMapper mapper = new ObjectMapper();
return mapper.writerWithDefaultPrettyPrinter().writeValueAsString(this);
}
}
| 1,270 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs/GravitonReadyAssessor/SimpleLogger.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
import lombok.NonNull;
import java.io.OutputStream;
import java.util.Properties;
import java.util.logging.*;
/**
 * A simple unbuffered logger that prints each log line as-is to standard output (System.out).
*/
public class SimpleLogger {
private static Logger logger;
private static Handler handler;
static {
Properties logProps = System.getProperties();
logProps.setProperty("java.util.logging.SimpleFormatter.format", "%5$s%n");
System.setProperties(logProps);
}
/**
* Obtain the singleton Logger instance.
*
* @return The logger instance
* @throws SecurityException
*/
public static Logger getLogger() throws SecurityException {
if (logger != null) {
return logger;
}
logger = Logger.getLogger(SimpleLogger.class.toString());
logger.setUseParentHandlers(false);
handler = getAutoFlushingStreamHandler(System.out, new SimpleFormatter());
logger.addHandler(handler);
return logger;
}
/**
* Sets the lowest log level that this logger will emit. Logs with a level lower than
* this will be omitted from the output.
*
* @param level The log level
*/
public static void setLevel(@NonNull Level level) {
if (logger == null) getLogger();
handler.setLevel(level);
logger.setLevel(level);
}
/**
* Returns a StreamHandler that flushes after every publish() invocation.
* @param o the OutputStream passed to the StreamHandler constructor
* @param f the Formatter passed to the StreamHandler constructor
 * @return a StreamHandler that flushes on every publish() call
*/
private static StreamHandler getAutoFlushingStreamHandler(@NonNull OutputStream o, @NonNull Formatter f) {
return new StreamHandler(o, f) {
@Override
public synchronized void publish(@NonNull final LogRecord record) {
super.publish(record);
flush();
}
};
}
}
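/*
 * A minimal usage sketch for SimpleLogger: INFO is emitted with the default level,
 * FINE only after the threshold is lowered.
 */
class SimpleLoggerUsageSketch {
    public static void main(String[] args) {
        Logger log = SimpleLogger.getLogger();
        log.info("INFO is emitted by default");
        SimpleLogger.setLevel(Level.FINE); // lower the threshold to include FINE
        log.fine("FINE is emitted once the level is lowered");
    }
}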
| 1,271 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs/GravitonReadyAssessor/JarManifestScanner.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import java.io.IOException;
import java.util.List;
import java.util.jar.JarFile;
import java.util.stream.Collectors;
/**
* A JAR file scanner that locates native code libraries by looking at
* the JAR's manifest. It uses the OSGI <code>Bundle-NativeCode</code>
* attribute for this purpose.
*/
@RequiredArgsConstructor
public class JarManifestScanner extends JarChecker {
@NonNull
private JarFile jarFile;
/**
* Return a list of path names corresponding to shared library files
* in the JAR file.
*
* @return list of shared library pathnames
* @throws IOException
*/
public List<String> getSharedLibraryPaths() throws IOException {
NativeCodeManifest manifest = NativeCodeManifest.fromJarFile(this.jarFile);
// No native code manifest found
if (manifest == null) return List.of();
return manifest.getRecords().stream()
.filter(NativeCodeManifestRecord::isAarch64)
.filter(NativeCodeManifestRecord::isLinux)
.map(NativeCodeManifestRecord::getLibpath)
.collect(Collectors.toList());
}
}
| 1,272 |
0 |
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs
|
Create_ds/porting-advisor-for-graviton/src/advisor/tools/graviton-ready-java/src/main/java/com/amazonaws/labs/GravitonReadyAssessor/NativeCodeManifestRecord.java
|
package com.amazonaws.labs.GravitonReadyAssessor;
import lombok.Getter;
import lombok.NonNull;
import lombok.Setter;
import java.util.ArrayList;
import java.util.List;
/**
* A record in a Bundle-NativeCode JAR manifest attribute.
*/
public class NativeCodeManifestRecord {
@Getter
@Setter
private String libpath;
private final List<String> osnames = new ArrayList<>();
private final List<String> arches = new ArrayList<>();
/**
* Creates a NativeCodeManifestRecord from its string representation.
* @param text The raw text
* @return a NativeCodeManifestRecord
*/
public static NativeCodeManifestRecord fromString(@NonNull String text) {
NativeCodeManifestRecord entry = new NativeCodeManifestRecord();
List<String> kvPairs = List.of(text.split(";"));
entry.setLibpath(kvPairs.get(0));
// Record any processor architectures or OS names found within
kvPairs.stream().skip(1).forEach(pair -> {
String key = pair.split("=")[0];
String val = pair.split("=")[1];
if (key.equals("osname")) {
entry.addOSName(val);
}
if (key.equals("processor")) {
entry.addArch(val);
}
});
return entry;
}
public void addOSName(String osName) {
osnames.add(osName);
}
public void addArch(String arch) {
arches.add(arch);
}
public boolean isLinux() {
return osnames.stream().anyMatch(name -> name.equalsIgnoreCase("linux"));
}
public boolean isAarch64() {
return arches.stream().anyMatch(name -> name.equalsIgnoreCase("aarch64"));
}
@Override
public String toString() {
return "libpath: " + libpath + "; arches=" + this.arches + "; osnames=" + this.osnames;
}
}
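/*
 * A minimal parsing sketch for NativeCodeManifestRecord; the attribute text below
 * is a hypothetical Bundle-NativeCode entry, not taken from a real JAR.
 */
class NativeCodeManifestRecordUsageSketch {
    public static void main(String[] args) {
        NativeCodeManifestRecord record = NativeCodeManifestRecord.fromString(
                "lib/linux-aarch64/libexample.so;osname=Linux;processor=aarch64");
        System.out.println(record);                                 // libpath plus parsed arches/osnames
        System.out.println(record.isLinux() && record.isAarch64()); // true for this record
    }
}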
| 1,273 |
0 |
Create_ds/suro/suro-kafka-producer/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-kafka-producer/src/test/java/com/netflix/suro/sink/kafka/TestKafkaSink.java
|
package com.netflix.suro.sink.kafka;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.BeanProperty;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.InjectableValues;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsontype.NamedType;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.*;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.thrift.TMessageSet;
import kafka.admin.TopicCommand;
import kafka.api.FetchRequestBuilder;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerTimeoutException;
import kafka.consumer.KafkaStream;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndMetadata;
import kafka.message.MessageAndOffset;
import kafka.server.KafkaConfig;
import kafka.utils.ZkUtils;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.TestRule;
import rx.functions.Action3;
import scala.Option;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.Assert.*;
public class TestKafkaSink {
@Rule
public TemporaryFolder tempDir = new TemporaryFolder();
public static ZkExternalResource zk = new ZkExternalResource();
public static KafkaServerExternalResource kafkaServer = new KafkaServerExternalResource(zk);
@ClassRule
public static TestRule chain = RuleChain
.outerRule(zk)
.around(kafkaServer);
private static final String TOPIC_NAME = "routingKey";
private static final String TOPIC_NAME_MULTITHREAD = "routingKeyMultithread";
private static final String TOPIC_NAME_PARTITION_BY_KEY = "routingKey_partitionByKey";
private static final String TOPIC_NAME_BACKWARD_COMPAT = "routingKey_backwardCompat";
private static ObjectMapper jsonMapper = new DefaultObjectMapper();
@BeforeClass
public static void startup() {
jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "kafka"));
jsonMapper.setInjectableValues(new InjectableValues() {
@Override
public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
return new KafkaRetentionPartitioner();
} else {
return null;
}
}
});
}
@Test
public void testDefaultParameters() throws IOException {
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
"--replication-factor", "2", "--partitions", "1"}));
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"acks\": 1\n" +
"}";
KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
sink.open();
Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
while (msgIterator.hasNext()) {
sink.writeTo(new StringMessage(msgIterator.next()));
}
assertTrue(sink.getNumOfPendingMessages() > 0);
sink.close();
assertEquals(sink.getNumOfPendingMessages(), 0);
System.out.println(sink.getStat());
// get the leader
Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
int leader = (Integer) leaderOpt.get();
KafkaConfig config;
if (leader == kafkaServer.getServer(0).config().brokerId()) {
config = kafkaServer.getServer(0).config();
} else {
config = kafkaServer.getServer(1).config();
}
SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());
List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
assertEquals("Should have fetched 2 messages", 2, messageSet.size());
assertEquals(new String(extractMessage(messageSet, 0)), "testMessage" + 0);
assertEquals(new String(extractMessage(messageSet, 1)), "testMessage" + 1);
}
@Test
public void testMultithread() throws IOException {
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
"--replication-factor", "2", "--partitions", "1"}));
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"acks\": 1\n" +
"}";
KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>() {
});
sink.open();
int msgCount = 10000;
sendMessages(TOPIC_NAME_MULTITHREAD, sink, msgCount);
assertTrue(sink.getNumOfPendingMessages() > 0);
sink.close();
System.out.println(sink.getStat());
assertEquals(sink.getNumOfPendingMessages(), 0);
checkConsumer(TOPIC_NAME_MULTITHREAD, msgCount - (int) sink.droppedRecords.get());
}
@Test
public void testPartitionByKey() throws Exception {
int numPartitions = 9;
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_PARTITION_BY_KEY,
"--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
String keyTopicMap = String.format(" \"keyTopicMap\": {\n" +
" \"%s\": \"key\"\n" +
" }", TOPIC_NAME_PARTITION_BY_KEY);
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"acks\": 1,\n" +
keyTopicMap + "\n" +
"}";
KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
sink.open();
int messageCount = 10;
for (int i = 0; i < messageCount; ++i) {
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
.put("key", Integer.toString(i % numPartitions))
.put("value", "message:" + i).build();
sink.writeTo(new DefaultMessageContainer(
new Message(TOPIC_NAME_PARTITION_BY_KEY, jsonMapper.writeValueAsBytes(msgMap)),
jsonMapper));
}
sink.close();
System.out.println(sink.getStat());
ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(TOPIC_NAME_PARTITION_BY_KEY, 1);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_PARTITION_BY_KEY).get(0);
Map<Integer, Set<Map<String, Object>>> resultSet = new HashMap<Integer, Set<Map<String, Object>>>();
for (int i = 0; i < messageCount; ++i) {
MessageAndMetadata<byte[], byte[]> msgAndMeta = stream.iterator().next();
System.out.println(new String(msgAndMeta.message()));
Map<String, Object> msg = jsonMapper.readValue(new String(msgAndMeta.message()), new TypeReference<Map<String, Object>>() {});
Set<Map<String, Object>> s = resultSet.get(msgAndMeta.partition());
if (s == null) {
s = new HashSet<Map<String, Object>>();
resultSet.put(msgAndMeta.partition(), s);
}
s.add(msg);
}
int sizeSum = 0;
for (Map.Entry<Integer, Set<Map<String, Object>>> e : resultSet.entrySet()) {
sizeSum += e.getValue().size();
String key = (String) e.getValue().iterator().next().get("key");
for (Map<String, Object> ss : e.getValue()) {
assertEquals(key, (String) ss.get("key"));
}
}
assertEquals(sizeSum, messageCount);
try {
stream.iterator().next();
fail();
} catch (ConsumerTimeoutException e) {
//this is expected
consumer.shutdown();
}
}
@Test
public void testCheckPause() throws IOException, InterruptedException {
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME + "check_pause",
"--replication-factor", "2", "--partitions", "1"}));
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"acks\": 1,\n" +
" \"buffer.memory\": 1000,\n" +
" \"batch.size\": 1000\n" +
"}";
final KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
sink.open();
final AtomicBoolean exceptionCaught = new AtomicBoolean(false);
final AtomicBoolean checkPaused = new AtomicBoolean(false);
final AtomicBoolean pending = new AtomicBoolean(false);
final CountDownLatch latch = new CountDownLatch(1);
sink.setRecordCounterListener(new Action3<Long, Long, Long>() {
@Override
public void call(Long queued, Long sent, Long dropped) {
if (dropped > 0) {
exceptionCaught.set(true);
if (sink.checkPause() > 0) {
checkPaused.set(true);
}
if (sink.getNumOfPendingMessages() > 0) {
pending.set(true);
}
latch.countDown();
}
}
});
for (int i = 0; i < 100; ++i) {
sink.writeTo(new DefaultMessageContainer(new Message(TOPIC_NAME + "check_pause", getBigData()), jsonMapper));
}
assertTrue(latch.await(10, TimeUnit.SECONDS));
assertTrue(exceptionCaught.get());
assertTrue(checkPaused.get());
assertTrue(pending.get());
}
@Test
public void testBlockingOnBufferFull() throws Throwable {
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME + "buffer_full",
"--replication-factor", "2", "--partitions", "1"}));
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"acks\": 1,\n" +
" \"block.on.buffer.full\": true,\n" +
" \"buffer.memory\": 1000,\n" +
" \"batch.size\": 1000\n" +
"}";
final KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
sink.open();
final CountDownLatch latch = new CountDownLatch(1);
final CountDownLatch shutdownLatch = new CountDownLatch(1);
new Thread(new Runnable() {
@Override
public void run() {
for (int i = 0; i < 100; ++i) {
try {
sink.writeTo(new DefaultMessageContainer(new Message(TOPIC_NAME + "buffer_full", getBigData()), jsonMapper));
} catch (Exception e) {
fail("exception thrown: " + e.toString());
}
if (i == 50) {
try{
kafkaServer.shutdown(); // to simulate kafka latency
}finally {
shutdownLatch.countDown();
}
}
}
latch.countDown();
}
}).start();
latch.await(3, TimeUnit.SECONDS);
assertEquals(latch.getCount(), 1); // blocked
// Make sure the kafka server is restarted only if shutdown is successful.
shutdownLatch.await();
kafkaServer.before();
}
@Test
public void testConfigBackwardCompatible() throws IOException {
int numPartitions = 9;
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
"--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
String keyTopicMap = String.format(" \"keyTopicMap\": {\n" +
" \"%s\": \"key\"\n" +
" }", TOPIC_NAME_BACKWARD_COMPAT);
String description1 = "{\n" +
" \"type\": \"Kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"ack\": 1,\n" +
" \"compression.type\": \"snappy\",\n" +
keyTopicMap + "\n" +
"}";
String description2 = "{\n" +
" \"type\": \"Kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"request.required.acks\": 1,\n" +
" \"compression.codec\": \"snappy\",\n" +
keyTopicMap + "\n" +
"}";
// setup sinks, both old and new versions
ObjectMapper jsonMapper = new DefaultObjectMapper();
jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "Kafka"));
jsonMapper.setInjectableValues(new InjectableValues() {
@Override
public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
return new KafkaRetentionPartitioner();
} else {
return null;
}
}
});
KafkaSink sink1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
KafkaSink sink2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
sink1.open();
sink2.open();
List<Sink> sinks = new ArrayList<Sink>();
sinks.add(sink1);
sinks.add(sink2);
// setup Kafka consumer (to read back messages)
ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
consumer.createMessageStreams(topicCountMap);
KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);
// Send 20 test messages, using the old and new Kafka sinks.
// Retrieve the messages and ensure that they are identical and sent to the same partition.
Random rand = new Random();
int messageCount = 20;
for (int i = 0; i < messageCount; ++i) {
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
.put("key", new Long( rand.nextLong() ) )
.put("value", "message:" + i).build();
// send message to both sinks
for( Sink sink : sinks ){
sink.writeTo(new DefaultMessageContainer(
new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
jsonMapper));
}
// read two copies of message back from Kafka and check that partitions and data match
MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
String msg1Str = new String( msgAndMeta1.message() );
String msg2Str = new String( msgAndMeta2.message() );
System.out.println( "iteration: "+i+" message1: "+msg1Str );
System.out.println( "iteration: "+i+" message2: "+msg2Str );
assertEquals(msg1Str, msg2Str);
}
// close sinks
sink1.close();
sink2.close();
// close consumer
try {
stream.iterator().next();
fail(); // there should be no data left to consume
} catch (ConsumerTimeoutException e) {
//this is expected
consumer.shutdown();
}
}
@Test
public void testStartWithKafkaOutage() throws Throwable {
String topicName = TOPIC_NAME + "kafkaoutage";
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", topicName,
"--replication-factor", "2", "--partitions", "1"}));
String[] brokerList = kafkaServer.getBrokerListStr().split(",");
int port1 = Integer.parseInt(brokerList[0].split(":")[1]);
int port2 = Integer.parseInt(brokerList[1].split(":")[1]);
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"acks\": 1\n" +
" }" +
"}";
kafkaServer.shutdown();
final KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
sink.open();
final int msgCount = 10000;
final CountDownLatch latch = new CountDownLatch(1);
sink.setRecordCounterListener(new Action3<Long, Long, Long>() {
@Override
public void call(Long queued, Long sent, Long dropped) {
if (sent == msgCount - sink.droppedRecords.get()) {
latch.countDown();
}
}
});
sendMessages(topicName, sink, msgCount);
kafkaServer.startServer(port1, port2); // running up
assertTrue(latch.await(10, TimeUnit.SECONDS));
sendMessages(topicName, sink, msgCount);
sink.close();
checkConsumer(topicName, 2 * msgCount - (int) sink.droppedRecords.get());
}
@Test
public void testRunningKafkaOutage() throws IOException, InterruptedException {
String topicName1 = TOPIC_NAME + "kafkaoutage2";
final String topicName2 = TOPIC_NAME + "kafkaoutage3";
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", topicName1,
"--replication-factor", "2", "--partitions", "1"}));
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", topicName2,
"--replication-factor", "2", "--partitions", "1"}));
String[] brokerList = kafkaServer.getBrokerListStr().split(",");
int port1 = Integer.parseInt(brokerList[0].split(":")[1]);
int port2 = Integer.parseInt(brokerList[1].split(":")[1]);
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"acks\": 1\n" +
"}";
final KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
sink.open();
final CountDownLatch latch = new CountDownLatch(1);
final int msgCount = 10000;
sink.setRecordCounterListener(new Action3<Long, Long, Long>() {
@Override
public void call(Long queued, Long sent, Long dropped) {
if (sent == msgCount - sink.droppedRecords.get()) {
latch.countDown();
}
}
});
sendMessages(topicName1, sink, msgCount);
assertTrue(latch.await(10, TimeUnit.SECONDS));
final int numSentForTopicName1 = msgCount - (int) sink.droppedRecords.get();
checkConsumer(topicName1, numSentForTopicName1);
kafkaServer.shutdown();
sendMessages(topicName2, sink, msgCount);
kafkaServer.startServer(port1, port2);
final CountDownLatch latch2 = new CountDownLatch(1);
final AtomicInteger numSent = new AtomicInteger();
sink.setRecordCounterListener(new Action3<Long, Long, Long>() {
@Override
public void call(Long queued, Long sent, Long dropped) {
if (sent + dropped == 3 * msgCount) {
numSent.set((int) (sent - numSentForTopicName1));
latch2.countDown();
}
}
});
sendMessages(topicName2, sink, msgCount);
sink.close();
assertTrue(latch2.await(10, TimeUnit.SECONDS));
assertTrue(numSent.get() > 0);
checkConsumer(topicName2, numSent.get());
}
private void sendMessages(String topicName, KafkaSink sink, int msgCount) throws JsonProcessingException {
for (int i = 0; i < msgCount; ++i) {
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
.put("key", Integer.toString(i))
.put("value", "message:" + i).build();
sink.writeTo(new DefaultMessageContainer(
new Message(topicName, jsonMapper.writeValueAsBytes(msgMap)),
jsonMapper));
}
}
private void checkConsumer(String topicName, int msgCount) throws IOException {
ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
Map<String, Integer> topicCountMap = new HashMap<>();
topicCountMap.put(topicName, 1);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
KafkaStream<byte[], byte[]> stream = consumerMap.get(topicName).get(0);
for (int i = 0; i < msgCount; ++i) {
try {
stream.iterator().next();
} catch (ConsumerTimeoutException e) {
fail(String.format("%d messages are consumed among %d", i, msgCount));
}
}
try {
stream.iterator().next();
fail();
} catch (ConsumerTimeoutException e) {
//this is expected
consumer.shutdown();
}
}
private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId) {
Properties props = new Properties();
props.put("zookeeper.connect", a_zookeeper);
props.put("group.id", a_groupId);
props.put("zookeeper.session.timeout.ms", "40000");
props.put("zookeeper.sync.time.ms", "20000");
props.put("auto.commit.interval.ms", "1000");
props.put("auto.offset.reset", "smallest");
props.put("consumer.timeout.ms", "3000");
return new ConsumerConfig(props);
}
private byte[] extractMessage(List<MessageAndOffset> messageSet, int offset) {
ByteBuffer bb = messageSet.get(offset).message().payload();
byte[] bytes = new byte[bb.remaining()];
bb.get(bytes, 0, bytes.length);
return bytes;
}
public static TMessageSet createMessageSet(String topic, int numMsgs) {
MessageSetBuilder builder = new MessageSetBuilder(new ClientConfig()).withCompression(Compression.LZF);
for (int i = 0; i < numMsgs; ++i) {
builder.withMessage(topic, ("testMessage" + i).getBytes());
}
return builder.build();
}
public byte[] getBigData() {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < 900; ++i) {
sb.append('a');
}
return sb.toString().getBytes();
}
}
| 1,274 |
0 |
Create_ds/suro/suro-kafka-producer/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-kafka-producer/src/test/java/com/netflix/suro/sink/kafka/TestKafkaSinkV2.java
|
package com.netflix.suro.sink.kafka;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.BeanProperty;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.InjectableValues;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsontype.NamedType;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.*;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.thrift.TMessageSet;
import kafka.admin.TopicCommand;
import kafka.api.FetchRequestBuilder;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerTimeoutException;
import kafka.consumer.KafkaStream;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndMetadata;
import kafka.message.MessageAndOffset;
import kafka.server.KafkaConfig;
import kafka.utils.ZkUtils;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.TestRule;
import scala.Option;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import static org.junit.Assert.*;
public class TestKafkaSinkV2 {
@Rule
public TemporaryFolder tempDir = new TemporaryFolder();
public static ZkExternalResource zk = new ZkExternalResource();
public static KafkaServerExternalResource kafkaServer = new KafkaServerExternalResource(zk);
@ClassRule
public static TestRule chain = RuleChain
.outerRule(zk)
.around(kafkaServer);
private static final String TOPIC_NAME = "routingKey";
private static final String TOPIC_NAME_MULTITHREAD = "routingKeyMultithread";
private static final String TOPIC_NAME_PARTITION_BY_KEY = "routingKey_partitionByKey";
private static final String TOPIC_NAME_BACKWARD_COMPAT = "routingKey_backwardCompat";
@Test
public void testDefaultParameters() throws IOException {
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
"--replication-factor", "2", "--partitions", "1"}));
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"request.required.acks\": 1\n" +
"}";
ObjectMapper jsonMapper = new DefaultObjectMapper();
jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
sink.open();
// create and send test messages to Kafka
Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
HashSet<String> sentPayloads = new HashSet<String>(); // track sent messages for comparison later
while (msgIterator.hasNext()) {
StringMessage next = new StringMessage(msgIterator.next());
sink.writeTo(next); // send
sentPayloads.add( new String( next.getMessage().getPayload() ) ); // record
}
sink.close();
assertEquals(sink.getNumOfPendingMessages(), 0);
System.out.println(sink.getStat());
// get the leader
Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
int leader = (Integer) leaderOpt.get();
KafkaConfig config;
if (leader == kafkaServer.getServer(0).config().brokerId()) {
config = kafkaServer.getServer(0).config();
} else {
config = kafkaServer.getServer(1).config();
}
// get data back from Kafka
SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());
List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
assertEquals("Should have fetched 2 messages", 2, messageSet.size());
for( int i=0; i<messageSet.size(); i++ ){
// ensure that received message was one that was sent
String receivedPayload = new String(extractMessage(messageSet, i));
System.out.println( "Got message: " + new String( receivedPayload ) );
assertTrue( sentPayloads.remove( receivedPayload ) );
}
assertEquals(sentPayloads.size(), 0); // all sent messages should have been received
}
@Test
public void testMultithread() throws IOException {
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
"--replication-factor", "2", "--partitions", "1"}));
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"request.required.acks\": 1,\n" +
" \"batchSize\": 10,\n" +
" \"jobQueueSize\": 3\n" +
"}";
ObjectMapper jsonMapper = new DefaultObjectMapper();
jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
sink.open();
int msgCount = 10000;
for (int i = 0; i < msgCount; ++i) {
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
.put("key", Integer.toString(i))
.put("value", "message:" + i).build();
sink.writeTo(new DefaultMessageContainer(
new Message(TOPIC_NAME_MULTITHREAD, jsonMapper.writeValueAsBytes(msgMap)),
jsonMapper));
}
assertTrue(sink.getNumOfPendingMessages() > 0);
sink.close();
System.out.println(sink.getStat());
assertEquals(sink.getNumOfPendingMessages(), 0);
ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid_multhread"));
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(TOPIC_NAME_MULTITHREAD, 1);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_MULTITHREAD).get(0);
for (int i = 0; i < msgCount; ++i) {
stream.iterator().next();
}
try {
stream.iterator().next();
fail();
} catch (ConsumerTimeoutException e) {
//this is expected
consumer.shutdown();
}
}
@Test
public void testFileBasedQueuePartitionByKey() throws Exception {
int numPartitions = 9;
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_PARTITION_BY_KEY,
"--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
String fileQueue = String.format(
" \"queue4Sink\": {\n" +
" \"type\": \"file\",\n" +
" \"path\": \"%s\",\n" +
" \"name\": \"testKafkaSink\"\n" +
" }\n", tempDir.newFolder().getAbsolutePath());
String keyTopicMap = String.format(" \"keyTopicMap\": {\n" +
" \"%s\": \"key\"\n" +
" }", TOPIC_NAME_PARTITION_BY_KEY);
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"request.required.acks\": 1,\n" +
fileQueue + ",\n" +
keyTopicMap + "\n" +
"}";
// setup sink
ObjectMapper jsonMapper = new DefaultObjectMapper();
jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
sink.open();
// create and send 10 test messages to Kafka
int messageCount = 10;
for (int i = 0; i < messageCount; ++i) {
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
.put("key", Integer.toString(i % numPartitions))
.put("value", "message:" + i).build();
sink.writeTo(new DefaultMessageContainer(
new Message(TOPIC_NAME_PARTITION_BY_KEY, jsonMapper.writeValueAsBytes(msgMap)),
jsonMapper));
}
sink.close();
System.out.println(sink.getStat());
// read data back from Kafka
ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(TOPIC_NAME_PARTITION_BY_KEY, 1);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_PARTITION_BY_KEY).get(0);
Map<Integer, Set<Map<String, Object>>> resultSet = new HashMap<Integer, Set<Map<String, Object>>>();
for (int i = 0; i < messageCount; ++i) {
MessageAndMetadata<byte[], byte[]> msgAndMeta = stream.iterator().next();
System.out.println(new String(msgAndMeta.message()));
Map<String, Object> msg = jsonMapper.readValue(new String(msgAndMeta.message()), new TypeReference<Map<String, Object>>() {});
Set<Map<String, Object>> s = resultSet.get(msgAndMeta.partition());
if (s == null) {
s = new HashSet<Map<String, Object>>();
resultSet.put(msgAndMeta.partition(), s);
}
s.add(msg);
}
// verify we received what was sent
int sizeSum = 0;
for (Map.Entry<Integer, Set<Map<String, Object>>> e : resultSet.entrySet()) {
sizeSum += e.getValue().size();
String key = (String) e.getValue().iterator().next().get("key");
for (Map<String, Object> ss : e.getValue()) {
assertEquals(key, (String) ss.get("key"));
}
}
assertEquals(sizeSum, messageCount);
try {
stream.iterator().next();
fail();
} catch (ConsumerTimeoutException e) {
//this is expected
consumer.shutdown();
}
}
/** Tests backward compatibility with the old Kafka sink. */
@Test
public void testBackwardCompatability() throws Exception {
int numPartitions = 9;
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
"--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
String keyTopicMap = String.format(" \"keyTopicMap\": {\n" +
" \"%s\": \"key\"\n" +
" }", TOPIC_NAME_BACKWARD_COMPAT);
String description1 = "{\n" +
" \"type\": \"kafkaV1\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"ack\": 1,\n" +
keyTopicMap + "\n" +
"}";
String description2 = "{\n" +
" \"type\": \"kafkaV2\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"request.required.acks\": 1,\n" +
keyTopicMap + "\n" +
"}";
// setup sinks, both old and new versions
ObjectMapper jsonMapper = new DefaultObjectMapper();
jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "kafkaV1"));
jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafkaV2"));
jsonMapper.setInjectableValues(new InjectableValues() {
@Override
public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
return new KafkaRetentionPartitioner();
} else {
return null;
}
}
});
KafkaSink sinkV1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
KafkaSinkV2 sinkV2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
sinkV1.open();
sinkV2.open();
List<Sink> sinks = new ArrayList<Sink>();
sinks.add(sinkV1);
sinks.add(sinkV2);
// setup Kafka consumer (to read back messages)
ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
consumer.createMessageStreams(topicCountMap);
KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);
// Send 20 test messages, using the old and new Kafka sinks.
// Retrieve the messages and ensure that they are identical and sent to the same partition.
Random rand = new Random();
int messageCount = 20;
for (int i = 0; i < messageCount; ++i) {
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
.put("key", new Long( rand.nextLong() ) )
.put("value", "message:" + i).build();
// send message to both sinks
for( Sink sink : sinks ){
sink.writeTo(new DefaultMessageContainer(
new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
jsonMapper));
}
// read two copies of message back from Kafka and check that partitions and data match
MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
String msg1Str = new String( msgAndMeta1.message() );
String msg2Str = new String( msgAndMeta2.message() );
System.out.println( "iteration: "+i+" message1: "+msg1Str );
System.out.println( "iteration: "+i+" message2: "+msg2Str );
assertEquals(msg1Str, msg2Str);
}
// close sinks
sinkV1.close();
sinkV2.close();
// close consumer
try {
stream.iterator().next();
fail(); // there should be no data left to consume
} catch (ConsumerTimeoutException e) {
//this is expected
consumer.shutdown();
}
}
@Test
public void testBlockingThreadPoolExecutor() {
int jobQueueSize = 5;
int corePoolSize = 3;
int maxPoolSize = 3;
try {
testQueue(corePoolSize, maxPoolSize, new ArrayBlockingQueue<Runnable>(jobQueueSize));
fail("RejectedExecutionException should be thrown");
} catch (RejectedExecutionException e) {
// good to go
}
BlockingQueue<Runnable> jobQueue = new ArrayBlockingQueue<Runnable>(jobQueueSize) {
@Override
public boolean offer(Runnable runnable) {
try {
                    put(runnable); // block instead of rejecting the task, applying back-pressure to callers
} catch (InterruptedException e) {
// do nothing
}
return true;
}
};
testQueue(corePoolSize, maxPoolSize, jobQueue);
}
private void testQueue(int corePoolSize, int maxPoolSize, BlockingQueue<Runnable> jobQueue) {
ThreadPoolExecutor senders = new ThreadPoolExecutor(
corePoolSize,
maxPoolSize,
10, TimeUnit.SECONDS,
jobQueue);
for (int i = 0; i < 100; ++i) {
senders.execute(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
fail();
}
}
});
}
}
private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId) {
Properties props = new Properties();
props.put("zookeeper.connect", a_zookeeper);
props.put("group.id", a_groupId);
props.put("zookeeper.session.timeout.ms", "40000");
props.put("zookeeper.sync.time.ms", "20000");
props.put("auto.commit.interval.ms", "1000");
props.put("auto.offset.reset", "smallest");
props.put("consumer.timeout.ms", "3000");
return new ConsumerConfig(props);
}
private byte[] extractMessage(List<MessageAndOffset> messageSet, int offset) {
ByteBuffer bb = messageSet.get(offset).message().payload();
byte[] bytes = new byte[bb.remaining()];
bb.get(bytes, 0, bytes.length);
return bytes;
}
public static TMessageSet createMessageSet(String topic, int numMsgs) {
MessageSetBuilder builder = new MessageSetBuilder(new ClientConfig()).withCompression(Compression.LZF);
for (int i = 0; i < numMsgs; ++i) {
builder.withMessage(topic, ("testMessage" + i).getBytes());
}
return builder.build();
}
}
| 1,275 |
0 |
Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink/kafka/ServoReporter.java
|
package com.netflix.suro.sink.kafka;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.servo.monitor.DoubleGauge;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.suro.servo.Servo;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.MetricsReporter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.Map;
import java.util.concurrent.*;
public class ServoReporter implements MetricsReporter {
private static final Logger log = LoggerFactory.getLogger(ServoReporter.class);
private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(
new ThreadFactoryBuilder().setDaemon(false).setNameFormat("ServoReporter-%d").build());
private ConcurrentMap<DoubleGauge, KafkaMetric> gauges = new ConcurrentHashMap<>();
@Override
public void init(List<KafkaMetric> metrics) {
for (KafkaMetric metric : metrics) {
addMetric(metric);
}
}
private void addMetric(KafkaMetric metric) {
MetricName metricName = metric.metricName();
MonitorConfig.Builder builder = MonitorConfig.builder(metricName.name())
.withTag("group", metricName.group());
for(Map.Entry<String, String> tag : metricName.tags().entrySet()) {
builder.withTag(tag.getKey(), tag.getValue());
}
MonitorConfig monitorConfig = builder.build();
gauges.put(Servo.getDoubleGauge(monitorConfig), metric);
}
@Override
public void metricChange(KafkaMetric metric) {
addMetric(metric);
}
@Override
public void close() {
scheduler.shutdownNow();
}
@Override
public void configure(Map<String, ?> configs) {
long millis = TimeUnit.MINUTES.toMillis(1);
scheduler.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
for (Map.Entry<DoubleGauge, KafkaMetric> e : gauges.entrySet()) {
e.getKey().set(e.getValue().value());
}
}
}, millis, millis, TimeUnit.MILLISECONDS);
}
}
| 1,276 |
0 |
Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink/kafka/KafkaSinkV2.java
|
/*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.kafka;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.servo.monitor.Monitors;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.queue.MemoryQueue4Sink;
import com.netflix.suro.queue.MessageQueue4Sink;
import com.netflix.suro.sink.QueuedSink;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.sink.ThreadPoolQueuedSink;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Kafka 0.8.2 Sink, using the new Java-native producer rather than the Scala producer.
* Requests are re-queued indefinitely if they fail.
*
* The configuration parameters for the new kafka producer are listed in:
* http://kafka.apache.org/documentation.html#newproducerconfigs
*
* @author jbae
* @author starzia
*/
public class KafkaSinkV2 extends ThreadPoolQueuedSink implements Sink {
public final static String TYPE = "KafkaV2";
private String clientId;
private final Map<String, String> keyTopicMap;
private final KafkaProducer<byte[], byte[]> producer;
private long msgId = 0;
private AtomicLong receivedCount = new AtomicLong(0);
private AtomicLong sentCount = new AtomicLong(0);
private AtomicLong sentByteCount = new AtomicLong(0);
/** number of times a message send failed without retrying */
private AtomicLong droppedCount = new AtomicLong(0);
/** number of times a message send failed but was requeued */
private AtomicLong requeuedCount = new AtomicLong(0);
@JsonCreator
public KafkaSinkV2(
@JsonProperty("queue4Sink") MessageQueue4Sink queue4Sink,
@JsonProperty("client.id") String clientId,
@JsonProperty("metadata.broker.list") String bootstrapServers,
@JsonProperty("compression.codec") String codec,
@JsonProperty("send.buffer.bytes") int sendBufferBytes,
@JsonProperty("batchSize") int batchSize,
@JsonProperty("batchTimeout") int batchTimeout,
@JsonProperty("request.timeout.ms") int requestTimeout,
@JsonProperty("kafka.etc") Properties etcProps,
@JsonProperty("keyTopicMap") Map<String, String> keyTopicMap,
@JsonProperty("jobQueueSize") int jobQueueSize,
@JsonProperty("corePoolSize") int corePoolSize,
@JsonProperty("maxPoolSize") int maxPoolSize,
@JsonProperty("jobTimeout") long jobTimeout,
@JsonProperty("pauseOnLongQueue") boolean pauseOnLongQueue
) {
super(jobQueueSize, corePoolSize, maxPoolSize, jobTimeout,
KafkaSink.class.getSimpleName() + "-" + clientId);
Preconditions.checkNotNull(bootstrapServers);
Preconditions.checkNotNull(clientId);
this.clientId = clientId;
initialize(
"kafka_" + clientId,
queue4Sink == null ? new MemoryQueue4Sink(10000) : queue4Sink,
batchSize,
batchTimeout,
pauseOnLongQueue);
Properties props = new Properties();
props.put("client.id", clientId);
// metadata.broker.list was renamed to bootstrap.servers in the new kafka producer
props.put("bootstrap.servers", bootstrapServers);
if (codec != null) {
props.put("compression.codec", codec);
}
if (sendBufferBytes > 0) {
props.put("send.buffer.bytes", Integer.toString(sendBufferBytes));
}
if (requestTimeout > 0) {
props.put("request.timeout.ms", Integer.toString(requestTimeout));
}
if (etcProps != null) {
props.putAll(etcProps);
}
this.keyTopicMap = keyTopicMap != null ? keyTopicMap : Maps.<String, String>newHashMap();
producer = new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer());
Monitors.registerObject(clientId, this);
}
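    // Illustrative configuration sketch (not part of the original source): the property names
    // below mirror the @JsonProperty annotations on the constructor above, the broker list and
    // sizes are placeholder values, and the "type" value must match however this sink is
    // registered with the ObjectMapper (the backward-compatibility test registers it as "kafkaV2").
    //
    //   {
    //     "type": "kafkaV2",
    //     "client.id": "kafkasink",
    //     "metadata.broker.list": "broker1:9092,broker2:9092",
    //     "compression.codec": "snappy",
    //     "batchSize": 200,
    //     "batchTimeout": 1000,
    //     "keyTopicMap": { "routing_key": "customerId" }
    //   }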
@Override
public void writeTo(MessageContainer message) {
long key = msgId++;
if (!keyTopicMap.isEmpty()) {
try {
Map<String, Object> msgMap = message.getEntity(new TypeReference<Map<String, Object>>() {});
Object keyField = msgMap.get(keyTopicMap.get(message.getRoutingKey()));
if (keyField != null) {
key = keyField.hashCode();
}
} catch (Exception e) {
QueuedSink.log.error("Exception on getting key field: " + e.getMessage());
}
}
QueuedSink.log.trace( "KafkaSink writeTo()" );
receivedCount.incrementAndGet();
enqueue(new SuroKeyedMessage(key, message.getMessage()));
}
@Override
public void open() {
setName(KafkaSink.class.getSimpleName() + "-" + clientId);
start();
}
@Override
protected void beforePolling() throws IOException { /*do nothing */}
@Override
protected void write(List<Message> msgList) {
QueuedSink.log.trace( "KafkaSink write() with {} messages", msgList.size() );
// prepare "final" copies of the messages to be used in the anonymous class below
final ArrayList<SuroKeyedMessage> msgCopies =
new ArrayList<SuroKeyedMessage>( msgList.size() );
for( Message m : msgList ){
SuroKeyedMessage sKeyedMsg = (SuroKeyedMessage) m;
msgCopies.add( new SuroKeyedMessage( sKeyedMsg.getKey(),
new Message( m.getRoutingKey(), m.getPayload() )));
}
// The new KafkaProducer does not have interface for sending multiple messages,
// so we loop and create lots of Runnables -- this seems inefficient, but the alternative
// has its own problems. If we create one "big Runnable" that loops over messages we'll
// drain the queue4sink too quickly -- all the messages will be queued in the in-memory
// job queue storing the Runnables.
for( final SuroKeyedMessage m : msgCopies ) {
senders.submit(new Runnable() {
@Override
public void run() {
String topic = m.getRoutingKey();
                    // calculate the kafka partition, keeping backward compatibility with the old kafka producer
int numPartitions = producer.partitionsFor(topic).size();
int partition = Math.abs((int)(m.getKey() ^ (m.getKey() >>> 32))) % numPartitions;
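                    // Worked example (illustrative numbers): with m.getKey() == 0x0000000500000007L and
                    // 9 partitions, key ^ (key >>> 32) == 0x0000000500000002L, whose low 32 bits are 2,
                    // so the partition is Math.abs(2) % 9 == 2 -- the same fold-and-modulo that
                    // KafkaSink applies to the key field's hashCode, which keeps both sinks in step.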
                    ProducerRecord<byte[], byte[]> r = new ProducerRecord<>( topic,
                                                         partition,
                                                         null, // don't store the key
                                                         m.getPayload() );
QueuedSink.log.trace( "Will send message to Kafka" );
long startTimeMs = System.currentTimeMillis();
// send
Future<RecordMetadata> responseFtr = producer.send( r );
QueuedSink.log.trace( "Started aysnc producer" );
boolean failure = true;
boolean retry = true;
if( responseFtr.isCancelled() ){
QueuedSink.log.warn( "Kafka producer request was cancelled" );
// we assume that cancelled requests should not be retried.
retry = false;
}
try {
// wait for request to finish
RecordMetadata response = responseFtr.get();
if( response.topic() == null ){
QueuedSink.log.warn( "Kafka producer got null topic in response" );
}
sentCount.incrementAndGet();
sentByteCount.addAndGet( m.getPayload().length );
failure = false;
retry = false;
} catch (InterruptedException e) {
// Assume that Interrupted means we're trying to shutdown so don't retry
QueuedSink.log.warn( "Caught InterruptedException: "+ e );
retry = false;
} catch( UnknownTopicOrPartitionException e ){
QueuedSink.log.warn( "Caught UnknownTopicOrPartitionException for topic: " + m.getRoutingKey()
+" This may be simply because KafkaProducer does not yet have information about the brokers."
+" Request will be retried.");
} catch (ExecutionException e) {
QueuedSink.log.warn( "Caught ExecutionException: "+ e );
} catch (Exception e){
QueuedSink.log.warn( "Caught Exception: "+e );
}
long durationMs = System.currentTimeMillis() - startTimeMs;
                    if (failure){
                        QueuedSink.log.warn( "Kafka producer send failed after {} milliseconds", durationMs );
                        if( retry ){
                            // only count as requeued when the message actually goes back on the queue
                            requeuedCount.incrementAndGet();
                            enqueue( m );
                        }else{
                            QueuedSink.log.info("Dropped message");
                            droppedCount.incrementAndGet();
                        }
} else{
QueuedSink.log.trace( "Kafka producer send succeeded after {} milliseconds", durationMs );
}
}
});
}
}
@Override
protected void innerClose() {
super.innerClose();
producer.close();
}
@Override
public String recvNotice() {
return null;
}
@Override
public String getStat() {
Map<MetricName,? extends Metric> metrics = producer.metrics();
StringBuilder sb = new StringBuilder();
// add kafka producer stats, which are rates
for( Map.Entry<MetricName,? extends Metric> e : metrics.entrySet() ){
sb.append("kafka.").append(e.getKey()).append(": ").append(e.getValue().value()).append('\n');
}
// also report our counters
sb.append("messages-in-queue4sink: ").append( this.queue4Sink.size() ).append('\n');
sb.append("queued-jobs: ").append( this.jobQueue.size() ).append('\n');
sb.append("active-threads: ").append( this.senders.getActiveCount() ).append('\n');
sb.append("received-messages: ").append( this.receivedCount.get() ).append('\n');
sb.append("sent-messages: ").append( this.sentCount.get() ).append('\n');
sb.append("sent-bytes: ").append( this.sentByteCount.get() ).append('\n');
sb.append("dropped-messages: ").append( this.droppedCount.get() ).append('\n');
sb.append("requeued-messages: ").append( this.requeuedCount.get() ).append('\n');
return sb.toString();
}
}
| 1,277 |
0 |
Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink/kafka/KafkaSink.java
|
package com.netflix.suro.sink.kafka;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.suro.TagKey;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.message.StringMessage;
import com.netflix.suro.sink.Sink;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.functions.Action3;
import java.lang.reflect.Field;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;
/**
* Kafka 0.8 Sink
*
* @author jbae
*/
public class KafkaSink implements Sink {
private static final Logger log = LoggerFactory.getLogger(KafkaSink.class);
public final static String TYPE = "Kafka";
private final Map<String, String> keyTopicMap;
private final boolean blockOnBufferFull;
private final Properties props;
private KafkaProducer<byte[], byte[]> producer;
private final KafkaRetentionPartitioner retentionPartitioner;
private final Set<String> metadataFetchedTopicSet;
private final BlockingQueue<MessageContainer> metadataWaitingQueue;
private final ExecutorService executor;
private final static MessageContainer SHUTDOWN_POISON_MSG = new StringMessage("suro-KafkaSink-shutdownMsg-routingKey",
"suro-KafkaSink-shutdownMsg-body");
@JsonCreator
public KafkaSink(
@JsonProperty("client.id") String clientId,
@JsonProperty("metadata.broker.list") String brokerList,
@JsonProperty("bootstrap.servers") String bootstrapServers,
@JsonProperty("request.required.acks") Integer requiredAcks,
@JsonProperty("acks") String acks,
@JsonProperty("buffer.memory") long bufferMemory,
@JsonProperty("batch.size") int batchSize,
@JsonProperty("compression.codec") String codec,
@JsonProperty("compression.type") String compression,
@JsonProperty("retries") int retries,
@JsonProperty("block.on.buffer.full") boolean blockOnBufferFull,
@JsonProperty("metadata.waiting.queue.size") int metadataWaitingQueueSize,
@JsonProperty("kafka.etc") Properties etcProps,
@JsonProperty("keyTopicMap") Map<String, String> keyTopicMap,
@JacksonInject KafkaRetentionPartitioner retentionPartitioner) {
        Preconditions.checkArgument(bootstrapServers != null || brokerList != null);
Preconditions.checkNotNull(clientId);
props = new Properties();
props.put("client.id", clientId);
props.put("bootstrap.servers", brokerList != null ? brokerList : bootstrapServers);
if (acks != null || requiredAcks != null) {
props.put("acks", requiredAcks != null ? requiredAcks.toString() : acks);
}
if (bufferMemory > 0) {
props.put("buffer.memory", bufferMemory);
}
if (batchSize > 0) {
props.put("batch.size", batchSize);
}
if (compression != null || codec != null) {
props.put("compression.type", codec != null ? codec : compression);
}
if (retries > 0) {
props.put("retries", retries);
}
this.blockOnBufferFull = blockOnBufferFull;
props.put("block.on.buffer.full", blockOnBufferFull);
setServoReporter();
if (etcProps != null) {
props.putAll(etcProps);
}
this.keyTopicMap = keyTopicMap != null ? keyTopicMap : Maps.<String, String>newHashMap();
this.retentionPartitioner = retentionPartitioner;
this.metadataFetchedTopicSet = new CopyOnWriteArraySet<>();
this.metadataWaitingQueue = new ArrayBlockingQueue<>(metadataWaitingQueueSize > 0 ? metadataWaitingQueueSize : 1024);
this.executor = Executors.newSingleThreadExecutor(
new ThreadFactoryBuilder().setDaemon(false).setNameFormat("KafkaSink-MetadataFetcher-%d").build());
}
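    // Illustrative configuration sketch (not part of the original source): property names follow
    // the @JsonProperty annotations on the constructor above, either "metadata.broker.list" or
    // "bootstrap.servers" may carry the broker list, and all values shown are placeholders; the
    // "type" value must match however this sink is registered (the test above uses "kafkaV1").
    //
    //   {
    //     "type": "kafkaV1",
    //     "client.id": "kafkasink",
    //     "bootstrap.servers": "broker1:9092,broker2:9092",
    //     "acks": "1",
    //     "compression.type": "lz4",
    //     "keyTopicMap": { "routing_key": "customerId" }
    //   }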
private void setServoReporter() {
props.put("metric.reporters", Lists.newArrayList(ServoReporter.class.getName()));
        // this is needed because ProducerConfig cannot retrieve an undefined key
try {
Field f = ProducerConfig.class.getDeclaredField("config");
f.setAccessible(true);
ConfigDef config = (ConfigDef) f.get(ConfigDef.class);
config.define(ServoReporter.class.getName(), ConfigDef.Type.CLASS, ServoReporter.class, ConfigDef.Importance.LOW, "");
} catch (Exception e) {
// swallow exception
}
props.put(ServoReporter.class.getName(), ServoReporter.class);
}
private AtomicLong queuedRecords = new AtomicLong(0);
private AtomicLong sentRecords = new AtomicLong(0);
@VisibleForTesting
protected AtomicLong droppedRecords = new AtomicLong(0);
private volatile Action3 recordCounterListener;
public void setRecordCounterListener(Action3 action) {
this.recordCounterListener = action;
}
@Override
public void writeTo(final MessageContainer message) {
queuedRecords.incrementAndGet();
DynamicCounter.increment(
MonitorConfig
.builder("queuedRecord")
.withTag(TagKey.ROUTING_KEY, message.getRoutingKey())
.build());
runRecordCounterListener();
if (metadataFetchedTopicSet.contains(message.getRoutingKey())) {
sendMessage(message);
} else {
if(!metadataWaitingQueue.offer(message)) {
dropMessage(message.getRoutingKey(), "metadataWaitingQueueFull");
}
}
}
private void runRecordCounterListener() {
if (recordCounterListener != null) {
recordCounterListener.call(queuedRecords.get(), sentRecords.get(), droppedRecords.get());
}
}
private void sendMessage(final MessageContainer message) {
try {
List<PartitionInfo> partitionInfos = producer.partitionsFor(message.getRoutingKey());
int partition = retentionPartitioner.getKey(message.getRoutingKey(), partitionInfos);
if (!keyTopicMap.isEmpty()) {
try {
Map<String, Object> msgMap = message.getEntity(new TypeReference<Map<String, Object>>() {
});
Object keyField = msgMap.get(keyTopicMap.get(message.getRoutingKey()));
if (keyField != null) {
long hashCode = keyField.hashCode();
partition = Math.abs((int) (hashCode ^ (hashCode >>> 32))) % partitionInfos.size();
}
} catch (Exception e) {
log.error("Exception on getting key field: " + e.getMessage());
}
}
producer.send(
new ProducerRecord(message.getRoutingKey(), partition, null, message.getMessage().getPayload()),
new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception e) {
if (e != null) {
log.error("Exception while sending", e);
DynamicCounter.increment(
MonitorConfig
.builder("failedRecord")
.withTag(TagKey.ROUTING_KEY, message.getRoutingKey())
.build());
droppedRecords.incrementAndGet();
runRecordCounterListener();
} else {
DynamicCounter.increment(
MonitorConfig
.builder("sentRecord")
.withTag(TagKey.ROUTING_KEY, message.getRoutingKey())
.build());
sentRecords.incrementAndGet();
runRecordCounterListener();
}
}
});
}
catch (Throwable e) {
log.error("Exception before sending", e);
dropMessage(message.getRoutingKey(), e.getClass().getName());
}
}
@Override
public void open() {
producer = new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer());
executor.submit(new Runnable() {
@Override
public void run() {
while(true) {
final MessageContainer message;
try {
message = metadataWaitingQueue.poll(1, TimeUnit.SECONDS);
} catch (InterruptedException e) {
continue;
}
if(message == null) {
continue;
}
// check poison msg for shutdown
if(message == SHUTDOWN_POISON_MSG) {
break;
}
try {
if (!metadataFetchedTopicSet.contains(message.getRoutingKey())) {
producer.partitionsFor(message.getRoutingKey());
metadataFetchedTopicSet.add(message.getRoutingKey());
}
sendMessage(message);
} catch(Throwable t) {
log.error("failed to get metadata: " + message.getRoutingKey(), t);
// try to put back to the queue if there is still space
if(!metadataWaitingQueue.offer(message)) {
dropMessage(message.getRoutingKey(), "metadataWaitingQueueFull");
}
}
}
}
});
}
@Override
public void close() {
try {
// try to insert a poison msg for shutdown
// ignore success or failure
metadataWaitingQueue.offer(SHUTDOWN_POISON_MSG);
executor.shutdown();
executor.awaitTermination(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
// ignore
}
try {
producer.close();
} catch(Exception e) {
log.error("failed to close kafka producer", e);
}
}
@Override
public String recvNotice() {
return null;
}
@Override
public String getStat() {
Map<MetricName,? extends Metric> metrics = producer.metrics();
StringBuilder sb = new StringBuilder();
// add kafka producer stats, which are rates
for( Map.Entry<MetricName,? extends Metric> e : metrics.entrySet() ){
sb.append("kafka.").append(e.getKey()).append(": ").append(e.getValue().value()).append('\n');
}
return sb.toString();
}
@Override
public long getNumOfPendingMessages() {
return queuedRecords.get() - sentRecords.get() - droppedRecords.get();
}
@Override
public long checkPause() {
if (blockOnBufferFull) {
return 0; // do not pause here, will be blocked
} else {
//producer.metrics().get(new MetricName("buffer-total-bytes", "producer-metrics", "desc", "client-id", "kafkasink"))
double totalBytes = producer.metrics().get(
new MetricName(
"buffer-total-bytes",
"producer-metrics",
"desc",
"client-id",
props.getProperty("client.id"))).value();
double availableBytes = producer.metrics().get(
new MetricName(
"buffer-available-bytes",
"producer-metrics",
"desc",
"client-id",
props.getProperty("client.id"))).value();
double consumedMemory = totalBytes - availableBytes;
double memoryRate = consumedMemory / totalBytes;
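                // Worked example (illustrative numbers): with buffer-total-bytes = 32 MB and
                // buffer-available-bytes = 8 MB, consumedMemory = 24 MB and memoryRate = 0.75,
                // so at an outgoing-byte-rate of 1 MB/s the branch below suggests a pause of
                // roughly 24,000 ms.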
if (memoryRate >= 0.5) {
double outgoingRate = producer.metrics().get(
new MetricName(
"outgoing-byte-rate",
"producer-metrics",
"desc",
"client-id",
props.getProperty("client.id"))).value();
double throughputRate = Math.max(outgoingRate, 1.0);
return (long) (consumedMemory / throughputRate * 1000);
} else {
return 0;
}
}
}
private void dropMessage(final String routingKey, final String reason) {
DynamicCounter.increment(
MonitorConfig
.builder("droppedRecord")
.withTag(TagKey.ROUTING_KEY, routingKey)
.withTag(TagKey.DROPPED_REASON, reason)
.build());
droppedRecords.incrementAndGet();
runRecordCounterListener();
}
}
| 1,278 |
0 |
Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink/kafka/KafkaRetentionPartitioner.java
|
package com.netflix.suro.sink.kafka;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.inject.Singleton;
import com.netflix.config.DynamicLongProperty;
import org.apache.kafka.common.PartitionInfo;
import java.util.List;
import java.util.Random;
import java.util.concurrent.*;
@Singleton
public class KafkaRetentionPartitioner {
private final Random prng;
// index cache for each topic
private final ConcurrentMap<String, Integer> indexCache;
private static DynamicLongProperty retention = new DynamicLongProperty(
"kafka.producer.partition.retention", 1000);
private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(
new ThreadFactoryBuilder().setDaemon(false).setNameFormat("KafkaRetentionPartitioner-%d").build());
    public KafkaRetentionPartitioner() {
        this.prng = new Random();
        this.indexCache = new ConcurrentHashMap<>();
        // clear the per-topic partition cache every retention interval so each topic
        // periodically re-randomizes its sticky partition
        scheduler.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                indexCache.clear();
            }
        }, retention.get(), retention.get(), TimeUnit.MILLISECONDS);
    }
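    // Usage sketch (mirrors how KafkaSink.sendMessage() in this module consumes it):
    //
    //   List<PartitionInfo> partitions = producer.partitionsFor(topic);
    //   int partition = retentionPartitioner.getKey(topic, partitions);
    //
    // The chosen partition is cached per topic and re-randomized whenever the
    // "kafka.producer.partition.retention" interval clears the cache.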
public int getKey(String topic, List<PartitionInfo> partitions) {
if(topic == null) {
throw new IllegalArgumentException("topic is null");
}
if(partitions.isEmpty()) {
throw new IllegalArgumentException("no partitions for topic: " + topic);
}
final int numPartitions = partitions.size();
Integer index = indexCache.get(topic);
if(index != null) {
// stick to the same partition in cache
return index;
} else {
// randomly pick a new partition from [0, numPartitions) range
int partition = prng.nextInt(numPartitions);
// try to find a partition with leader
for (int i = 0; i < numPartitions; i++) {
if (partitions.get(partition).leader() != null) {
// found a partition with leader
index = indexCache.putIfAbsent(topic, partition);
return index != null ? index : partition;
} else {
// try next partition
partition = (partition + 1) % numPartitions;
}
}
            // no partition currently has a leader, so return the last candidate
            // (which has wrapped back to the original random pick) without caching it
return partition;
}
}
}
| 1,279 |
0 |
Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink/kafka/SuroKeyedMessage.java
|
package com.netflix.suro.sink.kafka;
import com.netflix.suro.message.Message;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class SuroKeyedMessage extends Message {
static {
Message.classMap.put((byte) 1, SuroKeyedMessage.class);
}
private long key;
private Message message = new Message();
public SuroKeyedMessage() {}
public SuroKeyedMessage(long key, Message message) {
this.key = key;
this.message = message;
}
@Override
public String getRoutingKey() {
return message.getRoutingKey();
}
@Override
public byte[] getPayload() {
return message.getPayload();
}
public long getKey() { return key; }
@Override
public String toString() {
return String.format("routingKey: %s, payload byte size: %d",
getRoutingKey(),
getPayload().length);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SuroKeyedMessage keyedMessage = (SuroKeyedMessage) o;
if (key == keyedMessage.key) {
return message.equals(keyedMessage.message);
} else {
return false;
}
}
@Override
public int hashCode() {
return (int) (key * 31 + message.hashCode());
}
@Override
public void write(DataOutput dataOutput) throws IOException {
dataOutput.writeLong(key);
message.write(dataOutput);
}
@Override
public void readFields(DataInput dataInput) throws IOException {
key = dataInput.readLong();
message.readFields(dataInput);
}
}
| 1,280 |
0 |
Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/input
|
Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/input/remotefile/TestJsonLine.java
|
package com.netflix.suro.input.remotefile;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.MessageContainer;
import org.junit.Test;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class TestJsonLine {
@Test
public void shouldReturnStaticRoutingKey() throws Exception {
ObjectMapper jsonMapper = new DefaultObjectMapper();
JsonLine jsonLine = new JsonLine(
"staticRoutingKey",
null,
new DefaultObjectMapper());
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>().put("f1", "v1").put("f2", "v2").build();
List<MessageContainer> messages = jsonLine.parse(jsonMapper.writeValueAsString(msgMap));
assertEquals(messages.size(), 1);
assertEquals(messages.get(0).getRoutingKey(), "staticRoutingKey");
assertEquals(messages.get(0).getEntity(S3Consumer.typeReference), msgMap);
}
@Test
public void shouldReturnRoutingKeyField() throws Exception {
ObjectMapper jsonMapper = new DefaultObjectMapper();
JsonLine jsonLine = new JsonLine(
null,
"f1",
new DefaultObjectMapper());
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>().put("f1", "v1").put("f2", "v2").build();
List<MessageContainer> messages = jsonLine.parse(jsonMapper.writeValueAsString(msgMap));
assertEquals(messages.size(), 1);
assertEquals(messages.get(0).getRoutingKey(), "v1");
assertEquals(messages.get(0).getEntity(S3Consumer.typeReference), msgMap);
}
@Test
public void shouldReturnStaticRoutingKeyOnNonExistingRoutingKeyField() throws Exception {
ObjectMapper jsonMapper = new DefaultObjectMapper();
JsonLine jsonLine = new JsonLine(
"defaultRoutingKey",
"f1",
new DefaultObjectMapper());
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>().put("f3", "v3").put("f2", "v2").build();
List<MessageContainer> messages = jsonLine.parse(jsonMapper.writeValueAsString(msgMap));
assertEquals(messages.size(), 1);
assertEquals(messages.get(0).getRoutingKey(), "defaultRoutingKey");
assertEquals(messages.get(0).getEntity(S3Consumer.typeReference), msgMap);
}
@Test
public void testWithNonParseableMessage() throws Exception {
JsonLine jsonLine = new JsonLine(
"defaultRoutingKey",
"f1",
new DefaultObjectMapper());
List<MessageContainer> messages = jsonLine.parse("non_parseable_msg");
assertEquals(messages.size(), 1);
assertEquals(messages.get(0).getRoutingKey(), "defaultRoutingKey");
try {
messages.get(0).getEntity(S3Consumer.typeReference);
assertEquals(messages.get(0).getEntity(String.class), "non_parseable_msg");
fail("exception should be thrown");
} catch (Exception e) {}
jsonLine = new JsonLine(
null,
"f1",
new DefaultObjectMapper());
assertEquals(jsonLine.parse("non_parseable_msg").size(), 0);
}
}
| 1,281 |
0 |
Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/input
|
Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/input/remotefile/TestS3Consumer.java
|
package com.netflix.suro.input.remotefile;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.util.StringInputStream;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.netflix.suro.input.RecordParser;
import com.netflix.suro.input.SuroInput;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.routing.MessageRouter;
import com.netflix.suro.sink.notice.Notice;
import com.netflix.util.Pair;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.*;
public class TestS3Consumer {
@Rule
public TemporaryFolder tempDir = new TemporaryFolder();
private ObjectMapper jsonMapper = new DefaultObjectMapper();
private final int testFileCount = 6;
@Test
public void test() throws Exception {
final String downloadPath = tempDir.newFolder().getAbsolutePath();
final CountDownLatch latch = new CountDownLatch(1);
final ConcurrentSkipListSet<String> removedKeys = new ConcurrentSkipListSet<String>();
final AtomicInteger count = new AtomicInteger(0);
final AtomicInteger peekedMsgCount = new AtomicInteger(0);
final AtomicInteger invalidMsgCount = new AtomicInteger(0);
Notice<String> mockedNotice = new Notice<String>() {
@Override
public void init() {
}
@Override
public boolean send(String message) {
return false;
}
@Override
public String recv() {
return null;
}
@Override
public Pair<String, String> peek() {
if (peekedMsgCount.get() == 1) {
// return invalid msg
invalidMsgCount.incrementAndGet();
return new Pair<String, String>("receiptHandle" + peekedMsgCount.getAndIncrement(), "invalid_msg");
}
if (peekedMsgCount.get() == 3) {
// return invalid msg
invalidMsgCount.incrementAndGet();
return new Pair<String, String>("receiptHandle" + peekedMsgCount.getAndIncrement(), "{\n" +
" \"Message\": {\n" +
" \"s3Bucket\": \"bucket\",\n" +
" \"s3ObjectKey\": \"key\"\n" +
" }\n" +
"}");
}
if (peekedMsgCount.get() == 5) {
// return invalid msg
invalidMsgCount.incrementAndGet();
return new Pair<String, String>("receiptHandle" + peekedMsgCount.getAndIncrement(), "{\n" +
" \"Message\": {\n" +
" \"Bucket\": \"bucket\",\n" +
" \"ObjectKey\": [\"key\"]\n" +
" }\n" +
"}");
}
try {
List<String> dummyKeys = new ArrayList<String>();
dummyKeys.add("prefix/key" + (count.getAndIncrement()));
dummyKeys.add("prefix/key" + (count.getAndIncrement()));
return new Pair<String, String>(
"receiptHandle" + peekedMsgCount.getAndIncrement(),
jsonMapper.writeValueAsString(
new ImmutableMap.Builder<String, Object>()
.put("Message",
new ImmutableMap.Builder<String, Object>()
.put("s3Bucket", "bucket")
.put("s3ObjectKey", dummyKeys)
.build())
.build()));
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
} finally {
if (count.get() == testFileCount) {
latch.countDown();
}
}
}
@Override
public void remove(String key) {
removedKeys.add(key);
}
@Override
public String getStat() {
return null;
}
};
AWSCredentialsProvider awsCredentials = mock(AWSCredentialsProvider.class);
AWSCredentials credentials = mock(AWSCredentials.class);
doReturn("accessKey").when(credentials).getAWSAccessKeyId();
doReturn("secretKey").when(credentials).getAWSSecretKey();
doReturn(credentials).when(awsCredentials).getCredentials();
MessageRouter router = mock(MessageRouter.class);
int numOfLines = 3;
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < numOfLines; ++i) {
sb.append("line" + i).append('\n');
}
RestS3Service s3 = mock(RestS3Service.class);
doAnswer(new Answer<S3Object>() {
@Override
public S3Object answer(InvocationOnMock invocation) throws Throwable {
S3Object obj = mock(S3Object.class);
doReturn(new StringInputStream(sb.toString())).when(obj).getDataInputStream();
return obj;
}
}).when(s3).getObject(anyString(), anyString());
RecordParser recordParser = mock(RecordParser.class);
List<MessageContainer> messages = new ArrayList<MessageContainer>();
int numOfMessages = 5;
for (int i = 0; i < numOfMessages; ++i) {
messages.add(mock(MessageContainer.class));
}
doReturn(messages).when(recordParser).parse(anyString());
S3Consumer consumer = new S3Consumer(
"id",
"s3Endpoint",
mockedNotice,
1000,
3,
downloadPath,
recordParser,
awsCredentials,
router,
jsonMapper,
s3);
consumer.start();
latch.await();
consumer.shutdown();
verify(router, times(numOfMessages * numOfLines * count.get())).process(any(SuroInput.class), any(MessageContainer.class));
assertEquals(removedKeys.size(), peekedMsgCount.get() - invalidMsgCount.get());
// no files under downloadPath
assertEquals(new File(downloadPath).list().length, 0);
}
}
| 1,282 |
0 |
Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink/remotefile/SuroSinkPlugin.java
|
package com.netflix.suro.sink.remotefile;
import com.netflix.suro.SuroPlugin;
import com.netflix.suro.sink.SuroSink;
import com.netflix.suro.sink.localfile.LocalFileSink;
import com.netflix.suro.sink.notice.NoNotice;
import com.netflix.suro.sink.notice.QueueNotice;
import com.netflix.suro.sink.remotefile.formatter.DateRegionStackFormatter;
import com.netflix.suro.sink.remotefile.formatter.DynamicRemotePrefixFormatter;
public class SuroSinkPlugin extends SuroPlugin {
@Override
protected void configure() {
this.addSinkType(LocalFileSink.TYPE, LocalFileSink.class);
this.addSinkType(S3FileSink.TYPE, S3FileSink.class);
this.addSinkType(HdfsFileSink.TYPE, HdfsFileSink.class);
this.addRemotePrefixFormatterType(DateRegionStackFormatter.TYPE, DateRegionStackFormatter.class);
this.addRemotePrefixFormatterType(DynamicRemotePrefixFormatter.TYPE, DynamicRemotePrefixFormatter.class);
this.addSinkType(SuroSink.TYPE, SuroSink.class);
this.addNoticeType(NoNotice.TYPE, NoNotice.class);
this.addNoticeType(QueueNotice.TYPE, QueueNotice.class);
}
}
| 1,283 |
0 |
Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink/remotefile/TestPrefixFormatter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.remotefile;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.name.Names;
import com.netflix.config.ConfigurationManager;
import com.netflix.suro.jackson.DefaultObjectMapper;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
public class TestPrefixFormatter {
private static Injector injector = Guice.createInjector(
new SuroSinkPlugin(),
new AbstractModule() {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class);
bind(String.class).annotatedWith(Names.named("region")).toInstance("eu-west-1");
bind(String.class).annotatedWith(Names.named("stack")).toInstance("gps");
}
}
);
@Test
public void testDynamicStatic() throws IOException {
String spec = "{\n" +
" \"type\": \"dynamic\",\n" +
" \"format\": \"static(prefix)\"\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
RemotePrefixFormatter formatter = mapper.readValue(spec, new TypeReference<RemotePrefixFormatter>(){});
assertEquals(formatter.get(), "prefix/");
}
@Test
public void testDynamicDate() throws IOException {
String spec = "{\n" +
" \"type\": \"dynamic\",\n" +
" \"format\": \"date(YYYYMMDD)\"\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
RemotePrefixFormatter formatter = mapper.readValue(spec, new TypeReference<RemotePrefixFormatter>(){});
DateTimeFormatter format = DateTimeFormat.forPattern("YYYYMMDD");
assertEquals(formatter.get(), format.print(new DateTime()) + "/");
}
@Test
public void testDynamicProperty() throws IOException {
String spec = "{\n" +
" \"type\": \"dynamic\",\n" +
" \"format\": \"property(prop1)\"\n" +
"}";
ConfigurationManager.getConfigInstance().setProperty("prop1", "prop1");
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
RemotePrefixFormatter formatter = mapper.readValue(spec, new TypeReference<RemotePrefixFormatter>(){});
assertEquals(formatter.get(), "prop1/");
}
@Test
public void testDynamicCombination() throws IOException {
String spec = "{\n" +
" \"type\": \"dynamic\",\n" +
" \"format\": \"static(routing_key);date(YYYYMMDD);property(prop1)\"\n" +
"}";
ConfigurationManager.getConfigInstance().setProperty("prop1", "propvalue1");
DateTimeFormatter format = DateTimeFormat.forPattern("YYYYMMDD");
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
RemotePrefixFormatter formatter = mapper.readValue(spec, new TypeReference<RemotePrefixFormatter>(){});
assertEquals(formatter.get(), "routing_key/" + format.print(new DateTime()) + "/propvalue1/");
}
@Test
public void testInjectedDateRegionStack() throws IOException {
String spec = "{\n" +
" \"type\": \"DateRegionStack\",\n" +
" \"date\": \"YYYYMMDD\"\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
RemotePrefixFormatter formatter = mapper.readValue(spec, new TypeReference<RemotePrefixFormatter>() {});
DateTimeFormatter format = DateTimeFormat.forPattern("YYYYMMDD");
String answer = String.format("%s/eu-west-1/gps/", format.print(new DateTime()));
assertEquals(formatter.get(), answer);
}
@Test
public void testDateRegionStack() throws IOException {
String spec = "{\n" +
" \"type\": \"DateRegionStack\",\n" +
" \"date\": \"YYYYMMDD\",\n" +
" \"region\": \"us-east-1\",\n" +
" \"stack\": \"normal\"\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
RemotePrefixFormatter formatter = mapper.readValue(spec, new TypeReference<RemotePrefixFormatter>() {});
DateTimeFormatter format = DateTimeFormat.forPattern("YYYYMMDD");
String answer = String.format("%s/us-east-1/normal/", format.print(new DateTime()));
assertEquals(formatter.get(), answer);
}
}
| 1,284 |
0 |
Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink/remotefile/TestS3FileSink.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.remotefile;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.suro.connection.TestConnectionPool;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageSetReader;
import com.netflix.suro.message.StringMessage;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.sink.localfile.LocalFileSink;
import com.netflix.suro.sink.localfile.LocalFileSink.SpaceChecker;
import org.jets3t.service.ServiceException;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
import org.jets3t.service.multi.s3.S3ServiceEventListener;
import org.jets3t.service.utils.MultipartUtils;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.*;
public class TestS3FileSink {
@Rule
public TemporaryFolder tempDir = new TemporaryFolder();
@Test
public void testDefaultParameters() throws Exception {
String testDir = tempDir.newFolder().getAbsolutePath();
Injector injector = getInjector();
final String s3FileSink = "{\n" +
" \"type\": \"" + S3FileSink.TYPE + "\",\n" +
" \"localFileSink\": {\n" +
" \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
" \"outputDir\": \"" + testDir + "\"\n" +
" },\n" +
" \"bucket\": \"s3bucket\"\n" +
"}";
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
Sink sink = mapper.readValue(s3FileSink, new TypeReference<Sink>(){});
sink.open();
for (Message m : new MessageSetReader(TestConnectionPool.createMessageSet(100000))) {
sink.writeTo(new StringMessage(m));
}
sink.close();
// check every file uploaded, deleted, and notified
File[] files = getFiles(testDir);
assertEquals(files.length, 0);
int count = 0;
while (sink.recvNotice() != null) {
++count;
}
assertTrue(count > 0);
}
@Test
public void test() throws Exception {
String testDir = tempDir.newFolder().getAbsolutePath();
final String s3FileSink = "{\n" +
" \"type\": \"" + S3FileSink.TYPE + "\",\n" +
" \"localFileSink\": {\n" +
" \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
" \"outputDir\": \"" + testDir + "\",\n" +
" \"writer\": {\n" +
" \"type\": \"text\"\n" +
" },\n" +
//" \"maxFileSize\": 10240,\n" +
" \"rotationPeriod\": \"PT1m\",\n" +
" \"minPercentFreeDisk\": 50,\n" +
" \"notice\": {\n" +
" \"type\": \"queue\"\n" +
" }\n" +
" },\n" +
" \"bucket\": \"s3bucket\",\n" +
" \"maxPartSize\": 10000,\n" +
" \"concurrentUpload\":5,\n" +
" \"notice\": {\n" +
" \"type\": \"queue\"\n" +
" },\n" +
" \"prefixFormatter\": {" +
" \"type\": \"DateRegionStack\",\n" +
" \"date\": \"YYYYMMDD\"}\n" +
"}";
Injector injector = getInjector();
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
Sink sink = mapper.readValue(s3FileSink, new TypeReference<Sink>(){});
sink.open();
for (Message m : new MessageSetReader(TestConnectionPool.createMessageSet(100000))) {
sink.writeTo(new StringMessage(m));
}
sink.close();
// check every file uploaded, deleted, and notified
File[] files = getFiles(testDir);
assertEquals(files.length, 0);
int count = 0;
while (sink.recvNotice() != null) {
++count;
}
assertTrue(count > 0);
}
@Test
public void testTooManyFiles() throws IOException {
String testDir = tempDir.newFolder().getAbsolutePath();
Injector injector = getInjector();
final String s3FileSink = "{\n" +
" \"type\": \"" + S3FileSink.TYPE + "\",\n" +
" \"localFileSink\": {\n" +
" \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
" \"outputDir\": \"" + testDir + "\"\n" +
" },\n" +
" \"bucket\": \"s3bucket\"\n" +
"}";
// pre-create many files
new File(testDir).mkdir();
for (int i = 0; i < 100; ++i) {
createFile(testDir, i);
}
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
Sink sink = mapper.readValue(s3FileSink, new TypeReference<Sink>(){});
sink.open();
sink.close();
// check every file uploaded, deleted, and notified
File[] files = getFiles(testDir);
assertEquals(files.length, 0);
int count = 0;
while (sink.recvNotice() != null) {
++count;
}
assertEquals(count, 100);
}
private void createFile(String testDir, int i) throws IOException {
File f = new File(testDir, "fileNo" + i + ".done");
f.createNewFile();
FileOutputStream o = new FileOutputStream(f);
o.write("temporaryStringContents".getBytes());
o.close();
}
@Test
public void testUploadAll() throws IOException {
String testDir = tempDir.newFolder().getAbsolutePath();
Injector injector = getInjector();
final String s3FileSink = "{\n" +
" \"type\": \"" + S3FileSink.TYPE + "\",\n" +
" \"localFileSink\": {\n" +
" \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
" \"outputDir\": \"" + testDir + "\"\n" +
" },\n" +
" \"bucket\": \"s3bucket\",\n" +
" \"batchUpload\":true\n" +
"}";
// pre-create many files
new File(testDir).mkdir();
for (int i = 0; i < 100; ++i) {
createFile(testDir, i);
}
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
S3FileSink sink = mapper.readValue(s3FileSink, new TypeReference<Sink>(){});
sink.open();
assertEquals(sink.getNumOfPendingMessages(), 100);
sink.uploadAll(testDir);
// check every file uploaded, deleted, and notified
int count = 0;
while (sink.recvNotice() != null) {
++count;
}
assertEquals(count, 100);
File[] files = getFiles(testDir);
assertEquals(files.length, 0);
assertEquals(sink.getNumOfPendingMessages(), 0);
}
@Test
public void testAclFailure() throws IOException, ServiceException, InterruptedException {
String testDir = tempDir.newFolder().getAbsolutePath();
final String s3FileSink = "{\n" +
" \"type\": \"" + S3FileSink.TYPE + "\",\n" +
" \"localFileSink\": {\n" +
" \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
" \"outputDir\": \"" + testDir + "\"\n" +
" },\n" +
" \"bucket\": \"s3bucket\"" +
"}";
Injector injector = getInjector();
ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
S3FileSink sink = mapper.readValue(s3FileSink, new TypeReference<Sink>(){});
GrantAcl grantAcl = mock(GrantAcl.class);
when(grantAcl.grantAcl(any(S3Object.class))).thenReturn(false);
sink.open();
sink.grantAcl = grantAcl;
for (Message m : new MessageSetReader(TestConnectionPool.createMessageSet(100000))) {
sink.writeTo(new StringMessage(m));
}
sink.close();
File[] files = getFiles(testDir);
assertTrue(files.length > 0);
int count = 0;
while (sink.recvNotice() != null) {
++count;
}
assertEquals(count, 0);
}
private File[] getFiles(String testDir) {
        // list the non-hidden files remaining in the directory
File dir = new File(testDir);
return dir.listFiles(new FilenameFilter() {
@Override
public boolean accept(File file, String name) {
if (!name.startsWith(".")) {
return true;
} else {
return false;
}
}
});
}
private Injector getInjector() {
return Guice.createInjector(
new SuroSinkPlugin(),
new AbstractModule() {
@Override
protected void configure() {
bind(ObjectMapper.class).to(DefaultObjectMapper.class);
bind(AWSCredentialsProvider.class)
.toInstance(new AWSCredentialsProvider() {
@Override
public AWSCredentials getCredentials() {
return new AWSCredentials() {
@Override
public String getAWSAccessKeyId() {
return "accessKey";
}
@Override
public String getAWSSecretKey() {
return "secretKey";
}
};
}
@Override
public void refresh() {
}
});
MultipartUtils mpUtils = mock(MultipartUtils.class);
try {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Thread.sleep(1000);
return null;
}
}).when(mpUtils).uploadObjects(
any(String.class),
any(RestS3Service.class),
any(List.class),
any(S3ServiceEventListener.class));
bind(MultipartUtils.class).toInstance(mpUtils);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
bind(SpaceChecker.class).toInstance(mock(SpaceChecker.class));
}
}
);
}
}
| 1,285 |
0 |
Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink
|
Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink/remotefile/TestGrantAcl.java
|
package com.netflix.suro.sink.remotefile;
import org.jets3t.service.acl.AccessControlList;
import org.jets3t.service.acl.CanonicalGrantee;
import org.jets3t.service.acl.GrantAndPermission;
import org.jets3t.service.acl.Permission;
import org.jets3t.service.acl.gs.GSAccessControlList;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
import org.junit.Test;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.*;
public class TestGrantAcl {
@Test
public void test() throws Exception {
RestS3Service s3Service = mock(RestS3Service.class);
AccessControlList acl = new AccessControlList();
doReturn(acl).when(s3Service).getObjectAcl("bucket", "key");
doNothing().when(s3Service).putObjectAcl("bucket", "key", acl);
GrantAcl grantAcl = new GrantAcl(s3Service, "1,2,3", 1);
S3Object obj = new S3Object("key");
obj.setBucketName("bucket");
obj.setAcl(GSAccessControlList.REST_CANNED_BUCKET_OWNER_FULL_CONTROL);
assertTrue(grantAcl.grantAcl(obj));
Set<GrantAndPermission> grants = new HashSet<GrantAndPermission>(Arrays.asList(acl.getGrantAndPermissions()));
assertEquals(grants.size(), 3);
Set<GrantAndPermission> grantSet = new HashSet<GrantAndPermission>();
for (int i = 1; i <= 3; ++i) {
grantSet.add(new GrantAndPermission(new CanonicalGrantee(Integer.toString(i)), Permission.PERMISSION_READ));
}
}
}
| 1,286 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/input
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/input/remotefile/CloudTrail.java
|
package com.netflix.suro.input.remotefile;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.suro.input.RecordParser;
import com.netflix.suro.message.DefaultMessageContainer;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageContainer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class CloudTrail implements RecordParser {
private static Logger log = LoggerFactory.getLogger(CloudTrail.class);
public static final String TYPE = "cloudtrail";
private final ObjectMapper jsonMapper;
private final String routingKey;
@JsonCreator
public CloudTrail(
@JsonProperty("routingKey") String routingKey,
@JacksonInject ObjectMapper jsonMapper
) {
this.routingKey = routingKey == null ? "cloudtrail" : routingKey;
this.jsonMapper = jsonMapper;
}
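    // Illustrative input shape (a sketch; the record fields shown are placeholders): parse()
    // expects a JSON blob with a top-level "Records" array and emits one message per record
    // under the configured routing key (default "cloudtrail").
    //
    //   {
    //     "Records": [
    //       { "eventName": "DescribeInstances", "awsRegion": "us-east-1" },
    //       { "eventName": "PutObject", "awsRegion": "us-west-2" }
    //     ]
    //   }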
@Override
public List<MessageContainer> parse(String data) {
List<MessageContainer> messages = new ArrayList<MessageContainer>();
try {
Map<String, Object> blob = jsonMapper.readValue(data, S3Consumer.typeReference);
List<Map<String, Object>> records = (List<Map<String, Object>>) blob.get("Records");
for (Map<String, Object> record : records) {
messages.add(new DefaultMessageContainer(
new Message(routingKey, jsonMapper.writeValueAsBytes(record)),
jsonMapper));
}
} catch (Exception e) {
log.error("Exception on parsing: " + e.getMessage(), e);
DynamicCounter.increment(
MonitorConfig.builder("recordParseError").withTag("parserType", TYPE).build());
}
return messages;
}
}
| 1,287 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/input
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/input/remotefile/S3Consumer.java
|
package com.netflix.suro.input.remotefile;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSSessionCredentials;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.suro.input.RecordParser;
import com.netflix.suro.input.SuroInput;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.routing.MessageRouter;
import com.netflix.suro.sink.notice.Notice;
import com.netflix.suro.sink.remotefile.AWSSessionCredentialsAdapter;
import com.netflix.util.Pair;
import org.apache.commons.io.FileUtils;
import org.jets3t.service.Jets3tProperties;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
import org.jets3t.service.security.AWSCredentials;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;
import java.util.zip.GZIPInputStream;
public class S3Consumer implements SuroInput {
public static final String TYPE = "s3";
private static Logger log = LoggerFactory.getLogger(S3Consumer.class);
private final String id;
private final String s3Endpoint;
private final long timeout;
private final int concurrentDownload;
private final Notice<String> notice;
private final RecordParser recordParser;
private final String downloadPath;
private AWSCredentialsProvider credentialsProvider;
private RestS3Service s3Service;
private volatile boolean running = false;
private ExecutorService executor;
private Future<?> runner = null;
private final MessageRouter router;
private final ObjectMapper jsonMapper;
@JsonCreator
public S3Consumer(
@JsonProperty("id") String id,
@JsonProperty("s3Endpoint") String s3Endpoint,
@JsonProperty("notice") Notice notice,
@JsonProperty("recvTimeout") long timeout,
@JsonProperty("concurrentDownload") int concurrentDownload,
@JsonProperty("downloadPath") String downloadPath,
@JsonProperty("recordParser") RecordParser recordParser,
@JacksonInject AWSCredentialsProvider credentialProvider,
@JacksonInject MessageRouter router,
@JacksonInject ObjectMapper jsonMapper,
@JacksonInject RestS3Service s3Service
) {
this.id = id;
this.s3Endpoint = s3Endpoint == null ? "s3.amazonaws.com" : s3Endpoint;
this.notice = notice;
this.timeout = timeout == 0 ? 1000 : timeout;
this.concurrentDownload = concurrentDownload == 0 ? 5 : concurrentDownload;
this.recordParser = recordParser;
this.downloadPath = downloadPath == null ? "/logs/suro-s3consumer/" + id : downloadPath;
this.credentialsProvider = credentialProvider;
this.router = router;
this.jsonMapper = jsonMapper;
this.s3Service = s3Service;
Preconditions.checkNotNull(notice, "notice is needed");
Preconditions.checkNotNull(recordParser, "recordParser is needed");
}
@Override
public String getId() {
return id;
}
private static final long MAX_PAUSE = 10000;
@Override
public void start() throws Exception {
if (s3Service == null) {
Jets3tProperties properties = new Jets3tProperties();
properties.setProperty("s3service.s3-endpoint", s3Endpoint);
if (credentialsProvider.getCredentials() instanceof AWSSessionCredentials) {
s3Service = new RestS3Service(
new AWSSessionCredentialsAdapter(credentialsProvider),
null, null, properties);
} else {
s3Service = new RestS3Service(
new AWSCredentials(
credentialsProvider.getCredentials().getAWSAccessKeyId(),
credentialsProvider.getCredentials().getAWSSecretKey()),
null, null, properties);
}
}
executor = new ThreadPoolExecutor(
concurrentDownload + 1,
concurrentDownload + 1,
0, TimeUnit.MILLISECONDS,
new ArrayBlockingQueue<Runnable>(concurrentDownload) {
@Override
public boolean offer(Runnable runnable) {
try {
put(runnable); // block instead of rejecting the task, so the producer slows down
} catch (InterruptedException e) {
// do nothing
}
return true;
}
},
new ThreadFactoryBuilder().setDaemon(true).setNameFormat("S3Consumer-" + id + "-%d").build());
notice.init();
running = true;
runner = executor.submit(new Runnable() {
@Override
public void run() {
while (running) {
try {
long pause = Math.min(pausedTime.get(), MAX_PAUSE);
if (pause > 0) {
Thread.sleep(pause);
pausedTime.set(0);
}
Pair<String, String> msg = notice.peek();
if (msg != null) {
executor.submit(createDownloadRunnable(msg));
} else {
Thread.sleep(timeout);
}
} catch (Exception e) {
log.error("Exception on receiving messages from Notice", e);
}
}
}
});
}
@Override
public void shutdown() {
try {
log.info("shutting down S3Consumer now");
running = false;
try {
runner.get();
} catch (InterruptedException e) {
// do nothing
} catch (ExecutionException e) {
log.error("Exception on stopping the task", e);
}
executor.shutdown();
while (true) {
if (!executor.awaitTermination(timeout * 5, TimeUnit.MILLISECONDS)) {
log.warn("downloading jobs were not terminated gracefully, retry again...");
} else {
break;
}
}
s3Service.shutdown();
} catch (Exception e) {
log.error("Exception on shutting down s3Service: " + e.getMessage(), e);
}
}
private AtomicLong pausedTime = new AtomicLong(0);
@Override
public void setPause(long ms) {
pausedTime.addAndGet(ms);
}
public static TypeReference<Map<String, Object>> typeReference = new TypeReference<Map<String, Object>>() {};
private static final int retryCount = 5;
private static final int sleepOnS3Exception = 5000;
private Runnable createDownloadRunnable(final Pair<String, String> msg) {
Map<String, Object> msgMap = null;
try {
msgMap = parseMessage(msg);
} catch (Exception e) {
log.error("Invalid message: " + e.getMessage(), e);
return createEmptyRunnable(msg);
}
String s3Bucket = null;
List<String> s3ObjectKey = null;
try {
s3Bucket = (String) msgMap.get("s3Bucket");
s3ObjectKey = (List<String>) msgMap.get("s3ObjectKey");
if (s3Bucket == null || s3ObjectKey == null) {
throw new NullPointerException("s3Bucket or s3ObjectKey is null");
}
} catch (Exception e) {
log.error("Invalid message: " + e.getMessage(), e);
return createEmptyRunnable(msg);
}
final String s3BucketClone = s3Bucket;
final List<String> s3ObjectKeyClone = s3ObjectKey;
return new Runnable() {
@Override
public void run() {
List<String> downloadedFiles = new ArrayList<String>();
for (String path : s3ObjectKeyClone) {
boolean success = false;
String localFileName = path.replace("/", "");
for (int i = 0; i < retryCount; ++i) {
try {
S3Object object = s3Service.getObject(s3BucketClone, path);
FileUtils.copyInputStreamToFile(object.getDataInputStream(),
new File(downloadPath, localFileName));
success = true;
log.info(path + " downloaded successfully");
break;
} catch (Exception e) {
log.error("Exception on downloading and processing file: " + e.getMessage(), e);
DynamicCounter.increment(
MonitorConfig.builder("s3Exception").withTag("consumerId", id).build());
try {
Thread.sleep(sleepOnS3Exception);
} catch (InterruptedException e1) {
// do nothing
}
}
}
if (success) {
downloadedFiles.add(localFileName);
}
}
if (s3ObjectKeyClone.size() == downloadedFiles.size()) {
for (String path : downloadedFiles) {
try {
BufferedReader br = new BufferedReader(
new InputStreamReader(
createInputStream(path)));
String data = null;
while ((data = br.readLine()) != null) {
try {
if (data.trim().length() > 0) {
for (MessageContainer msg : recordParser.parse(data)) {
router.process(S3Consumer.this, msg);
}
}
} catch (Exception e) {
log.error("Exception on parsing and processing: " + e.getMessage(), e);
}
}
br.close();
deleteFile(path);
} catch (Exception e) {
log.error("Exception on processing downloaded file: " + e.getMessage(), e);
DynamicCounter.increment(
MonitorConfig.builder("processingException").withTag("consumerId", id).build()
);
}
}
notice.remove(msg.first());
}
}
};
}
private void deleteFile(String path) {
File f = new File(downloadPath, path);
while (f.exists()) {
f.delete();
}
}
@VisibleForTesting
protected Map<String, Object> parseMessage(Pair<String, String> msg) throws IOException {
Map<String, Object> msgContainer = jsonMapper.readValue(msg.second(), typeReference);
if (!(msgContainer.get("Message") instanceof Map)) {
return jsonMapper.readValue(msgContainer.get("Message").toString(), typeReference);
} else {
return (Map<String, Object>) msgContainer.get("Message");
}
}
private InputStream createInputStream(String path) throws IOException {
if (path.endsWith(".gz")) {
return new GZIPInputStream(
new FileInputStream(new File(downloadPath, path)));
} else {
return new FileInputStream(new File(downloadPath, path));
}
}
private Runnable createEmptyRunnable(final Pair<String, String> msg) {
return new Runnable() {
@Override
public void run() {
log.error("invalid msg: " + msg.second());
}
};
}
@Override
public boolean equals(Object o) {
if (o instanceof S3Consumer) {
S3Consumer other = (S3Consumer) o;
return other.id.equals(id);
} else {
return false;
}
}
@Override
public int hashCode() {
return (getId()).hashCode();
}
}
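// Hedged illustration (not part of the Suro source; bucket and key names are made up):
// the notice payload this consumer expects is an SNS-style envelope whose "Message"
// field -- either an embedded object or an escaped JSON string -- carries "s3Bucket"
// and a list of "s3ObjectKey" entries.
class S3ConsumerNoticeExample {
    static final String SAMPLE_NOTICE =
            "{\"Message\": \"{\\\"s3Bucket\\\": \\\"example-bucket\\\", "
          + "\\\"s3ObjectKey\\\": [\\\"2014/01/01/part-0001.gz\\\"]}\"}";
    // parseMessage() unwraps this into {s3Bucket=example-bucket, s3ObjectKey=[...]},
    // and createDownloadRunnable() downloads each key into downloadPath before parsing it.
}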
| 1,288 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/input
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/input/remotefile/JsonLine.java
|
package com.netflix.suro.input.remotefile;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.netflix.suro.input.RecordParser;
import com.netflix.suro.message.DefaultMessageContainer;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageContainer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class JsonLine implements RecordParser {
private static Logger log = LoggerFactory.getLogger(JsonLine.class);
public static final String TYPE = "jsonline";
private final String routingKeyField;
private final String routingKey;
private final ObjectMapper jsonMapper;
@JsonCreator
public JsonLine(
@JsonProperty("routingKey") String routingKey,
@JsonProperty("routingKeyField") String routingKeyField,
@JacksonInject ObjectMapper jsonMapper
) {
this.routingKey = routingKey;
this.routingKeyField = routingKeyField;
this.jsonMapper = jsonMapper;
}
@Override
public List<MessageContainer> parse(String data) {
if (routingKey != null) {
return new ImmutableList.Builder<MessageContainer>()
.add(new DefaultMessageContainer(
new Message(routingKey, data.getBytes()),
jsonMapper))
.build();
} else {
try {
Map<String, Object> record = jsonMapper.readValue(data, S3Consumer.typeReference);
// guard against records that do not contain the routing key field
Object fieldValue = record.get(routingKeyField);
String routingKeyOnRecord = fieldValue == null ? null : fieldValue.toString();
if (Strings.isNullOrEmpty(routingKeyOnRecord)) {
routingKeyOnRecord = routingKey;
}
if (!Strings.isNullOrEmpty(routingKeyOnRecord)) {
return new ImmutableList.Builder<MessageContainer>()
.add(new DefaultMessageContainer(
new Message(routingKeyOnRecord, data.getBytes()),
jsonMapper))
.build();
} else {
return new ArrayList<MessageContainer>();
}
} catch (IOException e) {
log.error("Exception on parsing: " + e.getMessage(), e);
return new ArrayList<MessageContainer>();
}
}
}
}
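// Usage sketch (not part of the Suro source; the record and field names are illustrative).
// With a fixed routingKey every line is forwarded as-is; with a routingKeyField the key is
// read from each JSON record, and lines that cannot be parsed are logged and dropped.
class JsonLineParseExample {
    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();
        JsonLine byField = new JsonLine(null, "type", mapper);
        List<MessageContainer> out = byField.parse("{\"type\": \"request_trace\", \"status\": 200}");
        System.out.println(out.size()); // 1, routed to "request_trace"
    }
}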
| 1,289 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/S3FileSink.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.remotefile;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSSessionCredentials;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.suro.sink.localfile.FileNameFormatter;
import com.netflix.suro.sink.localfile.LocalFileSink;
import com.netflix.suro.sink.notice.Notice;
import com.netflix.suro.sink.notice.QueueNotice;
import org.codehaus.jettison.json.JSONObject;
import org.jets3t.service.Jets3tProperties;
import org.jets3t.service.S3ServiceException;
import org.jets3t.service.acl.gs.GSAccessControlList;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
import org.jets3t.service.security.AWSCredentials;
import org.jets3t.service.utils.MultipartUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
/**
* Sink for S3. This embeds a local file sink; when the local file sink rotates
* a file, that file is uploaded to S3.
*
* @author jbae
*/
public class S3FileSink extends RemoteFileSink {
public static final String TYPE = "s3";
private static Logger log = LoggerFactory.getLogger(S3FileSink.class);
private final String bucket;
private final String s3Endpoint;
private final long maxPartSize;
private final Notice<String> notice;
private MultipartUtils mpUtils;
private AWSCredentialsProvider credentialsProvider;
private RestS3Service s3Service;
@VisibleForTesting
protected GrantAcl grantAcl;
private final String s3Acl;
private final int s3AclRetries;
@JsonCreator
public S3FileSink(
@JsonProperty("localFileSink") LocalFileSink localFileSink,
@JsonProperty("bucket") String bucket,
@JsonProperty("s3Endpoint") String s3Endpoint,
@JsonProperty("maxPartSize") long maxPartSize,
@JsonProperty("concurrentUpload") int concurrentUpload,
@JsonProperty("notice") Notice notice,
@JsonProperty("prefixFormatter") RemotePrefixFormatter prefixFormatter,
@JsonProperty("batchUpload") boolean batchUpload,
@JsonProperty("s3Acl") String s3Acl,
@JsonProperty("s3AclRetries") int s3AclRetries,
@JacksonInject MultipartUtils mpUtils,
@JacksonInject AWSCredentialsProvider credentialProvider) {
super(localFileSink, prefixFormatter, concurrentUpload, batchUpload);
this.bucket = bucket;
this.s3Endpoint = s3Endpoint == null ? "s3.amazonaws.com" : s3Endpoint;
this.maxPartSize = maxPartSize == 0 ? 20 * 1024 * 1024 : maxPartSize;
this.notice = notice == null ? new QueueNotice<String>() : notice;
this.mpUtils = mpUtils;
this.credentialsProvider = credentialProvider;
this.s3Acl = s3Acl;
this.s3AclRetries = s3AclRetries > 0 ? s3AclRetries : 5;
Preconditions.checkNotNull(bucket, "bucket is needed");
}
protected void initialize() {
if (mpUtils == null) { // not injected
mpUtils = new MultipartUtils(maxPartSize);
}
Jets3tProperties properties = new Jets3tProperties();
properties.setProperty("s3service.s3-endpoint", s3Endpoint);
if (credentialsProvider.getCredentials() instanceof AWSSessionCredentials) {
s3Service = new RestS3Service(
new AWSSessionCredentialsAdapter(credentialsProvider),
null, null, properties);
} else {
s3Service = new RestS3Service(
new AWSCredentials(
credentialsProvider.getCredentials().getAWSAccessKeyId(),
credentialsProvider.getCredentials().getAWSSecretKey()),
null, null, properties);
}
grantAcl = new GrantAcl(s3Service, s3Acl, s3AclRetries == 0 ? 5 : s3AclRetries);
notice.init();
}
@Override
public String recvNotice() {
return notice.recv();
}
@Override
public long checkPause() {
return localFileSink.checkPause();
}
@Monitor(name="fail_grantAcl", type=DataSourceType.COUNTER)
private AtomicLong fail_grantAcl = new AtomicLong(0);
public long getFail_grantAcl() { return fail_grantAcl.get(); }
@Override
protected void notify(String filePath, long fileSize) throws Exception {
JSONObject jsonMessage = new JSONObject();
jsonMessage.put("bucket", bucket);
jsonMessage.put("filePath", filePath);
jsonMessage.put("size", fileSize);
jsonMessage.put("collector", FileNameFormatter.localHostAddr);
if (!notice.send(jsonMessage.toString())) {
throw new RuntimeException("Notice failed");
}
}
@Override
protected void upload(String localFilePath, String remoteFilePath) throws Exception {
S3Object file = new S3Object(new File(localFilePath));
file.setBucketName(bucket);
file.setKey(remoteFilePath);
file.setAcl(GSAccessControlList.REST_CANNED_BUCKET_OWNER_FULL_CONTROL);
List objectsToUploadAsMultipart = new ArrayList();
objectsToUploadAsMultipart.add(file);
mpUtils.uploadObjects(bucket, s3Service, objectsToUploadAsMultipart, null);
if (!grantAcl.grantAcl(file)) {
throw new RuntimeException("Failed to set Acl");
}
}
}
| 1,290 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/RemoteFileSink.java
|
package com.netflix.suro.sink.remotefile;
import com.google.common.base.Preconditions;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.sink.localfile.LocalFileSink;
import com.netflix.suro.sink.remotefile.formatter.DynamicRemotePrefixFormatter;
import org.joda.time.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
public abstract class RemoteFileSink implements Sink {
private static final Logger log = LoggerFactory.getLogger(RemoteFileSink.class);
protected final LocalFileSink localFileSink;
private final RemotePrefixFormatter prefixFormatter;
private final ExecutorService uploader;
private final ExecutorService localFilePoller;
private ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
private final boolean batchUpload;
private boolean running = false;
private static final int processingFileQueueThreshold = 1000;
private static final String processingFileQueueCleanupInterval = "PT60s";
private Set<String> processingFileSet = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
private BlockingQueue<String> processedFileQueue = new LinkedBlockingQueue<String>();
public RemoteFileSink(
LocalFileSink localFileSink,
RemotePrefixFormatter prefixFormatter,
int concurrentUpload,
boolean batchUpload) {
this.localFileSink = localFileSink;
this.prefixFormatter = prefixFormatter == null ? new DynamicRemotePrefixFormatter("date(yyyyMMdd)") : prefixFormatter;
this.batchUpload = batchUpload;
Preconditions.checkNotNull(localFileSink, "localFileSink is needed");
uploader = Executors.newFixedThreadPool(concurrentUpload == 0 ? 5 : concurrentUpload);
localFilePoller = Executors.newSingleThreadExecutor();
if (!batchUpload) {
localFileSink.cleanUp(false);
}
Monitors.registerObject(
this.getClass().getSimpleName() + '-' + localFileSink.getOutputDir().replace('/', '_'),
this);
}
@Override
public void writeTo(MessageContainer message) {
localFileSink.writeTo(message);
}
@Override
public void open() {
initialize();
if (!batchUpload) {
running = true;
localFilePoller.submit(new Runnable() {
@Override
public void run() {
while (running) {
uploadAllFromQueue();
localFileSink.cleanUp(false);
}
uploadAllFromQueue();
}
});
localFileSink.open();
int schedulingSecond = new Period(processingFileQueueCleanupInterval).toStandardSeconds().getSeconds();
scheduler.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
if (processingFileSet.size() > processingFileQueueThreshold) {
String file = null;
int count = 0;
while (processingFileSet.size() > processingFileQueueThreshold &&
(file = processedFileQueue.poll()) != null) {
processingFileSet.remove(file);
++count;
}
log.info(count + " files are removed from processingFileSet");
}
}
}, schedulingSecond, schedulingSecond, TimeUnit.SECONDS);
}
}
@Override
public void close() {
try {
if (!batchUpload) {
localFileSink.close();
running = false;
localFilePoller.shutdown();
localFilePoller.awaitTermination(60000, TimeUnit.MILLISECONDS);
}
uploader.shutdown();
uploader.awaitTermination(60000, TimeUnit.MILLISECONDS);
} catch (Exception e) {
// ignore exceptions while closing
log.error("Exception while closing: " + e.getMessage(), e);
}
}
@Override
public String getStat() {
StringBuilder sb = new StringBuilder(localFileSink.getStat());
sb.append('\n').append(String.format("%d files uploaded so far", uploadedFileCount.get()));
return sb.toString();
}
public void uploadAll(String dir) {
clearFileHistory();
while (localFileSink.cleanUp(dir, true) > 0) {
uploadAllFromQueue();
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
// do nothing
}
}
}
private void clearFileHistory() {
processedFileQueue.clear();
processingFileSet.clear();
}
private void uploadAllFromQueue() {
String note = localFileSink.recvNotice();
while (note != null) {
uploadFile(note);
note = localFileSink.recvNotice();
}
}
private void uploadFile(final String filePath) {
// prevent the same file from being uploaded more than once
final String key = filePath.substring(filePath.lastIndexOf("/"));
if (processingFileSet.contains(key)) {
return;
}
processingFileSet.add(key);
uploader.submit(new Runnable() {
@Override
public void run() {
try {
File localFile = new File(filePath);
long fileLength = localFile.length();
if (fileLength == 0) {
log.warn("empty file: " + filePath + " is abandoned");
localFileSink.deleteFile(filePath);
return;
}
String remoteFilePath = makeUploadPath(localFile);
long t1 = System.currentTimeMillis();
upload(filePath, remoteFilePath);
long t2 = System.currentTimeMillis();
log.info("upload duration: " + (t2 - t1) + " ms " +
"for " + filePath + " Len: " + fileLength + " bytes");
uploadedFileSize.addAndGet(fileLength);
uploadedFileCount.incrementAndGet();
uploadDuration = t2 - t1;
RemoteFileSink.this.notify(remoteFilePath, fileLength);
localFileSink.deleteFile(filePath);
log.info("upload done deleting from local: " + filePath);
} catch (Exception e) {
uploadFailureCount.incrementAndGet();
log.error("Exception while uploading: " + e.getMessage(), e);
} finally {
// check whether the file was actually deleted
if (new File(filePath).exists()) {
// something went wrong; keep the file eligible for another upload attempt
processingFileSet.remove(key);
} else {
processedFileQueue.add(key);
}
}
}
});
}
private String makeUploadPath(File file) {
return prefixFormatter.get() + file.getName();
}
@Monitor(name = "uploadedFileSize", type = DataSourceType.COUNTER)
public long getUploadedFileSize() {
return uploadedFileSize.get();
}
@Monitor(name = "uploadDuration", type = DataSourceType.GAUGE)
private long uploadDuration;
@Monitor(name = "uploadedFileCount", type = DataSourceType.COUNTER)
public int getUploadedFileCount() {
return uploadedFileCount.get();
}
@Monitor(name = "uploadFailureCount", type=DataSourceType.COUNTER)
public int getUploadFailureCount() {
return uploadFailureCount.get();
}
private AtomicLong uploadedFileSize = new AtomicLong(0);
private AtomicInteger uploadedFileCount = new AtomicInteger(0);
private AtomicInteger uploadFailureCount = new AtomicInteger(0);
abstract void initialize();
abstract void upload(String localFilePath, String remoteFilePath) throws Exception;
abstract void notify(String filePath, long fileSize) throws Exception;
@Override
public long getNumOfPendingMessages() {
long numMessages = localFileSink.getNumOfPendingMessages();
if (numMessages == 0) {
return localFileSink.cleanUp(true);
} else {
return numMessages;
}
}
}
| 1,291 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/AWSSessionCredentialsAdapter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.remotefile;
import com.amazonaws.auth.AWSCredentialsProvider;
import org.jets3t.service.security.AWSSessionCredentials;
/**
* AWSCredentialsProvider wrapper for jets3t library
*
* @author jbae
*/
public class AWSSessionCredentialsAdapter extends AWSSessionCredentials {
private final AWSCredentialsProvider provider;
public AWSSessionCredentialsAdapter(AWSCredentialsProvider provider) {
super(null, null, null);
if(provider.getCredentials() instanceof com.amazonaws.auth.AWSSessionCredentials)
this.provider = provider;
else
throw new IllegalArgumentException("provider does not contain session credentials");
}
@Override
protected String getTypeName() {
return "AWSSessionCredentialsAdapter";
}
@Override
public String getVersionPrefix() {
return "Netflix AWSSessionCredentialsAdapter, version: ";
}
@Override
public String getAccessKey() {
return provider.getCredentials().getAWSAccessKeyId();
}
@Override
public String getSecretKey() {
return provider.getCredentials().getAWSSecretKey();
}
public String getSessionToken() {
com.amazonaws.auth.AWSSessionCredentials sessionCredentials =
(com.amazonaws.auth.AWSSessionCredentials) provider.getCredentials();
return sessionCredentials.getSessionToken();
}
}
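// Minimal usage sketch (not part of the Suro source), mirroring how S3FileSink and
// S3Consumer in this module build their jets3t client: wrap a session-capable
// AWSCredentialsProvider so jets3t picks up rotating STS credentials.
class AWSSessionCredentialsAdapterExample {
    static org.jets3t.service.impl.rest.httpclient.RestS3Service newS3Service(AWSCredentialsProvider provider) {
        org.jets3t.service.Jets3tProperties properties = new org.jets3t.service.Jets3tProperties();
        properties.setProperty("s3service.s3-endpoint", "s3.amazonaws.com");
        return new org.jets3t.service.impl.rest.httpclient.RestS3Service(
                new AWSSessionCredentialsAdapter(provider), null, null, properties);
    }
}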
| 1,292 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/GrantAcl.java
|
package com.netflix.suro.sink.remotefile;
import com.google.common.base.Strings;
import org.jets3t.service.ServiceException;
import org.jets3t.service.acl.AccessControlList;
import org.jets3t.service.acl.CanonicalGrantee;
import org.jets3t.service.acl.Permission;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Helper class that grants an AWS account read access to objects in an S3 bucket. This is useful when
* uploading files to S3 on behalf of a given AWS account ID.
*
* @author jbae
*/
public class GrantAcl {
private static final Logger log = LoggerFactory.getLogger(GrantAcl.class);
private final RestS3Service s3Service;
private final String s3Acl;
private final int s3AclRetries;
public GrantAcl(RestS3Service s3Service, String s3Acl, int s3AclRetries) {
this.s3Service = s3Service;
this.s3Acl = s3Acl;
this.s3AclRetries = s3AclRetries;
}
public boolean grantAcl(S3Object object) throws ServiceException, InterruptedException {
if(Strings.isNullOrEmpty(s3Acl)){
return true;
}
for (int i = 0; i < s3AclRetries; ++i) {
try {
AccessControlList acl = s3Service.getObjectAcl(object.getBucketName(), object.getKey());
for (String id : s3Acl.split(",")) {
acl.grantPermission(new CanonicalGrantee(id), Permission.PERMISSION_READ);
}
s3Service.putObjectAcl(object.getBucketName(), object.getKey(), acl);
return true;
} catch (Exception e) {
log.error("Exception while granting ACL: " + e.getMessage(), e);
Thread.sleep(1000 * (i + 1));
}
}
return false;
}
}
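// Usage sketch mirroring the unit test earlier in this section (bucket, key, and canonical
// account IDs are illustrative): grant READ on an uploaded object to each listed AWS
// account, retrying up to s3AclRetries times.
class GrantAclExample {
    static void grantRead(RestS3Service s3Service) throws ServiceException, InterruptedException {
        GrantAcl grantAcl = new GrantAcl(s3Service, "1111,2222", 3);
        S3Object uploaded = new S3Object("suro/20140101/part-0001.gz");
        uploaded.setBucketName("example-bucket");
        if (!grantAcl.grantAcl(uploaded)) {
            throw new RuntimeException("Failed to set ACL after retries");
        }
    }
}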
| 1,293 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/HdfsFileSink.java
|
package com.netflix.suro.sink.remotefile;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import com.netflix.suro.sink.localfile.FileNameFormatter;
import com.netflix.suro.sink.localfile.LocalFileSink;
import com.netflix.suro.sink.notice.Notice;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.codehaus.jettison.json.JSONObject;
import java.util.Properties;
public class HdfsFileSink extends RemoteFileSink {
public static final String TYPE = "hdfs";
private final String directory;
private final Notice<String> notice;
private final Configuration hadoopConf;
@JsonCreator
public HdfsFileSink(
@JsonProperty("localFileSink") LocalFileSink localFileSink,
@JsonProperty("directory") String directory,
@JsonProperty("concurrentUpload") int concurrentUpload,
@JsonProperty("notice") Notice notice,
@JsonProperty("prefixFormatter") RemotePrefixFormatter prefixFormatter,
@JsonProperty("batchUpload") boolean batchUpload,
@JsonProperty("properties") Properties properties
) {
super(localFileSink, prefixFormatter, concurrentUpload, batchUpload);
this.directory = directory;
this.notice = notice;
hadoopConf = new Configuration();
if (properties != null) {
for (String propertyName : properties.stringPropertyNames()) {
hadoopConf.set(propertyName, properties.getProperty(propertyName));
}
}
Preconditions.checkNotNull(directory, "directory is needed");
}
@Override
public String recvNotice() {
return notice.recv();
}
@Override
public long checkPause() {
return localFileSink.checkPause();
}
@Override
void initialize() {
// do nothing
}
@Override
void upload(String localFilePath, String remoteFilePath) throws Exception {
Path outFile = new Path(String.format("%s/%s", directory, remoteFilePath));
FileSystem fs = outFile.getFileSystem(hadoopConf);
fs.mkdirs(outFile.getParent());
fs.moveFromLocalFile(new Path(localFilePath), outFile);
}
@Override
void notify(String filePath, long fileSize) throws Exception {
JSONObject jsonMessage = new JSONObject();
jsonMessage.put("directory", directory);
jsonMessage.put("filePath", filePath);
jsonMessage.put("size", fileSize);
jsonMessage.put("collector", FileNameFormatter.localHostAddr);
if (!notice.send(jsonMessage.toString())) {
throw new RuntimeException("Notice failed");
}
}
}
| 1,294 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/formatter/DateRegionStackFormatter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.remotefile.formatter;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.suro.sink.remotefile.RemotePrefixFormatter;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
/**
* Appends date, region, and stack information to the file path when files are
* uploaded to AWS S3. The region and stack values can be injected through the
* Jackson {@link com.fasterxml.jackson.databind.ObjectMapper}.
*
* @author jbae
*/
public class DateRegionStackFormatter implements RemotePrefixFormatter {
public static final String TYPE = "DateRegionStack";
private final DateTimeFormatter format;
private String region;
private String stack;
@JsonCreator
public DateRegionStackFormatter(
@JsonProperty("date") String dateFormat,
@JsonProperty("region") @JacksonInject("region") String region,
@JsonProperty("stack") @JacksonInject("stack") String stack) {
this.format = DateTimeFormat.forPattern(dateFormat);
this.region = region;
this.stack = stack;
}
@Override
public String get() {
StringBuilder sb = new StringBuilder();
sb.append(format.print(new DateTime())).append('/')
.append(region).append('/')
.append(stack).append('/');
return sb.toString();
}
}
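// Quick sketch (not part of the Suro source; region and stack values are illustrative).
// Outside of Jackson injection the formatter can be constructed directly:
class DateRegionStackFormatterExample {
    public static void main(String[] args) {
        RemotePrefixFormatter formatter = new DateRegionStackFormatter("yyyyMMdd", "us-east-1", "prod");
        // On 2014-01-01 this prints "20140101/us-east-1/prod/"
        System.out.println(formatter.get());
    }
}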
| 1,295 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/formatter/DatePrefixFormatter.java
|
package com.netflix.suro.sink.remotefile.formatter;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
public class DatePrefixFormatter implements PrefixFormatter {
private final DateTimeFormatter formatter;
public DatePrefixFormatter(String formatString) {
this.formatter = DateTimeFormat.forPattern(formatString);
}
@Override
public String format() {
return formatter.print(new DateTime());
}
}
| 1,296 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/formatter/DynamicRemotePrefixFormatter.java
|
package com.netflix.suro.sink.remotefile.formatter;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.suro.sink.remotefile.RemotePrefixFormatter;
import java.util.ArrayList;
import java.util.List;
public class DynamicRemotePrefixFormatter implements RemotePrefixFormatter {
public static final String TYPE = "dynamic";
private final List<PrefixFormatter> formatterList = new ArrayList<PrefixFormatter>();
@JsonCreator
public DynamicRemotePrefixFormatter(@JsonProperty("format") String formatString) {
String[] formatList = formatString.split(";");
for (String format : formatList) {
formatterList.add(createFormatter(format));
}
}
@Override
public String get() {
StringBuilder sb = new StringBuilder();
for (PrefixFormatter formatter : formatterList) {
sb.append(formatter.format()).append('/');
}
return sb.toString();
}
public static PrefixFormatter createFormatter(String formatString) {
int startBracket = formatString.indexOf('(');
int endBracket = formatString.lastIndexOf(')');
String name = formatString.substring(0, startBracket);
String param = formatString.substring(startBracket + 1, endBracket);
if (name.equals("date")) {
return new DatePrefixFormatter(param);
} else if (name.equals("static")) {
return new StaticPrefixFormatter(param);
} else if (name.equals("property")) {
return new PropertyPrefixFormatter(param);
} else {
throw new IllegalArgumentException(name + " cannot be supported");
}
}
}
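// Short sketch of the format-string grammar handled by createFormatter() above (the
// values are illustrative): components are separated by ';' and each becomes one path
// segment in the resulting prefix; "property(name)" segments read from Archaius configuration.
class DynamicRemotePrefixFormatterExample {
    public static void main(String[] args) {
        RemotePrefixFormatter formatter =
                new DynamicRemotePrefixFormatter("date(yyyyMMdd);static(hourly)");
        // Produces something like "20140101/hourly/"
        System.out.println(formatter.get());
    }
}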
| 1,297 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/formatter/StaticPrefixFormatter.java
|
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.remotefile.formatter;
/**
* Appends a static prefix to the remote file path.
*
* @author jbae
*/
public class StaticPrefixFormatter implements PrefixFormatter {
private final String formatString;
public StaticPrefixFormatter(String formatString) {
this.formatString = formatString;
}
@Override
public String format() {
return formatString;
}
}
| 1,298 |
0 |
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile
|
Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/formatter/PropertyPrefixFormatter.java
|
package com.netflix.suro.sink.remotefile.formatter;
import com.netflix.config.ConfigurationManager;
public class PropertyPrefixFormatter implements PrefixFormatter {
private final String propertyName;
public PropertyPrefixFormatter(String propertyName) {
this.propertyName = propertyName;
}
@Override
public String format() {
return ConfigurationManager.getConfigInstance().getProperty(propertyName).toString();
}
}
| 1,299 |