Dataset schema (columns shown for each row below):
  index              int64    range 0 .. 0
  repo_id            string   length 9 .. 205
  file_path          string   length 31 .. 246
  content            string   length 1 .. 12.2M
  __index_level_0__  int64    range 0 .. 10k
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/io
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/io/jdbc/IdentifiableName.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.contrib.io.jdbc; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.lib.db.DBWritable; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; public class IdentifiableName implements DBWritable, Writable { public IntWritable id = new IntWritable(); public Text name = new Text(); @Override public void readFields(DataInput in) throws IOException { id.readFields(in); name.readFields(in); } @Override public void write(DataOutput out) throws IOException { id.write(out); name.write(out); } @Override public void readFields(ResultSet resultSet) throws SQLException { id.set(resultSet.getInt(1)); name.set(resultSet.getString(2)); } @Override public void write(PreparedStatement preparedStatement) throws SQLException { throw new UnsupportedOperationException("Not implemented"); } }
2,300
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/io
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/io/jdbc/DataBaseSource.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.contrib.io.jdbc; import java.sql.Driver; import org.apache.crunch.io.FormatBundle; import org.apache.crunch.io.impl.FileSourceImpl; import org.apache.crunch.types.writable.Writables; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.lib.db.DBConfiguration; import org.apache.hadoop.mapreduce.lib.db.DBInputFormat; import org.apache.hadoop.mapreduce.lib.db.DBWritable; /** * Source for reading from a database via a JDBC connection. Underlying * database reading is provided by {@link DBInputFormat}. * <p> * A type that is input via this class must be a Writable that also implements * DBWritable. Only the {@link DBWritable#readFields(java.sql.ResultSet)} method * needs to be fully implemented from {@link DBWritable}. * * @param <T> The input type of this source */ public class DataBaseSource<T extends DBWritable & Writable> extends FileSourceImpl<T> { private DataBaseSource(Class<T> inputClass, String driverClassName, String url, String username, String password, String selectClause, String countClause) { super( new Path("dbsource"), Writables.writables(inputClass), FormatBundle.forInput(DBInputFormat.class) .set(DBConfiguration.DRIVER_CLASS_PROPERTY, driverClassName) .set(DBConfiguration.URL_PROPERTY, url) .set(DBConfiguration.USERNAME_PROPERTY, username) .set(DBConfiguration.PASSWORD_PROPERTY, password) .set(DBConfiguration.INPUT_CLASS_PROPERTY, inputClass.getCanonicalName()) .set(DBConfiguration.INPUT_QUERY, selectClause) .set(DBConfiguration.INPUT_COUNT_QUERY, countClause)); } public static class Builder<T extends DBWritable & Writable> { private Class<T> inputClass; private String driverClass; private String url; private String username; private String password; private String selectClause; public String countClause; public Builder(Class<T> inputClass) { this.inputClass = inputClass; } public Builder<T> setDriverClass(Class<? extends Driver> driverClass) { this.driverClass = driverClass.getName(); return this; } public Builder<T> setUrl(String url) { this.url = url; return this; } public Builder<T> setUsername(String username) { this.username = username; return this; } public Builder<T> setPassword(String password) { this.password = password; return this; } public Builder<T> selectSQLQuery(String selectClause) { this.selectClause = selectClause; return this; } public Builder<T> countSQLQuery(String countClause) { this.countClause = countClause; return this; } public DataBaseSource<T> build() { return new DataBaseSource<T>( inputClass, driverClass, url, username, password, selectClause, countClause); } } @Override public long getSize(Configuration configuration) { // TODO Do something smarter here return 1000 * 1000; } @Override public long getLastModifiedAt(Configuration configuration) { return -1; } }
2,301
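A hedged usage sketch for the Builder API in the row above, pairing DataBaseSource with the IdentifiableName DBWritable from the earlier row. The H2 driver class, connection URL, credentials, and TEST table are illustrative assumptions, not values taken from the source.

import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.contrib.io.jdbc.DataBaseSource;
import org.apache.crunch.contrib.io.jdbc.IdentifiableName;

public class DataBaseSourceExample {
  // Reads (id, name) rows from a hypothetical TEST table into a PCollection.
  public static PCollection<IdentifiableName> readPeople(Pipeline pipeline) {
    DataBaseSource<IdentifiableName> source =
        new DataBaseSource.Builder<IdentifiableName>(IdentifiableName.class)
            .setDriverClass(org.h2.Driver.class)                      // hypothetical JDBC driver
            .setUrl("jdbc:h2:mem:testdb")                             // hypothetical connection URL
            .setUsername("sa")
            .setPassword("")
            .selectSQLQuery("SELECT ID, NAME FROM TEST ORDER BY ID")  // column order matches readFields(ResultSet)
            .countSQLQuery("SELECT COUNT(*) FROM TEST")
            .build();
    return pipeline.read(source);
  }
}

The select query must return the columns in the order that IdentifiableName.readFields(ResultSet) consumes them: an int first, then a string.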
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/io
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/io/jdbc/package-info.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Support for reading data from RDBMS using JDBC */ package org.apache.crunch.contrib.io.jdbc;
2,302
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/bloomfilter/BloomFilterFactory.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.contrib.bloomfilter; import java.io.IOException; import java.util.Map; import org.apache.crunch.Aggregator; import org.apache.crunch.PCollection; import org.apache.crunch.PObject; import org.apache.crunch.PTable; import org.apache.crunch.impl.mr.MRPipeline; import org.apache.crunch.materialize.pobject.FirstElementPObject; import org.apache.crunch.types.PTypeFamily; import org.apache.crunch.types.writable.Writables; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.bloom.BloomFilter; import com.google.common.collect.ImmutableList; /** * Factory Class for creating BloomFilters. The APIs require a * {@link BloomFilterFn} which is responsible for generating keys of the filter. */ public class BloomFilterFactory { /** * The method will take an input path and generates BloomFilters for all text * files in that path. The method return back a {@link PObject} containing a * {@link Map} having file names as keys and filters as values */ public static PObject<Map<String, BloomFilter>> createFilter(Path inputPath, BloomFilterFn<String> filterFn) throws IOException { MRPipeline pipeline = new MRPipeline(BloomFilterFactory.class); FileStatus[] listStatus = FileSystem.get(pipeline.getConfiguration()).listStatus(inputPath); PTable<String, BloomFilter> filterTable = null; for (FileStatus fileStatus : listStatus) { Path path = fileStatus.getPath(); PCollection<String> readTextFile = pipeline.readTextFile(path.toString()); pipeline.getConfiguration().set(BloomFilterFn.CRUNCH_FILTER_NAME, path.getName()); PTable<String, BloomFilter> currentTable = createFilterTable(readTextFile, filterFn); if (filterTable != null) { filterTable = filterTable.union(currentTable); } else { filterTable = currentTable; } } return filterTable.asMap(); } public static <T> PObject<BloomFilter> createFilter(PCollection<T> collection, BloomFilterFn<T> filterFn) { collection.getPipeline().getConfiguration().set(BloomFilterFn.CRUNCH_FILTER_NAME, collection.getName()); return new FirstElementPObject<BloomFilter>(createFilterTable(collection, filterFn).values()); } private static <T> PTable<String, BloomFilter> createFilterTable(PCollection<T> collection, BloomFilterFn<T> filterFn) { PTypeFamily tf = collection.getTypeFamily(); PTable<String, BloomFilter> table = collection.parallelDo(filterFn, tf.tableOf(tf.strings(), Writables.writables(BloomFilter.class))); return table.groupByKey(1).combineValues(new BloomFilterAggregator()); } @SuppressWarnings("serial") private static class BloomFilterAggregator implements Aggregator<BloomFilter> { private transient BloomFilter bloomFilter = null; private transient int 
filterSize; @Override public void update(BloomFilter value) { bloomFilter.or(value); } @Override public Iterable<BloomFilter> results() { return ImmutableList.of(bloomFilter); } @Override public void initialize(Configuration configuration) { filterSize = BloomFilterFn.getBloomFilterSize(configuration); } @Override public void reset() { bloomFilter = BloomFilterFn.initializeFilter(filterSize); } } }
2,303
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/bloomfilter/BloomFilterFn.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.contrib.bloomfilter; import java.util.Collection; import org.apache.commons.collections.CollectionUtils; import org.apache.crunch.DoFn; import org.apache.crunch.Emitter; import org.apache.crunch.Pair; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.bloom.BloomFilter; import org.apache.hadoop.util.bloom.Key; import org.apache.hadoop.util.hash.Hash; /** * The class is responsible for generating keys that are used in a BloomFilter */ @SuppressWarnings("serial") public abstract class BloomFilterFn<S> extends DoFn<S, Pair<String, BloomFilter>> { public static final String CRUNCH_FILTER_SIZE = "crunch.filter.size"; public static final String CRUNCH_FILTER_NAME = "crunch.filter.name"; private transient BloomFilter bloomFilter = null; @Override public void initialize() { super.initialize(); bloomFilter = initializeFilter(getBloomFilterSize(getConfiguration())); } @Override public void process(S input, Emitter<Pair<String, BloomFilter>> emitter) { Collection<Key> keys = generateKeys(input); if (CollectionUtils.isNotEmpty(keys)) { bloomFilter.add(keys); } } public abstract Collection<Key> generateKeys(S input); @Override public void cleanup(Emitter<Pair<String, BloomFilter>> emitter) { String filterName = getConfiguration().get(CRUNCH_FILTER_NAME); emitter.emit(Pair.of(filterName, bloomFilter)); } static BloomFilter initializeFilter(int size) { return new BloomFilter(size, 5, Hash.MURMUR_HASH); } static int getBloomFilterSize(Configuration configuration) { return configuration.getInt(CRUNCH_FILTER_SIZE, 1024); } }
2,304
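A minimal sketch, assuming a local input directory, of a concrete BloomFilterFn subclass together with BloomFilterFactory.createFilter from the preceding row. The class name, the whitespace tokenization, and the "/data/input" path are illustrative assumptions.

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.crunch.PObject;
import org.apache.crunch.contrib.bloomfilter.BloomFilterFactory;
import org.apache.crunch.contrib.bloomfilter.BloomFilterFn;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.bloom.BloomFilter;
import org.apache.hadoop.util.bloom.Key;

public class TokenBloomFilterFn extends BloomFilterFn<String> {
  // Adds every whitespace-delimited token of an input line as a filter key.
  @Override
  public Collection<Key> generateKeys(String input) {
    List<Key> keys = new ArrayList<Key>();
    for (String token : input.split("\\s+")) {
      keys.add(new Key(token.getBytes(StandardCharsets.UTF_8)));
    }
    return keys;
  }

  public static void main(String[] args) throws Exception {
    // Builds one BloomFilter per text file under the (hypothetical) input directory.
    PObject<Map<String, BloomFilter>> filters =
        BloomFilterFactory.createFilter(new Path("/data/input"), new TokenBloomFilterFn());
    Map<String, BloomFilter> byFile = filters.getValue();
    System.out.println("Filters built for files: " + byFile.keySet());
  }
}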
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/bloomfilter/package-info.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Support for creating Bloom Filters. * * Bloom Filters are space and time efficient data structures supported by * Hadoop. This package provides support for creating them in Crunch. */ package org.apache.crunch.contrib.bloomfilter;
2,305
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/text/TokenizerFactory.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.contrib.text; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableSet; import java.io.Serializable; import java.util.Locale; import java.util.Scanner; import java.util.Set; /** * Factory class that constructs {@link Tokenizer} instances for input strings that use a fixed * set of delimiters, skip patterns, locales, and sets of indices to keep or drop. */ public class TokenizerFactory implements Serializable { private static TokenizerFactory DEFAULT_INSTANCE = new TokenizerFactory(null, null, null, ImmutableSet.<Integer>of(), true); private final String delim; private final String skip; private final Locale locale; private final Set<Integer> indices; private final boolean keep; /** * Returns a default {@code TokenizerFactory} that uses whitespace as a delimiter and does * not skip any input fields. * @return The default {@code TokenizerFactory} */ public static TokenizerFactory getDefaultInstance() { return DEFAULT_INSTANCE; } private TokenizerFactory(String delim, String skip, Locale locale, Set<Integer> indices, boolean keep) { this.delim = delim; this.skip = skip; this.locale = locale; this.indices = indices; this.keep = keep; } /** * Return a {@code Scanner} instance that wraps the input string and uses the delimiter, * skip, and locale settings for this {@code TokenizerFactory} instance. * * @param input The input string * @return A new {@code Scanner} instance with appropriate settings */ public Tokenizer create(String input) { Scanner s = new Scanner(input); s.useLocale(Locale.US); // Use period for floating point number formatting if (delim != null) { s.useDelimiter(delim); } if (skip != null) { s.skip(skip); } if (locale != null) { s.useLocale(locale); } return new Tokenizer(s, indices, keep); } /** * Factory method for creating a {@code TokenizerFactory.Builder} instance. * @return A new {@code TokenizerFactory.Builder} */ public static Builder builder() { return new Builder(); } /** * A class for constructing new {@code TokenizerFactory} instances using the Builder pattern. */ public static class Builder { private String delim; private String skip; private Locale locale; private Set<Integer> indices = ImmutableSet.of(); private boolean keep; /** * Sets the delimiter used by the {@code TokenizerFactory} instances constructed by * this instance. * @param delim The delimiter to use, which may be a regular expression * @return This {@code Builder} instance */ public Builder delimiter(String delim) { this.delim = delim; return this; } /** * Sets the regular expression that determines which input characters should be * ignored by the {@code Scanner} that is returned by the constructed * {@code TokenizerFactory}. 
* * @param skip The regular expression of input values to ignore * @return This {@code Builder} instance */ public Builder skip(String skip) { this.skip = skip; return this; } /** * Sets the {@code Locale} to use with the {@code TokenizerFactory} returned by * this {@code Builder} instance. * * @param locale The locale to use * @return This {@code Builder} instance */ public Builder locale(Locale locale) { this.locale = locale; return this; } /** * Keep only the specified fields found by the input scanner, counting from * zero. * * @param indices The indices to keep * @return This {@code Builder} instance */ public Builder keep(Integer... indices) { Preconditions.checkArgument(this.indices.isEmpty(), "Cannot set keep indices more than once"); this.indices = ImmutableSet.copyOf(indices); this.keep = true; return this; } /** * Drop the specified fields found by the input scanner, counting from zero. * * @param indices The indices to drop * @return This {@code Builder} instance */ public Builder drop(Integer... indices) { Preconditions.checkArgument(this.indices.isEmpty(), "Cannot set drop indices more than once"); this.indices = ImmutableSet.copyOf(indices); this.keep = false; return this; } /** * Returns a new {@code TokenizerFactory} with settings determined by this * {@code Builder} instance. * @return A new {@code TokenizerFactory} */ public TokenizerFactory build() { return new TokenizerFactory(delim, skip, locale, indices, keep); } } }
2,306
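A small illustrative sketch of the Builder in the row above: a comma delimiter with only the first three fields kept. The sample record is invented.

import org.apache.crunch.contrib.text.Tokenizer;
import org.apache.crunch.contrib.text.TokenizerFactory;

public class TokenizerFactoryExample {
  public static void main(String[] args) {
    TokenizerFactory factory = TokenizerFactory.builder()
        .delimiter(",")   // fields are comma-separated
        .keep(0, 1, 2)    // ignore everything after the third field
        .build();
    Tokenizer t = factory.create("carol,42,2.5,trailing-noise");
    String name = t.next();        // "carol"
    Integer count = t.nextInt();   // 42
    Double score = t.nextDouble(); // 2.5
    System.out.println(name + " " + count + " " + score);
  }
}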
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/text/ExtractorStats.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.contrib.text; import java.util.List; import com.google.common.collect.ImmutableList; /** * Records the number and kinds of errors that an {@code Extractor} encountered when parsing * input data. */ public class ExtractorStats { private final int errorCount; private final List<Integer> fieldErrors; public ExtractorStats(int errorCount) { this(errorCount, ImmutableList.<Integer>of()); } public ExtractorStats(int errorCount, List<Integer> fieldErrors) { this.errorCount = errorCount; this.fieldErrors = fieldErrors; } /** * The overall number of records that had some kind of parsing error. * @return The overall number of records that had some kind of parsing error */ public int getErrorCount() { return errorCount; } /** * Returns the number of errors that occurred when parsing the individual fields of * a composite record type, like a {@code Pair} or {@code TupleN}. * @return The number of errors that occurred when parsing the individual fields of * a composite record type */ public List<Integer> getFieldErrors() { return fieldErrors; } }
2,307
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/text/Tokenizer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.contrib.text; import java.util.Scanner; import java.util.Set; /** * Manages a {@link Scanner} instance and provides support for returning only a subset * of the fields returned by the underlying {@code Scanner}. */ public class Tokenizer { private final Scanner scanner; private final Set<Integer> indices; private final boolean keep; private int current; /** * Create a new {@code Tokenizer} instance. * * @param scanner The scanner to manage * @param indices The indices to keep/drop * @param keep Whether the indices should be kept (true) or dropped (false) */ public Tokenizer(Scanner scanner, Set<Integer> indices, boolean keep) { this.scanner = scanner; this.indices = checkIndices(indices); this.keep = keep; this.current = -1; } private static Set<Integer> checkIndices(Set<Integer> indices) { for (Integer index : indices) { if (index < 0) { throw new IllegalArgumentException("All tokenizer indices must be non-negative"); } } return indices; } private void advance() { if (indices.isEmpty()) { return; } current++; while (scanner.hasNext() && (keep && !indices.contains(current)) || (!keep && indices.contains(current))) { scanner.next(); current++; } } /** * Returns true if the underlying {@code Scanner} has any tokens remaining. */ public boolean hasNext() { return scanner.hasNext(); } /** * Advance this {@code Tokenizer} and return the next String from the {@code Scanner}. * * @return The next String from the {@code Scanner} */ public String next() { advance(); return scanner.next(); } /** * Advance this {@code Tokenizer} and return the next Long from the {@code Scanner}. * * @return The next Long from the {@code Scanner} */ public Long nextLong() { advance(); return scanner.nextLong(); } /** * Advance this {@code Tokenizer} and return the next Boolean from the {@code Scanner}. * * @return The next Boolean from the {@code Scanner} */ public Boolean nextBoolean() { advance(); return scanner.nextBoolean(); } /** * Advance this {@code Tokenizer} and return the next Double from the {@code Scanner}. * * @return The next Double from the {@code Scanner} */ public Double nextDouble() { advance(); return scanner.nextDouble(); } /** * Advance this {@code Tokenizer} and return the next Float from the {@code Scanner}. * * @return The next Float from the {@code Scanner} */ public Float nextFloat() { advance(); return scanner.nextFloat(); } /** * Advance this {@code Tokenizer} and return the next Integer from the {@code Scanner}. * * @return The next Integer from the {@code Scanner} */ public Integer nextInt() { advance(); return scanner.nextInt(); } }
2,308
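For the drop case, a brief sketch that constructs a Tokenizer directly around a Scanner and skips one positional field; the sample line and field positions are invented.

import java.util.Scanner;
import com.google.common.collect.ImmutableSet;
import org.apache.crunch.contrib.text.Tokenizer;

public class TokenizerDropExample {
  public static void main(String[] args) {
    // Drop the field at index 2 ("skipme"); whitespace is the Scanner's default delimiter.
    Tokenizer t = new Tokenizer(new Scanner("alpha 17 skipme 42"), ImmutableSet.of(2), false);
    String label = t.next();      // "alpha"
    Integer first = t.nextInt();  // 17
    Integer second = t.nextInt(); // 42 ("skipme" at index 2 is dropped)
    System.out.println(label + " " + first + " " + second);
  }
}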
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/text/AbstractSimpleExtractor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.contrib.text; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Base class for the common case {@code Extractor} instances that construct a single * object from a block of text stored in a {@code String}, with support for error handling * and reporting. */ public abstract class AbstractSimpleExtractor<T> implements Extractor<T> { private static final Logger LOG = LoggerFactory.getLogger(AbstractSimpleExtractor.class); private static final int LOG_ERROR_LIMIT = 100; private int errors; private boolean errorOnLast; private final T defaultValue; private final TokenizerFactory scannerFactory; protected AbstractSimpleExtractor(T defaultValue) { this(defaultValue, TokenizerFactory.getDefaultInstance()); } protected AbstractSimpleExtractor(T defaultValue, TokenizerFactory scannerFactory) { this.defaultValue = defaultValue; this.scannerFactory = scannerFactory; } @Override public void initialize() { this.errors = 0; this.errorOnLast = false; } @Override public T extract(String input) { errorOnLast = false; T res = defaultValue; try { res = doExtract(scannerFactory.create(input)); } catch (Exception e) { errorOnLast = true; errors++; if (errors < LOG_ERROR_LIMIT) { LOG.error("Error occurred parsing input '{}' using extractor {}", input, this); } } return res; } @Override public boolean errorOnLastRecord() { return errorOnLast; } @Override public T getDefaultValue() { return defaultValue; } @Override public ExtractorStats getStats() { return new ExtractorStats(errors); } /** * Subclasses must override this method to return a new instance of the * class that this {@code Extractor} instance is designed to parse. * <p>Any runtime parsing exceptions from the given {@code Tokenizer} instance * should be thrown so that they may be caught by the error handling logic * inside of this class. * * @param tokenizer The {@code Tokenizer} instance for the current record * @return A new instance of the type defined for this class */ protected abstract T doExtract(Tokenizer tokenizer); }
2,309
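A sketch of a custom single-value extractor built on the base class above. It upper-cases one token and is purely illustrative; the class name and behavior are assumptions, not part of the library.

import java.util.Locale;
import org.apache.crunch.contrib.text.AbstractSimpleExtractor;
import org.apache.crunch.contrib.text.Tokenizer;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;

public class UpperCaseExtractor extends AbstractSimpleExtractor<String> {
  public UpperCaseExtractor() {
    super("");  // default value returned when a record fails to parse
  }

  @Override
  protected String doExtract(Tokenizer tokenizer) {
    // Any exception thrown here is caught and counted by AbstractSimpleExtractor.
    return tokenizer.next().toUpperCase(Locale.ROOT);
  }

  @Override
  public PType<String> getPType(PTypeFamily ptf) {
    return ptf.strings();
  }
}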
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/text/Extractors.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.contrib.text; import java.lang.reflect.Constructor; import java.util.Collection; import org.apache.crunch.Pair; import org.apache.crunch.Tuple; import org.apache.crunch.Tuple3; import org.apache.crunch.Tuple4; import org.apache.crunch.TupleN; import org.apache.crunch.types.PType; import org.apache.crunch.types.PTypeFamily; import org.apache.crunch.types.avro.AvroTypeFamily; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; /** * Factory methods for constructing common {@code Extractor} types. */ public final class Extractors { /** * Returns an Extractor for integers. */ public static Extractor<Integer> xint() { return xint(0); } /** * Returns an Extractor for integers. */ public static Extractor<Integer> xint(Integer defaultValue) { return new IntExtractor(defaultValue); } /** * Returns an Extractor for longs. */ public static Extractor<Long> xlong() { return xlong(0L); } /** * Returns an Extractor for longs. */ public static Extractor<Long> xlong(Long defaultValue) { return new LongExtractor(defaultValue); } /** * Returns an Extractor for floats. */ public static Extractor<Float> xfloat() { return xfloat(0.0f); } public static Extractor<Float> xfloat(Float defaultValue) { return new FloatExtractor(defaultValue); } /** * Returns an Extractor for doubles. */ public static Extractor<Double> xdouble() { return xdouble(0.0); } public static Extractor<Double> xdouble(Double defaultValue) { return new DoubleExtractor(defaultValue); } /** * Returns an Extractor for booleans. */ public static Extractor<Boolean> xboolean() { return xboolean(false); } public static Extractor<Boolean> xboolean(Boolean defaultValue) { return new BooleanExtractor(defaultValue); } /** * Returns an Extractor for strings. */ public static Extractor<String> xstring() { return xstring(""); } public static Extractor<String> xstring(String defaultValue) { return new StringExtractor(defaultValue); } public static <T> Extractor<Collection<T>> xcollect(TokenizerFactory scannerFactory, Extractor<T> extractor) { return new CollectionExtractor<T>(scannerFactory, extractor); } /** * Returns an Extractor for pairs of the given types that uses the given {@code TokenizerFactory} * for parsing the sub-fields. */ public static <K, V> Extractor<Pair<K, V>> xpair(TokenizerFactory scannerFactory, Extractor<K> one, Extractor<V> two) { return new PairExtractor<K, V>(scannerFactory, one, two); } /** * Returns an Extractor for triples of the given types that uses the given {@code TokenizerFactory} * for parsing the sub-fields. 
*/ public static <A, B, C> Extractor<Tuple3<A, B, C>> xtriple(TokenizerFactory scannerFactory, Extractor<A> a, Extractor<B> b, Extractor<C> c) { return new TripExtractor<A, B, C>(scannerFactory, a, b, c); } /** * Returns an Extractor for quads of the given types that uses the given {@code TokenizerFactory} * for parsing the sub-fields. */ public static <A, B, C, D> Extractor<Tuple4<A, B, C, D>> xquad(TokenizerFactory scannerFactory, Extractor<A> a, Extractor<B> b, Extractor<C> c, Extractor<D> d) { return new QuadExtractor<A, B, C, D>(scannerFactory, a, b, c, d); } /** * Returns an Extractor for an arbitrary number of types that uses the given {@code TokenizerFactory} * for parsing the sub-fields. */ public static Extractor<TupleN> xtupleN(TokenizerFactory scannerFactory, Extractor...extractors) { return new TupleNExtractor(scannerFactory, extractors); } /** * Returns an Extractor for a subclass of {@code Tuple} with a constructor that * has the given extractor types that uses the given {@code TokenizerFactory} * for parsing the sub-fields. */ public static <T extends Tuple> Extractor<T> xcustom(Class<T> clazz, TokenizerFactory scannerFactory, Extractor... extractors) { return new CustomTupleExtractor<T>(scannerFactory, clazz, extractors); } private static class IntExtractor extends AbstractSimpleExtractor<Integer> { IntExtractor(Integer defaultValue) { super(defaultValue); } @Override protected Integer doExtract(Tokenizer tokenizer) { return tokenizer.nextInt(); } @Override public PType<Integer> getPType(PTypeFamily ptf) { return ptf.ints(); } @Override public String toString() { return "xint"; } } private static class LongExtractor extends AbstractSimpleExtractor<Long> { LongExtractor(Long defaultValue) { super(defaultValue); } @Override protected Long doExtract(Tokenizer tokenizer) { return tokenizer.nextLong(); } @Override public PType<Long> getPType(PTypeFamily ptf) { return ptf.longs(); } @Override public String toString() { return "xlong"; } } private static class FloatExtractor extends AbstractSimpleExtractor<Float> { FloatExtractor(Float defaultValue) { super(defaultValue); } @Override protected Float doExtract(Tokenizer tokenizer) { return tokenizer.nextFloat(); } @Override public PType<Float> getPType(PTypeFamily ptf) { return ptf.floats(); } @Override public String toString() { return "xfloat"; } } private static class DoubleExtractor extends AbstractSimpleExtractor<Double> { DoubleExtractor(Double defaultValue) { super(defaultValue); } @Override protected Double doExtract(Tokenizer tokenizer) { return tokenizer.nextDouble(); } @Override public PType<Double> getPType(PTypeFamily ptf) { return ptf.doubles(); } @Override public String toString() { return "xdouble"; } } private static class BooleanExtractor extends AbstractSimpleExtractor<Boolean> { BooleanExtractor(Boolean defaultValue) { super(defaultValue); } @Override protected Boolean doExtract(Tokenizer tokenizer) { return tokenizer.nextBoolean(); } @Override public PType<Boolean> getPType(PTypeFamily ptf) { return ptf.booleans(); } @Override public String toString() { return "xboolean"; } } private static class StringExtractor extends AbstractSimpleExtractor<String> { StringExtractor(String defaultValue) { super(defaultValue); } @Override protected String doExtract(Tokenizer tokenizer) { return tokenizer.next(); } @Override public PType<String> getPType(PTypeFamily ptf) { return ptf.strings(); } @Override public String toString() { return "xstring"; } } private static class CollectionExtractor<T> implements 
Extractor<Collection<T>> { private final TokenizerFactory tokenizerFactory; private final Extractor<T> extractor; private int errors = 0; private boolean errorOnLast; CollectionExtractor(TokenizerFactory tokenizerFactory, Extractor<T> extractor) { this.tokenizerFactory = tokenizerFactory; this.extractor = extractor; } @Override public Collection<T> extract(String input) { errorOnLast = false; Tokenizer tokenizer = tokenizerFactory.create(input); Collection<T> parsed = Lists.newArrayList(); while (tokenizer.hasNext()) { parsed.add(extractor.extract(tokenizer.next())); if (extractor.errorOnLastRecord() && !errorOnLast) { errorOnLast = true; errors++; } } return parsed; } @Override public PType<Collection<T>> getPType(PTypeFamily ptf) { return ptf.collections(extractor.getPType(ptf)); } @Override public Collection<T> getDefaultValue() { return ImmutableList.of(); } @Override public ExtractorStats getStats() { return new ExtractorStats(errors, ImmutableList.of(extractor.getStats().getErrorCount())); } @Override public void initialize() { this.errorOnLast = false; this.errors = 0; extractor.initialize(); } @Override public boolean errorOnLastRecord() { return errorOnLast; } } private static class PairExtractor<K, V> extends AbstractCompositeExtractor<Pair<K, V>> { private final Extractor<K> one; private final Extractor<V> two; PairExtractor(TokenizerFactory scannerFactory, Extractor<K> one, Extractor<V> two) { super(scannerFactory, ImmutableList.<Extractor<?>>of(one, two)); this.one = one; this.two = two; } @Override protected Pair<K, V> doCreate(Object[] values) { return Pair.of((K) values[0], (V) values[1]); } @Override public PType<Pair<K, V>> getPType(PTypeFamily ptf) { return ptf.pairs(one.getPType(ptf), two.getPType(ptf)); } @Override public String toString() { return "xpair(" + one + "," + two + ")"; } @Override public Pair<K, V> getDefaultValue() { return Pair.of(one.getDefaultValue(), two.getDefaultValue()); } } private static class TripExtractor<A, B, C> extends AbstractCompositeExtractor<Tuple3<A, B, C>> { private final Extractor<A> one; private final Extractor<B> two; private final Extractor<C> three; TripExtractor(TokenizerFactory sf, Extractor<A> one, Extractor<B> two, Extractor<C> three) { super(sf, ImmutableList.<Extractor<?>>of(one, two, three)); this.one = one; this.two = two; this.three = three; } @Override protected Tuple3<A, B, C> doCreate(Object[] values) { return Tuple3.of((A) values[0], (B) values[1], (C) values[2]); } @Override public PType<Tuple3<A, B, C>> getPType(PTypeFamily ptf) { return ptf.triples(one.getPType(ptf), two.getPType(ptf), three.getPType(ptf)); } @Override public Tuple3<A, B, C> getDefaultValue() { return Tuple3.of(one.getDefaultValue(), two.getDefaultValue(), three.getDefaultValue()); } @Override public String toString() { return "xtriple(" + one + "," + two + "," + three + ")"; } } private static class QuadExtractor<A, B, C, D> extends AbstractCompositeExtractor<Tuple4<A, B, C, D>> { private final Extractor<A> one; private final Extractor<B> two; private final Extractor<C> three; private final Extractor<D> four; QuadExtractor(TokenizerFactory sf, Extractor<A> one, Extractor<B> two, Extractor<C> three, Extractor<D> four) { super(sf, ImmutableList.<Extractor<?>>of(one, two, three, four)); this.one = one; this.two = two; this.three = three; this.four = four; } @Override protected Tuple4<A, B, C, D> doCreate(Object[] values) { return Tuple4.of((A) values[0], (B) values[1], (C) values[2], (D) values[3]); } @Override public PType<Tuple4<A, B, C, D>> 
getPType(PTypeFamily ptf) { return ptf.quads(one.getPType(ptf), two.getPType(ptf), three.getPType(ptf), four.getPType(ptf)); } @Override public Tuple4<A, B, C, D> getDefaultValue() { return Tuple4.of(one.getDefaultValue(), two.getDefaultValue(), three.getDefaultValue(), four.getDefaultValue()); } @Override public String toString() { return "xquad(" + one + "," + two + "," + three + "," + four + ")"; } } private static class TupleNExtractor extends AbstractCompositeExtractor<TupleN> { private final Extractor[] extractors; TupleNExtractor(TokenizerFactory scannerFactory, Extractor...extractors) { super(scannerFactory, ImmutableList.<Extractor<?>>copyOf(extractors)); this.extractors = extractors; } @Override protected TupleN doCreate(Object[] values) { return new TupleN(values); } @Override public PType<TupleN> getPType(PTypeFamily ptf) { PType[] ptypes = new PType[extractors.length]; for (int i = 0; i < ptypes.length; i++) { ptypes[i] = extractors[i].getPType(ptf); } return ptf.tuples(ptypes); } @Override public TupleN getDefaultValue() { Object[] values = new Object[extractors.length]; for (int i = 0; i < values.length; i++) { values[i] = extractors[i].getDefaultValue(); } return doCreate(values); } @Override public String toString() { return "xtupleN(" + Joiner.on(',').join(extractors) + ")"; } } private static class CustomTupleExtractor<T extends Tuple> extends AbstractCompositeExtractor<T> { private final Class<T> clazz; private final Extractor[] extractors; private transient Constructor<T> constructor; CustomTupleExtractor(TokenizerFactory sf, Class<T> clazz, Extractor... extractors) { super(sf, ImmutableList.<Extractor<?>>copyOf(extractors)); this.clazz = clazz; this.extractors = extractors; } @Override public void initialize() { super.initialize(); Class[] typeArgs = new Class[extractors.length]; for (int i = 0; i < typeArgs.length; i++) { typeArgs[i] = extractors[i].getPType( AvroTypeFamily.getInstance()).getTypeClass(); } try { constructor = clazz.getConstructor(typeArgs); } catch (Exception e) { throw new RuntimeException(e); } } @Override public T doCreate(Object[] values) { try { return constructor.newInstance(values); } catch (Exception e) { throw new RuntimeException(e); } } @Override public PType<T> getPType(PTypeFamily ptf) { PType[] ptypes = new PType[extractors.length]; for (int i = 0; i < ptypes.length; i++) { ptypes[i] = extractors[i].getPType(ptf); } return ptf.tuples(clazz, ptypes); } @Override public T getDefaultValue() { Object[] values = new Object[extractors.length]; for (int i = 0; i < values.length; i++) { values[i] = extractors[i].getDefaultValue(); } return doCreate(values); } @Override public String toString() { return "Extractor(" + clazz + ")"; } } }
2,310
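A hedged sketch composing the factory methods above: a comma-delimited pair extractor exercised outside of a pipeline, with invented input strings.

import org.apache.crunch.Pair;
import org.apache.crunch.contrib.text.Extractor;
import org.apache.crunch.contrib.text.Extractors;
import org.apache.crunch.contrib.text.TokenizerFactory;

public class ExtractorsExample {
  public static void main(String[] args) {
    TokenizerFactory csv = TokenizerFactory.builder().delimiter(",").build();
    Extractor<Pair<String, Integer>> extractor =
        Extractors.xpair(csv, Extractors.xstring(), Extractors.xint());
    extractor.initialize();
    Pair<String, Integer> ok = extractor.extract("carol,42");    // Pair.of("carol", 42)
    Pair<String, Integer> bad = extractor.extract("carol,oops"); // int field fails: yields ("carol", 0)
    System.out.println(ok + " " + bad + " errors=" + extractor.getStats().getErrorCount()); // errors=1
  }
}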
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/text/Parse.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.contrib.text; import java.util.List; import org.apache.crunch.Emitter; import org.apache.crunch.MapFn; import org.apache.crunch.PCollection; import org.apache.crunch.PTable; import org.apache.crunch.Pair; import org.apache.crunch.types.PTableType; import org.apache.crunch.types.PType; import org.apache.crunch.types.PTypeFamily; /** * Methods for parsing instances of {@code PCollection<String>} into {@code PCollection}'s of strongly-typed * tuples. */ public final class Parse { /** * Parses the lines of the input {@code PCollection<String>} and returns a {@code PCollection<T>} using * the given {@code Extractor<T>}. * * @param groupName A label to use for tracking errors related to the parsing process * @param input The input {@code PCollection<String>} to convert * @param extractor The {@code Extractor<T>} that converts each line * @return A {@code PCollection<T>} */ public static <T> PCollection<T> parse(String groupName, PCollection<String> input, Extractor<T> extractor) { return parse(groupName, input, input.getTypeFamily(), extractor); } /** * Parses the lines of the input {@code PCollection<String>} and returns a {@code PCollection<T>} using * the given {@code Extractor<T>} that uses the given {@code PTypeFamily}. * * @param groupName A label to use for tracking errors related to the parsing process * @param input The input {@code PCollection<String>} to convert * @param ptf The {@code PTypeFamily} of the returned {@code PCollection<T>} * @param extractor The {@code Extractor<T>} that converts each line * @return A {@code PCollection<T>} */ public static <T> PCollection<T> parse(String groupName, PCollection<String> input, PTypeFamily ptf, Extractor<T> extractor) { return input.parallelDo(groupName, new ExtractorFn<T>(groupName, extractor), extractor.getPType(ptf)); } /** * Parses the lines of the input {@code PCollection<String>} and returns a {@code PTable<K, V>} using * the given {@code Extractor<Pair<K, V>>}. * * @param groupName A label to use for tracking errors related to the parsing process * @param input The input {@code PCollection<String>} to convert * @param extractor The {@code Extractor<Pair<K, V>>} that converts each line * @return A {@code PTable<K, V>} */ public static <K, V> PTable<K, V> parseTable(String groupName, PCollection<String> input, Extractor<Pair<K, V>> extractor) { return parseTable(groupName, input, input.getTypeFamily(), extractor); } /** * Parses the lines of the input {@code PCollection<String>} and returns a {@code PTable<K, V>} using * the given {@code Extractor<Pair<K, V>>} that uses the given {@code PTypeFamily}. 
* * @param groupName A label to use for tracking errors related to the parsing process * @param input The input {@code PCollection<String>} to convert * @param ptf The {@code PTypeFamily} of the returned {@code PTable<K, V>} * @param extractor The {@code Extractor<Pair<K, V>>} that converts each line * @return A {@code PTable<K, V>} */ public static <K, V> PTable<K, V> parseTable(String groupName, PCollection<String> input, PTypeFamily ptf, Extractor<Pair<K, V>> extractor) { List<PType> st = extractor.getPType(ptf).getSubTypes(); PTableType<K, V> ptt = ptf.tableOf((PType<K>) st.get(0), (PType<V>) st.get(1)); return input.parallelDo(groupName, new ExtractorFn<Pair<K, V>>(groupName, extractor), ptt); } private static class ExtractorFn<T> extends MapFn<String, T> { private final String groupName; private final Extractor<T> extractor; public ExtractorFn(String groupName, Extractor<T> extractor) { this.groupName = groupName; this.extractor = extractor; } @Override public void initialize() { extractor.initialize(); } @Override public T map(String input) { return extractor.extract(input); } @Override public void cleanup(Emitter<T> emitter) { if (getContext() != null) { ExtractorStats stats = extractor.getStats(); increment(groupName, "OVERALL_ERRORS", stats.getErrorCount()); List<Integer> fieldErrors = stats.getFieldErrors(); for (int i = 0; i < fieldErrors.size(); i++) { increment(groupName, "ERRORS_FOR_FIELD_" + i, fieldErrors.get(i)); } } } } // Non-instantiable. private Parse() { } }
2,311
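A sketch of parseTable wired into a pipeline, assuming whitespace-delimited "name score" lines; the input path and the surrounding pipeline setup are assumptions.

import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pipeline;
import org.apache.crunch.contrib.text.Extractors;
import org.apache.crunch.contrib.text.Parse;
import org.apache.crunch.contrib.text.TokenizerFactory;

public class ParseExample {
  // Turns lines such as "alice 92" into a PTable<String, Integer>.
  public static PTable<String, Integer> parseScores(Pipeline pipeline) {
    PCollection<String> lines = pipeline.readTextFile("/data/scores.txt");  // hypothetical path
    return Parse.parseTable("scores", lines,
        Extractors.xpair(TokenizerFactory.getDefaultInstance(),
            Extractors.xstring(), Extractors.xint()));
  }
}

Parsing errors are reported per field as counters under the "scores" group name via the ExtractorFn cleanup hook shown in the row above.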
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/text/AbstractCompositeExtractor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.contrib.text; import java.util.List; import com.google.common.base.Function; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; /** * Base class for {@code Extractor} instances that delegates the parsing of fields to other * {@code Extractor} instances, primarily used for constructing composite records that implement * the {@code Tuple} interface. */ public abstract class AbstractCompositeExtractor<T> implements Extractor<T> { private final TokenizerFactory tokenizerFactory; private int errors = 0; private boolean errorOnLast; private final List<Extractor<?>> extractors; public AbstractCompositeExtractor(TokenizerFactory scannerFactory, List<Extractor<?>> extractors) { Preconditions.checkArgument(extractors.size() > 0); this.tokenizerFactory = scannerFactory; this.extractors = extractors; } @Override public T extract(String input) { errorOnLast = false; Tokenizer tokenizer = tokenizerFactory.create(input); Object[] values = new Object[extractors.size()]; try { for (int i = 0; i < values.length; i++) { values[i] = extractors.get(i).extract(tokenizer.next()); if (extractors.get(i).errorOnLastRecord() && !errorOnLast) { errors++; errorOnLast = true; } } } catch (Exception e) { if (!errorOnLast) { errors++; errorOnLast = true; } return getDefaultValue(); } return doCreate(values); } @Override public void initialize() { this.errors = 0; this.errorOnLast = false; for (Extractor<?> x : extractors) { x.initialize(); } } @Override public boolean errorOnLastRecord() { return errorOnLast; } @Override public ExtractorStats getStats() { return new ExtractorStats(errors, Lists.transform(extractors, new Function<Extractor<?>, Integer>() { @Override public Integer apply(Extractor<?> input) { return input.getStats().getErrorCount(); } })); } /** * Subclasses should return a new instance of the object based on the fields parsed by * the {@code Extractor} instances for this composite {@code Extractor} instance. * * @param values The values that were extracted by the component {@code Extractor} objects * @return A new instance of the composite class for this {@code Extractor} */ protected abstract T doCreate(Object[] values); }
2,312
0
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib
Create_ds/crunch/crunch-contrib/src/main/java/org/apache/crunch/contrib/text/Extractor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.contrib.text; import java.io.Serializable; import org.apache.crunch.types.PType; import org.apache.crunch.types.PTypeFamily; /** * An interface for extracting a specific data type from a text string that * is being processed by a {@code Scanner} object. * * @param <T> The data type to be extracted */ public interface Extractor<T> extends Serializable { /** * Extract a value with the type of this instance. */ T extract(String input); /** * Returns the {@code PType} associated with this data type for the * given {@code PTypeFamily}. */ PType<T> getPType(PTypeFamily ptf); /** * Returns the default value for this {@code Extractor} in case of an * error. */ T getDefaultValue(); /** * Perform any initialization required by this {@code Extractor} during the * start of a map or reduce task. */ void initialize(); /** * Returns true if the last call to {@code extract} on this instance * threw an exception that was handled. */ boolean errorOnLastRecord(); /** * Return statistics about how many errors this {@code Extractor} instance * encountered while parsing input data. */ ExtractorStats getStats(); }
2,313
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/KafkaUtilsIT.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka; import org.apache.hadoop.conf.Configuration; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestName; import java.util.Properties; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; public class KafkaUtilsIT { @Rule public TestName testName = new TestName(); @BeforeClass public static void startup() throws Exception { ClusterTest.startTest(); } @AfterClass public static void shutdown() throws Exception { ClusterTest.endTest(); } @Test public void getKafkaProperties() { Configuration config = new Configuration(false); String propertyKey = "fake.kafka.property"; String propertyValue = testName.getMethodName(); config.set(propertyKey, propertyValue); Properties props = KafkaUtils.getKafkaConnectionProperties(config); assertThat(props.get(propertyKey), is((Object) propertyValue)); } @Test public void addKafkaProperties() { String propertyKey = "fake.kafka.property"; String propertyValue = testName.getMethodName(); Properties props = new Properties(); props.setProperty(propertyKey, propertyValue); Configuration config = new Configuration(false); KafkaUtils.addKafkaConnectionProperties(props, config); assertThat(config.get(propertyKey), is(propertyValue)); } }
2,314
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/ClusterTest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka; import kafka.serializer.Decoder; import kafka.utils.VerifiableProperties; import org.apache.crunch.impl.mr.run.RuntimeParameters; import org.apache.crunch.kafka.record.KafkaInputFormat; import org.apache.crunch.kafka.utils.KafkaBrokerTestHarness; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.Serializer; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.rules.TemporaryFolder; import org.junit.runner.RunWith; import org.junit.runners.Suite; import java.io.IOException; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Properties; @RunWith(Suite.class) @Suite.SuiteClasses({ // org.apache.crunch.kafka.record org.apache.crunch.kafka.record.KafkaSourceIT.class, org.apache.crunch.kafka.record.KafkaRecordsIterableIT.class, org.apache.crunch.kafka.record.KafkaDataIT.class }) public class ClusterTest { private static TemporaryFolder folder = new TemporaryFolder(); private static KafkaBrokerTestHarness kafka; private static boolean runAsSuite = false; private static Configuration conf; private static FileSystem fs; @BeforeClass public static void startSuite() throws Exception { runAsSuite = true; startKafka(); setupFileSystem(); } @AfterClass public static void endSuite() throws Exception { stopKafka(); } public static void startTest() throws Exception { if (!runAsSuite) { startKafka(); setupFileSystem(); } } public static void endTest() throws Exception { if (!runAsSuite) { stopKafka(); } } private static void stopKafka() throws IOException { kafka.tearDown(); } private static void startKafka() throws IOException { Properties props = new Properties(); props.setProperty("auto.create.topics.enable", Boolean.TRUE.toString()); kafka = new KafkaBrokerTestHarness(props); kafka.setUp(); } private static void setupFileSystem() throws IOException { folder.create(); conf = new Configuration(); conf.set(RuntimeParameters.TMP_DIR, folder.getRoot().getAbsolutePath()); // Run Map/Reduce tests in process. conf.set("mapreduce.jobtracker.address", "local"); } public static Configuration getConf() { // Clone the configuration so it doesn't get modified for other tests. 
return new Configuration(conf); } public static Properties getConsumerProperties() { Properties props = new Properties(); props.putAll(kafka.getProps()); props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringSerDe.class.getName()); props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringSerDe.class.getName()); //set this because still needed by some APIs. props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, props.getProperty("metadata.broker.list")); props.setProperty("enable.auto.commit", Boolean.toString(false)); //when set this causes some problems with initializing the consumer. props.remove(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); return props; } public static Properties getProducerProperties() { Properties props = new Properties(); props.putAll(kafka.getProps()); //set this because still needed by some APIs. props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, props.getProperty("metadata.broker.list")); return props; } public static Configuration getConsumerConfig() { Configuration kafkaConfig = new Configuration(conf); KafkaUtils.addKafkaConnectionProperties(KafkaInputFormat.tagExistingKafkaConnectionProperties( getConsumerProperties()), kafkaConfig); return kafkaConfig; } public static List<String> writeData(Properties props, String topic, String batch, int loops, int numValuesPerLoop) { Properties producerProps = new Properties(); producerProps.putAll(props); producerProps.setProperty("value.serializer", StringSerDe.class.getName()); producerProps.setProperty("key.serializer", StringSerDe.class.getName()); // Set the default compression used to be snappy producerProps.setProperty("compression.codec", "snappy"); producerProps.setProperty("request.required.acks", "1"); Producer<String, String> producer = new KafkaProducer<>(producerProps); List<String> keys = new LinkedList<>(); try { for (int i = 0; i < loops; i++) { for (int j = 0; j < numValuesPerLoop; j++) { String key = "key" + batch + i + j; String value = "value" + batch + i + j; keys.add(key); producer.send(new ProducerRecord<>(topic, key, value)); } } } finally { producer.close(); } return keys; } public static class StringSerDe implements Serializer<String>, Deserializer<String> { @Override public void configure(Map map, boolean b) { } @Override public byte[] serialize(String topic, String value) { return value.getBytes(); } @Override public String deserialize(String topic, byte[] bytes) { return new String(bytes); } @Override public void close() { } } }
2,315
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/record/KafkaRecordsIterableIT.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.record; import org.apache.crunch.kafka.*; import org.apache.crunch.kafka.utils.KafkaTestUtils; import org.junit.Test; import org.apache.crunch.Pair; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.TimeoutException; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestName; import org.junit.runner.RunWith; import org.mockito.Matchers; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.UUID; import static org.apache.crunch.kafka.ClusterTest.writeData; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNot.not; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public class KafkaRecordsIterableIT { @Mock private Consumer<String, String> mockedConsumer; @Mock private ConsumerRecords<String, String> records; @Rule public TestName testName = new TestName(); private String topic; private Map<TopicPartition, Long> startOffsets; private Map<TopicPartition, Long> stopOffsets; private Map<TopicPartition, Pair<Long, Long>> offsets; private Consumer<String, String> consumer; private Properties props; private Properties consumerProps; @BeforeClass public static void init() throws Exception { ClusterTest.startTest(); } @AfterClass public static void cleanup() throws Exception { ClusterTest.endTest(); } @Before public void setup() { topic = UUID.randomUUID().toString(); props = ClusterTest.getConsumerProperties(); startOffsets = new HashMap<>(); stopOffsets = new HashMap<>(); offsets = new HashMap<>(); for (int i = 0; i < 4; i++) { TopicPartition tp = new TopicPartition(topic, i); startOffsets.put(tp, 0L); stopOffsets.put(tp, 100L); offsets.put(tp, Pair.of(0L, 100L)); } consumerProps = new Properties(); consumerProps.putAll(props); } @After public void shutdown() { } @Test(expected = IllegalArgumentException.class) public void nullConsumer() { new KafkaRecordsIterable(null, offsets, new Properties()); } @Test(expected = IllegalArgumentException.class) public void nullOffsets() { new 
KafkaRecordsIterable<>(consumer, null, new Properties()); } @Test(expected=IllegalArgumentException.class) public void emptyOffsets() { consumer = new KafkaConsumer<>(consumerProps, new ClusterTest.StringSerDe(), new ClusterTest.StringSerDe()); Iterable<ConsumerRecord<String, String>> data = new KafkaRecordsIterable<>(consumer, Collections.<TopicPartition, Pair<Long, Long>>emptyMap(), new Properties()); } @Test(expected = IllegalArgumentException.class) public void nullProperties() { new KafkaRecordsIterable(consumer, offsets, null); } @Test public void iterateOverValues() { consumer = new KafkaConsumer<>(consumerProps, new ClusterTest.StringSerDe(), new ClusterTest.StringSerDe()); int loops = 10; int numPerLoop = 100; int total = loops * numPerLoop; List<String> keys = writeData(props, topic, "batch", loops, numPerLoop); startOffsets = KafkaTestUtils.getStartOffsets(consumer, topic); stopOffsets = KafkaTestUtils.getStopOffsets(consumer, topic); Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>(); for (Map.Entry<TopicPartition, Long> entry : startOffsets.entrySet()) { offsets.put(entry.getKey(), Pair.of(entry.getValue(), stopOffsets.get(entry.getKey()))); } Iterable<ConsumerRecord<String, String>> data = new KafkaRecordsIterable<String, String>(consumer, offsets, new Properties()); int count = 0; for (ConsumerRecord<String, String> record : data) { assertThat(keys, hasItem(record.key())); assertTrue(keys.remove(record.key())); count++; } assertThat(count, is(total)); assertThat(keys.size(), is(0)); } @Test public void iterateOverOneValue() { consumer = new KafkaConsumer<>(consumerProps, new ClusterTest.StringSerDe(), new ClusterTest.StringSerDe()); int loops = 1; int numPerLoop = 1; int total = loops * numPerLoop; List<String> keys = writeData(props, topic, "batch", loops, numPerLoop); startOffsets = KafkaTestUtils.getStartOffsets(consumer, topic); stopOffsets = KafkaTestUtils.getStopOffsets(consumer, topic); Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>(); for (Map.Entry<TopicPartition, Long> entry : startOffsets.entrySet()) { offsets.put(entry.getKey(), Pair.of(entry.getValue(), stopOffsets.get(entry.getKey()))); } Iterable<ConsumerRecord<String, String>> data = new KafkaRecordsIterable<String, String>(consumer, offsets, new Properties()); int count = 0; for (ConsumerRecord<String, String> record : data) { assertThat(keys, hasItem(record.key())); assertTrue(keys.remove(record.key())); count++; } assertThat(count, is(total)); assertThat(keys.size(), is(0)); } @Test public void iterateOverNothing() { consumer = new KafkaConsumer<>(consumerProps, new ClusterTest.StringSerDe(), new ClusterTest.StringSerDe()); int loops = 10; int numPerLoop = 100; writeData(props, topic, "batch", loops, numPerLoop); //set the start offsets equal to the stop so won't iterate over anything startOffsets = KafkaTestUtils.getStartOffsets(consumer, topic); stopOffsets = KafkaTestUtils.getStartOffsets(consumer, topic); Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>(); for (Map.Entry<TopicPartition, Long> entry : startOffsets.entrySet()) { offsets.put(entry.getKey(), Pair.of(entry.getValue(), stopOffsets.get(entry.getKey()))); } Iterable<ConsumerRecord<String, String>> data = new KafkaRecordsIterable<>(consumer, offsets, new Properties()); int count = 0; for (ConsumerRecord<String, String> record : data) { count++; } assertThat(count, is(0)); } @Test public void iterateOverPartial() { consumer = new KafkaConsumer<>(consumerProps, new ClusterTest.StringSerDe(), new 
ClusterTest.StringSerDe()); int loops = 10; int numPerLoop = 100; int numPerPartition = 50; writeData(props, topic, "batch", loops, numPerLoop); Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>(); startOffsets = KafkaTestUtils.getStartOffsets(consumer, topic); for (Map.Entry<TopicPartition, Long> entry : startOffsets.entrySet()) { offsets.put(entry.getKey(), Pair.of(entry.getValue(), entry.getValue() + numPerPartition)); } Iterable<ConsumerRecord<String, String>> data = new KafkaRecordsIterable<>(consumer, offsets, new Properties()); int count = 0; for (ConsumerRecord<String, String> record : data) { count++; } assertThat(count, is(startOffsets.size() * numPerPartition)); } @Test public void dontIteratePastStop() { consumer = new KafkaConsumer<>(consumerProps, new ClusterTest.StringSerDe(), new ClusterTest.StringSerDe()); int loops = 10; int numPerLoop = 100; List<String> keys = writeData(props, topic, "batch1", loops, numPerLoop); startOffsets = KafkaTestUtils.getStartOffsets(consumer, topic); stopOffsets = KafkaTestUtils.getStopOffsets(consumer, topic); Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>(); for (Map.Entry<TopicPartition, Long> entry : startOffsets.entrySet()) { offsets.put(entry.getKey(), Pair.of(entry.getValue(), stopOffsets.get(entry.getKey()))); } List<String> secondKeys = writeData(props, topic, "batch2", loops, numPerLoop); Iterable<ConsumerRecord<String, String>> data = new KafkaRecordsIterable<>(consumer, offsets, new Properties()); int count = 0; for (ConsumerRecord<String, String> record : data) { assertThat(keys, hasItem(record.key())); assertTrue(keys.remove(record.key())); assertThat(secondKeys, not(hasItem(record.key()))); count++; } assertThat(count, is(loops * numPerLoop)); assertThat(keys.size(), is(0)); } @Test public void iterateSkipInitialValues() { consumer = new KafkaConsumer<>(consumerProps, new ClusterTest.StringSerDe(), new ClusterTest.StringSerDe()); int loops = 10; int numPerLoop = 100; List<String> keys = writeData(props, topic, "batch1", loops, numPerLoop); //set the start offsets equal to the stop so won't iterate over anything startOffsets = KafkaTestUtils.getStopOffsets(consumer, topic); List<String> secondKeys = writeData(props, topic, "batch2", loops, numPerLoop); stopOffsets = KafkaTestUtils.getStopOffsets(consumer, topic); Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>(); for (Map.Entry<TopicPartition, Long> entry : startOffsets.entrySet()) { offsets.put(entry.getKey(), Pair.of(entry.getValue(), stopOffsets.get(entry.getKey()))); } Iterable<ConsumerRecord<String, String>> data = new KafkaRecordsIterable<String, String>(consumer, offsets, new Properties()); int count = 0; for (ConsumerRecord<String, String> record : data) { assertThat(secondKeys, hasItem(record.key())); assertTrue(secondKeys.remove(record.key())); assertThat(keys, not(hasItem(record.key()))); count++; } assertThat(count, is(loops * numPerLoop)); assertThat(secondKeys.size(), is(0)); } @Test public void iterateValuesWithExceptions() { List<ConsumerRecord<String, String>> returnedRecords = new LinkedList<>(); for(int i = 0; i < 25; i++){ returnedRecords.add(new ConsumerRecord<String, String>(topic, 0, i, "key", null)); returnedRecords.add(new ConsumerRecord<String, String>(topic, 1, i, "key", null)); returnedRecords.add(new ConsumerRecord<String, String>(topic, 2, i, "key", null)); returnedRecords.add(new ConsumerRecord<String, String>(topic, 3, i, "key", null)); } offsets = new HashMap<>(); offsets.put(new TopicPartition(topic, 0), 
Pair.of(0L, 25L)); offsets.put(new TopicPartition(topic, 1), Pair.of(0L, 25L)); offsets.put(new TopicPartition(topic, 2), Pair.of(0L, 25L)); offsets.put(new TopicPartition(topic, 3), Pair.of(0L, 25L)); when(records.isEmpty()).thenReturn(false); when(records.iterator()).thenReturn(returnedRecords.iterator()); when(mockedConsumer.poll(Matchers.anyLong())) //request for the first poll .thenReturn(null) //fail twice .thenThrow(new TimeoutException("fail1")) .thenThrow(new TimeoutException("fail2")) //request that will give data .thenReturn(records) // shows to stop retrieving data .thenReturn(null); Iterable<ConsumerRecord<String, String>> data = new KafkaRecordsIterable<>(mockedConsumer, offsets, new Properties()); int count = 0; for (ConsumerRecord<String, String> record : data) { count++; } //should have gotten one value per topicpartition assertThat(count, is(returnedRecords.size())); } @Test public void iterateValuesAfterStopOffsets() { List<ConsumerRecord<String, String>> returnedRecords = new LinkedList<>(); for (Map.Entry<TopicPartition, Long> entry : stopOffsets.entrySet()) { returnedRecords.add(new ConsumerRecord<String, String>(entry.getKey().topic(), entry.getKey().partition(), entry.getValue() + 1, "key", null)); } when(records.isEmpty()).thenReturn(false); when(records.iterator()).thenReturn(returnedRecords.iterator()); when(mockedConsumer.poll(Matchers.anyLong())).thenReturn(records).thenReturn(records).thenReturn(null); Iterable<ConsumerRecord<String, String>> data = new KafkaRecordsIterable<>(mockedConsumer, offsets, new Properties()); int count = 0; for (ConsumerRecord<String, String> record : data) { count++; } assertThat(count, is(0)); } @Test(expected = RetriableException.class) public void iterateRetriableExceptionMaxExceeded() { List<ConsumerRecord<String, String>> returnedRecords = new LinkedList<>(); for (Map.Entry<TopicPartition, Long> entry : stopOffsets.entrySet()) { returnedRecords.add(new ConsumerRecord<String, String>(entry.getKey().topic(), entry.getKey().partition(), entry.getValue() + 1, "key", null)); } when(mockedConsumer.poll(Matchers.anyLong())) //for the fill poll call .thenReturn(null) //retry 5 times then fail .thenThrow(new TimeoutException("fail1")) .thenThrow(new TimeoutException("fail2")) .thenThrow(new TimeoutException("fail3")) .thenThrow(new TimeoutException("fail4")) .thenThrow(new TimeoutException("fail5")) .thenThrow(new TimeoutException("fail6")); Iterable<ConsumerRecord<String, String>> data = new KafkaRecordsIterable<>(mockedConsumer, offsets, new Properties()); data.iterator().next(); } }
2,316
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/record/KafkaRecordReaderTest.java
/** * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file * distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the * Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language * governing permissions and limitations under the License. */ package org.apache.crunch.kafka.record; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.DisconnectException; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; import java.io.IOException; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Properties; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; import static org.mockito.Matchers.anyLong; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public class KafkaRecordReaderTest { @Mock private KafkaConsumer<String, String> consumer; @Mock private TaskAttemptContext taskAttemptContext; private TopicPartition topicPartition; private long startOffset; private long endOffset; private KafkaInputSplit inputSplit; private ConsumerRecords<String, String> records; private KafkaRecordReader<String, String> reader; @Before public void before() throws IOException, InterruptedException { when(taskAttemptContext.getConfiguration()).thenReturn(new Configuration(false)); startOffset = 0L; endOffset = 100L; topicPartition = new TopicPartition("topic", 0); inputSplit = new KafkaInputSplit(topicPartition.topic(), topicPartition.partition(), startOffset, endOffset); records = new ConsumerRecords<>(Collections.singletonMap(inputSplit.getTopicPartition(), Collections.singletonList(new ConsumerRecord<>("topic", 0, 0, "key", "value")))); when(consumer.poll(anyLong())).thenReturn(records); reader = new KafkaRecordReaderTester(); reader.initialize(inputSplit, taskAttemptContext); } @Test public void getRecords_consumerPollThrowsException_thenReturnsMessage() { // DisconnectException is retriable when(consumer.poll(anyLong())).thenThrow(new DisconnectException()).thenReturn(records); reader.loadRecords(); Iterator<ConsumerRecord<String, String>> iterator = reader.getRecordIterator(); assertThat(iterator.hasNext(), is(true)); assertThat(iterator.next(), is(records.records(topicPartition).get(0))); } @Test public void getRecords_consumerPollEmpty_thenReturnsMessage() { // DisconnectException is retriable when(consumer.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.<TopicPartition, List<ConsumerRecord<String, String>>> emptyMap())).thenReturn(records); reader.loadRecords(); Iterator<ConsumerRecord<String, String>> iterator = reader.getRecordIterator(); assertThat(iterator.hasNext(), 
is(true)); assertThat(iterator.next(), is(records.records(topicPartition).get(0))); } @Test public void nextKeyValue() throws IOException, InterruptedException { assertThat(reader.nextKeyValue(), is(true)); assertThat(reader.getCurrentKey(), is(records.records(topicPartition).get(0))); assertThat(reader.getCurrentOffset(), is(0L)); } @Test public void nextKeyValue_recordOffsetAheadOfExpected() throws IOException, InterruptedException { records = new ConsumerRecords<>(Collections.singletonMap(inputSplit.getTopicPartition(), Collections.singletonList(new ConsumerRecord<>("topic", 0, 10L, "key", "value")))); when(consumer.poll(anyLong())).thenReturn(records); assertThat(reader.nextKeyValue(), is(true)); assertThat(reader.getCurrentKey(), is(records.records(topicPartition).get(0))); assertThat(reader.getCurrentOffset(), is(10L)); } @Test public void nextKeyValue_noRecord_emptyPartition() throws IOException, InterruptedException { when(consumer.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.<TopicPartition, List<ConsumerRecord<String, String>>> emptyMap())); when(consumer.beginningOffsets(Collections.singletonList(topicPartition))).thenReturn( Collections.singletonMap(topicPartition, endOffset)); assertThat(reader.nextKeyValue(), is(false)); } @Test(expected = IOException.class) public void nextKeyValue_noRecord_nonEmptyPartition() throws IOException, InterruptedException { when(consumer.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.<TopicPartition, List<ConsumerRecord<String, String>>> emptyMap())); reader.nextKeyValue(); } @Test public void nextKeyValue_recordIsBeyondEndOffset() throws IOException, InterruptedException { records = new ConsumerRecords<>(Collections.singletonMap(inputSplit.getTopicPartition(), Collections.singletonList(new ConsumerRecord<>("topic", 0, 100L, "key", "value")))); when(consumer.poll(anyLong())).thenReturn(records); assertThat(reader.nextKeyValue(), is(false)); } @Test public void getEarliestOffset_noOffsetFound() { when(consumer.beginningOffsets(Collections.singletonList(inputSplit.getTopicPartition()))).thenReturn( Collections.<TopicPartition, Long> emptyMap()); assertThat(reader.getEarliestOffset(), is(0L)); } @Test public void getEarliestOffset() { when(consumer.beginningOffsets(Collections.singletonList(inputSplit.getTopicPartition()))).thenReturn( Collections.singletonMap(inputSplit.getTopicPartition(), 100L)); assertThat(reader.getEarliestOffset(), is(100L)); } private class KafkaRecordReaderTester extends KafkaRecordReader<String, String> { @Override protected KafkaConsumer<String, String> buildConsumer(Properties properties) { return consumer; } } }
2,317
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/record/ConsumerRecordHelperTest.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.kafka.record;

import org.apache.crunch.PCollection;
import org.apache.crunch.impl.mem.MemPipeline;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.io.BytesWritable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.record.TimestampType;
import org.junit.Test;

import java.io.IOException;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;

public class ConsumerRecordHelperTest {

  @Test (expected = IllegalArgumentException.class)
  public void serialize_nullRecord() throws IOException {
    ConsumerRecordHelper.serialize(null);
  }

  @Test (expected = IllegalArgumentException.class)
  public void deserialize_nullRecord() throws IOException {
    ConsumerRecordHelper.deserialize(null);
  }

  @Test
  public void serializeDeserialize() throws IOException {
    ConsumerRecord<BytesWritable, BytesWritable> record = new ConsumerRecord<>("topic", 1, 2, 3L,
        TimestampType.CREATE_TIME, 4L, 5, 6, new BytesWritable("key".getBytes()), new BytesWritable("value".getBytes()));

    ConsumerRecord<BytesWritable, BytesWritable> newRecord = ConsumerRecordHelper.deserialize(
        ConsumerRecordHelper.serialize(record));

    assertRecordsAreEqual(record, newRecord);
  }

  @Test
  public void serializeDeserialize_nullKeyValue() throws IOException {
    ConsumerRecord<BytesWritable, BytesWritable> record = new ConsumerRecord<>("topic", 1, 2, 3L,
        TimestampType.CREATE_TIME, 4L, 5, 6, null, null);

    ConsumerRecord<BytesWritable, BytesWritable> newRecord = ConsumerRecordHelper.deserialize(
        ConsumerRecordHelper.serialize(record));

    assertRecordsAreEqual(record, newRecord);
  }

  @Test
  public void mapFns() throws IOException {
    ConsumerRecord<BytesWritable, BytesWritable> record = new ConsumerRecord<>("topic", 1, 2, 3L,
        TimestampType.CREATE_TIME, 4L, 5, 6, new BytesWritable("key".getBytes()), new BytesWritable("value".getBytes()));

    PCollection<BytesWritable> bytes = MemPipeline.collectionOf(new BytesWritable(ConsumerRecordHelper.serialize(record)));

    PCollection<ConsumerRecord<BytesWritable, BytesWritable>> records = bytes.parallelDo(
        new ConsumerRecordHelper.BytesToConsumerRecord(), ConsumerRecordHelper.CONSUMER_RECORD_P_TYPE);

    PCollection<BytesWritable> newBytes = records.parallelDo(
        new ConsumerRecordHelper.ConsumerRecordToBytes(), Writables.writables(BytesWritable.class));

    ConsumerRecord<BytesWritable, BytesWritable> newRecord = ConsumerRecordHelper.deserialize(
        newBytes.materialize().iterator().next().getBytes());

    assertRecordsAreEqual(record, newRecord);
  }

  private void assertRecordsAreEqual(ConsumerRecord<BytesWritable, BytesWritable> record1,
      ConsumerRecord<BytesWritable, BytesWritable> record2) {
    // ConsumerRecord doesn't implement equals so have to verify each field
    assertThat(record1.topic(), is(record2.topic()));
    assertThat(record1.partition(), is(record2.partition()));
    assertThat(record1.offset(), is(record2.offset()));
    assertThat(record1.timestamp(), is(record2.timestamp()));
    assertThat(record1.timestampType(), is(record2.timestampType()));
    assertThat(record1.checksum(), is(record2.checksum()));
    assertThat(record1.serializedKeySize(), is(record2.serializedKeySize()));
    assertThat(record1.serializedValueSize(), is(record2.serializedValueSize()));
    assertThat(record1.key(), is(record2.key()));
    assertThat(record1.value(), is(record2.value()));
  }
}
2,318
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/record/KafkaInputFormatTest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.record; import org.apache.crunch.io.FormatBundle; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobContext; import org.junit.Test; import java.io.IOException; import java.util.List; import java.util.Properties; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; import static org.mockito.Mockito.mock; public class KafkaInputFormatTest { @Test public void generateConnectionPropertyKeyTest() { String propertyName = "some.property"; String actual = KafkaInputFormat.generateConnectionPropertyKey(propertyName); String expected = "org.apache.crunch.kafka.connection.properties.some.property"; assertEquals(expected, actual); } @Test public void getConnectionPropertyFromKeyTest() { String prefixedConnectionProperty = "org.apache.crunch.kafka.connection.properties.some.property"; String actual = KafkaInputFormat.getConnectionPropertyFromKey(prefixedConnectionProperty); String expected = "some.property"; assertEquals(expected, actual); } @Test public void writeConnectionPropertiesToBundleTest() { FormatBundle<KafkaInputFormat> actual = FormatBundle.forInput(KafkaInputFormat.class); Properties connectionProperties = new Properties(); connectionProperties.put("key1", "value1"); connectionProperties.put("key2", "value2"); KafkaInputFormat.writeConnectionPropertiesToBundle(connectionProperties, actual); FormatBundle<KafkaInputFormat> expected = FormatBundle.forInput(KafkaInputFormat.class); expected.set("org.apache.crunch.kafka.connection.properties.key1", "value1"); expected.set("org.apache.crunch.kafka.connection.properties.key2", "value2"); assertEquals(expected, actual); } @Test public void filterConnectionPropertiesTest() { Properties props = new Properties(); props.put("org.apache.crunch.kafka.connection.properties.key1", "value1"); props.put("org.apache.crunch.kafka.connection.properties.key2", "value2"); props.put("org_apache_crunch_kafka_connection_properties.key3", "value3"); props.put("org.apache.crunch.another.prefix.properties.key4", "value4"); Properties actual = KafkaInputFormat.filterConnectionProperties(props); Properties expected = new Properties(); expected.put("key1", "value1"); expected.put("key2", "value2"); assertEquals(expected, actual); } @Test(expected = IllegalArgumentException.class) public void getSplitsInvalidMaxRecords() throws IOException, InterruptedException { KafkaInputFormat kafkaInputFormat = new KafkaInputFormat(); Configuration conf = new Configuration(false); conf.setLong(KafkaInputFormat.KAFKA_MAX_RECORDS_PER_SPLIT, 0L); kafkaInputFormat.setConf(conf); 
kafkaInputFormat.getSplits(mock(JobContext.class)); } @Test public void getSplitsConfiguredMaxRecords() throws IOException, InterruptedException { KafkaInputFormat kafkaInputFormat = new KafkaInputFormat(); Configuration conf = new Configuration(false); conf.setLong(KafkaInputFormat.KAFKA_MAX_RECORDS_PER_SPLIT, 2L); conf.set("org.apache.crunch.kafka.offsets.topic.abc.partitions", "0,1"); conf.setLong("org.apache.crunch.kafka.offsets.topic.abc.partitions.0.start", 300L); conf.setLong("org.apache.crunch.kafka.offsets.topic.abc.partitions.0.end", 1000L); conf.setLong("org.apache.crunch.kafka.offsets.topic.abc.partitions.1.start", 30L); conf.setLong("org.apache.crunch.kafka.offsets.topic.abc.partitions.1.end", 100L); conf.set("org.apache.crunch.kafka.offsets.topic.xyz.partitions", "0"); conf.setLong("org.apache.crunch.kafka.offsets.topic.xyz.partitions.0.start", 3L); conf.setLong("org.apache.crunch.kafka.offsets.topic.xyz.partitions.0.end", 10L); kafkaInputFormat.setConf(conf); List<InputSplit> splits = kafkaInputFormat.getSplits(mock(JobContext.class)); assertThat(splits.size(), is((700/2 + 700%2) + (70/2 + 70%2) + (7/2 + 7%2))); } @Test public void getSplitsDefaultMaxRecords() throws IOException, InterruptedException { KafkaInputFormat kafkaInputFormat = new KafkaInputFormat(); Configuration conf = new Configuration(false); conf.set("org.apache.crunch.kafka.offsets.topic.abc.partitions", "0"); conf.setLong("org.apache.crunch.kafka.offsets.topic.abc.partitions.0.start", 0L); conf.setLong("org.apache.crunch.kafka.offsets.topic.abc.partitions.0.end", 5234567L); kafkaInputFormat.setConf(conf); List<InputSplit> splits = kafkaInputFormat.getSplits(mock(JobContext.class)); assertThat(splits.size(), is(2)); } @Test public void getSplitsNoRecords() throws IOException, InterruptedException { KafkaInputFormat kafkaInputFormat = new KafkaInputFormat(); Configuration conf = new Configuration(false); conf.set("org.apache.crunch.kafka.offsets.topic.abc.partitions", "0"); conf.setLong("org.apache.crunch.kafka.offsets.topic.abc.partitions.0.start", 5L); conf.setLong("org.apache.crunch.kafka.offsets.topic.abc.partitions.0.end", 5L); kafkaInputFormat.setConf(conf); List<InputSplit> splits = kafkaInputFormat.getSplits(mock(JobContext.class)); assertThat(splits.size(), is(0)); } }
2,319
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/record/KafkaSourceIT.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.record; import org.apache.crunch.MapFn; import org.apache.crunch.PCollection; import org.apache.crunch.Pair; import org.apache.crunch.Pipeline; import org.apache.crunch.impl.mr.MRPipeline; import org.apache.crunch.io.FormatBundle; import org.apache.crunch.io.From; import org.apache.crunch.io.ReadableSource; import org.apache.crunch.io.To; import org.apache.crunch.kafka.ClusterTest; import org.apache.crunch.kafka.KafkaUtils; import org.apache.crunch.kafka.utils.KafkaTestUtils; import org.apache.crunch.test.TemporaryPath; import org.apache.crunch.types.avro.Avros; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.BytesWritable; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.TopicPartition; import org.junit.*; import org.junit.rules.TestName; import java.util.*; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; import static org.junit.matchers.JUnitMatchers.hasItem; public class KafkaSourceIT { @Rule public TemporaryPath path = new TemporaryPath(); @Rule public TestName testName = new TestName(); private Properties consumerProps; private String topic; @BeforeClass public static void setup() throws Exception { ClusterTest.startTest(); } @AfterClass public static void cleanup() throws Exception { ClusterTest.endTest(); } @Before public void setupTest() { topic = UUID.randomUUID().toString(); consumerProps = ClusterTest.getConsumerProperties(); } @Test public void defaultEarliestOffsetReset() { Map<TopicPartition, Pair<Long, Long>> offsets = Collections.emptyMap(); //Remove this so should revert to default. 
consumerProps.remove(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG); KafkaSource kafkaSource = new KafkaSource(consumerProps, offsets); FormatBundle inputBundle = kafkaSource.getInputBundle(); Configuration cfg = new Configuration(false); inputBundle.configure(cfg); Properties kafkaConnectionProperties = KafkaUtils.getKafkaConnectionProperties(cfg); kafkaConnectionProperties = KafkaInputFormat.filterConnectionProperties(kafkaConnectionProperties); assertThat(kafkaConnectionProperties.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), is("earliest")); } @Test public void offsetResetOverridable() { Map<TopicPartition, Pair<Long, Long>> offsets = Collections.emptyMap(); consumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest"); KafkaSource kafkaSource = new KafkaSource(consumerProps, offsets); FormatBundle inputBundle = kafkaSource.getInputBundle(); Configuration cfg = new Configuration(false); inputBundle.configure(cfg); Properties kafkaConnectionProperties = KafkaUtils.getKafkaConnectionProperties(cfg); kafkaConnectionProperties = KafkaInputFormat.filterConnectionProperties(kafkaConnectionProperties); assertThat(kafkaConnectionProperties.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), is("latest")); } @Test public void sourceReadData() { List<String> keys = ClusterTest.writeData(ClusterTest.getProducerProperties(), topic, "batch", 10, 10); KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps); Map<TopicPartition, Long> startOffsets = KafkaTestUtils.getStartOffsets(consumer, topic); Map<TopicPartition, Long> endOffsets = KafkaTestUtils.getStopOffsets(consumer, topic); Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>(); for (Map.Entry<TopicPartition, Long> entry : startOffsets.entrySet()) { Long endingOffset = endOffsets.get(entry.getKey()); offsets.put(entry.getKey(), Pair.of(entry.getValue(), endingOffset)); } Configuration config = ClusterTest.getConf(); Pipeline pipeline = new MRPipeline(KafkaSourceIT.class, config); pipeline.enableDebug(); ReadableSource<ConsumerRecord<BytesWritable, BytesWritable>> kafkaSource = new KafkaSource(consumerProps, offsets); PCollection<ConsumerRecord<BytesWritable, BytesWritable>> read = pipeline.read(kafkaSource); Set<String> keysRead = new HashSet<>(); int numRecordsFound = 0; String currentKey; for (ConsumerRecord<BytesWritable, BytesWritable> record : read.materialize()) { currentKey = new String(record.key().getBytes()); assertThat(keys, hasItem(currentKey)); numRecordsFound++; keysRead.add(new String(record.key().getBytes())); } assertThat(numRecordsFound, is(keys.size())); assertThat(keysRead.size(), is(keys.size())); pipeline.done(); } @Test public void sourceReadDataThroughPipeline() { List<String> keys = ClusterTest.writeData(ClusterTest.getProducerProperties(), topic, "batch", 10, 10); KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps); Map<TopicPartition, Long> startOffsets = KafkaTestUtils.getStartOffsets(consumer, topic); Map<TopicPartition, Long> endOffsets = KafkaTestUtils.getStopOffsets(consumer, topic); Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>(); for (Map.Entry<TopicPartition, Long> entry : startOffsets.entrySet()) { Long endingOffset = endOffsets.get(entry.getKey()); offsets.put(entry.getKey(), Pair.of(entry.getValue(), endingOffset)); } Configuration config = ClusterTest.getConf(); Pipeline pipeline = new MRPipeline(KafkaSourceIT.class, config); pipeline.enableDebug(); ReadableSource<ConsumerRecord<BytesWritable, BytesWritable>> 
kafkaSource = new KafkaSource(consumerProps, offsets); PCollection<ConsumerRecord<BytesWritable, BytesWritable>> read = pipeline.read(kafkaSource); Path out = path.getPath("out"); read.parallelDo(new KafkaSourceIT.SimpleConvertFn(), Avros.strings()).write(To.textFile(out)); pipeline.run(); PCollection<String> persistedKeys = pipeline.read(From.textFile(out)); Set<String> keysRead = new HashSet<>(); int numRecordsFound = 0; for (String value : persistedKeys.materialize()) { assertThat(keys, hasItem(value)); numRecordsFound++; keysRead.add(value); } assertThat(numRecordsFound, is(keys.size())); assertThat(keysRead.size(), is(keys.size())); pipeline.done(); } @Test public void sourceReadDataThroughPipelineMultipleSplitsPerPartition() { Configuration config = ClusterTest.getConf(); config.setLong(KafkaInputFormat.KAFKA_MAX_RECORDS_PER_SPLIT, 7L); List<String> keys = ClusterTest.writeData(ClusterTest.getProducerProperties(), topic, "batch", 10, 10); KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps); Map<TopicPartition, Long> startOffsets = KafkaTestUtils.getStartOffsets(consumer, topic); Map<TopicPartition, Long> endOffsets = KafkaTestUtils.getStopOffsets(consumer, topic); Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>(); for (Map.Entry<TopicPartition, Long> entry : startOffsets.entrySet()) { Long endingOffset = endOffsets.get(entry.getKey()); offsets.put(entry.getKey(), Pair.of(entry.getValue(), endingOffset)); } Pipeline pipeline = new MRPipeline(KafkaSourceIT.class, config); pipeline.enableDebug(); ReadableSource<ConsumerRecord<BytesWritable, BytesWritable>> kafkaSource = new KafkaSource(consumerProps, offsets); PCollection<ConsumerRecord<BytesWritable, BytesWritable>> read = pipeline.read(kafkaSource); Path out = path.getPath("out"); read.parallelDo(new KafkaSourceIT.SimpleConvertFn(), Avros.strings()).write(To.textFile(out)); pipeline.run(); PCollection<String> persistedKeys = pipeline.read(From.textFile(out)); Set<String> keysRead = new HashSet<>(); int numRecordsFound = 0; for (String value : persistedKeys.materialize()) { assertThat(keys, hasItem(value)); numRecordsFound++; keysRead.add(value); } assertThat(numRecordsFound, is(keys.size())); assertThat(keysRead.size(), is(keys.size())); pipeline.done(); } private static class SimpleConvertFn extends MapFn<ConsumerRecord<BytesWritable, BytesWritable>, String> { @Override public String map(ConsumerRecord<BytesWritable, BytesWritable> record) { return new String(record.key().getBytes()); } } }
2,320
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/record/KafkaDataIT.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.record; import org.apache.crunch.Pair; import org.apache.crunch.kafka.ClusterTest; import org.apache.crunch.kafka.KafkaUtils; import org.apache.crunch.kafka.utils.KafkaTestUtils; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.TopicPartition; import org.junit.*; import org.junit.rules.TestName; import java.io.IOException; import java.time.Duration; import java.time.temporal.ChronoUnit; import java.util.*; import static org.apache.crunch.kafka.ClusterTest.writeData; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; public class KafkaDataIT { @Rule public TestName testName = new TestName(); private String topic; private Map<TopicPartition, Long> startOffsets; private Map<TopicPartition, Long> stopOffsets; private Map<TopicPartition, Pair<Long, Long>> offsets; private Properties props; private Consumer<String, String> consumer; @BeforeClass public static void init() throws Exception { ClusterTest.startTest(); } @AfterClass public static void cleanup() throws Exception { ClusterTest.endTest(); } @Before public void setup() { topic = UUID.randomUUID().toString(); props = ClusterTest.getConsumerProperties(); startOffsets = new HashMap<>(); stopOffsets = new HashMap<>(); offsets = new HashMap<>(); for (int i = 0; i < 4; i++) { TopicPartition tp = new TopicPartition(topic, i); startOffsets.put(tp, 0L); stopOffsets.put(tp, 100L); offsets.put(tp, Pair.of(0L, 100L)); } consumer = new KafkaConsumer<>(props); } @Test public void getDataIterable() throws IOException { int loops = 10; int numPerLoop = 100; int total = loops * numPerLoop; List<String> keys = writeData(props, topic, "batch", loops, numPerLoop); startOffsets = KafkaTestUtils.getStartOffsets(consumer, topic); stopOffsets = KafkaTestUtils.getStopOffsets(consumer, topic); Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>(); for (Map.Entry<TopicPartition, Long> entry : startOffsets.entrySet()) { offsets.put(entry.getKey(), Pair.of(entry.getValue(), stopOffsets.get(entry.getKey()))); } Iterable<ConsumerRecord<String, String>> data = new KafkaData<String, String>(props, offsets).read(null); int count = 0; for (ConsumerRecord<String, String> record : data) { assertThat(keys, hasItem(record.key())); assertTrue(keys.remove(record.key())); count++; } assertThat(count, is(total)); assertThat(keys.size(), is(0)); } }
2,321
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/utils/EmbeddedZookeeper.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.kafka.utils;

import org.apache.commons.io.FileUtils;
import org.apache.zookeeper.server.NIOServerCnxnFactory;
import org.apache.zookeeper.server.ZooKeeperServer;

import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;

/**
 * Embedded Zookeeper instance for testing purposes.
 * <p>
 * Adapted from the {@code kafka.zk.EmbeddedZookeeper} class.
 * </p>
 */
class EmbeddedZookeeper {

  private final File snapshotDir;
  private final File logDir;
  private final NIOServerCnxnFactory factory;

  /**
   * Constructs an embedded Zookeeper instance.
   *
   * @param connectString Zookeeper connection string.
   *
   * @throws IOException if an error occurs during Zookeeper initialization.
   */
  public EmbeddedZookeeper(String connectString) throws IOException {
    this.snapshotDir = KafkaTestUtils.getTempDir();
    this.logDir = KafkaTestUtils.getTempDir();
    this.factory = new NIOServerCnxnFactory();

    String hostname = connectString.split(":")[0];
    int port = Integer.valueOf(connectString.split(":")[1]);
    int maxClientConnections = 1024;

    factory.configure(new InetSocketAddress(hostname, port), maxClientConnections);

    try {
      int tickTime = 500;
      factory.startup(new ZooKeeperServer(snapshotDir, logDir, tickTime));
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
  }

  /**
   * Shuts down the embedded Zookeeper instance.
   */
  public void shutdown() throws IOException {
    factory.shutdown();
    FileUtils.deleteDirectory(snapshotDir);
    FileUtils.deleteDirectory(logDir);
  }
}
2,322
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/utils/ZkStringSerializer.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.kafka.utils;

import org.I0Itec.zkclient.exception.ZkMarshallingError;
import org.I0Itec.zkclient.serialize.ZkSerializer;

import java.nio.charset.Charset;

/**
 * A {@link ZkSerializer Zookeeper serializer} for {@link String} objects.
 * <p>
 * Ported from the {@code kafka.utils.ZKStringSerializer} scala object.
 * </p>
 */
public class ZkStringSerializer implements ZkSerializer {

  private static final Charset UTF_8 = Charset.forName("UTF-8");

  @Override
  public byte[] serialize(Object data) throws ZkMarshallingError {
    return ((String) data).getBytes(UTF_8);
  }

  @Override
  public Object deserialize(byte[] bytes) throws ZkMarshallingError {
    return bytes != null ? new String(bytes, UTF_8) : null;
  }
}
2,323
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/utils/KafkaTestUtils.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.kafka.utils;

import org.apache.crunch.kafka.KafkaUtils;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

import java.io.File;
import java.io.IOException;
import java.net.ServerSocket;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.HashSet;
import java.util.Map;
import java.util.Random;
import java.util.Set;

/**
 * Assorted Kafka testing utility methods.
 */
public class KafkaTestUtils {

  private static final Random RANDOM = new Random();
  private static final String TEMP_DIR_PREFIX = "kafka-";

  private static final Set<Integer> USED_PORTS = new HashSet<Integer>();

  /**
   * Creates and returns a new randomly named temporary directory. It will be deleted upon JVM exit.
   *
   * @return a new temporary directory.
   *
   * @throws RuntimeException if a new temporary directory could not be created.
   */
  public static File getTempDir() {
    File file = new File(System.getProperty("java.io.tmpdir"), TEMP_DIR_PREFIX + RANDOM.nextInt(10000000));
    if (!file.mkdirs()) {
      throw new RuntimeException("could not create temp directory: " + file.getAbsolutePath());
    }
    file.deleteOnExit();
    return file;
  }

  /**
   * Returns an array containing the specified number of available local ports.
   *
   * @param count Number of local ports to identify and return.
   *
   * @return an array of available local port numbers.
   *
   * @throws RuntimeException if an I/O error occurs opening or closing a socket.
   */
  public static int[] getPorts(int count) {
    int[] ports = new int[count];
    Set<ServerSocket> openSockets = new HashSet<ServerSocket>(count + USED_PORTS.size());

    for (int i = 0; i < count; ) {
      try {
        ServerSocket socket = new ServerSocket(0);
        int port = socket.getLocalPort();
        openSockets.add(socket);

        // Disallow port reuse.
        if (!USED_PORTS.contains(port)) {
          ports[i++] = port;
          USED_PORTS.add(port);
        }
      } catch (IOException e) {
        throw new RuntimeException("could not open socket", e);
      }
    }

    // Close the sockets so that their port numbers can be used by the caller.
    for (ServerSocket socket : openSockets) {
      try {
        socket.close();
      } catch (IOException e) {
        throw new RuntimeException("could not close socket", e);
      }
    }

    return ports;
  }

  public static Map<TopicPartition, Long> getStartOffsets(Consumer<?, ?> consumer, String topic) {
    return consumer.beginningOffsets(KafkaUtils.getTopicPartitions(consumer, topic), Duration.of(1, ChronoUnit.MINUTES));
  }

  public static Map<TopicPartition, Long> getStopOffsets(Consumer<?, ?> consumer, String topic) {
    return consumer.endOffsets(KafkaUtils.getTopicPartitions(consumer, topic), Duration.of(1, ChronoUnit.MINUTES));
  }
}
2,324
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/utils/KafkaBrokerTestHarness.java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.utils; import kafka.metrics.KafkaMetricsReporter; import kafka.server.KafkaConfig; import kafka.server.KafkaServer; import org.apache.commons.io.FileUtils; import org.apache.kafka.common.utils.Time; import scala.Option; import scala.collection.JavaConversions; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Properties; import static scala.collection.JavaConversions.asJavaIterable; /** * A test harness that brings up some number of Kafka broker nodes. * <p> * Adapted from the {@code kafka.integration.KafkaServerTestHarness} class. * </p> */ public class KafkaBrokerTestHarness extends ZookeeperTestHarness { /** * Producer send acknowledgment timeout in milliseconds. */ public static final String KAFKA_PRODUCER_ACK_TIMEOUT_MILLIS = "request.timeout.ms"; /** * Producer send retry maximum count. */ public static final String KAFKA_PRODUCER_RETRY_MAX = "message.send.max.retries"; /** * Producer send retry backoff interval in milliseconds. */ public static final String KAFKA_PRODUCER_RETRY_INTERVAL_MILLIS = "retry.backoff.ms"; /** * Comma-delimited Kafka Zookeeper quorum list. */ public static final String KAFKA_ZOOKEEPERS = "zookeeper.connect"; /** * Comma-delimited list of Kafka brokers, for producer bootstrapping purposes. */ public static final String KAFKA_BROKERS = "metadata.broker.list"; /** * Default number of brokers in the Kafka cluster. */ public static final int DEFAULT_BROKERS = 1; /** * Default number of partitions per Kafka topic. */ public static final int PARTITIONS_PER_TOPIC = 4; private List<KafkaConfig> brokerConfigs; private List<KafkaServer> brokers; private File clientConfig; private boolean setUp; private boolean tornDown; /** * Creates a new Kafka broker test harness using the {@link #DEFAULT_BROKERS default} number of brokers. */ public KafkaBrokerTestHarness() { this(DEFAULT_BROKERS, KafkaTestUtils.getPorts(1)[0]); } /** * Creates a new Kafka broker test harness using the {@link #DEFAULT_BROKERS default} number of brokers and the supplied * {@link Properties} which will be applied to the brokers. * * @param properties * the additional {@link Properties} supplied to the brokers * @throws IllegalArgumentException * if {@code properties} is {@code null} */ public KafkaBrokerTestHarness(Properties properties) { this(DEFAULT_BROKERS, KafkaTestUtils.getPorts(1)[0], properties); } /** * Creates a new Kafka broker test harness using the given number of brokers and Zookeeper port. * * @param brokers Number of Kafka brokers to start up. * @param zookeeperPort The port number to use for Zookeeper client connections. 
* * @throws IllegalArgumentException if {@code brokers} is less than 1. */ public KafkaBrokerTestHarness(int brokers, int zookeeperPort) { this(getBrokerConfig(brokers, zookeeperPort), zookeeperPort); } /** * Creates a new Kafka broker test harness using the given number of brokers and Zookeeper port. * * @param brokers * Number of Kafka brokers to start up. * @param zookeeperPort * The port number to use for Zookeeper client connections. * @param properties * the additional {@link Properties} supplied to the brokers * * @throws IllegalArgumentException * if {@code brokers} is less than 1 or if {@code baseProperties} is {@code null} */ public KafkaBrokerTestHarness(int brokers, int zookeeperPort, Properties properties) { this(getBrokerConfig(brokers, zookeeperPort, properties), zookeeperPort); } /** * Creates a new Kafka broker test harness using the given broker configuration properties and Zookeeper port. * * @param brokerConfigs List of Kafka broker configurations. * @param zookeeperPort The port number to use for Zookeeper client connections. * * @throws IllegalArgumentException if {@code brokerConfigs} is {@code null} or empty. */ public KafkaBrokerTestHarness(List<KafkaConfig> brokerConfigs, int zookeeperPort) { super(zookeeperPort); if (brokerConfigs == null || brokerConfigs.isEmpty()) { throw new IllegalArgumentException("Must supply at least one broker configuration."); } this.brokerConfigs = brokerConfigs; this.brokers = null; this.setUp = false; this.tornDown = false; } /** * Start up the Kafka broker cluster. * * @throws IOException if an error occurs during Kafka broker startup. * @throws IllegalStateException if the Kafka broker cluster has already been {@link #setUp() setup}. */ @Override public void setUp() throws IOException { if (setUp) { throw new IllegalStateException("Already setup, cannot setup again"); } setUp = true; // Start up zookeeper. super.setUp(); brokers = new ArrayList<KafkaServer>(brokerConfigs.size()); for (KafkaConfig config : brokerConfigs) { brokers.add(startBroker(config)); } // Write out Kafka client config to a temp file. clientConfig = new File(KafkaTestUtils.getTempDir(), "kafka-config.xml"); FileWriter writer = new FileWriter(clientConfig); writer.append("<configuration>"); for (String prop : Arrays.asList(KAFKA_BROKERS, KAFKA_ZOOKEEPERS)) { writer.append("<property>"); writer.append("<name>").append(prop).append("</name>"); writer.append("<value>").append(getProps().getProperty(prop)).append("</value>"); writer.append("</property>"); } writer.append("</configuration>"); writer.close(); } /** * Shutdown the Kafka broker cluster. Attempting to {@link #setUp()} a cluster again after calling this method is not allowed; * a new {@code KafkaBrokerTestHarness} must be created instead. * * @throws IllegalStateException if the Kafka broker cluster has already been {@link #tearDown() torn down} or has not been * {@link #setUp()}. */ @Override public void tearDown() throws IOException { if (!setUp) { throw new IllegalStateException("Not set up, cannot tear down"); } if (tornDown) { throw new IllegalStateException("Already torn down, cannot tear down again"); } tornDown = true; for (KafkaServer broker : brokers) { broker.shutdown(); } for (KafkaServer broker : brokers) { for (String logDir : asJavaIterable(broker.config().logDirs())) { FileUtils.deleteDirectory(new File(logDir)); } } // Shutdown zookeeper super.tearDown(); } /** * Returns properties for a Kafka producer. * * @return Producer properties. 
*/ public Properties getProducerProps() { StringBuilder brokers = new StringBuilder(); for (int i = 0; i < brokerConfigs.size(); ++i) { KafkaConfig config = brokerConfigs.get(i); brokers.append((i > 0) ? "," : "").append(config.hostName()).append(":").append(config.port()); } Properties props = new Properties(); props.setProperty(KAFKA_BROKERS, brokers.toString()); props.setProperty(KAFKA_PRODUCER_ACK_TIMEOUT_MILLIS, "10000"); // These two properties below are increased from their defaults to help with the case that auto.create.topics.enable is // disabled and a test tries to create a topic and immediately write to it props.setProperty(KAFKA_PRODUCER_RETRY_INTERVAL_MILLIS, Integer.toString(500)); props.setProperty(KAFKA_PRODUCER_RETRY_MAX, Integer.toString(10)); return props; } /** * Returns properties for a Kafka consumer. * * @return Consumer properties. */ public Properties getConsumerProps() { Properties props = new Properties(); props.setProperty(KAFKA_ZOOKEEPERS, zookeeperConnect); return props; } /** * Returns properties for either a Kafka producer or consumer. * * @return Combined producer and consumer properties. */ public Properties getProps() { // Combine producer and consumer properties. Properties props = getProducerProps(); props.putAll(getConsumerProps()); return props; } /** * Returns configuration properties for each Kafka broker in the cluster. * * @return Broker properties. */ public List<Properties> getBrokerProps() { List<Properties> props = new ArrayList<Properties>(brokers.size()); for (KafkaServer broker : brokers) { Properties prop = new Properties(); prop.putAll(broker.config().props()); props.add(prop); } return props; } /** * Creates a collection of Kafka Broker configurations based on the number of brokers and zookeeper. * @param brokers the number of brokers to create configuration for. * @param zookeeperPort the zookeeper port for the brokers to connect to. * @return configuration for a collection of brokers. * @throws IllegalArgumentException if {@code brokers} is less than 1 */ public static List<KafkaConfig> getBrokerConfig(int brokers, int zookeeperPort) { return getBrokerConfig(brokers, zookeeperPort, new Properties()); } /** * Creates a collection of Kafka Broker configurations based on the number of brokers and zookeeper. * @param brokers the number of brokers to create configuration for. * @param zookeeperPort the zookeeper port for the brokers to connect to. * @param baseProperties basic properties that should be applied for each broker config. These properties will be * honored in favor of any default properties. * @return configuration for a collection of brokers. * @throws IllegalArgumentException if {@code brokers} is less than 1 or {@code baseProperties} is {@code null}. 
*/ public static List<KafkaConfig> getBrokerConfig(int brokers, int zookeeperPort, Properties baseProperties) { if (brokers < 1) { throw new IllegalArgumentException("Invalid broker count: " + brokers); } if (baseProperties == null) { throw new IllegalArgumentException("The 'baseProperties' cannot be 'null'."); } int ports[] = KafkaTestUtils.getPorts(brokers); List<KafkaConfig> configs = new ArrayList<KafkaConfig>(brokers); for (int i = 0; i < brokers; ++i) { Properties props = new Properties(); props.setProperty(KAFKA_ZOOKEEPERS, "localhost:" + zookeeperPort); props.setProperty("broker.id", String.valueOf(i + 1)); props.setProperty("host.name", "localhost"); props.setProperty("port", String.valueOf(ports[i])); props.setProperty("log.dir", KafkaTestUtils.getTempDir().getAbsolutePath()); props.setProperty("log.flush.interval.messages", String.valueOf(1)); props.setProperty("num.partitions", String.valueOf(PARTITIONS_PER_TOPIC)); props.setProperty("default.replication.factor", String.valueOf(brokers)); props.setProperty("auto.create.topics.enable", Boolean.FALSE.toString()); props.setProperty("offsets.topic.replication.factor", String.valueOf(brokers)); props.putAll(baseProperties); configs.add(new KafkaConfig(props)); } return configs; } /** * Returns location of Kafka client configuration file containing broker and zookeeper connection properties. * <p> * This file can be loaded using the {@code -conf} command option to easily achieve Kafka connectivity. * </p> * * @return Kafka client configuration file path */ public String getClientConfigPath() { return clientConfig.getAbsolutePath(); } private static KafkaServer startBroker(KafkaConfig config) { KafkaServer server = new KafkaServer(config, new SystemTime(), Option.<String>empty(), JavaConversions.asScalaBuffer(Collections.<KafkaMetricsReporter>emptyList())); server.startup(); return server; } private static class SystemTime implements Time { @Override public long milliseconds() { return System.currentTimeMillis(); } @Override public long hiResClockMs() { return System.currentTimeMillis(); } @Override public long nanoseconds() { return System.nanoTime(); } @Override public void sleep(long ms) { try { Thread.sleep(ms); } catch (InterruptedException e) { // Ignore } } } }
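The harness above is driven entirely through setUp()/tearDown() and its property accessors. The following is a minimal JUnit 4 sketch of how it might be wired into an integration test; the test class name and the assertion are illustrative and not part of the Crunch code base.

import static org.junit.Assert.assertNotNull;

import java.util.Properties;

import org.apache.crunch.kafka.utils.KafkaBrokerTestHarness;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class KafkaBrokerTestHarnessExampleIT {

  private KafkaBrokerTestHarness harness;

  @Before
  public void startCluster() throws Exception {
    harness = new KafkaBrokerTestHarness();   // one broker on a free port, plus embedded Zookeeper
    harness.setUp();
  }

  @After
  public void stopCluster() throws Exception {
    harness.tearDown();                       // stops the brokers and deletes their log directories
  }

  @Test
  public void exposesConnectionInfo() {
    // getProps() merges the producer and consumer properties written by the harness.
    Properties props = harness.getProps();
    assertNotNull(props.getProperty(KafkaBrokerTestHarness.KAFKA_BROKERS));
    assertNotNull(props.getProperty(KafkaBrokerTestHarness.KAFKA_ZOOKEEPERS));
  }
}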
2,325
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/utils/ZookeeperTestHarness.java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.utils; import kafka.utils.ZkUtils; import org.I0Itec.zkclient.ZkClient; import org.I0Itec.zkclient.ZkConnection; import java.io.IOException; /** * A test harness that brings up an embedded Zookeeper instance. * <p> * Adapted from the {@code kafka.zk.ZooKeeperTestHarness} class. * </p> */ public class ZookeeperTestHarness { /** * Zookeeper connection info. */ protected final String zookeeperConnect; private EmbeddedZookeeper zookeeper; private final int zkConnectionTimeout; private final int zkSessionTimeout; /** * Zookeeper client connection. */ protected ZkUtils zkUtils; /** * Creates a new Zookeeper broker test harness. */ public ZookeeperTestHarness() { this(KafkaTestUtils.getPorts(1)[0]); } /** * Creates a new Zookeeper service test harness using the given port. * * @param zookeeperPort The port number to use for Zookeeper client connections. */ public ZookeeperTestHarness(int zookeeperPort) { this.zookeeper = null; this.zkUtils = null; this.zkConnectionTimeout = 6000; this.zkSessionTimeout = 6000; this.zookeeperConnect = "localhost:" + zookeeperPort; } /** * Returns a client for communicating with the Zookeeper service. * * @return A Zookeeper client. * * @throws IllegalStateException * if Zookeeper has not yet been {@link #setUp()}, or has already been {@link #tearDown() torn down}. */ public ZkClient getZkClient() { if (zkUtils == null) { throw new IllegalStateException("Zookeeper service is not active"); } return zkUtils.zkClient(); } public ZkUtils getZkUtils() { return zkUtils; } /** * Startup Zookeeper. * * @throws IOException if an error occurs during Zookeeper initialization. */ public void setUp() throws IOException { zookeeper = new EmbeddedZookeeper(zookeeperConnect); ZkClient zkClient = new ZkClient(zookeeperConnect, zkSessionTimeout, zkConnectionTimeout, new ZkStringSerializer()); ZkConnection connection = new ZkConnection(zookeeperConnect, zkSessionTimeout); zkUtils = new ZkUtils(zkClient, connection, false); } /** * Shutdown Zookeeper. */ public void tearDown() throws IOException { if (zkUtils != null) { zkUtils.close(); zkUtils = null; } if (zookeeper != null) { zookeeper.shutdown(); zookeeper = null; } } }
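As a rough usage sketch (the znode path below is made up for illustration), the Zookeeper harness follows the same setUp/tearDown lifecycle and hands back a ZkClient for ad hoc operations:

import org.apache.crunch.kafka.utils.ZookeeperTestHarness;

public class ZookeeperTestHarnessExample {

  public static void main(String[] args) throws Exception {
    ZookeeperTestHarness zk = new ZookeeperTestHarness();   // embedded Zookeeper on a free port
    zk.setUp();
    try {
      // getZkClient() throws IllegalStateException if setUp() has not run yet.
      zk.getZkClient().createPersistent("/crunch-example");
      System.out.println("znode exists: " + zk.getZkClient().exists("/crunch-example"));
    } finally {
      zk.tearDown();                                        // closes the client and stops Zookeeper
    }
  }
}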
2,326
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/offset
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/offset/hdfs/OffsetsTest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.offset.hdfs; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.commons.io.output.ByteArrayOutputStream; import org.apache.kafka.common.requests.ListOffsetRequest; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestName; import java.io.IOException; import java.util.Collections; import java.util.LinkedList; import java.util.List; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; public class OffsetsTest { @Rule public TestName testName = new TestName(); private static ObjectMapper mapper; @BeforeClass public static void setup() { mapper = new ObjectMapper(); } @Test(expected = IllegalArgumentException.class) public void buildOffsetNullOffsets() { Offsets.Builder.newBuilder().setOffsets(null); } @Test(expected = IllegalArgumentException.class) public void buildInvalidAsOfTime() { Offsets.Builder.newBuilder().setAsOfTime(-1); } @Test(expected = IllegalStateException.class) public void buildNoAsOfTime() { Offsets.Builder.newBuilder().build(); } @Test(expected = IllegalArgumentException.class) public void buildPartitionNullTopic() { Offsets.PartitionOffset.Builder.newBuilder().setTopic(null); } @Test(expected = IllegalArgumentException.class) public void buildPartitionEmptyTopic() { Offsets.PartitionOffset.Builder.newBuilder().setTopic(" "); } @Test(expected = IllegalArgumentException.class) public void buildPartitionInvalidPartition() { Offsets.PartitionOffset.Builder.newBuilder().setPartition(-1); } @Test(expected = IllegalStateException.class) public void buildPartitionNoTopicSet() { Offsets.PartitionOffset.Builder.newBuilder().setPartition(10).setOffset(10L).build(); } @Test(expected = IllegalStateException.class) public void buildPartitionNoPartitionSet() { Offsets.PartitionOffset.Builder.newBuilder().setTopic(testName.getMethodName()).setOffset(10L).build(); } @Test public void buildPartitionOffset() { Offsets.PartitionOffset partitionOffset = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(10L).setPartition(1).build(); assertThat(partitionOffset.getOffset(), is(10L)); assertThat(partitionOffset.getPartition(), is(1)); assertThat(partitionOffset.getTopic(), is(testName.getMethodName())); } @Test public void buildPartitionOffsetNoOffsetSet() { Offsets.PartitionOffset partitionOffset = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setPartition(1).build(); assertThat(partitionOffset.getOffset(), is(ListOffsetRequest.EARLIEST_TIMESTAMP)); assertThat(partitionOffset.getPartition(), is(1)); 
assertThat(partitionOffset.getTopic(), is(testName.getMethodName())); } @Test public void partitionOffsetSame() { Offsets.PartitionOffset partitionOffset = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(10L).setPartition(1).build(); assertThat(partitionOffset.equals(partitionOffset), is(true)); assertThat(partitionOffset.compareTo(partitionOffset), is(0)); } @Test public void partitionOffsetEqual() { Offsets.PartitionOffset partitionOffset1 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(10L).setPartition(1).build(); Offsets.PartitionOffset partitionOffset2 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(10L).setPartition(1).build(); assertThat(partitionOffset1.equals(partitionOffset2), is(true)); assertThat(partitionOffset1.compareTo(partitionOffset2), is(0)); } @Test public void partitionOffsetNotEqualDiffTopic() { Offsets.PartitionOffset partitionOffset1 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic("abc").setOffset(10L).setPartition(1).build(); Offsets.PartitionOffset partitionOffset2 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(10L).setPartition(1).build(); assertThat(partitionOffset1.equals(partitionOffset2), is(false)); assertThat(partitionOffset1.compareTo(partitionOffset2), is(lessThan(0))); } @Test public void partitionOffsetNotEqualDiffPartition() { Offsets.PartitionOffset partitionOffset1 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(10L).setPartition(0).build(); Offsets.PartitionOffset partitionOffset2 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(10L).setPartition(1).build(); assertThat(partitionOffset1.equals(partitionOffset2), is(false)); assertThat(partitionOffset1.compareTo(partitionOffset2), is(lessThan(0))); } @Test public void partitionOffsetNotEqualDiffOffset() { Offsets.PartitionOffset partitionOffset1 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(9L).setPartition(1).build(); Offsets.PartitionOffset partitionOffset2 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(10L).setPartition(1).build(); assertThat(partitionOffset1.equals(partitionOffset2), is(false)); assertThat(partitionOffset1.compareTo(partitionOffset2), is(lessThan(0))); } @Test public void partitionOffsetNotEqualDiffGreaterTopic() { Offsets.PartitionOffset partitionOffset1 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(10L).setPartition(1).build(); Offsets.PartitionOffset partitionOffset2 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic("abc").setOffset(10L).setPartition(1).build(); assertThat(partitionOffset1.equals(partitionOffset2), is(false)); assertThat(partitionOffset1.compareTo(partitionOffset2), is(greaterThan(0))); } @Test public void partitionOffsetNotEqualDiffGreaterPartition() { Offsets.PartitionOffset partitionOffset1 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(10L).setPartition(2).build(); Offsets.PartitionOffset partitionOffset2 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(10L).setPartition(1).build(); assertThat(partitionOffset1.equals(partitionOffset2), is(false)); assertThat(partitionOffset1.compareTo(partitionOffset2), is(greaterThan(0))); } @Test public void 
partitionOffsetNotEqualDiffGreaterOffset() { Offsets.PartitionOffset partitionOffset1 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(12L).setPartition(1).build(); Offsets.PartitionOffset partitionOffset2 = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(10L).setPartition(1).build(); assertThat(partitionOffset1.equals(partitionOffset2), is(false)); assertThat(partitionOffset1.compareTo(partitionOffset2), is(greaterThan(0))); } @Test public void jsonSerializationPartitionOffset() throws IOException { Offsets.PartitionOffset partitionOffset = Offsets.PartitionOffset.Builder.newBuilder() .setTopic(testName.getMethodName()).setOffset(12L).setPartition(1).build(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); mapper.writeValue(baos, partitionOffset); Offsets.PartitionOffset readOffset = mapper.readValue(baos.toByteArray(), Offsets.PartitionOffset.class); assertThat(readOffset, is(partitionOffset)); } @Test public void buildOffsetsNoOffsets() { Offsets offsets = Offsets.Builder.newBuilder().setAsOfTime(10).build(); assertThat(offsets.getAsOfTime(), is(10L)); assertThat(offsets.getOffsets(), is(Collections.<Offsets.PartitionOffset>emptyList())); } @Test public void buildOffsetsSortOffsets() { int partition = 0; long offset = 10L; List<Offsets.PartitionOffset> reversedOffsets = new LinkedList<>(); for(int i = 0; i < 9; i++){ reversedOffsets.add(Offsets.PartitionOffset.Builder.newBuilder().setTopic("topic" + (9 - i)) .setPartition(partition).setOffset(offset).build()); } Offsets offsets = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(reversedOffsets).build(); assertThat(offsets.getAsOfTime(), is(10L)); List<Offsets.PartitionOffset> returnedOffsets = offsets.getOffsets(); int count = 1; //iterate in the expected order for (Offsets.PartitionOffset o : returnedOffsets) { assertThat(o.getTopic(), is("topic" + count)); assertThat(o.getPartition(), is(partition)); assertThat(o.getOffset(), is(offset)); count++; } assertThat(count, is(10)); } @Test public void offsetsSame() { int partition = 0; long offset = 10L; List<Offsets.PartitionOffset> reversedOffsets = new LinkedList<>(); for(int i = 0; i < 9; i++){ reversedOffsets.add(Offsets.PartitionOffset.Builder.newBuilder().setTopic("topic" + (9 - i)) .setPartition(partition).setOffset(offset).build()); } Offsets offsets = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(reversedOffsets).build(); assertThat(offsets.getAsOfTime(), is(10L)); assertThat(offsets.equals(offsets), is(true)); } @Test public void offsetsEqual() { int partition = 0; long offset = 10L; List<Offsets.PartitionOffset> reversedOffsets = new LinkedList<>(); for(int i = 0; i < 9; i++){ reversedOffsets.add(Offsets.PartitionOffset.Builder.newBuilder().setTopic("topic" + (9 - i)) .setPartition(partition).setOffset(offset).build()); } Offsets offsets = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(reversedOffsets).build(); assertThat(offsets.getAsOfTime(), is(10L)); Offsets offsets2 = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(reversedOffsets).build(); assertThat(offsets.getAsOfTime(), is(10L)); assertThat(offsets.equals(offsets2), is(true)); } @Test public void offsetsDiffAsOfTime() { int partition = 0; long offset = 10L; List<Offsets.PartitionOffset> reversedOffsets = new LinkedList<>(); for(int i = 0; i < 9; i++){ reversedOffsets.add(Offsets.PartitionOffset.Builder.newBuilder().setTopic("topic" + (9 - i)) 
.setPartition(partition).setOffset(offset).build()); } Offsets offsets = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(reversedOffsets).build(); assertThat(offsets.getAsOfTime(), is(10L)); Offsets offsets2 = Offsets.Builder.newBuilder().setAsOfTime(11).setOffsets(reversedOffsets).build(); assertThat(offsets.getAsOfTime(), is(10L)); assertThat(offsets.equals(offsets2), is(false)); } @Test public void offsetsDiffOffsets() { int partition = 0; long offset = 10L; List<Offsets.PartitionOffset> reversedOffsets = new LinkedList<>(); for(int i = 0; i < 9; i++){ reversedOffsets.add(Offsets.PartitionOffset.Builder.newBuilder().setTopic("topic" + (9 - i)) .setPartition(partition).setOffset(offset).build()); } List<Offsets.PartitionOffset> secondOffsets = new LinkedList<>(); for(int i = 0; i < 5; i++){ secondOffsets.add(Offsets.PartitionOffset.Builder.newBuilder().setTopic("topic" + (9 - i)) .setPartition(partition).setOffset(offset).build()); } Offsets offsets = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(reversedOffsets).build(); assertThat(offsets.getAsOfTime(), is(10L)); Offsets offsets2 = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(secondOffsets).build(); assertThat(offsets.getAsOfTime(), is(10L)); assertThat(offsets.equals(offsets2), is(false)); } @Test(expected = IllegalStateException.class) public void offsetsDuplicates() { int partition = 0; long offset = 10L; List<Offsets.PartitionOffset> reversedOffsets = new LinkedList<>(); for(int i = 0; i < 9; i++){ reversedOffsets.add(Offsets.PartitionOffset.Builder.newBuilder().setTopic("topic" + (9 - i)) .setPartition(partition).setOffset(offset).build()); } reversedOffsets.add(Offsets.PartitionOffset.Builder.newBuilder().setTopic("topic9").setPartition(0).build()); Offsets offsets = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(reversedOffsets).build(); } @Test public void offsetsDiffListInstances() { int partition = 0; long offset = 10L; List<Offsets.PartitionOffset> reversedOffsets = new LinkedList<>(); for(int i = 0; i < 9; i++){ reversedOffsets.add(Offsets.PartitionOffset.Builder.newBuilder().setTopic("topic" + (9 - i)) .setPartition(partition).setOffset(offset).build()); } List<Offsets.PartitionOffset> secondOffsets = new LinkedList<>(); for(int i = 0; i < 9; i++){ secondOffsets.add(Offsets.PartitionOffset.Builder.newBuilder().setTopic("topic" + (9 - i)) .setPartition(partition).setOffset(offset).build()); } Offsets offsets = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(reversedOffsets).build(); assertThat(offsets.getAsOfTime(), is(10L)); Offsets offsets2 = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(secondOffsets).build(); assertThat(offsets.getAsOfTime(), is(10L)); assertThat(offsets.equals(offsets2), is(true)); } @Test public void offsetsEqualEmptyOffsets() { int partition = 0; long offset = 10L; List<Offsets.PartitionOffset> reversedOffsets = new LinkedList<>(); List<Offsets.PartitionOffset> secondOffsets = new LinkedList<>(); Offsets offsets = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(reversedOffsets).build(); assertThat(offsets.getAsOfTime(), is(10L)); Offsets offsets2 = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(secondOffsets).build(); assertThat(offsets.getAsOfTime(), is(10L)); assertThat(offsets.equals(offsets2), is(true)); } @Test public void jsonSerializationOffsets() throws IOException { int partition = 0; long offset = 10L; List<Offsets.PartitionOffset> partitionOffsets = new LinkedList<>(); for(int i = 0; i < 100; i++){ 
partitionOffsets.add(Offsets.PartitionOffset.Builder.newBuilder().setTopic("topic" + i) .setPartition(partition).setOffset(offset).build()); } Offsets offsets = Offsets.Builder.newBuilder().setAsOfTime(10).setOffsets(partitionOffsets).build(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); mapper.writeValue(baos, offsets); Offsets readOffsets = mapper.readValue(baos.toByteArray(), Offsets.class); assertThat(readOffsets, is(offsets)); } @Test public void jsonSerializationOffsetsEmpty() throws IOException { int partition = 0; long offset = 10L; Offsets offsets = Offsets.Builder.newBuilder().setAsOfTime(10) .setOffsets(Collections.<Offsets.PartitionOffset>emptyList()).build(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); mapper.writeValue(baos, offsets); Offsets readOffsets = mapper.readValue(baos.toByteArray(), Offsets.class); assertThat(readOffsets, is(offsets)); } }
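The tests above exercise the Offsets builders and their Jackson round trip; a compact sketch of the same API, with made-up topic names and offset values, looks like this:

import java.util.Arrays;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.crunch.kafka.offset.hdfs.Offsets;

public class OffsetsExample {

  public static void main(String[] args) throws Exception {
    Offsets.PartitionOffset p0 = Offsets.PartitionOffset.Builder.newBuilder()
        .setTopic("events").setPartition(0).setOffset(42L).build();
    Offsets.PartitionOffset p1 = Offsets.PartitionOffset.Builder.newBuilder()
        .setTopic("events").setPartition(1).setOffset(17L).build();

    // build() sorts the partition offsets and rejects duplicates, as the tests above demonstrate.
    Offsets offsets = Offsets.Builder.newBuilder()
        .setAsOfTime(System.currentTimeMillis())
        .setOffsets(Arrays.asList(p1, p0))
        .build();

    // The same ObjectMapper round trip the tests rely on.
    ObjectMapper mapper = new ObjectMapper();
    String json = mapper.writeValueAsString(offsets);
    Offsets parsed = mapper.readValue(json, Offsets.class);
    System.out.println(parsed.getOffsets().size() + " partition offsets as of " + parsed.getAsOfTime());
  }
}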
2,327
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/offset
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/offset/hdfs/HDFSOffsetReaderTest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.offset.hdfs; import org.apache.crunch.kafka.offset.OffsetReader; import org.apache.crunch.kafka.offset.OffsetWriter; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.kafka.common.TopicPartition; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import org.junit.rules.TestName; import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; public class HDFSOffsetReaderTest { @Rule public TemporaryFolder tempFolder = new TemporaryFolder(); @Rule public TestName testName = new TestName(); private Path basePath; private FileSystem fileSystem; private OffsetWriter writer; private OffsetReader reader; @Before public void setup() throws IOException { Configuration config = new Configuration(); config.set(FileSystem.DEFAULT_FS, tempFolder.newFolder().getAbsolutePath()); fileSystem = FileSystem.newInstance(config); basePath = new Path(tempFolder.newFolder().toString(), testName.getMethodName()); writer = new HDFSOffsetWriter(config, basePath); reader = new HDFSOffsetReader(config, basePath); } @After public void cleanup() throws IOException { writer.close(); reader.close(); fileSystem.close(); } @Test(expected = IllegalArgumentException.class) public void constructNullConfig() { new HDFSOffsetReader(null, new Path("/")); } @Test(expected = IllegalArgumentException.class) public void constructNullPath() { new HDFSOffsetReader(new Configuration(), null); } @Test public void getStoredOffsetPersistenceTimesNoValues() throws IOException { List<Long> storedOffsetPersistenceTimes = reader.getStoredOffsetPersistenceTimes(); assertThat(storedOffsetPersistenceTimes, is(Collections.<Long>emptyList())); } @Test public void getStoredOffsetPersistenceTimesMultipleValues() throws IOException { long current = 1464992662000L; List<Long> persistedTimes = new LinkedList<>(); for (int i = 0; i < 10; i++) { persistedTimes.add(current + (i * 18000)); } for (Long t : persistedTimes) { try { writer.write(t, Collections.<TopicPartition, Long>emptyMap()); } catch (IOException e) { e.printStackTrace(); } } List<Long> storedTimes = reader.getStoredOffsetPersistenceTimes(); assertThat(storedTimes, is(persistedTimes)); } @Test public void readOffsetNoMatchForTime() throws IOException { Map<TopicPartition, Long> offsets = reader.readOffsets(12345L); assertThat(offsets, is(nullValue())); } @Test public void 
readOffsetLatestNone() throws IOException { assertThat(reader.readLatestOffsets(), is(Collections.<TopicPartition, Long>emptyMap())); } @Test public void readOffsetLatest() throws IOException { long current = 1464992662000L; List<Long> persistedTimes = new LinkedList<>(); for (int i = 0; i < 10; i++) { persistedTimes.add(current + (i * 18000)); } for (Long t : persistedTimes) { try { writer.write(t, Collections.<TopicPartition, Long>emptyMap()); } catch (IOException e) { e.printStackTrace(); } } long expectedTime = persistedTimes.get(persistedTimes.size() - 1); Map<TopicPartition, Long> offsets = new HashMap<>(); for (int i = 0; i < 9; i++) { for (int j = 0; j < 5; j++) { offsets.put(new TopicPartition("topic" + i, j), (long) j); } } writer.write(expectedTime, offsets); Map<TopicPartition, Long> retrievedOffsets = reader.readLatestOffsets(); assertThat(retrievedOffsets, is(offsets)); } @Test public void readOffsetForTime() throws IOException { long current = 1464992662000L; List<Long> persistedTimes = new LinkedList<>(); for (int i = 0; i < 10; i++) { persistedTimes.add(current + (i * 18000)); } for (Long t : persistedTimes) { try { writer.write(t, Collections.<TopicPartition, Long>emptyMap()); } catch (IOException e) { e.printStackTrace(); } } long expectedTime = persistedTimes.get(2); Map<TopicPartition, Long> offsets = new HashMap<>(); for (int i = 0; i < 9; i++) { for (int j = 0; j < 5; j++) { offsets.put(new TopicPartition("topic" + i, j), (long) j); } } writer.write(expectedTime, offsets); Map<TopicPartition, Long> retrievedOffsets = reader.readOffsets(expectedTime); assertThat(retrievedOffsets, is(offsets)); } @Test public void skipReadingDirectory() throws IOException { long current = 1464992662000L; List<Long> persistedTimes = new LinkedList<>(); for (int i = 0; i < 10; i++) { persistedTimes.add(current + (i * 18000)); } for (Long t : persistedTimes) { try { writer.write(t, Collections.<TopicPartition, Long>emptyMap()); } catch (IOException e) { e.printStackTrace(); } } fileSystem.mkdirs(new Path(basePath, "imadirectory")); List<Long> storedTimes = reader.getStoredOffsetPersistenceTimes(); assertThat(storedTimes, is(persistedTimes)); } @Test public void skipInvalidFile() throws IOException { long current = 1464992662000L; List<Long> persistedTimes = new LinkedList<>(); for (int i = 0; i < 10; i++) { persistedTimes.add(current + (i * 18000)); } for (Long t : persistedTimes) { try { writer.write(t, Collections.<TopicPartition, Long>emptyMap()); } catch (IOException e) { e.printStackTrace(); } } fileSystem.createNewFile(new Path(basePath, "imabadfile.json")); fileSystem.createNewFile(new Path(basePath, "imabadfile.txt")); List<Long> storedTimes = reader.getStoredOffsetPersistenceTimes(); assertThat(storedTimes, is(persistedTimes)); } }
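Outside of the test fixture, the reader and writer pair up into a simple persist/lookup flow. Below is a sketch under assumed values; the base path and topic name are illustrative only.

import java.util.Collections;
import java.util.Map;

import org.apache.crunch.kafka.offset.hdfs.HDFSOffsetReader;
import org.apache.crunch.kafka.offset.hdfs.HDFSOffsetWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.kafka.common.TopicPartition;

public class OffsetStoreExample {

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path basePath = new Path("/tmp/crunch-kafka-offsets");          // illustrative storage location

    HDFSOffsetWriter writer = new HDFSOffsetWriter(conf, basePath);
    HDFSOffsetReader reader = new HDFSOffsetReader(conf, basePath);
    try {
      // Persist the offsets observed at this wall-clock time.
      long asOfTime = System.currentTimeMillis();
      writer.write(asOfTime, Collections.singletonMap(new TopicPartition("events", 0), 42L));

      // Later (possibly in another process) read the most recently persisted offsets back.
      Map<TopicPartition, Long> latest = reader.readLatestOffsets();
      System.out.println("latest offsets: " + latest);
    } finally {
      writer.close();
      reader.close();
    }
  }
}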
2,328
0
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/offset
Create_ds/crunch/crunch-kafka/src/test/java/org/apache/crunch/kafka/offset/hdfs/HDFSOffsetWriterTest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.offset.hdfs; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.kafka.common.TopicPartition; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import org.junit.rules.TestName; import java.io.IOException; import java.io.InputStream; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import static junit.framework.TestCase.assertTrue; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; public class HDFSOffsetWriterTest { private static final ObjectMapper MAPPER = new ObjectMapper(); @Rule public TemporaryFolder tempFolder = new TemporaryFolder(); @Rule public TestName testName = new TestName(); private Configuration config; private Path basePath; private FileSystem fileSystem; private HDFSOffsetWriter writer; @Before public void setup() throws IOException { config = new Configuration(); config.set(FileSystem.DEFAULT_FS, tempFolder.newFolder().getAbsolutePath()); fileSystem = FileSystem.newInstance(config); basePath = new Path(tempFolder.newFolder().toString(), testName.getMethodName()); writer = new HDFSOffsetWriter(config, basePath); } @After public void cleanup() throws IOException { writer.close(); fileSystem.close(); } @Test(expected = IllegalArgumentException.class) public void constructNullConfig() { new HDFSOffsetWriter(null, new Path("/")); } @Test(expected = IllegalArgumentException.class) public void constructNullPath() { new HDFSOffsetWriter(new Configuration(), null); } @Test(expected = IllegalArgumentException.class) public void writeNullOffsets() throws IOException { writer.write(10L, null); } @Test(expected = IllegalArgumentException.class) public void writeNullInvalidAsOfTime() throws IOException { writer.write(-1L, Collections.<TopicPartition, Long>emptyMap()); } @Test public void writeEmptyOffsets() throws IOException { long persistTime = System.currentTimeMillis(); Map<TopicPartition, Long> offsets = Collections.emptyMap(); writer.write(persistTime, offsets); Path expectedPath = HDFSOffsetWriter.getPersistedTimeStoragePath(basePath, persistTime); try (InputStream in = fileSystem.open(expectedPath)) { Offsets persistedOffsets = MAPPER.readValue(in, Offsets.class); assertThat(persistedOffsets.getAsOfTime(), is(persistTime)); assertThat(persistedOffsets.getOffsets(), is(Collections.<Offsets.PartitionOffset>emptyList())); } } @Test public void writeOffsets() throws IOException { long persistTime = System.currentTimeMillis(); Map<TopicPartition, Long> offsets = new 
HashMap<>(); for (int i = 0; i < 9; i++) { for (int j = 0; j < 5; j++) { offsets.put(new TopicPartition("topic" + i, j), (long) j); } } writer.write(persistTime, offsets); Path expectedPath = HDFSOffsetWriter.getPersistedTimeStoragePath(basePath, persistTime); try (InputStream in = fileSystem.open(expectedPath)) { Offsets persistedOffsets = MAPPER.readValue(in, Offsets.class); assertThat(persistedOffsets.getAsOfTime(), is(persistTime)); assertThat(persistedOffsets.getOffsets().size(), is(offsets.size())); Iterator<Offsets.PartitionOffset> partitionOffsets = persistedOffsets.getOffsets().iterator(); for (int i = 0; i < 9; i++) { for (int j = 0; j < 5; j++) { assertTrue(partitionOffsets.hasNext()); Offsets.PartitionOffset partitionOffset = partitionOffsets.next(); assertThat(partitionOffset.getPartition(), is(j)); assertThat(partitionOffset.getOffset(), is((long) j)); assertThat(partitionOffset.getTopic(), is("topic" + i)); } } } } @Test(expected = IllegalArgumentException.class) public void getPersistedStoragePathNullBase() { HDFSOffsetWriter.getPersistedTimeStoragePath(null, 10L); } @Test public void getPersistedStoragePath() { //Timestamp of 02 Jun 2016 20:12:17 GMT //2016-06-02T20:12:17Z long timestamp = 1464898337000L; String expectedFileName = HDFSOffsetWriter.FILE_FORMATTER.print(timestamp) + HDFSOffsetWriter.FILE_FORMAT_EXTENSION; Path filePath = HDFSOffsetWriter.getPersistedTimeStoragePath(basePath, timestamp); assertThat(filePath, is(new Path(basePath, expectedFileName))); } @Test public void timeToFileName() { //Timestamp of 02 Jun 2016 20:12:17 GMT //2016-06-02T20:12:17Z long timestamp = 1464898337000L; String expectedFileName = "2016-06-02T20-12-17+0000" + HDFSOffsetWriter.FILE_FORMAT_EXTENSION; assertThat(HDFSOffsetWriter.persistenceTimeToFileName(timestamp), is(expectedFileName)); } @Test public void fileNameToTime() { //Timestamp of 02 Jun 2016 20:12:17 GMT //2016-06-02T20:12:17Z long timestamp = 1464898337000L; String expectedFileName = "2016-06-02T20-12-17+0000" + HDFSOffsetWriter.FILE_FORMAT_EXTENSION; assertThat(HDFSOffsetWriter.fileNameToPersistenceTime(expectedFileName), is(timestamp)); } @Test(expected = IllegalArgumentException.class) public void fileNameToTimeNullFileName() { HDFSOffsetWriter.fileNameToPersistenceTime(null); } @Test(expected = IllegalArgumentException.class) public void fileNameToTimeEmptyFileName() { HDFSOffsetWriter.fileNameToPersistenceTime(""); } @Test(expected = IllegalArgumentException.class) public void fileNameToTimeInvalidFileName() { HDFSOffsetWriter.fileNameToPersistenceTime("2016-06-02T20:12:17.000Z.json"); } }
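The last few tests pin down the file-naming convention; in code form the conversion is symmetric (the timestamp below is the one used by the tests):

import org.apache.crunch.kafka.offset.hdfs.HDFSOffsetWriter;

public class OffsetFileNameExample {

  public static void main(String[] args) {
    long persistedTime = 1464898337000L;    // 2016-06-02T20:12:17Z
    // Colons are not HDFS-friendly, so the formatter writes "2016-06-02T20-12-17+0000" plus the file extension.
    String fileName = HDFSOffsetWriter.persistenceTimeToFileName(persistedTime);
    long parsed = HDFSOffsetWriter.fileNameToPersistenceTime(fileName);
    System.out.println(fileName + " -> " + parsed);                 // parses back to the original millis
  }
}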
2,329
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/KafkaUtils.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.kafka;

import com.google.common.collect.ImmutableList;
import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * Simple utilities for retrieving offset and Kafka information to assist in setting up and configuring a
 * {@link org.apache.crunch.kafka.record.KafkaSource} instance.
 */
public class KafkaUtils {

  /**
   * Configuration property for the number of retry attempts that will be made to Kafka.
   */
  public static final String KAFKA_RETRY_ATTEMPTS_KEY = "org.apache.crunch.kafka.retry.attempts";

  /**
   * Default number of retry attempts.
   */
  public static final int KAFKA_RETRY_ATTEMPTS_DEFAULT = 120;

  public static final String KAFKA_RETRY_ATTEMPTS_DEFAULT_STRING = Integer.toString(KAFKA_RETRY_ATTEMPTS_DEFAULT);

  /**
   * Converts the provided {@code config} into a {@link Properties} object to connect with Kafka.
   *
   * @param config the config to read properties
   * @return a properties instance populated with all of the values inside the provided {@code config}.
   */
  public static Properties getKafkaConnectionProperties(Configuration config) {
    Properties props = new Properties();
    for (Map.Entry<String, String> value : config) {
      props.setProperty(value.getKey(), value.getValue());
    }
    return props;
  }

  /**
   * Adds the {@code properties} to the provided {@code config} instance.
   *
   * @param properties the properties to add to the config.
   * @param config the configuration instance to be modified.
   * @return the config instance with the populated properties
   */
  public static Configuration addKafkaConnectionProperties(Properties properties, Configuration config) {
    for (String name : properties.stringPropertyNames()) {
      config.set(name, properties.getProperty(name));
    }
    return config;
  }

  /**
   * Returns the {@link TopicPartition}s in a topic, returns an empty list if the topic does not exist.
   */
  public static List<TopicPartition> getTopicPartitions(Consumer<?, ?> kafkaConsumer, String topic) {
    List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(topic);
    // This conversion of null to empty list is consistent with https://issues.apache.org/jira/browse/KAFKA-2358
    if (partitionInfos == null) {
      return ImmutableList.of();
    } else {
      return partitionInfos.stream()
          .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
          .collect(Collectors.toList());
    }
  }
}
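A short sketch of how these helpers might be used together; the broker address and topic name are placeholders. Note that getKafkaConnectionProperties copies every entry of the Configuration, so the consumer may log warnings about unknown properties.

import java.util.List;
import java.util.Properties;

import org.apache.crunch.kafka.KafkaUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class KafkaUtilsExample {

  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092");   // placeholder broker
    props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

    // Round-trip the connection settings through a Hadoop Configuration, as a Crunch job would.
    Configuration conf = KafkaUtils.addKafkaConnectionProperties(props, new Configuration());
    Properties restored = KafkaUtils.getKafkaConnectionProperties(conf);

    try (Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(restored)) {
      List<TopicPartition> partitions = KafkaUtils.getTopicPartitions(consumer, "events");
      System.out.println("partitions for 'events': " + partitions);   // empty list if the topic is missing
    }
  }
}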
2,330
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/record/KafkaRecordsIterable.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.record; import org.apache.crunch.Pair; import org.apache.crunch.kafka.KafkaUtils; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.RetriableException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.Map; import java.util.NoSuchElementException; import java.util.Properties; import java.util.Set; class KafkaRecordsIterable<K, V> implements Iterable<ConsumerRecord<K, V>> { /** * Logger */ private static final Logger LOG = LoggerFactory.getLogger(KafkaRecordsIterable.class); /** * The Kafka consumer responsible for retrieving messages. */ private final Consumer<K, V> consumer; /** * The starting positions of the iterable for the topic. */ private final Map<TopicPartition, Pair<Long, Long>> offsets; /** * Tracks if the iterable is empty. */ private final boolean isEmpty; /** * The poll time between each request to Kafka */ private final long scanPollTime; private final int maxRetryAttempts; /** * Creates the iterable that will pull values for a collection of topics using the provided {@code consumer} between * the {@code startOffsets} and {@code stopOffsets}. * * @param consumer The consumer for pulling the data from Kafka. The consumer will be closed automatically once all * of the records have been consumed. * @param offsets offsets for pulling data * @param properties properties for tweaking the behavior of the iterable. * @throws IllegalArgumentException if any of the arguments are {@code null} or empty. */ KafkaRecordsIterable(Consumer<K, V> consumer, Map<TopicPartition, Pair<Long, Long>> offsets, Properties properties) { if (consumer == null) { throw new IllegalArgumentException("The 'consumer' cannot be 'null'."); } this.consumer = consumer; if (properties == null) { throw new IllegalArgumentException("The 'properties' cannot be 'null'."); } String retryString = properties .getProperty(KafkaUtils.KAFKA_RETRY_ATTEMPTS_KEY, KafkaUtils.KAFKA_RETRY_ATTEMPTS_DEFAULT_STRING); maxRetryAttempts = Integer.parseInt(retryString); if (offsets == null || offsets.isEmpty()) { throw new IllegalArgumentException("The 'offsets' cannot 'null' or empty."); } //filter out any topics and partitions that do not have offset ranges that will produce data. 
Map<TopicPartition, Pair<Long, Long>> filteredOffsets = new HashMap<>(); for (Map.Entry<TopicPartition, Pair<Long, Long>> entry : offsets.entrySet()) { Pair<Long, Long> value = entry.getValue(); //if start is less than one less than stop then there is data to be had if (value.first() < value.second()) { filteredOffsets.put(entry.getKey(), value); } else { LOG.debug("Removing offsets for {} because start is not less than the end offset.", entry.getKey()); } } //check to make sure that based on the offsets there is data to retrieve, otherwise false. //there will be data if the start offsets are less than stop offsets isEmpty = filteredOffsets.isEmpty(); if (isEmpty) { LOG.warn("Iterable for Kafka for is empty because offsets are empty."); } //assign this this.offsets = filteredOffsets; scanPollTime = Long.parseLong( properties.getProperty(KafkaSource.CONSUMER_POLL_TIMEOUT_KEY, Long.toString(KafkaSource.CONSUMER_POLL_TIMEOUT_DEFAULT))); } @Override public Iterator<ConsumerRecord<K, V>> iterator() { if (isEmpty) { LOG.debug("Returning empty iterator since offsets align."); return Collections.emptyIterator(); } //Assign consumer to all of the partitions LOG.debug("Assigning topics and partitions and seeking to start offsets."); consumer.assign(new LinkedList<>(offsets.keySet())); //hack so maybe look at removing this consumer.poll(0); for (Map.Entry<TopicPartition, Pair<Long, Long>> entry : offsets.entrySet()) { consumer.seek(entry.getKey(), entry.getValue().first()); } return new RecordsIterator<K, V>(consumer, offsets, scanPollTime, maxRetryAttempts); } private static class RecordsIterator<K, V> implements Iterator<ConsumerRecord<K, V>> { private final Consumer<K, V> consumer; private final Map<TopicPartition, Pair<Long, Long>> offsets; private final long pollTime; private final int maxNumAttempts; private ConsumerRecords<K, V> records; private Iterator<ConsumerRecord<K, V>> currentIterator; private final Set<TopicPartition> remainingPartitions; private ConsumerRecord<K, V> next; RecordsIterator(Consumer<K, V> consumer, Map<TopicPartition, Pair<Long, Long>> offsets, long pollTime, int maxNumRetries) { this.consumer = consumer; remainingPartitions = new HashSet<>(offsets.keySet()); this.offsets = offsets; this.pollTime = pollTime; this.maxNumAttempts = maxNumRetries; } @Override public boolean hasNext() { if (next != null) return true; //if partitions to consume then pull next value if (remainingPartitions.size() > 0) { next = getNext(); } return next != null; } @Override public ConsumerRecord<K, V> next() { if (next == null) { next = getNext(); } if (next != null) { ConsumerRecord<K, V> returnedNext = next; //prime for next call next = getNext(); //return the current next return returnedNext; } else { throw new NoSuchElementException("No more elements."); } } @Override public void remove() { throw new UnsupportedOperationException("remove is not supported."); } /** * Gets the current iterator. * * @return the current iterator or {@code null} if there are no more values to consume. */ private Iterator<ConsumerRecord<K, V>> getIterator() { if (!remainingPartitions.isEmpty()) { if (currentIterator != null && currentIterator.hasNext()) { return currentIterator; } LOG.debug("Retrieving next set of records."); int numTries = 0; boolean notSuccess = false; while (!notSuccess && numTries < maxNumAttempts) { try { records = consumer.poll(pollTime); notSuccess = true; } catch (RetriableException re) { numTries++; if (numTries < maxNumAttempts) { LOG.warn("Error pulling messages from Kafka. 
Retrying with attempt {}", numTries, re); } else { LOG.error("Error pulling messages from Kafka. Exceeded maximum number of attempts {}", maxNumAttempts, re); throw re; } } } if (records == null || records.isEmpty()) { LOG.debug("Retrieved empty records."); currentIterator = null; return null; } currentIterator = records.iterator(); return currentIterator; } LOG.debug("No more partitions to consume therefore not retrieving any more records."); return null; } /** * Internal method for retrieving the next value to retrieve. * * @return {@code null} if there are no more values to retrieve otherwise the next event. */ private ConsumerRecord<K, V> getNext() { while (!remainingPartitions.isEmpty()) { Iterator<ConsumerRecord<K, V>> iterator = getIterator(); while (iterator != null && iterator.hasNext()) { ConsumerRecord<K, V> record = iterator.next(); TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition()); long offset = record.offset(); if (withinRange(topicPartition, offset)) { LOG.debug("Retrieving value for {} with offset {}.", topicPartition, offset); return record; } LOG.debug("Value for {} with offset {} is outside of range skipping.", topicPartition, offset); } } LOG.debug("Closing the consumer because there are no more remaining partitions."); consumer.close(); LOG.debug("Consumed data from all partitions."); return null; } /** * Checks whether the value for {@code topicPartition} with an {@code offset} is within scan range. If * the value is not then {@code false} is returned otherwise {@code true}. * * @param topicPartion The partition for the offset * @param offset the offset in the partition * @return {@code true} if the value is within the expected consumption range, otherwise {@code false}. */ private boolean withinRange(TopicPartition topicPartion, long offset) { long endOffset = offsets.get(topicPartion).second(); //end offsets are one higher than the last written value. boolean emit = offset < endOffset; if (offset >= endOffset - 1) { if (LOG.isDebugEnabled()) { LOG.debug("Completed consuming partition {} with offset {} and ending offset {}.", new Object[] { topicPartion, offset, endOffset }); } remainingPartitions.remove(topicPartion); consumer.pause(Arrays.asList(topicPartion)); } LOG.debug("Value for partition {} and offset {} is within range.", topicPartion, offset); return emit; } } }
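KafkaRecordsIterable is package-private and is normally constructed by KafkaSource, so the only pieces a caller shapes directly are the [start, end) offset ranges and the tuning properties it reads. A sketch of those inputs, with made-up topic names and offsets:

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.crunch.Pair;
import org.apache.crunch.kafka.KafkaUtils;
import org.apache.crunch.kafka.record.KafkaSource;
import org.apache.kafka.common.TopicPartition;

public class OffsetRangeExample {

  public static void main(String[] args) {
    // Ranges are [start, end); partitions where start >= end are filtered out before any poll happens.
    Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>();
    offsets.put(new TopicPartition("events", 0), Pair.of(0L, 100L));   // 100 records to consume
    offsets.put(new TopicPartition("events", 1), Pair.of(50L, 50L));   // empty range, silently skipped

    // Properties the iterable consults: poll timeout (default 1000 ms) and retry attempts (default 120).
    Properties props = new Properties();
    props.setProperty(KafkaSource.CONSUMER_POLL_TIMEOUT_KEY, "2000");
    props.setProperty(KafkaUtils.KAFKA_RETRY_ATTEMPTS_KEY, "10");

    System.out.println(offsets + " with tuning " + props);
  }
}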
2,331
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/record/KafkaSource.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.record; import org.apache.crunch.Pair; import org.apache.crunch.ReadableData; import org.apache.crunch.Source; import org.apache.crunch.impl.mr.run.CrunchMapper; import org.apache.crunch.io.CrunchInputs; import org.apache.crunch.io.FormatBundle; import org.apache.crunch.io.ReadableSource; import org.apache.crunch.types.Converter; import org.apache.crunch.types.PType; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.mapreduce.Job; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.serialization.Deserializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Properties; /** * A Crunch Source that will retrieve events from Kafka given start and end offsets. The source is not designed to * process unbounded data but instead to retrieve data between a specified range. * <p> * <p> * The values retrieved from Kafka are returned as {@link ConsumerRecord} with key and value as raw bytes. If callers need specific * parsing logic based on the topic then consumers are encouraged to use multiple Kafka Sources for each topic and use special * {@link org.apache.crunch.DoFn} to parse the payload. */ public class KafkaSource implements Source<ConsumerRecord<BytesWritable, BytesWritable>>, ReadableSource<ConsumerRecord<BytesWritable, BytesWritable>> { private static final Logger LOG = LoggerFactory.getLogger(KafkaSource.class); private final FormatBundle inputBundle; private final Properties props; private final Map<TopicPartition, Pair<Long, Long>> offsets; /** * Constant to indicate how long the reader waits before timing out when retrieving data from Kafka. */ public static final String CONSUMER_POLL_TIMEOUT_KEY = "org.apache.crunch.kafka.consumer.poll.timeout"; /** * Default timeout value for {@link #CONSUMER_POLL_TIMEOUT_KEY} of 1 second. */ public static final long CONSUMER_POLL_TIMEOUT_DEFAULT = 1000L; /** * Constructs a Kafka source that will read data from the Kafka cluster identified by the {@code kafkaConnectionProperties} * and from the specific topics and partitions identified in the {@code offsets} * * @param kafkaConnectionProperties The connection properties for reading from Kafka. 
These properties will be honored * with the exception of the {@link ConsumerConfig#KEY_DESERIALIZER_CLASS_CONFIG} and * {@link ConsumerConfig#VALUE_DESERIALIZER_CLASS_CONFIG} * @param offsets A map of {@link TopicPartition} to a pair of start and end offsets respectively. The start * and end offsets are evaluated at [start, end) where the ending offset is excluded. Each * TopicPartition must have a non-null pair describing its offsets. The start offset should be * less than the end offset. If the values are equal or start is greater than the end then * that partition will be skipped. */ public KafkaSource(Properties kafkaConnectionProperties, Map<TopicPartition, Pair<Long, Long>> offsets) { this.props = copyAndSetProperties(kafkaConnectionProperties); inputBundle = createFormatBundle(props, offsets); this.offsets = Collections.unmodifiableMap(new HashMap<>(offsets)); } @Override public Source<ConsumerRecord<BytesWritable, BytesWritable>> inputConf(String key, String value) { inputBundle.set(key, value); return this; } @Override public Source<ConsumerRecord<BytesWritable, BytesWritable>> fileSystem(FileSystem fileSystem) { // not currently applicable/supported for Kafka return this; } @Override public FileSystem getFileSystem() { // not currently applicable/supported for Kafka return null; } @Override public PType<ConsumerRecord<BytesWritable, BytesWritable>> getType() { return ConsumerRecordHelper.CONSUMER_RECORD_P_TYPE; } @Override public Converter<?, ?, ?, ?> getConverter() { return new KafkaSourceConverter(); } @Override public long getSize(Configuration configuration) { // TODO something smarter here. return 1000L * 1000L * 1000L; } @Override public String toString() { return "KafkaSource(" + props.getProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG) + ")"; } @Override public long getLastModifiedAt(Configuration configuration) { LOG.warn("Cannot determine last modified time for source: {}", toString()); return -1; } private static <K, V> FormatBundle createFormatBundle(Properties kafkaConnectionProperties, Map<TopicPartition, Pair<Long, Long>> offsets) { FormatBundle<KafkaInputFormat> bundle = FormatBundle.forInput(KafkaInputFormat.class); KafkaInputFormat.writeOffsetsToBundle(offsets, bundle); KafkaInputFormat.writeConnectionPropertiesToBundle(kafkaConnectionProperties, bundle); return bundle; } private static Properties copyAndSetProperties(Properties kafkaConnectionProperties) { Properties props = new Properties(); //set the default to be earliest for auto reset but allow it to be overridden if appropriate. props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); props.putAll(kafkaConnectionProperties); //Setting the key/value deserializer to ensure proper translation from Kafka to PType format. props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class.getName()); props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class.getName()); //disable automatic committing of consumer offsets props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, Boolean.toString(false)); return props; } @Override public Iterable<ConsumerRecord<BytesWritable, BytesWritable>> read(Configuration conf) throws IOException { // consumer will get closed when the iterable is fully consumed. // skip using the inputformat/splits since this will be read in a single JVM and don't need the complexity // of parallelism when reading. 
Consumer<BytesWritable, BytesWritable> consumer = new KafkaConsumer<>(props); return new KafkaRecordsIterable<>(consumer, offsets, props); } @Override @SuppressWarnings("unchecked") public void configureSource(Job job, int inputId) throws IOException { Configuration conf = job.getConfiguration(); //an id of -1 indicates that this is the only input so just use it directly if (inputId == -1) { job.setMapperClass(CrunchMapper.class); job.setInputFormatClass(inputBundle.getFormatClass()); inputBundle.configure(conf); } else { //there are multiple inputs for this mapper so add it as a CrunchInputs and need a fake path just to //make it play well with other file based inputs. Path dummy = new Path("/kafka/" + inputId); CrunchInputs.addInputPath(job, dummy, inputBundle, inputId); } } //exposed for testing purposes FormatBundle getInputBundle() { return inputBundle; } @Override public ReadableData<ConsumerRecord<BytesWritable, BytesWritable>> asReadable() { // skip using the inputformat/splits since this will be read in a single JVM and don't need the complexity // of parallelism when reading. return new KafkaData<>(props, offsets); } /** * Basic {@link Deserializer} which simply wraps the payload as a {@link BytesWritable}. */ public static class BytesDeserializer implements Deserializer<BytesWritable> { @Override public void configure(Map<String, ?> configProperties, boolean isKey) { //no-op } @Override public BytesWritable deserialize(String topic, byte[] valueBytes) { return new BytesWritable(valueBytes); } @Override public void close() { //no-op } } }
2,332
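Illustrative usage sketch (not part of the repository files above; the broker address, topic name, and offset values are hypothetical): building a KafkaSource for a fixed offset range and reading it through a Crunch pipeline.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.crunch.PCollection;
import org.apache.crunch.Pair;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.kafka.record.KafkaSource;
import org.apache.hadoop.io.BytesWritable;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;

public class KafkaSourceUsageSketch {
  public static void main(String[] args) {
    // Connection properties; only the bootstrap servers are needed here since the
    // source forces the key/value deserializers and disables auto commit itself.
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker

    // Offsets are [start, end) per TopicPartition; the values here are made up.
    Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>();
    offsets.put(new TopicPartition("events", 0), Pair.of(100L, 200L));
    offsets.put(new TopicPartition("events", 1), Pair.of(150L, 175L));

    Pipeline pipeline = new MRPipeline(KafkaSourceUsageSketch.class);
    PCollection<ConsumerRecord<BytesWritable, BytesWritable>> records =
        pipeline.read(new KafkaSource(props, offsets));

    // Downstream DoFns would parse the raw key/value bytes per topic.
    pipeline.done();
  }
}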
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/record/KafkaInputSplit.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.kafka.record;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.kafka.common.TopicPartition;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * InputSplit that represents retrieving data from a single {@link TopicPartition} between the specified start
 * and end offsets.
 */
public class KafkaInputSplit extends InputSplit implements Writable {

  private long startingOffset;
  private long endingOffset;
  private TopicPartition topicPartition;

  /**
   * Nullary Constructor for creating the instance inside the Mapper instance.
   */
  public KafkaInputSplit() {
  }

  /**
   * Constructs an input split for the provided {@code topic} and {@code partition} restricting data to be between
   * the {@code startingOffset} and {@code endingOffset}
   *
   * @param topic the topic for the split
   * @param partition the partition for the topic
   * @param startingOffset the start of the split
   * @param endingOffset the end of the split
   */
  public KafkaInputSplit(String topic, int partition, long startingOffset, long endingOffset) {
    this.startingOffset = startingOffset;
    this.endingOffset = endingOffset;
    topicPartition = new TopicPartition(topic, partition);
  }

  @Override
  public long getLength() throws IOException, InterruptedException {
    // This is just used as a hint for size of bytes so it is already inaccurate.
    return startingOffset > 0 ? endingOffset - startingOffset : endingOffset;
  }

  @Override
  public String[] getLocations() throws IOException, InterruptedException {
    //Leave empty since data locality not really an issue.
    return new String[0];
  }

  /**
   * Returns the topic and partition for the split
   *
   * @return the topic and partition for the split
   */
  public TopicPartition getTopicPartition() {
    return topicPartition;
  }

  /**
   * Returns the starting offset for the split
   *
   * @return the starting offset for the split
   */
  public long getStartingOffset() {
    return startingOffset;
  }

  /**
   * Returns the ending offset for the split
   *
   * @return the ending offset for the split
   */
  public long getEndingOffset() {
    return endingOffset;
  }

  @Override
  public void write(DataOutput dataOutput) throws IOException {
    dataOutput.writeUTF(topicPartition.topic());
    dataOutput.writeInt(topicPartition.partition());
    dataOutput.writeLong(startingOffset);
    dataOutput.writeLong(endingOffset);
  }

  @Override
  public void readFields(DataInput dataInput) throws IOException {
    String topic = dataInput.readUTF();
    int partition = dataInput.readInt();
    startingOffset = dataInput.readLong();
    endingOffset = dataInput.readLong();
    topicPartition = new TopicPartition(topic, partition);
  }

  @Override
  public String toString() {
    return getTopicPartition() + " Start: " + startingOffset + " End: " + endingOffset;
  }
}
2,333
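A small sketch (not from the repository; topic and offsets are made up) of the Writable round trip the split above supports, using Hadoop's DataOutputBuffer/DataInputBuffer the way the MapReduce framework would.

import org.apache.crunch.kafka.record.KafkaInputSplit;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class KafkaInputSplitRoundTrip {
  public static void main(String[] args) throws Exception {
    // Serialize a populated split.
    KafkaInputSplit original = new KafkaInputSplit("events", 0, 100L, 200L);
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);

    // Deserialize into a fresh instance created via the nullary constructor.
    KafkaInputSplit copy = new KafkaInputSplit();
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    copy.readFields(in);

    System.out.println(copy); // events-0 Start: 100 End: 200
  }
}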
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/record/KafkaRecordReader.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.record; import org.apache.crunch.CrunchRuntimeException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.RetriableException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.Collections; import java.util.Iterator; import java.util.Map; import java.util.Properties; import static org.apache.crunch.kafka.KafkaUtils.KAFKA_RETRY_ATTEMPTS_DEFAULT; import static org.apache.crunch.kafka.KafkaUtils.KAFKA_RETRY_ATTEMPTS_KEY; import static org.apache.crunch.kafka.KafkaUtils.getKafkaConnectionProperties; import static org.apache.crunch.kafka.record.KafkaInputFormat.filterConnectionProperties; import static org.apache.crunch.kafka.record.KafkaSource.CONSUMER_POLL_TIMEOUT_DEFAULT; import static org.apache.crunch.kafka.record.KafkaSource.CONSUMER_POLL_TIMEOUT_KEY; /** * A {@link RecordReader} for pulling data from Kafka. 
* * @param <K> the key of the records from Kafka * @param <V> the value of the records from Kafka */ public class KafkaRecordReader<K, V> extends RecordReader<ConsumerRecord<K, V>, Void> { private static final Logger LOG = LoggerFactory.getLogger(KafkaRecordReader.class); private Consumer<K, V> consumer; private ConsumerRecord<K, V> record; private long endingOffset; private Iterator<ConsumerRecord<K, V>> recordIterator; private long consumerPollTimeout; private long maxNumberOfRecords; private long startingOffset; private long currentOffset; private int maxNumberAttempts; private Properties kafkaConnectionProperties; private TopicPartition topicPartition; @Override public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException { if (!(inputSplit instanceof KafkaInputSplit)) { throw new CrunchRuntimeException("InputSplit for RecordReader is not valid split type."); } kafkaConnectionProperties = filterConnectionProperties(getKafkaConnectionProperties(taskAttemptContext.getConfiguration())); consumer = buildConsumer(kafkaConnectionProperties); KafkaInputSplit split = (KafkaInputSplit) inputSplit; topicPartition = split.getTopicPartition(); consumer.assign(Collections.singletonList(topicPartition)); //suggested hack to gather info without gathering data consumer.poll(0); //now seek to the desired start location startingOffset = split.getStartingOffset(); consumer.seek(topicPartition, startingOffset); currentOffset = startingOffset - 1; endingOffset = split.getEndingOffset(); maxNumberOfRecords = endingOffset - startingOffset; if (LOG.isInfoEnabled()) { LOG.info("Reading data from {} between {} and {}", new Object[] { topicPartition, startingOffset, endingOffset }); } Configuration config = taskAttemptContext.getConfiguration(); consumerPollTimeout = config.getLong(CONSUMER_POLL_TIMEOUT_KEY, CONSUMER_POLL_TIMEOUT_DEFAULT); maxNumberAttempts = config.getInt(KAFKA_RETRY_ATTEMPTS_KEY, KAFKA_RETRY_ATTEMPTS_DEFAULT); } /** * Builds a new kafka consumer * * @param properties * the properties to configure the consumer * @return a new kafka consumer */ // Visible for testing protected KafkaConsumer<K, V> buildConsumer(Properties properties) { return new KafkaConsumer<>(properties); } /** * @return the current offset for the reader */ // Visible for testing protected long getCurrentOffset() { return currentOffset; } @Override public boolean nextKeyValue() throws IOException, InterruptedException { if (hasPendingData()) { loadRecords(); record = recordIterator.hasNext() ? recordIterator.next() : null; if (record != null) { LOG.debug("nextKeyValue: Retrieved record with offset {}", record.offset()); long oldOffset = currentOffset; currentOffset = record.offset(); LOG.debug("Current offset will be updated to be [{}]", currentOffset); if (LOG.isWarnEnabled() && (currentOffset - oldOffset > 1)) { // The most likely scenario here is our start offset was deleted and an offset reset took place by the consumer LOG.warn("Possible data loss in partition {} as offset {} was larger than expected {}", new Object[] { topicPartition, currentOffset, oldOffset + 1}); } if (currentOffset >= endingOffset) { // We had pending data but read a record beyond our end offset so don't include it and stop reading if (LOG.isWarnEnabled()) LOG.warn("Record offset {} is beyond our end offset {}. 
This could indicate data loss in partition {}", new Object[] { currentOffset, endingOffset, topicPartition}); record = null; return false; } return true; } else if (isPartitionEmpty()) { // If the partition is empty but we are expecting to read data we would read no records and // see no errors so handle that here if (LOG.isWarnEnabled()) LOG.warn("The partition {} is empty though we expected to read from {} to {}. This could indicate data loss", new Object[] {topicPartition, currentOffset, endingOffset}); } else { // We have pending data but we are unable to fetch any records so throw an exception and stop the job throw new IOException("Unable to read additional data from Kafka. See logs for details. Partition " + topicPartition + " Current Offset: " + currentOffset + " End Offset: " + endingOffset); } } record = null; return false; } private boolean isPartitionEmpty() { // We don't need to fetch the ending offset as well since that is looked up when running the job. If the end offset had // changed while running we would have read that record instead of reading no records and calling this method return getEarliestOffset() == endingOffset; } @Override public ConsumerRecord<K, V> getCurrentKey() throws IOException, InterruptedException { return record; } @Override public Void getCurrentValue() throws IOException, InterruptedException { return null; } @Override public float getProgress() throws IOException, InterruptedException { //not most accurate but gives reasonable estimate return ((float) (currentOffset - startingOffset + 1)) / maxNumberOfRecords; } private boolean hasPendingData() { //offset range is exclusive at the end which means the ending offset is one higher // than the actual physical last offset return currentOffset < endingOffset - 1; } /** * @return the record iterator used by the reader */ // Visble for testing protected Iterator<ConsumerRecord<K, V>> getRecordIterator() { return recordIterator; } /** * Loads new records into the record iterator */ // Visible for testing protected void loadRecords() { if ((recordIterator == null) || !recordIterator.hasNext()) { ConsumerRecords<K, V> records = null; int numTries = 0; boolean success = false; while(!success && (numTries < maxNumberAttempts)) { numTries++; try { records = getConsumer().poll(consumerPollTimeout); } catch (RetriableException re) { LOG.warn("Error pulling messages from Kafka", re); } // There was no error but we don't have any records. This could indicate a slowness or failure in Kafka's internal // client or that the partition has no more data if (records != null && records.isEmpty()){ if (LOG.isWarnEnabled()) LOG.warn("No records retrieved from partition {} with poll timeout {} but pending offsets to consume. Current " + "Offset: {}, End Offset: {}", new Object[] { topicPartition, consumerPollTimeout, currentOffset, endingOffset }); } else if (records != null && !records.isEmpty()){ success = true; } if (!success && numTries < maxNumberAttempts) { LOG.info("Record fetch attempt {} / {} failed, retrying", numTries, maxNumberAttempts); } else if (!success) { if (LOG.isWarnEnabled()) LOG.warn("Record fetch attempt {} / {} failed. 
No more attempts left for partition {}", new Object[] { numTries, maxNumberAttempts, topicPartition }); } } if ((records == null) || records.isEmpty()){ LOG.info("No records retrieved from Kafka partition {} therefore nothing to iterate over", topicPartition); } else{ LOG.info("Retrieved {} records from Kafka partition {} to iterate over starting from offset {}", new Object[] { records.count(), topicPartition, records.iterator().next().offset()}); } recordIterator = records != null ? records.iterator() : ConsumerRecords.<K, V>empty().iterator(); } } /** * @return the consumer */ protected Consumer<K, V> getConsumer() { return consumer; } /** * @return the earliest offset of the topic partition */ protected long getEarliestOffset() { Map<TopicPartition, Long> brokerOffsets = consumer.beginningOffsets( Collections.singletonList(topicPartition)); Long offset = brokerOffsets.get(topicPartition); if(offset == null){ LOG.debug("Unable to determine earliest offset for {} so returning 0", topicPartition); return 0L; } LOG.debug("Earliest offset for {} is {}", topicPartition, offset); return offset; } @Override public void close() throws IOException { LOG.debug("Closing the record reader."); if (consumer != null) { consumer.close(); } } }
2,334
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/record/KafkaInputFormat.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.record; import org.apache.commons.lang.StringUtils; import org.apache.crunch.Pair; import org.apache.crunch.io.FormatBundle; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.TopicPartition; import java.io.IOException; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.regex.Pattern; /** * Basic input format for reading data from Kafka. Data is read and provided as {@link ConsumerRecord} with the data left in its * raw byte form. * <p> * Populating the configuration of the input format is handled with the convenience method of * {@link #writeOffsetsToConfiguration(Map, Configuration)}. This should be done to ensure * the Kafka offset information is available when the input format {@link #getSplits(JobContext) creates its splits} * and {@link #createRecordReader(InputSplit, TaskAttemptContext) readers}. * <p> * To allow for suppression of warnings referring to unknown configs in the Kafka {@code ConsumerConfig}, properties containing * Kafka connection information will be prefixed using {@link #generateConnectionPropertyKey(String) generateConnectionPropertyKey} * and will be written to the {@code FormatBundle} using * {@link #writeConnectionPropertiesToBundle(Properties, FormatBundle) writeConnectionPropertiesToBundle}. These properties can then * be retrieved using {@link #filterConnectionProperties(Properties) filterConnectionProperties}. */ public class KafkaInputFormat extends InputFormat<ConsumerRecord<BytesWritable, BytesWritable>, Void> implements Configurable { /** * Constant for constructing configuration keys for the input format. */ private static final String KAFKA_INPUT_OFFSETS_BASE = "org.apache.crunch.kafka.offsets.topic"; /** * Constant used for building configuration keys and specifying partitions. */ private static final String PARTITIONS = "partitions"; /** * Constant used for building configuration keys and specifying the start of a partition. */ private static final String START = "start"; /** * Constant used for building configuration keys and specifying the end of a partition. 
*/ private static final String END = "end"; /** * Regex to discover all of the defined partitions which should be consumed by the input format. */ private static final String TOPIC_KEY_REGEX = Pattern.quote(KAFKA_INPUT_OFFSETS_BASE) + "\\..*\\." + PARTITIONS + "$"; /** * Constant for constructing configuration keys for the Kafka connection properties. */ private static final String KAFKA_CONNECTION_PROPERTY_BASE = "org.apache.crunch.kafka.connection.properties"; /** * Configuration property for the maximum number of records per input split. Partitions with more qualifying records than this * limit will be divided into multiple splits. */ public static final String KAFKA_MAX_RECORDS_PER_SPLIT = "org.apache.crunch.kafka.split.max"; /** * Default value for {@link #KAFKA_MAX_RECORDS_PER_SPLIT} */ public static final long DEFAULT_KAFKA_MAX_RECORDS_PER_SPLIT = 5000000L; /** * Regex to discover all of the defined Kafka connection properties which should be passed to the ConsumerConfig. */ private static final Pattern CONNECTION_PROPERTY_REGEX = Pattern .compile(Pattern.quote(KAFKA_CONNECTION_PROPERTY_BASE) + "\\..*$"); private Configuration configuration; @Override public List<InputSplit> getSplits(JobContext jobContext) throws IOException, InterruptedException { Configuration conf = getConf(); long maxRecordsPerSplit = conf.getLong(KAFKA_MAX_RECORDS_PER_SPLIT, DEFAULT_KAFKA_MAX_RECORDS_PER_SPLIT); if (maxRecordsPerSplit < 1L) { throw new IllegalArgumentException("Invalid " + KAFKA_MAX_RECORDS_PER_SPLIT + " value [" + maxRecordsPerSplit + "]"); } Map<TopicPartition, Pair<Long, Long>> offsets = getOffsets(conf); List<InputSplit> splits = new LinkedList<>(); for (Map.Entry<TopicPartition, Pair<Long, Long>> entry : offsets.entrySet()) { TopicPartition topicPartition = entry.getKey(); long start = entry.getValue().first(); long end = entry.getValue().second(); // Chop up any excessively large partitions into multiple splits for more balanced map task durations. This will // also exclude any partitions with no records to read (where the start offset equals the end offset). long splitStart = start; while (splitStart < end) { long splitEnd = Math.min(splitStart + maxRecordsPerSplit, end); splits.add(new KafkaInputSplit(topicPartition.topic(), topicPartition.partition(), splitStart, splitEnd)); splitStart = splitEnd; } } return splits; } @Override public RecordReader<ConsumerRecord<BytesWritable, BytesWritable>, Void> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException { return new KafkaRecordReader<>(); } @Override public void setConf(Configuration configuration) { this.configuration = configuration; } @Override public Configuration getConf() { return configuration; } //The following methods are used for reading and writing Kafka Partition offset information into Hadoop's Configuration //objects and into Crunch's FormatBundle. For a specific Kafka Topic it might have one or many partitions and for //each partition it will need a start and end offset. 
Assuming you have a topic of "abc" and it has 2 partitions the //configuration would be populated with the following: // org.apache.crunch.kafka.offsets.topic.abc.partitions = [0,1] // org.apache.crunch.kafka.offsets.topic.abc.partitions.0.start = <partition start> // org.apache.crunch.kafka.offsets.topic.abc.partitions.0.end = <partition end> // org.apache.crunch.kafka.offsets.topic.abc.partitions.1.start = <partition start> // org.apache.crunch.kafka.offsets.topic.abc.partitions.1.end = <partition end> /** * Writes the start and end offsets for the provided topic partitions to the {@code bundle}. * * @param offsets The starting and ending offsets for the topics and partitions. * @param bundle the bundle into which the information should be persisted. */ public static void writeOffsetsToBundle(Map<TopicPartition, Pair<Long, Long>> offsets, FormatBundle bundle) { for (Map.Entry<String, String> entry : generateValues(offsets).entrySet()) { bundle.set(entry.getKey(), entry.getValue()); } } /** * Writes the start and end offsets for the provided topic partitions to the {@code config}. * * @param offsets The starting and ending offsets for the topics and partitions. * @param config the config into which the information should be persisted. */ public static void writeOffsetsToConfiguration(Map<TopicPartition, Pair<Long, Long>> offsets, Configuration config) { for (Map.Entry<String, String> entry : generateValues(offsets).entrySet()) { config.set(entry.getKey(), entry.getValue()); } } /** * Reads the {@code configuration} to determine which topics, partitions, and offsets should be used for reading data. * * @param configuration the configuration to derive the data to read. * @return a map of {@link TopicPartition} to a pair of start and end offsets. * @throws IllegalStateException if the {@code configuration} does not have the start and end offsets set properly * for a partition. 
*/ public static Map<TopicPartition, Pair<Long, Long>> getOffsets(Configuration configuration) { Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>(); //find configuration for all of the topics with defined partitions Map<String, String> topicPartitionKeys = configuration.getValByRegex(TOPIC_KEY_REGEX); //for each topic start to process it's partitions for (String key : topicPartitionKeys.keySet()) { String topic = getTopicFromKey(key); int[] partitions = configuration.getInts(key); //for each partition find and add the start/end offset for (int partitionId : partitions) { TopicPartition topicPartition = new TopicPartition(topic, partitionId); long start = configuration.getLong(generatePartitionStartKey(topic, partitionId), Long.MIN_VALUE); long end = configuration.getLong(generatePartitionEndKey(topic, partitionId), Long.MIN_VALUE); if (start == Long.MIN_VALUE || end == Long.MIN_VALUE) { throw new IllegalStateException( "The " + topicPartition + " has an invalid start:" + start + " or end:" + end + " offset configured."); } offsets.put(topicPartition, Pair.of(start, end)); } } return offsets; } private static Map<String, String> generateValues(Map<TopicPartition, Pair<Long, Long>> offsets) { Map<String, String> offsetConfigValues = new HashMap<>(); Map<String, Set<Integer>> topicsPartitions = new HashMap<>(); for (Map.Entry<TopicPartition, Pair<Long, Long>> entry : offsets.entrySet()) { TopicPartition topicPartition = entry.getKey(); String topic = topicPartition.topic(); int partition = topicPartition.partition(); String startKey = generatePartitionStartKey(topic, partition); String endKey = generatePartitionEndKey(topic, partition); //Add the start and end offsets for a specific partition offsetConfigValues.put(startKey, Long.toString(entry.getValue().first())); offsetConfigValues.put(endKey, Long.toString(entry.getValue().second())); Set<Integer> partitions = topicsPartitions.get(topic); if (partitions == null) { partitions = new HashSet<>(); topicsPartitions.put(topic, partitions); } partitions.add(partition); } //generate the partitions values for each topic for (Map.Entry<String, Set<Integer>> entry : topicsPartitions.entrySet()) { String key = KAFKA_INPUT_OFFSETS_BASE + "." + entry.getKey() + "." + PARTITIONS; Set<Integer> partitions = entry.getValue(); String partitionsString = StringUtils.join(partitions, ","); offsetConfigValues.put(key, partitionsString); } return offsetConfigValues; } static String generatePartitionStartKey(String topic, int partition) { return KAFKA_INPUT_OFFSETS_BASE + "." + topic + "." + PARTITIONS + "." + partition + "." + START; } static String generatePartitionEndKey(String topic, int partition) { return KAFKA_INPUT_OFFSETS_BASE + "." + topic + "." + PARTITIONS + "." + partition + "." + END; } static String generateTopicPartitionsKey(String topic) { return KAFKA_INPUT_OFFSETS_BASE + "." + topic + "." + PARTITIONS; } static String getTopicFromKey(String key) { //strip off the base key + a trailing "." String value = key.substring(KAFKA_INPUT_OFFSETS_BASE.length() + 1); //strip off the end part + a preceding "." value = value.substring(0, (value.length() - (PARTITIONS.length() + 1))); return value; } // The following methods are used for writing prefixed Kafka connection properties into Crunch's FormatBundle and // {@link #filterConnectionProperties(Properties props) filtering} out the prefixed properties. 
This allows for // suppression of {@code ConsumerConfig} warnings that are generated by unused properties carried over from the Hadoop // properties. /** * Writes the Kafka connection properties to the {@code bundle}. The connection properties are prefixed with * "org.apache.crunch.kafka.connection.properties" to allow for suppression of unused {@code ConsumerConfig} warnings * generated by unused Hadoop properties. * * @param connectionProperties The Kafka connection properties to be prefixed for later * {@link #filterConnectionProperties(Properties props) filtering}. * @param bundle The bundle into which the information should be persisted. */ public static void writeConnectionPropertiesToBundle(Properties connectionProperties, FormatBundle bundle) { for (final String name : connectionProperties.stringPropertyNames()) { bundle.set(generateConnectionPropertyKey(name), connectionProperties.getProperty(name)); } } /** * Filters out properties properties written by * {@link #writeConnectionPropertiesToBundle(Properties, FormatBundle) writeConnectionPropertiesToBundle}. * * @param props The properties to be filtered. * @return The properties containing Kafka connection information that were written by * {@link #writeConnectionPropertiesToBundle(Properties, FormatBundle) writeConnectionPropertiesToBundle}. */ public static Properties filterConnectionProperties(Properties props) { Properties filteredProperties = new Properties(); for (final String name : props.stringPropertyNames()) { if (CONNECTION_PROPERTY_REGEX.matcher(name).matches()) { filteredProperties.put(getConnectionPropertyFromKey(name), props.getProperty(name)); } } return filteredProperties; } /** * Prefixes a given property with "org.apache.crunch.kafka.connection.properties" to allow for filtering with * {@link #filterConnectionProperties(Properties) filterConnectionProperties}. * * @param property The Kafka connection property that will be prefixed for retrieval at a later time. * @return The property prefixed with "org.apache.crunch.kafka.connection.properties" */ static String generateConnectionPropertyKey(String property) { return KAFKA_CONNECTION_PROPERTY_BASE + "." + property; } /** * Retrieves the original property that was prefixed using * {@link #generateConnectionPropertyKey(String) generateConnectionPropertyKey}. * * @param key The key that was prefixed using {@link #generateConnectionPropertyKey(String) generateConnectionPropertyKey}. * @return The original property prior to prefixing. */ static String getConnectionPropertyFromKey(String key) { // Strip off the base key + a trailing "." return key.substring(KAFKA_CONNECTION_PROPERTY_BASE.length() + 1); } /** * Generates a {@link Properties} object containing the properties in {@code connectionProperties}, but with every * property prefixed with "org.apache.crunch.kafka.connection.properties". * * @param connectionProperties the properties to be prefixed with "org.apache.crunch.kafka.connection.properties" * @return a {@link Properties} object representing Kafka connection properties */ public static Properties tagExistingKafkaConnectionProperties(Properties connectionProperties) { Properties taggedProperties = new Properties(); for (final String name : connectionProperties.stringPropertyNames()) { taggedProperties.put(generateConnectionPropertyKey(name), connectionProperties.getProperty(name)); } return taggedProperties; } }
2,335
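A sketch of the configuration round trip described in the comments above (topic and offset values are hypothetical): offsets written with writeOffsetsToConfiguration are recovered the same way getSplits does internally via getOffsets.

import java.util.HashMap;
import java.util.Map;

import org.apache.crunch.Pair;
import org.apache.crunch.kafka.record.KafkaInputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.common.TopicPartition;

public class KafkaOffsetsConfigRoundTrip {
  public static void main(String[] args) {
    Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>();
    offsets.put(new TopicPartition("abc", 0), Pair.of(0L, 500L));
    offsets.put(new TopicPartition("abc", 1), Pair.of(25L, 525L));

    // Writes keys of the form org.apache.crunch.kafka.offsets.topic.abc.partitions[.N.start/.end]
    Configuration conf = new Configuration(false);
    KafkaInputFormat.writeOffsetsToConfiguration(offsets, conf);

    // The input format performs this same lookup when creating splits.
    Map<TopicPartition, Pair<Long, Long>> recovered = KafkaInputFormat.getOffsets(conf);
    System.out.println(recovered.get(new TopicPartition("abc", 1))); // [25,525]
  }
}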
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/record/KafkaSourceConverter.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.kafka.record;

import org.apache.crunch.types.Converter;
import org.apache.hadoop.io.BytesWritable;
import org.apache.kafka.clients.consumer.ConsumerRecord;

/**
 * {@link Converter} for {@link KafkaSource}
 */
public class KafkaSourceConverter implements
    Converter<ConsumerRecord<BytesWritable, BytesWritable>, Void, ConsumerRecord<BytesWritable, BytesWritable>,
        Iterable<ConsumerRecord<BytesWritable, BytesWritable>>> {

  private static final long serialVersionUID = 5270341393169043945L;

  @Override
  public ConsumerRecord<BytesWritable, BytesWritable> convertInput(
      ConsumerRecord<BytesWritable, BytesWritable> record, Void aVoid) {
    return record;
  }

  @Override
  public Iterable<ConsumerRecord<BytesWritable, BytesWritable>> convertIterableInput(
      ConsumerRecord<BytesWritable, BytesWritable> bytesWritableBytesWritableConsumerRecord, Iterable<Void> iterable) {
    throw new UnsupportedOperationException("Should not be possible");
  }

  @Override
  public ConsumerRecord<BytesWritable, BytesWritable> outputKey(ConsumerRecord<BytesWritable, BytesWritable> record) {
    return record;
  }

  @Override
  public Void outputValue(ConsumerRecord<BytesWritable, BytesWritable> record) {
    // No value, we just use the record as the key.
    return null;
  }

  @Override
  @SuppressWarnings("unchecked")
  public Class<ConsumerRecord<BytesWritable, BytesWritable>> getKeyClass() {
    return (Class<ConsumerRecord<BytesWritable, BytesWritable>>) (Object) ConsumerRecord.class;
  }

  @Override
  public Class<Void> getValueClass() {
    return Void.class;
  }

  @Override
  public boolean applyPTypeTransforms() {
    return false;
  }
}
2,336
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/record/KafkaData.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.kafka.record;

import org.apache.crunch.Pair;
import org.apache.crunch.ReadableData;
import org.apache.crunch.SourceTarget;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

class KafkaData<K, V> implements ReadableData<ConsumerRecord<K, V>> {

  private static final long serialVersionUID = -6582212311361579556L;

  private final Map<TopicPartition, Pair<Long, Long>> offsets;
  private final Properties props;

  KafkaData(Properties connectionProperties, Map<TopicPartition, Pair<Long, Long>> offsets) {
    this.props = connectionProperties;
    this.offsets = offsets;
  }

  @Override
  public Set<SourceTarget<?>> getSourceTargets() {
    return null;
  }

  @Override
  public void configure(Configuration conf) {
    //no-op
  }

  @Override
  public Iterable<ConsumerRecord<K, V>> read(TaskInputOutputContext<?, ?, ?, ?> context) throws IOException {
    Consumer<K, V> consumer = new KafkaConsumer<>(props);
    return new KafkaRecordsIterable<>(consumer, offsets, props);
  }
}
2,337
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/record/ConsumerRecordHelper.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.record; import org.apache.crunch.CrunchRuntimeException; import org.apache.crunch.MapFn; import org.apache.crunch.types.PType; import org.apache.crunch.types.writable.Writables; import org.apache.hadoop.io.BytesWritable; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.record.TimestampType; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; /** * Serializer/De-Serializer for Kafka's {@link ConsumerRecord} */ public class ConsumerRecordHelper { /** * PType for {@link ConsumerRecord} */ @SuppressWarnings("unchecked") public static final PType<ConsumerRecord<BytesWritable, BytesWritable>> CONSUMER_RECORD_P_TYPE = Writables .derived((Class<ConsumerRecord<BytesWritable, BytesWritable>>) (Object) ConsumerRecord.class, new ConsumerRecordHelper.BytesToConsumerRecord(), new ConsumerRecordHelper.ConsumerRecordToBytes(), Writables.writables(BytesWritable.class)); /** * Serializes the record into {@code byte[]}s * * @param record the record to serialize * @return the record in {@code byte[]}s * @throws IllegalArgumentException if record is {@code null} * @throws IOException if there is an issue during serialization */ public static byte[] serialize(ConsumerRecord<BytesWritable, BytesWritable> record) throws IOException { if (record == null) throw new IllegalArgumentException("record cannot be null"); ByteArrayOutputStream byteOut = new ByteArrayOutputStream(); try (DataOutputStream dataOut = new DataOutputStream(byteOut)) { dataOut.writeUTF(record.topic()); dataOut.writeInt(record.partition()); dataOut.writeLong(record.offset()); dataOut.writeLong(record.timestamp()); dataOut.writeUTF(record.timestampType().name); dataOut.writeLong(record.checksum()); dataOut.writeInt(record.serializedKeySize()); dataOut.writeInt(record.serializedValueSize()); if (record.key() == null) { dataOut.writeInt(-1); } else { byte[] keyBytes = record.key().getBytes(); dataOut.writeInt(keyBytes.length); dataOut.write(keyBytes); } if (record.value() == null) { dataOut.writeInt(-1); } else { byte[] valueBytes = record.value().getBytes(); dataOut.writeInt(valueBytes.length); dataOut.write(valueBytes); } return byteOut.toByteArray(); } } /** * De-serializes the bytes into a {@link ConsumerRecord} * * @param bytes the bytes of a {@link ConsumerRecord} * @return a {@link ConsumerRecord} from the bytes * @throws IllegalArgumentException if bytes is {@code null} * @throws IOException if there is an issue de-serializing the bytes */ public static ConsumerRecord<BytesWritable, BytesWritable> deserialize(byte[] bytes) throws IOException { if (bytes == null) throw new 
IllegalArgumentException("bytes cannot be null"); try (DataInputStream dataIn = new DataInputStream(new ByteArrayInputStream(bytes))) { String topic = dataIn.readUTF(); int partition = dataIn.readInt(); long offset = dataIn.readLong(); long timestamp = dataIn.readLong(); String timestampTypeName = dataIn.readUTF(); long checksum = dataIn.readLong(); int serializedKeySize = dataIn.readInt(); int serializedValueSize = dataIn.readInt(); BytesWritable key = null; int keySize = dataIn.readInt(); if (keySize != -1) { byte[] keyBytes = new byte[keySize]; dataIn.readFully(keyBytes); key = new BytesWritable(keyBytes); } BytesWritable value = null; int valueSize = dataIn.readInt(); if (valueSize != -1) { byte[] valueBytes = new byte[valueSize]; dataIn.readFully(valueBytes); value = new BytesWritable(valueBytes); } return new ConsumerRecord<>(topic, partition, offset, timestamp, TimestampType.forName(timestampTypeName), checksum, serializedKeySize, serializedValueSize, key, value); } } /** * {@link MapFn} to convert {@link ConsumerRecord} to {@link BytesWritable} */ public static class ConsumerRecordToBytes extends MapFn<ConsumerRecord<BytesWritable, BytesWritable>, BytesWritable> { private static final long serialVersionUID = -6821080008375335537L; @Override public BytesWritable map(ConsumerRecord<BytesWritable, BytesWritable> record) { try { return new BytesWritable(ConsumerRecordHelper.serialize(record)); } catch (IOException e) { throw new CrunchRuntimeException("Error serializing consumer record " + record, e); } } } /** * {@link MapFn} to convert {@link BytesWritable} to {@link ConsumerRecord} */ public static class BytesToConsumerRecord extends MapFn<BytesWritable, ConsumerRecord<BytesWritable, BytesWritable>> { private static final long serialVersionUID = -6545017910063252322L; @Override public ConsumerRecord<BytesWritable, BytesWritable> map(BytesWritable bytesWritable) { try { return ConsumerRecordHelper.deserialize(bytesWritable.getBytes()); } catch (IOException e) { throw new CrunchRuntimeException("Error deserializing consumer record", e); } } } }
2,338
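A serialize/deserialize sketch for the helper above (not from the repository; topic, offset, and payload are made up, and the long ConsumerRecord constructor shown assumes the older Kafka client API implied by the helper's checksum handling).

import org.apache.crunch.kafka.record.ConsumerRecordHelper;
import org.apache.hadoop.io.BytesWritable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.record.TimestampType;

public class ConsumerRecordRoundTrip {
  public static void main(String[] args) throws Exception {
    BytesWritable key = new BytesWritable("k1".getBytes("UTF-8"));
    BytesWritable value = new BytesWritable("v1".getBytes("UTF-8"));

    // topic, partition, offset, timestamp, timestamp type, checksum, key/value sizes, key, value
    ConsumerRecord<BytesWritable, BytesWritable> record = new ConsumerRecord<>(
        "events", 0, 42L, System.currentTimeMillis(), TimestampType.CREATE_TIME,
        0L, key.getLength(), value.getLength(), key, value);

    byte[] bytes = ConsumerRecordHelper.serialize(record);
    ConsumerRecord<BytesWritable, BytesWritable> copy = ConsumerRecordHelper.deserialize(bytes);

    System.out.println(copy.topic() + "/" + copy.partition() + "@" + copy.offset()); // events/0@42
  }
}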
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/offset/AbstractOffsetReader.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.kafka.offset;

import org.apache.kafka.common.TopicPartition;

import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * Base implementation of {@link OffsetReader}
 */
public abstract class AbstractOffsetReader implements OffsetReader {

  @Override
  public Map<TopicPartition, Long> readOffsets(long persistedOffsetTime) throws IOException {
    throw new UnsupportedOperationException("Operation to read old offsets is not supported");
  }

  @Override
  public List<Long> getStoredOffsetPersistenceTimes() throws IOException {
    throw new UnsupportedOperationException("Operation to retrieve old offset persistence times is not supported");
  }
}
2,339
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/offset/AbstractOffsetWriter.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.kafka.offset;

import org.apache.kafka.common.TopicPartition;

import java.io.IOException;
import java.util.Map;

/**
 * Base implementation of {@link OffsetWriter}
 */
public abstract class AbstractOffsetWriter implements OffsetWriter {

  @Override
  public void write(Map<TopicPartition, Long> offsets) throws IOException {
    write(System.currentTimeMillis(), offsets);
  }
}
2,340
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/offset/OffsetReader.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.kafka.offset;

import org.apache.kafka.common.TopicPartition;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * Reader API that supports reading offset information from an underlying storage mechanism.
 */
public interface OffsetReader extends Closeable {

  /**
   * Reads the last stored offsets.
   *
   * @return the last stored offsets. If there are no stored offsets an empty collection will be returned.
   * @throws IOException if there is an error reading from the underlying storage.
   */
  Map<TopicPartition, Long> readLatestOffsets() throws IOException;

  /**
   * Reads the offsets for a given {@code persistedOffsetTime}. Note that not all storage mechanisms support
   * complete historical offset information. Use {@link #getStoredOffsetPersistenceTimes()} to find valid values
   * to specify for {@code persistedOffsetTime}.
   *
   * @param persistedOffsetTime the persistence time when offsets were written to the underlying storage system.
   * @return the offsets persisted at the specified {@code persistedOffsetTime}. If no offsets were persisted
   * at that time or available to be retrieved then {@code null} will be returned.
   * @throws IOException if there is an error reading from the underlying storage.
   */
  Map<TopicPartition, Long> readOffsets(long persistedOffsetTime) throws IOException;

  /**
   * Returns the list of available persistence times at which offsets have been written to the underlying storage
   * mechanism. The list of available persistence times will be returned in order from earliest to latest.
   *
   * @return the collection of persistence times in the form of milliseconds since epoch. If there are no historical
   * persistence times then an {@code empty list} is returned.
   * @throws IOException if there is an error reading from the underlying storage.
   */
  List<Long> getStoredOffsetPersistenceTimes() throws IOException;
}
2,341
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/offset/OffsetWriter.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.kafka.offset;

import org.apache.kafka.common.TopicPartition;

import java.io.Closeable;
import java.io.IOException;
import java.util.Map;

/**
 * Writer for persisting offset information.
 */
public interface OffsetWriter extends Closeable {

  /**
   * Persists the {@code offsets} to a configured location with the current time specified as the as of time.
   *
   * @param offsets the offsets to persist
   * @throws IllegalArgumentException if the {@code offsets} are {@code null}.
   * @throws IOException if there is an error persisting the offsets.
   */
  void write(Map<TopicPartition, Long> offsets) throws IOException;

  /**
   * Persists the {@code offsets} to a configured location with metadata of {@code asOfTime} indicating
   * the time in milliseconds when the offsets were meaningful.
   *
   * @param asOfTime the metadata describing when the offsets are accurate as of a time given in milliseconds
   * since epoch.
   * @param offsets the offsets to persist
   * @throws IllegalArgumentException if the {@code offsets} are {@code null} or the {@code asOfTime} is less than 0.
   * @throws IOException if there is an error persisting the offsets.
   */
  void write(long asOfTime, Map<TopicPartition, Long> offsets) throws IOException;
}
2,342
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/offset
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/offset/hdfs/HDFSOffsetWriter.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.offset.hdfs; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.commons.lang.StringUtils; import org.apache.crunch.kafka.offset.AbstractOffsetWriter; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.kafka.common.TopicPartition; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.LinkedList; import java.util.List; import java.util.Map; /** * Offset writer implementation that stores the offsets in HDFS. */ public class HDFSOffsetWriter extends AbstractOffsetWriter { private static final Logger LOG = LoggerFactory.getLogger(HDFSOffsetWriter.class); /** * Custom formatter for translating the times into valid file names. */ public static final String PERSIST_TIME_FORMAT = "yyyy-MM-dd'T'HH-mm-ssZ"; /** * Formatter to use when creating the file names in a URI compliant format. */ public static final DateTimeFormatter FILE_FORMATTER = DateTimeFormat.forPattern(PERSIST_TIME_FORMAT).withZoneUTC(); /** * File extension for storing the offsets. */ public static final String FILE_FORMAT_EXTENSION = ".json"; /** * Configuration for the underlying storage. */ private final Configuration config; /** * Mapper for converting data into JSON */ private static final ObjectMapper MAPPER = new ObjectMapper(); /** * Base storage path for offset data */ private final Path baseStoragePath; /** * Creates a writer instance for interacting with the storage specified by the {@code config} and with * the base storage path of {@code baseStoragePath}. * * @param config the config for interacting with the underlying data store. * @param baseStoragePath the base storage path for offset information. * @throws IllegalArgumentException if either argument is {@code null}. 
*/ public HDFSOffsetWriter(Configuration config, Path baseStoragePath) { if (config == null) { throw new IllegalArgumentException("The 'config' cannot be 'null'."); } if (baseStoragePath == null) { throw new IllegalArgumentException("The 'baseStoragePath' cannot be 'null'."); } this.config = config; this.baseStoragePath = baseStoragePath; } @Override public void write(long asOfTime, Map<TopicPartition, Long> offsets) throws IOException { if (offsets == null) { throw new IllegalArgumentException("The 'offsets' cannot be 'null'."); } if (asOfTime < 0) { throw new IllegalArgumentException("The 'asOfTime' cannot be less than 0."); } List<Offsets.PartitionOffset> partitionOffsets = new LinkedList<>(); for(Map.Entry<TopicPartition, Long> entry: offsets.entrySet()){ partitionOffsets.add(Offsets.PartitionOffset.Builder.newBuilder().setOffset(entry.getValue()) .setTopic(entry.getKey().topic()) .setPartition(entry.getKey().partition()).build()); } Offsets storageOffsets = Offsets.Builder.newBuilder().setOffsets(partitionOffsets) .setAsOfTime(asOfTime).build(); FileSystem fs = getFileSystem(); Path offsetPath = getPersistedTimeStoragePath(baseStoragePath, asOfTime); LOG.debug("Writing offsets to {} with as of time {}", offsetPath, asOfTime); try (FSDataOutputStream fsDataOutputStream = fs.create(getPersistedTimeStoragePath(baseStoragePath, asOfTime), true)) { MAPPER.writeValue(fsDataOutputStream, storageOffsets); fsDataOutputStream.flush(); } LOG.debug("Completed writing offsets to {}", offsetPath); } @Override public void close() throws IOException { //no-op } /** * Returns the {@link FileSystem} instance for writing data. Callers are not responsible for closing the instance. * * @return the {@link FileSystem} instance for writing data. * @throws IOException error retrieving underlying file system. */ protected FileSystem getFileSystem() throws IOException { return FileSystem.get(config); } /** * Creates a {@link Path} for storing the offsets for a specified {@code persistedTime}. * * @param baseStoragePath The base path the offsets will be stored at. * @param persistedTime the time of the data being persisted. * @return The path to where the offset information should be stored. * @throws IllegalArgumentException if the {@code baseStoragePath} is {@code null}. */ public static Path getPersistedTimeStoragePath(Path baseStoragePath, long persistedTime) { if (baseStoragePath == null) { throw new IllegalArgumentException("The 'baseStoragePath' cannot be 'null'."); } return new Path(baseStoragePath, persistenceTimeToFileName(persistedTime)); } /** * Converts a {@code fileName} into the time the offsets were persisted. * * @param fileName the file name to parse. * @return the time in milliseconds since epoch that the offsets were stored. * @throws IllegalArgumentException if the {@code fileName} is not of the correct format or is {@code null} or * empty. */ public static long fileNameToPersistenceTime(String fileName) { if (StringUtils.isBlank(fileName)) { throw new IllegalArgumentException("the 'fileName' cannot be 'null' or empty"); } String formattedTimeString = StringUtils.strip(fileName, FILE_FORMAT_EXTENSION); DateTime persistedTime = FILE_FORMATTER.parseDateTime(formattedTimeString); return persistedTime.getMillis(); } /** * Converts a {@code persistedTime} into a file name for persisting the offsets. * * @param persistedTime the persisted time to use to generate the file name. * @return the file name to use when persisting the data. 
*/ public static String persistenceTimeToFileName(long persistedTime) { DateTime dateTime = new DateTime(persistedTime, DateTimeZone.UTC); String formattedTime = FILE_FORMATTER.print(dateTime); return formattedTime + FILE_FORMAT_EXTENSION; } }
2,343
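A usage sketch for the writer above (not from the repository; the base path, topic, and offsets are hypothetical): persist the current offsets for two partitions under a base directory in HDFS.

import java.util.HashMap;
import java.util.Map;

import org.apache.crunch.kafka.offset.hdfs.HDFSOffsetWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.kafka.common.TopicPartition;

public class OffsetWriteSketch {
  public static void main(String[] args) throws Exception {
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(new TopicPartition("events", 0), 200L);
    offsets.put(new TopicPartition("events", 1), 175L);

    // Offsets land in <base>/<yyyy-MM-dd'T'HH-mm-ssZ>.json for the supplied as-of time.
    try (HDFSOffsetWriter writer =
             new HDFSOffsetWriter(new Configuration(), new Path("/crunch/kafka/offsets/events"))) {
      writer.write(System.currentTimeMillis(), offsets);
    }
  }
}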
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/offset
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/offset/hdfs/HDFSOffsetReader.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.offset.hdfs; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.crunch.kafka.offset.AbstractOffsetReader; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.kafka.common.TopicPartition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; /** * Reader implementation that reads offset information from HDFS. */ public class HDFSOffsetReader extends AbstractOffsetReader { private static final Logger LOG = LoggerFactory.getLogger(HDFSOffsetReader.class); private final Configuration config; private final Path baseOffsetStoragePath; private static final ObjectMapper MAPPER = new ObjectMapper(); /** * Creates a reader instance for interacting with the storage specified by the {@code config} and with * the base storage path of {@code baseStoragePath}. * * @param config the config for interacting with the underlying data store. * @param baseOffsetStoragePath the base storage path for offset information. If the path does not exist it will * be created. * @throws IllegalArgumentException if either argument is {@code null}. */ public HDFSOffsetReader(Configuration config, Path baseOffsetStoragePath) { if (config == null) { throw new IllegalArgumentException("The 'config' cannot be 'null'."); } if (baseOffsetStoragePath == null) { throw new IllegalArgumentException("The 'baseOffsetStoragePath' cannot be 'null'."); } this.config = config; this.baseOffsetStoragePath = baseOffsetStoragePath; } @Override public Map<TopicPartition, Long> readLatestOffsets() throws IOException { List<Long> storedOffsetPersistenceTimes = getStoredOffsetPersistenceTimes(true); if (storedOffsetPersistenceTimes.isEmpty()) { return Collections.emptyMap(); } long persistedTime = storedOffsetPersistenceTimes.get(0); Map<TopicPartition, Long> offsets = readOffsets(persistedTime); return offsets == null ? 
Collections.<TopicPartition, Long>emptyMap() : offsets; } @Override public Map<TopicPartition, Long> readOffsets(long persistedOffsetTime) throws IOException { Path offsetFilePath = HDFSOffsetWriter.getPersistedTimeStoragePath(baseOffsetStoragePath, persistedOffsetTime); FileSystem fs = getFileSystem(); if (fs.isFile(offsetFilePath)) { InputStream inputStream = fs.open(offsetFilePath); try { Offsets offsets = MAPPER.readValue(inputStream, Offsets.class); Map<TopicPartition, Long> partitionsMap = new HashMap<>(); for(Offsets.PartitionOffset partitionOffset: offsets.getOffsets()){ partitionsMap.put(new TopicPartition(partitionOffset.getTopic(), partitionOffset.getPartition()), partitionOffset.getOffset()); } return partitionsMap; }finally{ inputStream.close(); } } LOG.error("Offset file at {} is not a file or does not exist.", offsetFilePath); return null; } @Override public List<Long> getStoredOffsetPersistenceTimes() throws IOException { return getStoredOffsetPersistenceTimes(false); } private List<Long> getStoredOffsetPersistenceTimes(boolean newestFirst) throws IOException { List<Long> persistedTimes = new LinkedList<>(); FileSystem fs = getFileSystem(); try { FileStatus[] fileStatuses = fs.listStatus(baseOffsetStoragePath); for (FileStatus status : fileStatuses) { if (status.isFile()) { String fileName = status.getPath().getName(); try { persistedTimes.add(HDFSOffsetWriter.fileNameToPersistenceTime(fileName)); } catch (IllegalArgumentException iae) { LOG.info("Skipping file {} due to filename not being of the correct format.", status.getPath(), iae); } } else { LOG.info("Skippping {} because it is not a file.", status.getPath()); } } } catch (FileNotFoundException fnfe) { LOG.error("Unable to retrieve prior offsets.", fnfe); } //natural order should put oldest (smallest long) first. This will put newest first. if (newestFirst) { Collections.sort(persistedTimes, Collections.reverseOrder()); } else { Collections.sort(persistedTimes); } return Collections.unmodifiableList(persistedTimes); } @Override public void close() throws IOException { } /** * Returns the {@link FileSystem} instance for writing data. Callers are not responsible for closing the instance. * * @return the {@link FileSystem} instance for writing data. * @throws IOException error retrieving underlying file system. */ protected FileSystem getFileSystem() throws IOException { return FileSystem.get(config); } }
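A matching read-side sketch, again illustrative only: it assumes offsets were previously persisted under the same placeholder base path used in the writer sketch.

import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.apache.crunch.kafka.offset.hdfs.HDFSOffsetReader;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.kafka.common.TopicPartition;

public class OffsetReaderExample {
  public static void main(String[] args) throws IOException {
    HDFSOffsetReader reader =
        new HDFSOffsetReader(new Configuration(), new Path("/tmp/crunch-offsets"));
    try {
      // Newest persisted offsets, or an empty map if nothing has been stored yet.
      Map<TopicPartition, Long> latest = reader.readLatestOffsets();
      System.out.println("latest offsets: " + latest);

      // All persistence times, oldest first; each can be fed back into readOffsets(...).
      List<Long> times = reader.getStoredOffsetPersistenceTimes();
      if (!times.isEmpty()) {
        Map<TopicPartition, Long> oldest = reader.readOffsets(times.get(0));
        System.out.println("oldest offsets: " + oldest);
      }
    } finally {
      reader.close();
    }
  }
}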
2,344
0
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/offset
Create_ds/crunch/crunch-kafka/src/main/java/org/apache/crunch/kafka/offset/hdfs/Offsets.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.kafka.offset.hdfs; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import org.apache.commons.lang.StringUtils; import org.apache.kafka.common.requests.ListOffsetRequest; import java.util.Collections; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.Set; /** * Simple object to represent a collection of Kafka Topic and Partition offset information to make storing * this information easier. */ @JsonDeserialize(builder = Offsets.Builder.class) @JsonInclude(JsonInclude.Include.NON_NULL) public class Offsets { private final long offsetsAsOfTime; private final List<PartitionOffset> offsets; private Offsets(long asOfTime, List<PartitionOffset> offsets) { offsetsAsOfTime = asOfTime; this.offsets = offsets; } /** * Returns the time in milliseconds since epoch that the offset information was retrieved or valid as of. * * @return the time in milliseconds since epoch that the offset information was retrieved or valid as of. */ @JsonProperty("asOfTime") public long getAsOfTime() { return offsetsAsOfTime; } /** * The collection of offset information for specific topics and partitions. * * @return collection of offset information for specific topics and partitions. */ @JsonProperty("offsets") public List<PartitionOffset> getOffsets() { return offsets; } @Override public int hashCode() { return Objects.hash(offsetsAsOfTime, offsets); } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (obj instanceof Offsets) { Offsets that = (Offsets) obj; return this.offsetsAsOfTime == that.offsetsAsOfTime && this.offsets.equals(that.offsets); } return false; } /** * Builder for the {@link Offsets}. */ @JsonIgnoreProperties(ignoreUnknown = true) public static class Builder { private long asOf = -1; private List<PartitionOffset> offsets = Collections.emptyList(); /** * Creates a new Builder instance. * * @return a new Builder instance. */ public static Builder newBuilder() { return new Builder(); } /** * Sets the as of time for the collection of offsets. * * @param asOfTime the as of time for the offsets. * @return builder instance * @throws IllegalArgumentException if the {@code asOfTime} is less than 0. */ @JsonProperty("asOfTime") public Builder setAsOfTime(long asOfTime) { if (asOfTime < 0) { throw new IllegalArgumentException("The 'asOfTime' cannot be less than 0."); } this.asOf = asOfTime; return this; } /** * Sets the collection of offsets. 
* * @param offsets the collection of offsets * @return builder instance * @throws IllegalArgumentException if the {@code offsets} is {@code null}. */ @JsonProperty("offsets") public Builder setOffsets(List<PartitionOffset> offsets) { if (offsets == null) { throw new IllegalArgumentException("The 'offsets' cannot be 'null'."); } List<PartitionOffset> sortedOffsets = new LinkedList<>(offsets); Collections.sort(sortedOffsets); this.offsets = Collections.unmodifiableList(sortedOffsets); return this; } /** * Builds an instance. * * @return a built instance * @throws IllegalStateException if the {@link #setAsOfTime(long) asOfTime} is not set or the specified * {@link #setOffsets(List) offsets} contains duplicate entries for a topic partition. */ public Offsets build() { if (asOf < 0) { throw new IllegalStateException("The 'asOfTime' cannot be less than 0."); } Set<String> uniqueTopicPartitions = new HashSet<>(); for(PartitionOffset partitionOffset : offsets){ uniqueTopicPartitions.add(partitionOffset.getTopic()+partitionOffset.getPartition()); } if (uniqueTopicPartitions.size() != offsets.size()) { throw new IllegalStateException("The 'offsets' contains duplicate entries for a topic and partition."); } return new Offsets(asOf, offsets); } } /** * Simple object that represents a specific topic, partition, and its offset value. */ @JsonDeserialize(builder = PartitionOffset.Builder.class) @JsonInclude(JsonInclude.Include.NON_NULL) public static class PartitionOffset implements Comparable<PartitionOffset> { private final String topic; private final int partition; private final long offset; private PartitionOffset(String topic, int partition, long offset) { this.topic = topic; this.partition = partition; this.offset = offset; } /** * Returns the topic * * @return the topic */ public String getTopic() { return topic; } /** * Returns the partition * * @return the partition */ public int getPartition() { return partition; } /** * Returns the offset * * @return the offset */ public long getOffset() { return offset; } @Override public int compareTo(PartitionOffset other) { int compare = topic.compareTo(other.topic); if (compare == 0) { compare = Integer.compare(partition, other.partition); if (compare == 0) { return Long.compare(offset, other.offset); } } return compare; } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (obj instanceof PartitionOffset) { PartitionOffset that = (PartitionOffset) obj; return compareTo(that) == 0; } return false; } @Override public int hashCode() { return Objects.hash(topic, partition, offset); } /** * Builder for {@link PartitionOffset} */ @JsonIgnoreProperties(ignoreUnknown = true) public static class Builder { private String topic; private int partition = -1; private long offset = ListOffsetRequest.EARLIEST_TIMESTAMP; /** * Creates a new builder instance. * * @return a new builder instance. */ public static Builder newBuilder() { return new Builder(); } /** * Set the {@code topic} for the partition offset being built * * @param topic the topic for the partition offset being built. * @return builder instance * @throws IllegalArgumentException if the {@code topic} is {@code null} or empty. */ @JsonProperty("topic") public Builder setTopic(String topic) { if (StringUtils.isBlank(topic)) { throw new IllegalArgumentException("The 'topic' cannot be null or empty."); } this.topic = topic; return this; } /** * Set the {@code partition} for the partition offset being built * * @param partition the partition for the partition offset being built. 
* @return builder instance * @throws IllegalArgumentException if the {@code partition} is less than 0. */ @JsonProperty("partition") public Builder setPartition(int partition) { if (partition < 0) { throw new IllegalArgumentException("The 'partition' cannot be less than 0."); } this.partition = partition; return this; } /** * Set the {@code offset} for the partition offset being built. If the {@code offset} is not * set then it defaults to {@link ListOffsetRequest#EARLIEST_TIMESTAMP}. * * @param offset the topic for the partition offset being built. * @return builder instance */ @JsonProperty("offset") public Builder setOffset(long offset) { this.offset = offset; return this; } /** * Builds a PartitionOffset instance. * * @return the built PartitionOffset instance. * @throws IllegalStateException if the {@code topic} or {@code partition} are never set or configured * to invalid values. */ public PartitionOffset build() { if (StringUtils.isBlank(topic)) { throw new IllegalStateException("The 'topic' cannot be null or empty."); } if (partition < 0) { throw new IllegalStateException("The 'partition' cannot be less than 0."); } return new PartitionOffset(topic, partition, offset); } } } }
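A short sketch of building an Offsets value through the builders above; the topic name and numbers are invented, and build() enforces the validation rules documented in the javadoc.

import java.util.Arrays;

import org.apache.crunch.kafka.offset.hdfs.Offsets;

public class OffsetsBuilderExample {
  public static void main(String[] args) {
    Offsets.PartitionOffset p0 = Offsets.PartitionOffset.Builder.newBuilder()
        .setTopic("events")
        .setPartition(0)
        .setOffset(42L)
        .build();
    Offsets.PartitionOffset p1 = Offsets.PartitionOffset.Builder.newBuilder()
        .setTopic("events")
        .setPartition(1)
        .setOffset(17L)
        .build();

    Offsets offsets = Offsets.Builder.newBuilder()
        .setAsOfTime(System.currentTimeMillis())
        // Duplicate entries for the same topic and partition would cause build() to fail.
        .setOffsets(Arrays.asList(p0, p1))
        .build();

    System.out.println("as-of time: " + offsets.getAsOfTime()
        + ", partitions: " + offsets.getOffsets().size());
  }
}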
2,345
0
Create_ds/crunch/crunch-hbase/src/test/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/test/java/org/apache/crunch/io/hbase/HFileTargetTest.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.io.hbase;

import static org.junit.Assert.assertEquals;

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.junit.Test;

public class HFileTargetTest {

  @Test
  public void testConfigureForMapReduce() throws IOException {
    Job job = Job.getInstance();
    // Add the test config file. We can't just call job.getConfiguration().set() because
    // setting a configuration with .set will always take precedence.
    job.getConfiguration().addResource("test-hbase-conf.xml");
    HFileTarget target = new HFileTarget("/");
    target.configureForMapReduce(job, HBaseTypes.keyValues(), new Path("/"), "name");
    assertEquals("12345", job.getConfiguration().get("hbase.client.scanner.timeout.period"));
  }
}
2,346
0
Create_ds/crunch/crunch-hbase/src/test/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/test/java/org/apache/crunch/io/hbase/HBaseTargetTest.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.io.hbase;

import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;

import java.io.IOException;

import org.apache.crunch.Target;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.junit.Test;

public class HBaseTargetTest {

  @Test
  public void testConfigureForMapReduce() throws IOException {
    Job job = Job.getInstance();
    // Add the test config file. We can't just call job.getConfiguration().set() because
    // setting a configuration with .set will always take precedence.
    job.getConfiguration().addResource("test-hbase-conf.xml");
    HBaseTarget target = new HBaseTarget("testTable");
    target.configureForMapReduce(job, HBaseTypes.keyValues(), new Path("/"), "name");
    assertEquals("12345", job.getConfiguration().get("hbase.client.scanner.timeout.period"));
  }

  @Test
  public void testEquality() {
    Target target = new HBaseTarget("testTable");
    Target target2 = new HBaseTarget("testTable");
    assertEquals(target, target2);
    assertEquals(target.hashCode(), target2.hashCode());
  }

  @Test
  public void testEqualityWithExtraConf() {
    Target target = new HBaseTarget("testTable").outputConf("key", "value");
    Target target2 = new HBaseTarget("testTable").outputConf("key", "value");
    assertEquals(target, target2);
    assertEquals(target.hashCode(), target2.hashCode());
  }

  @Test
  public void testInequality() {
    Target target = new HBaseTarget("testTable");
    Target target2 = new HBaseTarget("testTable2");
    assertThat(target, is(not(target2)));
    assertThat(target.hashCode(), is(not(target2.hashCode())));
  }

  @Test
  public void testInequalityWithExtraConf() {
    Target target = new HBaseTarget("testTable").outputConf("key", "value");
    Target target2 = new HBaseTarget("testTable").outputConf("key", "value2");
    assertThat(target, is(not(target2)));
    assertThat(target.hashCode(), is(not(target2.hashCode())));
  }
}
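A small illustrative sketch of attaching per-target output configuration with outputConf before a write, as exercised by the equality tests above; the table name is a placeholder and the property/value pair simply mirrors the test fixture.

import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.Target;
import org.apache.crunch.io.hbase.HBaseTarget;
import org.apache.hadoop.hbase.client.Put;

public class HBaseTargetConfSketch {
  public static void write(Pipeline pipeline, PCollection<Put> puts) {
    // Extra configuration attached to this target only; two targets with different
    // outputConf values are considered unequal, as the tests above show.
    Target target = new HBaseTarget("counts")
        .outputConf("hbase.client.scanner.timeout.period", "12345");
    pipeline.write(puts, target);
  }
}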
2,347
0
Create_ds/crunch/crunch-hbase/src/it/java/org/apache/crunch
Create_ds/crunch/crunch-hbase/src/it/java/org/apache/crunch/test/TemporaryPaths.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.test; import org.apache.crunch.impl.mr.run.RuntimeParameters; import org.apache.hadoop.conf.Configuration; /** * Utilities for working with {@link TemporaryPath}. */ public final class TemporaryPaths { /** * Static factory returning a {@link TemporaryPath} with adjusted * {@link Configuration} properties. */ public static TemporaryPath create() { return new TemporaryPath(RuntimeParameters.TMP_DIR, "hadoop.tmp.dir"); } private TemporaryPaths() { // nothing } }
2,348
0
Create_ds/crunch/crunch-hbase/src/it/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/it/java/org/apache/crunch/io/hbase/WordCountHBaseIT.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.nio.charset.Charset; import java.util.Map; import java.util.Random; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.crunch.DoFn; import org.apache.crunch.Emitter; import org.apache.crunch.MapFn; import org.apache.crunch.PCollection; import org.apache.crunch.PTable; import org.apache.crunch.Pair; import org.apache.crunch.Pipeline; import org.apache.crunch.PipelineResult; import org.apache.crunch.impl.mr.MRPipeline; import org.apache.crunch.test.TemporaryPath; import org.apache.crunch.test.TemporaryPaths; import org.apache.crunch.types.writable.Writables; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.MultiTableInputFormat; import org.apache.hadoop.hbase.mapreduce.MultiTableInputFormatBase; import org.apache.hadoop.hbase.util.Bytes; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableSet; public class WordCountHBaseIT { static class StringifyFn extends MapFn<Pair<ImmutableBytesWritable, Pair<Result, Result>>, String> { @Override public String map(Pair<ImmutableBytesWritable, Pair<Result, Result>> input) { byte[] firstStrBytes = input.second().first().getValue(WORD_COLFAM, null); byte[] secondStrBytes = input.second().second().getValue(WORD_COLFAM, null); if (firstStrBytes != null && secondStrBytes != null) { return Joiner.on(',').join(new String(firstStrBytes, Charset.forName("UTF-8")), new String(secondStrBytes, Charset.forName("UTF-8"))); } return ""; } } @Rule public TemporaryPath tmpDir = TemporaryPaths.create(); private static final byte[] COUNTS_COLFAM = Bytes.toBytes("cf"); private static final byte[] WORD_COLFAM = Bytes.toBytes("cf"); private HBaseTestingUtility hbaseTestUtil; @SuppressWarnings("serial") public static PCollection<Put> wordCount(PTable<ImmutableBytesWritable, Result> words) { PTable<String, Long> counts = words.parallelDo( new DoFn<Pair<ImmutableBytesWritable, Result>, String>() { @Override public void 
process(Pair<ImmutableBytesWritable, Result> row, Emitter<String> emitter) { byte[] word = row.second().getValue(WORD_COLFAM, null); if (word != null) { emitter.emit(Bytes.toString(word)); } } }, words.getTypeFamily().strings()).count(); return counts.parallelDo("convert to put", new DoFn<Pair<String, Long>, Put>() { @Override public void process(Pair<String, Long> input, Emitter<Put> emitter) { Put put = new Put(Bytes.toBytes(input.first())); put.addColumn(COUNTS_COLFAM, null, Bytes.toBytes(input.second())); emitter.emit(put); } }, HBaseTypes.puts()); } @SuppressWarnings("serial") public static PCollection<Delete> clearCounts(PTable<ImmutableBytesWritable, Result> counts) { return counts.parallelDo("convert to delete", new DoFn<Pair<ImmutableBytesWritable, Result>, Delete>() { @Override public void process(Pair<ImmutableBytesWritable, Result> input, Emitter<Delete> emitter) { Delete delete = new Delete(input.first().get()); emitter.emit(delete); } }, HBaseTypes.deletes()); } @Before public void setUp() throws Exception { Configuration conf = HBaseConfiguration.create(tmpDir.getDefaultConfiguration()); conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, tmpDir.getFile("hbase-staging").getAbsolutePath()); hbaseTestUtil = new HBaseTestingUtility(conf); hbaseTestUtil.startMiniCluster(); } @Test public void testWordCount() throws Exception { run(new MRPipeline(WordCountHBaseIT.class, hbaseTestUtil.getConfiguration())); } @Test public void testWordCountCustomFormat() throws Exception { run(new MRPipeline(WordCountHBaseIT.class, hbaseTestUtil.getConfiguration()), MyTableInputFormat.class); assertTrue(MyTableInputFormat.CONSTRUCTED.get()); } @After public void tearDown() throws Exception { hbaseTestUtil.shutdownMiniCluster(); } public void run(Pipeline pipeline) throws Exception { run(pipeline, null); } public void run(Pipeline pipeline, Class<? extends MultiTableInputFormatBase> clazz) throws Exception { Random rand = new Random(); int postFix = rand.nextInt() & 0x7FFFFFFF; TableName inputTableName = TableName.valueOf("crunch_words_" + postFix); TableName outputTableName = TableName.valueOf("crunch_counts_" + postFix); TableName otherTableName = TableName.valueOf("crunch_other_" + postFix); TableName joinTableName = TableName.valueOf("crunch_join_words_" + postFix); Table inputTable = hbaseTestUtil.createTable(inputTableName, WORD_COLFAM); Table outputTable = hbaseTestUtil.createTable(outputTableName, COUNTS_COLFAM); Table otherTable = hbaseTestUtil.createTable(otherTableName, COUNTS_COLFAM); int key = 0; key = put(inputTable, key, "cat"); key = put(inputTable, key, "cat"); key = put(inputTable, key, "dog"); inputTable.close(); //Setup scan using multiple scans that simply cut the rows in half. 
Scan scan = new Scan(); scan.addFamily(WORD_COLFAM); byte[] cutoffPoint = Bytes.toBytes(2); scan.setStopRow(cutoffPoint); Scan scan2 = new Scan(); scan.addFamily(WORD_COLFAM); scan2.setStartRow(cutoffPoint); HBaseSourceTarget source = null; if(clazz == null){ source = new HBaseSourceTarget(inputTableName, scan, scan2); }else{ source = new HBaseSourceTarget(inputTableName, clazz, new Scan[]{scan, scan2}); } PTable<ImmutableBytesWritable, Result> words = pipeline.read(source); Map<ImmutableBytesWritable, Result> materialized = words.materializeToMap(); assertEquals(3, materialized.size()); PCollection<Put> puts = wordCount(words); pipeline.write(puts, new HBaseTarget(outputTableName)); pipeline.write(puts, new HBaseTarget(otherTableName)); PipelineResult res = pipeline.done(); assertTrue(res.succeeded()); assertIsLong(otherTable, "cat", 2); assertIsLong(otherTable, "dog", 1); assertIsLong(outputTable, "cat", 2); assertIsLong(outputTable, "dog", 1); // verify we can do joins. Table joinTable = hbaseTestUtil.createTable(joinTableName, WORD_COLFAM); try { key = 0; key = put(joinTable, key, "zebra"); key = put(joinTable, key, "donkey"); key = put(joinTable, key, "bird"); key = put(joinTable, key, "horse"); } finally { joinTable.close(); } Scan joinScan = new Scan(); joinScan.addFamily(WORD_COLFAM); PTable<ImmutableBytesWritable, Result> other = pipeline.read(FromHBase.table(joinTableName, joinScan)); PCollection<String> joined = words.join(other).parallelDo(new StringifyFn(), Writables.strings()); assertEquals(ImmutableSet.of("cat,zebra", "cat,donkey", "dog,bird"), ImmutableSet.copyOf(joined.materialize())); pipeline.done(); //verify HBaseTarget supports deletes. Scan clearScan = new Scan(); clearScan.addFamily(COUNTS_COLFAM); pipeline = new MRPipeline(WordCountHBaseIT.class, hbaseTestUtil.getConfiguration()); HBaseSourceTarget clearSource = new HBaseSourceTarget(outputTableName, clearScan); PTable<ImmutableBytesWritable, Result> counts = pipeline.read(clearSource); pipeline.write(clearCounts(counts), new HBaseTarget(outputTableName)); pipeline.done(); assertDeleted(outputTable, "cat"); assertDeleted(outputTable, "dog"); } protected int put(Table table, int key, String value) throws IOException { Put put = new Put(Bytes.toBytes(key)); put.addColumn(WORD_COLFAM, null, Bytes.toBytes(value)); table.put(put); return key + 1; } protected static void assertIsLong(Table table, String key, long i) throws IOException { Get get = new Get(Bytes.toBytes(key)); get.addFamily(COUNTS_COLFAM); Result result = table.get(get); byte[] rawCount = result.getValue(COUNTS_COLFAM, null); assertNotNull(rawCount); assertEquals(i, Bytes.toLong(rawCount)); } protected static void assertDeleted(Table table, String key) throws IOException { Get get = new Get(Bytes.toBytes(key)); get.addFamily(COUNTS_COLFAM); Result result = table.get(get); assertTrue(result.isEmpty()); } public static class MyTableInputFormat extends MultiTableInputFormat{ public static final AtomicBoolean CONSTRUCTED = new AtomicBoolean(); public MyTableInputFormat(){ CONSTRUCTED.set(true); } } }
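A condensed, illustrative sketch of the same read-count-write flow outside the test harness. It assumes a reachable HBase cluster, uses placeholder table names ("words", "counts"), and reuses the public wordCount helper from the test above purely for brevity.

import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.io.hbase.FromHBase;
import org.apache.crunch.io.hbase.HBaseTarget;
import org.apache.crunch.io.hbase.WordCountHBaseIT;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class HBaseWordCountSketch {
  public static void main(String[] args) {
    Pipeline pipeline = new MRPipeline(HBaseWordCountSketch.class, HBaseConfiguration.create());

    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));

    // Read rows from the source table as (row key, Result) pairs.
    PTable<ImmutableBytesWritable, Result> words =
        pipeline.read(FromHBase.table(TableName.valueOf("words"), scan));

    // Reuse the word-count transformation defined in the test above to build Puts.
    PCollection<Put> puts = WordCountHBaseIT.wordCount(words);

    // Write the Puts to the destination table.
    pipeline.write(puts, new HBaseTarget(TableName.valueOf("counts")));
    pipeline.done();
  }
}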
2,349
0
Create_ds/crunch/crunch-hbase/src/it/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/it/java/org/apache/crunch/io/hbase/RegionLocationTableTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInput; import java.io.DataInputStream; import java.io.DataOutput; import java.io.DataOutputStream; import java.io.IOException; import java.net.InetSocketAddress; import com.google.common.collect.ImmutableList; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.junit.Before; import org.junit.Test; public class RegionLocationTableTest { private static final String TABLE_NAME = "namespace:DATA_TABLE"; private RegionLocationTable regionLocationTable; @Before public void setUp() { regionLocationTable = RegionLocationTable.create(TABLE_NAME, ImmutableList.of( location(null, new byte[] { 10 }, "serverA"), location(new byte[] { 10 }, new byte[] { 20 }, "serverB"), location(new byte[] { 20 }, new byte[] { 30 }, "serverC"), location(new byte[] { 30 }, null, "serverD"))); } @Test public void testLookupRowInFirstRegion() { assertEquals( InetSocketAddress.createUnresolved("serverA", 0), regionLocationTable.getPreferredNodeForRow(new byte[] { 5 })); } @Test public void testLookupRowInNonBoundaryRegion() { assertEquals( InetSocketAddress.createUnresolved("serverC", 0), regionLocationTable.getPreferredNodeForRow(new byte[] { 25 })); } @Test public void testLookupRowInLastRegion() { assertEquals( InetSocketAddress.createUnresolved("serverD", 0), regionLocationTable.getPreferredNodeForRow(new byte[] { 35 })); } @Test public void testLookupRowOnRegionBoundary() { assertEquals( InetSocketAddress.createUnresolved("serverB", 0), regionLocationTable.getPreferredNodeForRow(new byte[] { 10 })); } @Test public void testEmpty() { RegionLocationTable emptyTable = RegionLocationTable.create(TABLE_NAME, ImmutableList.<HRegionLocation>of()); assertNull( emptyTable.getPreferredNodeForRow(new byte[] { 10 })); } @Test public void testSerializationRoundTrip() throws IOException { ByteArrayOutputStream byteOutputStream = new ByteArrayOutputStream(); DataOutput dataOutput = new DataOutputStream(byteOutputStream); regionLocationTable.serialize(dataOutput); ByteArrayInputStream byteInputStream = new ByteArrayInputStream(byteOutputStream.toByteArray()); DataInput dataInput = new DataInputStream(byteInputStream); RegionLocationTable deserialized = RegionLocationTable.deserialize(dataInput); // Just a basic test to make sure it works as before assertEquals( InetSocketAddress.createUnresolved("serverA", 0), deserialized.getPreferredNodeForRow(new byte[] { 5 })); } @Test public void 
testSerializationRoundTrip_EmptyTable() throws IOException { ByteArrayOutputStream byteOutputStream = new ByteArrayOutputStream(); DataOutput dataOutput = new DataOutputStream(byteOutputStream); RegionLocationTable emptyTable = RegionLocationTable.create(TABLE_NAME, ImmutableList.<HRegionLocation>of()); emptyTable.serialize(dataOutput); ByteArrayInputStream byteInputStream = new ByteArrayInputStream(byteOutputStream.toByteArray()); DataInput dataInput = new DataInputStream(byteInputStream); RegionLocationTable deserialized = RegionLocationTable.deserialize(dataInput); // Just a basic test to make sure it works as before assertNull( deserialized.getPreferredNodeForRow(new byte[] { 10 })); } @Test public void testNullRegionInfo() { RegionLocationTable table = RegionLocationTable.create(TABLE_NAME, ImmutableList.of(location(null, serverName("serverA")))); assertNull( table.getPreferredNodeForRow(new byte[] { 15 })); } @Test public void testNullServerName() { RegionLocationTable table = RegionLocationTable.create(TABLE_NAME, ImmutableList.of(location(regionInfo(new byte[] { 10 }, new byte[] { 20 }), null))); assertNull( table.getPreferredNodeForRow(new byte[] { 15 })); } private static HRegionLocation location(byte[] startKey, byte[] endKey, String hostName) { return location(regionInfo(startKey, endKey), serverName(hostName)); } private static HRegionLocation location(HRegionInfo regionInfo, ServerName serverName) { return new HRegionLocation(regionInfo, serverName); } private static HRegionInfo regionInfo(byte[] startKey, byte[] endKey) { return new HRegionInfo(TableName.valueOf(TABLE_NAME), startKey, endKey); } private static ServerName serverName(String hostName) { return ServerName.valueOf(hostName, 60020, System.currentTimeMillis()); } }
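An illustrative sketch of how a RegionLocationTable might be built from a live table's region locations and queried for a preferred node; the table name is a placeholder and it assumes RegionLocationTable is visible from the calling package.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.crunch.io.hbase.RegionLocationTable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationTableSketch {
  public static void main(String[] args) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = connection.getRegionLocator(TableName.valueOf("namespace:DATA_TABLE"))) {
      // Build the lookup table from the current region boundaries and server names.
      RegionLocationTable table = RegionLocationTable.create(
          "namespace:DATA_TABLE", locator.getAllRegionLocations());

      // Returns null when no region or server information is available for the row.
      InetSocketAddress preferred = table.getPreferredNodeForRow(Bytes.toBytes("some-row"));
      System.out.println("preferred node: " + preferred);
    }
  }
}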
2,350
0
Create_ds/crunch/crunch-hbase/src/it/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/it/java/org/apache/crunch/io/hbase/HFileTargetIT.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.io.Resources; import org.apache.commons.io.IOUtils; import org.apache.crunch.DoFn; import org.apache.crunch.Emitter; import org.apache.crunch.FilterFn; import org.apache.crunch.GroupingOptions; import org.apache.crunch.MapFn; import org.apache.crunch.PCollection; import org.apache.crunch.PTable; import org.apache.crunch.Pair; import org.apache.crunch.Pipeline; import org.apache.crunch.PipelineResult; import org.apache.crunch.fn.FilterFns; import org.apache.crunch.impl.dist.DistributedPipeline; import org.apache.crunch.impl.mr.MRPipeline; import org.apache.crunch.io.At; import org.apache.crunch.io.CompositePathIterable; import org.apache.crunch.io.seq.SeqFileReaderFactory; import org.apache.crunch.test.TemporaryPath; import org.apache.crunch.test.TemporaryPaths; import org.apache.crunch.types.writable.WritableDeepCopier; import org.apache.crunch.types.writable.Writables; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.NamespaceExistException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.KeyValueHeap; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileReader; import 
org.apache.hadoop.hbase.regionserver.StoreFileScanner; import org.apache.hadoop.hbase.util.BloomFilter; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.SequenceFile; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import java.io.BufferedReader; import java.io.DataInput; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.Serializable; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; import static org.apache.crunch.types.writable.Writables.nulls; import static org.apache.crunch.types.writable.Writables.tableOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; public class HFileTargetIT implements Serializable { private static HBaseTestingUtility HBASE_TEST_UTILITY; private static final String TEST_NAMESPACE = "test_namespace"; private static final byte[] TEST_FAMILY = Bytes.toBytes("test_family"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("count"); private static final Path TEMP_DIR = new Path("/tmp"); private static final Random RANDOM = new Random(); private static final FilterFn<String> SHORT_WORD_FILTER = new FilterFn<String>() { @Override public boolean accept(String input) { return input.length() <= 2; } }; @Rule public transient TemporaryPath tmpDir = TemporaryPaths.create(); @BeforeClass public static void setUpClass() throws Exception { // We have to use mini mapreduce cluster, because LocalJobRunner allows only a single reducer // (we will need it to test bulk load against multiple regions). Configuration conf = HBaseConfiguration.create(); // Workaround for HBASE-5711, we need to set config value dfs.datanode.data.dir.perm // equal to the permissions of the temp dirs on the filesystem. These temp dirs were // probably created using this process' umask. So we guess the temp dir permissions as // 0777 & ~umask, and use that to set the config value. Process process = Runtime.getRuntime().exec("/bin/sh -c umask"); BufferedReader br = new BufferedReader(new InputStreamReader(process.getInputStream(), Charset.forName("UTF-8"))); int rc = process.waitFor(); if(rc == 0) { String umask = br.readLine(); int umaskBits = Integer.parseInt(umask, 8); int permBits = 0777 & ~umaskBits; String perms = Integer.toString(permBits, 8); conf.set("dfs.datanode.data.dir.perm", perms); } HBASE_TEST_UTILITY = new HBaseTestingUtility(conf); HBASE_TEST_UTILITY.startMiniCluster(1); } private static Table createTable(int splits) throws Exception { return createTable(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR, splits); } private static Table createTable(int splits, HColumnDescriptor... hcols) throws Exception { return createTable(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR, splits, hcols); } private static Table createTable(String namespace, int splits) throws Exception { return createTable(namespace, splits, new HColumnDescriptor(TEST_FAMILY)); } private static Table createTable(String namespace, int splits, HColumnDescriptor... 
hcols) throws Exception { TableName tableName; if (NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR.equals(namespace)) { tableName = TableName.valueOf(Bytes.toBytes("test_table_" + RANDOM.nextInt(1000000000))); } else { tableName = TableName.valueOf(Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM + "test_table_" + RANDOM.nextInt(1000000000))); } HTableDescriptor htable = new HTableDescriptor(tableName); for (HColumnDescriptor hcol : hcols) { htable.addFamily(hcol); } if (!NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR.equals(namespace)) { try { HBASE_TEST_UTILITY.getAdmin().createNamespace(NamespaceDescriptor.create(namespace).build()); } catch (NamespaceExistException e) { // Ignore expected exception } } return HBASE_TEST_UTILITY.createTable(htable, Bytes.split(Bytes.toBytes("a"), Bytes.toBytes("z"), splits)); } @AfterClass public static void tearDownClass() throws Exception { HBASE_TEST_UTILITY.shutdownMiniCluster(); } @Before public void setUp() throws IOException { FileSystem fs = HBASE_TEST_UTILITY.getTestFileSystem(); fs.delete(TEMP_DIR, true); } @Test public void testHFileTarget() throws Exception { Pipeline pipeline = new MRPipeline(HFileTargetIT.class, HBASE_TEST_UTILITY.getConfiguration()); Path inputPath = copyResourceFileToHDFS("shakes.txt"); Path outputPath = getTempPathOnHDFS("out"); PCollection<String> shakespeare = pipeline.read(At.textFile(inputPath, Writables.strings())); PCollection<String> words = split(shakespeare, "\\s+"); PTable<String, Long> wordCounts = words.count(); pipeline.write(convertToKeyValues(wordCounts), ToHBase.hfile(outputPath)); PipelineResult result = pipeline.run(); assertTrue(result.succeeded()); FileSystem fs = FileSystem.get(HBASE_TEST_UTILITY.getConfiguration()); KeyValue kv = readFromHFiles(fs, outputPath, "and"); assertEquals(375L, Bytes.toLong(CellUtil.cloneValue(kv))); } @Test public void testBulkLoad() throws Exception { bulkLoadTest(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR); } @Test public void testBulkLoadWithNamespace() throws Exception { bulkLoadTest(TEST_NAMESPACE); } private void bulkLoadTest(String namespace) throws Exception { Pipeline pipeline = new MRPipeline(HFileTargetIT.class, HBASE_TEST_UTILITY.getConfiguration()); Path inputPath = copyResourceFileToHDFS("shakes.txt"); Path outputPath = getTempPathOnHDFS("out"); byte[] columnFamilyA = Bytes.toBytes("colfamA"); byte[] columnFamilyB = Bytes.toBytes("colfamB"); Admin admin = HBASE_TEST_UTILITY.getAdmin(); Table testTable = createTable(namespace, 26, new HColumnDescriptor(columnFamilyA), new HColumnDescriptor(columnFamilyB)); Connection connection = admin.getConnection(); RegionLocator regionLocator = connection.getRegionLocator(testTable.getName()); PCollection<String> shakespeare = pipeline.read(At.textFile(inputPath, Writables.strings())); PCollection<String> words = split(shakespeare, "\\s+"); PTable<String,Long> wordCounts = words.count(); PCollection<Put> wordCountPuts = convertToPuts(wordCounts, columnFamilyA, columnFamilyB); HFileUtils.writePutsToHFilesForIncrementalLoad( wordCountPuts, connection, testTable.getName(), outputPath); PipelineResult result = pipeline.run(); assertTrue(result.succeeded()); new LoadIncrementalHFiles(HBASE_TEST_UTILITY.getConfiguration()) .doBulkLoad(outputPath, admin, testTable, regionLocator); Map<String, Long> EXPECTED = ImmutableMap.<String, Long>builder() .put("__EMPTY__", 1345L) .put("the", 528L) .put("and", 375L) .put("I", 314L) .put("of", 314L) .build(); for (Map.Entry<String, Long> e : EXPECTED.entrySet()) { assertEquals((long) 
e.getValue(), getWordCountFromTable(testTable, columnFamilyA, e.getKey())); assertEquals((long) e.getValue(), getWordCountFromTable(testTable, columnFamilyB, e.getKey())); } } /** See CRUNCH-251 */ @Test public void testMultipleHFileTargets() throws Exception { Pipeline pipeline = new MRPipeline(HFileTargetIT.class, HBASE_TEST_UTILITY.getConfiguration()); Path inputPath = copyResourceFileToHDFS("shakes.txt"); Path outputPath1 = getTempPathOnHDFS("out1"); Path outputPath2 = getTempPathOnHDFS("out2"); Admin admin = HBASE_TEST_UTILITY.getAdmin(); Connection connection = admin.getConnection(); // Test both default and non-default namespaces Table table1 = createTable(26); Table table2 = createTable(TEST_NAMESPACE, 26); RegionLocator regionLocator1 = connection.getRegionLocator(table1.getName()); RegionLocator regionLocator2 = connection.getRegionLocator(table2.getName()); LoadIncrementalHFiles loader = new LoadIncrementalHFiles(HBASE_TEST_UTILITY.getConfiguration()); boolean onlyAffectedRegions = true; PCollection<String> shakespeare = pipeline.read(At.textFile(inputPath, Writables.strings())); PCollection<String> words = split(shakespeare, "\\s+"); PCollection<String> shortWords = words.filter(SHORT_WORD_FILTER); PCollection<String> longWords = words.filter(FilterFns.not(SHORT_WORD_FILTER)); PTable<String, Long> shortWordCounts = shortWords.count(); PTable<String, Long> longWordCounts = longWords.count(); HFileUtils.writePutsToHFilesForIncrementalLoad( convertToPuts(shortWordCounts), connection, table1.getName(), outputPath1); HFileUtils.writePutsToHFilesForIncrementalLoad( convertToPuts(longWordCounts), connection, table2.getName(), outputPath2, onlyAffectedRegions); PipelineResult result = pipeline.run(); assertTrue(result.succeeded()); loader.doBulkLoad(outputPath1, admin, table1, regionLocator1); loader.doBulkLoad(outputPath2, admin, table2, regionLocator2); assertEquals(314L, getWordCountFromTable(table1, "of")); assertEquals(375L, getWordCountFromTable(table2, "and")); } @Test public void testHFileUsesFamilyConfig() throws Exception { DataBlockEncoding newBlockEncoding = DataBlockEncoding.PREFIX; assertNotSame(newBlockEncoding, DataBlockEncoding.valueOf(HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING)); Pipeline pipeline = new MRPipeline(HFileTargetIT.class, HBASE_TEST_UTILITY.getConfiguration()); Path inputPath = copyResourceFileToHDFS("shakes.txt"); Path outputPath = getTempPathOnHDFS("out"); Admin admin = HBASE_TEST_UTILITY.getAdmin(); Connection connection = admin.getConnection(); HColumnDescriptor hcol = new HColumnDescriptor(TEST_FAMILY); hcol.setDataBlockEncoding(newBlockEncoding); hcol.setBloomFilterType(BloomType.ROWCOL); Table testTable = createTable(26, hcol); PCollection<String> shakespeare = pipeline.read(At.textFile(inputPath, Writables.strings())); PCollection<String> words = split(shakespeare, "\\s+"); PTable<String,Long> wordCounts = words.count(); PCollection<Put> wordCountPuts = convertToPuts(wordCounts); HFileUtils.writePutsToHFilesForIncrementalLoad( wordCountPuts, connection, testTable.getName(), outputPath); PipelineResult result = pipeline.run(); assertTrue(result.succeeded()); int hfilesCount = 0; Configuration conf = HBASE_TEST_UTILITY.getConfiguration(); FileSystem fs = outputPath.getFileSystem(conf); for (FileStatus e : fs.listStatus(new Path(outputPath, Bytes.toString(TEST_FAMILY)))) { Path f = e.getPath(); if (!f.getName().startsWith("part-")) { // filter out "_SUCCESS" continue; } HFile.Reader reader = null; try { reader = HFile.createReader(fs, f, new 
CacheConfig(conf), true, conf); assertEquals(DataBlockEncoding.PREFIX, reader.getDataBlockEncoding()); BloomType bloomFilterType = BloomType.valueOf(Bytes.toString( reader.loadFileInfo().get(HStoreFile.BLOOM_FILTER_TYPE_KEY))); assertEquals(BloomType.ROWCOL, bloomFilterType); DataInput bloomMeta = reader.getGeneralBloomFilterMetadata(); assertNotNull(bloomMeta); BloomFilter bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); assertNotNull(bloomFilter); } finally { if (reader != null) { reader.close(); } } hfilesCount++; } assertTrue(hfilesCount > 0); } /** * @see <a href='https://issues.apache.org/jira/browse/CRUNCH-588'>CRUNCH-588</a> */ @Test public void testOnlyAffectedRegionsWhenWritingHFiles() throws Exception { Pipeline pipeline = new MRPipeline(HFileTargetIT.class, HBASE_TEST_UTILITY.getConfiguration()); Path inputPath = copyResourceFileToHDFS("shakes.txt"); Path outputPath1 = getTempPathOnHDFS("out1"); Admin admin = HBASE_TEST_UTILITY.getAdmin(); Connection connection = admin.getConnection(); Table table1 = createTable(26); RegionLocator regionLocator1 = connection.getRegionLocator(table1.getName()); PCollection<String> shakespeare = pipeline.read(At.textFile(inputPath, Writables.strings())); PCollection<String> words = split(shakespeare, "\\s+"); // take the top 5 here to reduce the number of affected regions in the table PTable<String, Long> count = words.filter(SHORT_WORD_FILTER).count().top(5); boolean onlyAffectedRegions = true; PCollection<Put> wordPuts = convertToPuts(count); HFileUtils.writePutsToHFilesForIncrementalLoad( wordPuts, connection, table1.getName(), outputPath1, onlyAffectedRegions); // locate partition file directory and read it in to verify // the number of regions to be written to are less than the // number of regions in the table String tempPath = ((DistributedPipeline) pipeline).createTempPath().toString(); Path temp = new Path(tempPath.substring(0, tempPath.lastIndexOf("/"))); FileSystem fs = FileSystem.get(pipeline.getConfiguration()); Path partitionPath = null; for (final FileStatus fileStatus : fs.listStatus(temp)) { RemoteIterator<LocatedFileStatus> remoteFIles = fs.listFiles(fileStatus.getPath(), true); while(remoteFIles.hasNext()) { LocatedFileStatus file = remoteFIles.next(); if(file.getPath().toString().contains("partition")) { partitionPath = file.getPath(); System.out.println("found written partitions in path: " + partitionPath.toString()); break; } } if(partitionPath != null) { break; } } if(partitionPath == null) { throw new AssertionError("Partition path was not found"); } Class<BytesWritable> keyClass = BytesWritable.class; List<BytesWritable> writtenPartitions = new ArrayList<>(); WritableDeepCopier wdc = new WritableDeepCopier(keyClass); SeqFileReaderFactory<BytesWritable> s = new SeqFileReaderFactory<>(keyClass); // read back in the partition file Iterator<BytesWritable> iter = CompositePathIterable.create(fs, partitionPath, s).iterator(); while (iter.hasNext()) { BytesWritable next = iter.next(); writtenPartitions.add((BytesWritable) wdc.deepCopy(next)); } ImmutableList<byte[]> startKeys = ImmutableList.copyOf(regionLocator1.getStartKeys()); // assert that only affected regions were loaded into assertTrue(startKeys.size() > writtenPartitions.size()); // write out and read back in the start keys for each region. 
// do this to get proper byte alignment Path regionStartKeys = tmpDir.getPath("regionStartKeys"); List<KeyValue> startKeysToWrite = Lists.newArrayList(); for (final byte[] startKey : startKeys.subList(1, startKeys.size())) { startKeysToWrite.add(KeyValueUtil.createFirstOnRow(startKey)); } writeToSeqFile(pipeline.getConfiguration(), regionStartKeys, startKeysToWrite); List<BytesWritable> writtenStartKeys = new ArrayList<>(); iter = CompositePathIterable.create(fs, partitionPath, s).iterator(); while (iter.hasNext()) { BytesWritable next = iter.next(); writtenStartKeys.add((BytesWritable) wdc.deepCopy(next)); } // assert the keys read back in match start keys for a region on the table for (final BytesWritable writtenPartition : writtenPartitions) { boolean foundMatchingKv = false; for (final BytesWritable writtenStartKey : writtenStartKeys) { if (writtenStartKey.equals(writtenPartition)) { foundMatchingKv = true; break; } } if(!foundMatchingKv) { throw new AssertionError("Written KeyValue: " + writtenPartition + " did not match any known start keys of the table"); } } pipeline.done(); } private static void writeToSeqFile( Configuration conf, Path path, List<KeyValue> splitPoints) throws IOException { SequenceFile.Writer writer = SequenceFile.createWriter( path.getFileSystem(conf), conf, path, NullWritable.class, BytesWritable.class); for (KeyValue key : splitPoints) { writer.append(NullWritable.get(), HBaseTypes.keyValueToBytes(key)); } writer.close(); } private static PCollection<Put> convertToPuts(PTable<String, Long> in) { return convertToPuts(in, TEST_FAMILY); } private static PCollection<Put> convertToPuts(PTable<String, Long> in, final byte[]...columnFamilies) { return in.parallelDo(new MapFn<Pair<String, Long>, Put>() { @Override public Put map(Pair<String, Long> input) { String w = input.first(); if (w.length() == 0) { w = "__EMPTY__"; } long c = input.second(); Put p = new Put(Bytes.toBytes(w)); for (byte[] columnFamily : columnFamilies) { p.addColumn(columnFamily, TEST_QUALIFIER, Bytes.toBytes(c)); } return p; } }, HBaseTypes.puts()); } private static PCollection<KeyValue> convertToKeyValues(PTable<String, Long> in) { return in.parallelDo(new MapFn<Pair<String, Long>, Pair<KeyValue, Void>>() { @Override public Pair<KeyValue, Void> map(Pair<String, Long> input) { String w = input.first(); if (w.length() == 0) { w = "__EMPTY__"; } long c = input.second(); Cell cell = CellUtil.createCell(Bytes.toBytes(w), Bytes.toBytes(c)); return Pair.of(KeyValueUtil.copyToNewKeyValue(cell), null); } }, tableOf(HBaseTypes.keyValues(), nulls())) .groupByKey(GroupingOptions.builder() .sortComparatorClass(HFileUtils.KeyValueComparator.class) .build()) .ungroup() .keys(); } private static PCollection<String> split(PCollection<String> in, final String regex) { return in.parallelDo(new DoFn<String, String>() { @Override public void process(String input, Emitter<String> emitter) { for (String w : input.split(regex)) { emitter.emit(w); } } }, Writables.strings()); } /** Reads the first value on a given row from a bunch of hfiles. 
*/ private static KeyValue readFromHFiles(FileSystem fs, Path mrOutputPath, String row) throws IOException { List<KeyValueScanner> scanners = Lists.newArrayList(); KeyValue fakeKV = KeyValueUtil.createFirstOnRow(Bytes.toBytes(row)); for (FileStatus e : fs.listStatus(mrOutputPath)) { Path f = e.getPath(); if (!f.getName().startsWith("part-")) { // filter out "_SUCCESS" continue; } StoreFileReader reader = new StoreFileReader( fs, f, new CacheConfig(fs.getConf()), true, new AtomicInteger(), false, fs.getConf()); StoreFileScanner scanner = reader.getStoreFileScanner(false, false, false, 0, 0, false); scanner.seek(fakeKV); // have to call seek of each underlying scanner, otherwise KeyValueHeap won't work scanners.add(scanner); } assertTrue(!scanners.isEmpty()); KeyValueScanner kvh = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR); boolean seekOk = kvh.seek(fakeKV); assertTrue(seekOk); Cell kv = kvh.next(); kvh.close(); return KeyValueUtil.copyToNewKeyValue(kv); } private static Path copyResourceFileToHDFS(String resourceName) throws IOException { Configuration conf = HBASE_TEST_UTILITY.getConfiguration(); FileSystem fs = FileSystem.get(conf); Path resultPath = getTempPathOnHDFS(resourceName); InputStream in = null; OutputStream out = null; try { in = Resources.getResource(resourceName).openConnection().getInputStream(); out = fs.create(resultPath); IOUtils.copy(in, out); } finally { IOUtils.closeQuietly(in); IOUtils.closeQuietly(out); } return resultPath; } private static Path getTempPathOnHDFS(String fileName) throws IOException { Configuration conf = HBASE_TEST_UTILITY.getConfiguration(); FileSystem fs = FileSystem.get(conf); Path result = new Path(TEMP_DIR, fileName); return result.makeQualified(fs); } private static long getWordCountFromTable(Table table, String word) throws IOException { return getWordCountFromTable(table, TEST_FAMILY, word); } private static long getWordCountFromTable(Table table, byte[] columnFamily, String word) throws IOException { Get get = new Get(Bytes.toBytes(word)); get.addFamily(columnFamily); byte[] value = table.get(get).value(); if (value == null) { fail("no such row: " + word); } return Bytes.toLong(value); } }
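An end-to-end sketch of the bulk-load pattern exercised by this test: write Puts as HFiles for incremental load, run the pipeline, then hand the output to LoadIncrementalHFiles. The table name and paths are placeholders, and the puts collection is assumed to come from an earlier stage of the same pipeline.

import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.io.hbase.HFileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class BulkLoadSketch {
  public static void bulkLoad(Pipeline pipeline, PCollection<Put> puts) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("counts");
    Path hfileOutput = new Path("/tmp/hfiles-out");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin();
         Table table = connection.getTable(tableName);
         RegionLocator regionLocator = connection.getRegionLocator(tableName)) {

      // Partition and sort the Puts into HFiles laid out per region of the target table.
      HFileUtils.writePutsToHFilesForIncrementalLoad(puts, connection, tableName, hfileOutput);
      pipeline.run();

      // Move the generated HFiles into the running table.
      new LoadIncrementalHFiles(conf).doBulkLoad(hfileOutput, admin, table, regionLocator);
    }
  }
}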
2,351
0
Create_ds/crunch/crunch-hbase/src/it/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/it/java/org/apache/crunch/io/hbase/HFileSourceIT.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.crunch.MapFn; import org.apache.crunch.PCollection; import org.apache.crunch.Pipeline; import org.apache.crunch.PipelineResult; import org.apache.crunch.Source; import org.apache.crunch.impl.mr.MRPipeline; import org.apache.crunch.io.To; import org.apache.crunch.test.TemporaryPath; import org.apache.crunch.test.TemporaryPaths; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import java.io.File; import java.io.IOException; import java.io.Serializable; import java.util.Collections; import java.util.List; import static org.apache.crunch.types.writable.Writables.strings; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; public class HFileSourceIT implements Serializable { private static byte[] ROW1 = Bytes.toBytes("row1"); private static byte[] ROW2 = Bytes.toBytes("row2"); private static byte[] ROW3 = Bytes.toBytes("row3"); private static byte[] FAMILY1 = Bytes.toBytes("family1"); private static byte[] FAMILY2 = Bytes.toBytes("family2"); private static byte[] FAMILY3 = Bytes.toBytes("family3"); private static byte[] QUALIFIER1 = Bytes.toBytes("qualifier1"); private static byte[] QUALIFIER2 = Bytes.toBytes("qualifier2"); private static byte[] QUALIFIER3 = Bytes.toBytes("qualifier3"); private static byte[] QUALIFIER4 = Bytes.toBytes("qualifier4"); private static byte[] VALUE1 = Bytes.toBytes("value1"); private static byte[] VALUE2 = Bytes.toBytes("value2"); private static byte[] VALUE3 = Bytes.toBytes("value3"); private static byte[] VALUE4 = Bytes.toBytes("value4"); @Rule public transient TemporaryPath tmpDir = TemporaryPaths.create(); private transient Configuration conf; @Before public void setUp() { conf = tmpDir.getDefaultConfiguration(); } @Test public void 
testHFileSource() throws IOException { List<KeyValue> kvs = generateKeyValues(100); Path inputPath = tmpDir.getPath("in"); Path outputPath = tmpDir.getPath("out"); writeKeyValuesToHFile(inputPath, kvs); Pipeline pipeline = new MRPipeline(HFileSourceIT.class, conf); PCollection<KeyValue> in = pipeline.read(FromHBase.hfile(inputPath)); PCollection<String> texts = in.parallelDo(new MapFn<KeyValue, String>() { @Override public String map(KeyValue input) { return input.toString(); } }, strings()); texts.write(To.textFile(outputPath)); PipelineResult result = pipeline.run(); assertTrue(result.succeeded()); List<String> lines = FileUtils.readLines(new File(outputPath.toString(), "part-m-00000")); assertEquals(kvs.size(), lines.size()); for (int i = 0; i < kvs.size(); i++) { assertEquals(kvs.get(i).toString(), lines.get(i)); } } @Test public void testReadHFile() throws Exception { List<KeyValue> kvs = generateKeyValues(100); assertEquals(kvs, doTestReadHFiles(kvs, new Scan())); } @Test public void testScanHFiles() throws IOException { List<KeyValue> kvs = ImmutableList.of( new KeyValue(ROW1, FAMILY1, QUALIFIER1, 0, VALUE1), new KeyValue(ROW1, FAMILY1, QUALIFIER2, 0, VALUE2)); List<Result> results = doTestScanHFiles(kvs, new Scan()); assertEquals(1, results.size()); Result result = Iterables.getOnlyElement(results); assertArrayEquals(ROW1, result.getRow()); assertEquals(2, result.rawCells().length); assertArrayEquals(VALUE1, CellUtil.cloneValue(result.getColumnLatestCell(FAMILY1, QUALIFIER1))); assertArrayEquals(VALUE2, CellUtil.cloneValue(result.getColumnLatestCell(FAMILY1, QUALIFIER2))); } @Test public void testScanHFiles_maxVersions() throws IOException { List<KeyValue> kvs = ImmutableList.of( new KeyValue(ROW1, FAMILY1, QUALIFIER1, 1, VALUE1), new KeyValue(ROW1, FAMILY1, QUALIFIER1, 3, VALUE3), new KeyValue(ROW1, FAMILY1, QUALIFIER1, 2, VALUE2)); Scan scan = new Scan(); scan.setMaxVersions(2); List<Result> results = doTestScanHFiles(kvs, scan); assertEquals(1, results.size()); Result result = Iterables.getOnlyElement(results); List<Cell> kvs2 = result.getColumnCells(FAMILY1, QUALIFIER1); assertEquals(3, kvs2.size()); assertArrayEquals(VALUE3, CellUtil.cloneValue(kvs2.get(0))); assertArrayEquals(VALUE2, CellUtil.cloneValue(kvs2.get(1))); assertArrayEquals(VALUE1, CellUtil.cloneValue(kvs2.get(2))); } @Test public void testScanHFiles_startStopRows() throws IOException { List<KeyValue> kvs = ImmutableList.of( new KeyValue(ROW1, FAMILY1, QUALIFIER1, 0, VALUE1), new KeyValue(ROW2, FAMILY1, QUALIFIER1, 0, VALUE1), new KeyValue(ROW3, FAMILY1, QUALIFIER1, 0, VALUE1)); Scan scan = new Scan(); scan.setStartRow(ROW2); scan.setStopRow(ROW3); List<Result> results = doTestScanHFiles(kvs, scan); assertEquals(1, results.size()); Result result = Iterables.getOnlyElement(results); assertArrayEquals(ROW2, result.getRow()); } @Test public void testScanHFiles_startRowIsTooSmall() throws IOException { List<KeyValue> kvs = ImmutableList.of( new KeyValue(ROW2, FAMILY1, QUALIFIER1, 0, VALUE1), new KeyValue(ROW3, FAMILY1, QUALIFIER1, 0, VALUE1)); Scan scan = new Scan(); scan.setStartRow(ROW1); List<Result> results = doTestScanHFiles(kvs, scan); assertEquals(2, results.size()); assertArrayEquals(ROW2, results.get(0).getRow()); assertArrayEquals(ROW3, results.get(1).getRow()); } //@Test public void testScanHFiles_startRowIsTooLarge() throws IOException { List<KeyValue> kvs = ImmutableList.of( new KeyValue(ROW1, FAMILY1, QUALIFIER1, 0, VALUE1), new KeyValue(ROW2, FAMILY1, QUALIFIER1, 0, VALUE1)); Scan scan = new Scan(); 
scan.setStartRow(ROW3); List<Result> results = doTestScanHFiles(kvs, scan); assertEquals(0, results.size()); } @Test public void testScanHFiles_startRowDoesNotExist() throws IOException { List<KeyValue> kvs = ImmutableList.of( new KeyValue(ROW1, FAMILY1, QUALIFIER1, 0, VALUE1), new KeyValue(ROW3, FAMILY3, QUALIFIER3, 0, VALUE3)); Scan scan = new Scan(); scan.setStartRow(ROW2); List<Result> results = doTestScanHFiles(kvs, scan); assertEquals(1, results.size()); assertArrayEquals(ROW3, results.get(0).getRow()); } @Test public void testScanHFiles_familyMap() throws IOException { List<KeyValue> kvs = ImmutableList.of( new KeyValue(ROW1, FAMILY1, QUALIFIER1, 0, VALUE1), new KeyValue(ROW1, FAMILY2, QUALIFIER2, 0, VALUE2), new KeyValue(ROW1, FAMILY2, QUALIFIER3, 0, VALUE3), new KeyValue(ROW1, FAMILY3, QUALIFIER4, 0, VALUE4)); Scan scan = new Scan(); scan.addFamily(FAMILY1); scan.addColumn(FAMILY2, QUALIFIER2); List<Result> results = doTestScanHFiles(kvs, scan); assertEquals(1, results.size()); Result result = Iterables.getOnlyElement(results); assertEquals(2, result.size()); assertNotNull(result.getColumnLatestCell(FAMILY1, QUALIFIER1)); assertNotNull(result.getColumnLatestCell(FAMILY2, QUALIFIER2)); } @Test public void testScanHFiles_timeRange() throws IOException { List<KeyValue> kvs = ImmutableList.of( new KeyValue(ROW1, FAMILY1, QUALIFIER1, 1, VALUE1), new KeyValue(ROW1, FAMILY1, QUALIFIER2, 2, VALUE2), new KeyValue(ROW1, FAMILY1, QUALIFIER2, 3, VALUE3)); Scan scan = new Scan(); scan.setTimeRange(2, 3); List<Result> results = doTestScanHFiles(kvs, scan); assertEquals(1, results.size()); Result result = Iterables.getOnlyElement(results); assertEquals(1, result.size()); assertNotNull(result.getColumnLatestCell(FAMILY1, QUALIFIER2)); } @Test public void testScanHFiles_delete() throws IOException { List<KeyValue> kvs = ImmutableList.of( new KeyValue(ROW1, FAMILY1, QUALIFIER1, 1, VALUE1), new KeyValue(ROW1, FAMILY1, QUALIFIER1, 2, VALUE2), new KeyValue(ROW1, FAMILY1, QUALIFIER1, 2, KeyValue.Type.Delete)); List<Result> results = doTestScanHFiles(kvs, new Scan()); assertEquals(1, results.size()); assertArrayEquals(VALUE1, results.get(0).getValue(FAMILY1, QUALIFIER1)); } @Test public void testScanHFiles_deleteColumn() throws IOException { List<KeyValue> kvs = ImmutableList.of( new KeyValue(ROW1, FAMILY1, QUALIFIER1, 1, VALUE1), new KeyValue(ROW1, FAMILY1, QUALIFIER1, 2, VALUE2), new KeyValue(ROW1, FAMILY1, QUALIFIER1, 2, KeyValue.Type.DeleteColumn)); List<Result> results = doTestScanHFiles(kvs, new Scan()); assertEquals(0, results.size()); } @Test public void testScanHFiles_deleteFamily() throws IOException { List<KeyValue> kvs = ImmutableList.of( new KeyValue(ROW1, FAMILY1, QUALIFIER1, 1, VALUE1), new KeyValue(ROW1, FAMILY1, QUALIFIER2, 2, VALUE2), new KeyValue(ROW1, FAMILY1, QUALIFIER3, 3, VALUE3), new KeyValue(ROW1, FAMILY1, QUALIFIER1, 2, KeyValue.Type.DeleteFamily)); List<Result> results = doTestScanHFiles(kvs, new Scan()); assertEquals(1, results.size()); assertNull(results.get(0).getValue(FAMILY1, QUALIFIER1)); assertNull(results.get(0).getValue(FAMILY1, QUALIFIER2)); assertArrayEquals(VALUE3, results.get(0).getValue(FAMILY1, QUALIFIER3)); } @Test public void testHFileSize() throws IOException { Path inputPath = tmpDir.getPath("in"); List<KeyValue> kvs = ImmutableList.of( new KeyValue(ROW1, FAMILY1, QUALIFIER1, 1, VALUE1), new KeyValue(ROW1, FAMILY1, QUALIFIER2, 2, VALUE2), new KeyValue(ROW1, FAMILY1, QUALIFIER2, 3, VALUE3)); writeKeyValuesToHFile(inputPath, kvs); FileSystem fs = 
FileSystem.get(conf); FileStatus[] fileStatuses = fs.listStatus(inputPath.getParent()); long size = 0; for(FileStatus s: fileStatuses){ size += s.getLen(); } Source<KeyValue> hfile = FromHBase.hfile(inputPath); assertTrue(hfile.getSize(conf) >= size); } private List<Result> doTestScanHFiles(List<KeyValue> kvs, Scan scan) throws IOException { Path inputPath = tmpDir.getPath("in"); writeKeyValuesToHFile(inputPath, kvs); Pipeline pipeline = new MRPipeline(HFileSourceIT.class, conf); PCollection<Result> results = HFileUtils.scanHFiles(pipeline, inputPath, scan); return ImmutableList.copyOf(results.materialize()); } private List<KeyValue> doTestReadHFiles(List<KeyValue> kvs, Scan scan) throws IOException { Path inputPath = tmpDir.getPath("in"); writeKeyValuesToHFile(inputPath, kvs); Pipeline pipeline = new MRPipeline(HFileSourceIT.class, conf); PCollection<KeyValue> results = pipeline.read(FromHBase.hfile(inputPath)); return ImmutableList.copyOf(results.materialize()); } private List<KeyValue> generateKeyValues(int count) { List<KeyValue> kvs = Lists.newArrayList(); for (int i = 0; i < count; i++) { kvs.add(new KeyValue( Bytes.toBytes("row_" + i), Bytes.toBytes("family"), Bytes.toBytes("qualifier_" + i))); } Collections.sort(kvs, KeyValue.COMPARATOR); return kvs; } private Path writeKeyValuesToHFile(Path inputPath, List<KeyValue> kvs) throws IOException { HFile.Writer w = null; try { List<KeyValue> sortedKVs = Lists.newArrayList(kvs); Collections.sort(sortedKVs, KeyValue.COMPARATOR); FileSystem fs = FileSystem.get(conf); w = HFile.getWriterFactory(conf, new CacheConfig(conf)) .withPath(fs, inputPath) .withComparator(CellComparatorImpl.COMPARATOR) .withFileContext(new HFileContext()) .create(); for (KeyValue kv : sortedKVs) { w.append(kv); } return inputPath; } finally { IOUtils.closeQuietly(w); } } }
2,352
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HBasePairConverter.java
/* * * * * Licensed to the Apache Software Foundation (ASF) under one * * or more contributor license agreements. See the NOTICE file * * distributed with this work for additional information * * regarding copyright ownership. The ASF licenses this file * * to you under the Apache License, Version 2.0 (the * * "License"); you may not use this file except in compliance * * with the License. You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * */ package org.apache.crunch.io.hbase; import org.apache.crunch.Pair; import org.apache.crunch.types.Converter; class HBasePairConverter<K, V> implements Converter<K, V, Pair<K, V>, Pair<K, Iterable<V>>> { private Class<K> keyClass; private Class<V> valueClass; public HBasePairConverter(Class<K> keyClass, Class<V> valueClass) { this.keyClass = keyClass; this.valueClass = valueClass; } @Override public Pair<K, V> convertInput(K key, V value) { return Pair.of(key, value); } @Override public K outputKey(Pair<K, V> value) { return value.first(); } @Override public V outputValue(Pair<K, V> value) { return value.second(); } @Override public Class<K> getKeyClass() { return keyClass; } @Override public Class<V> getValueClass() { return valueClass; } @Override public boolean applyPTypeTransforms() { return false; } @Override public Pair<K, Iterable<V>> convertIterableInput(K key, Iterable<V> value) { return Pair.of(key, value); } }
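A minimal sketch of exercising the converter above. Since HBasePairConverter is package-private, the snippet assumes it is compiled inside the org.apache.crunch.io.hbase package; the row key and the empty Result are illustrative placeholders.

package org.apache.crunch.io.hbase;

import org.apache.crunch.Pair;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class HBasePairConverterSketch {
  public static void main(String[] args) {
    // Same parameterization HBaseSourceTarget uses for its (rowkey, Result) pairs.
    HBasePairConverter<ImmutableBytesWritable, Result> converter =
        new HBasePairConverter<>(ImmutableBytesWritable.class, Result.class);

    ImmutableBytesWritable key = new ImmutableBytesWritable(Bytes.toBytes("row1")); // placeholder row key
    Result value = Result.EMPTY_RESULT;                                             // placeholder value

    // convertInput pairs key and value; outputKey/outputValue unpack the pair again.
    Pair<ImmutableBytesWritable, Result> pair = converter.convertInput(key, value);
    System.out.println(converter.outputKey(pair) + " / " + converter.outputValue(pair));
  }
}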
2,353
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HFileInputFormat.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import com.google.common.collect.ImmutableList; import org.apache.commons.codec.DecoderException; import org.apache.commons.codec.binary.Hex; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFile.Reader; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.input.FileSplit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; import java.util.List; /** * Simple input format for HFiles. */ public class HFileInputFormat extends FileInputFormat<NullWritable, KeyValue> { private static final Logger LOG = LoggerFactory.getLogger(HFileInputFormat.class); static final String START_ROW_KEY = "crunch.hbase.hfile.input.format.start.row"; static final String STOP_ROW_KEY = "crunch.hbase.hfile.input.format.stop.row"; /** * File filter that removes all "hidden" files. This might be something worth removing from * a more general purpose utility; it accounts for the presence of metadata files created * in the way we're doing exports. */ static final PathFilter HIDDEN_FILE_FILTER = new PathFilter() { public boolean accept(Path p) { String name = p.getName(); return !name.startsWith("_") && !name.startsWith("."); } }; /** * Record reader for HFiles. */ private static class HFileRecordReader extends RecordReader<NullWritable, KeyValue> { private Reader in; protected Configuration conf; private HFileScanner scanner; /** * A private cache of the key value so it doesn't need to be loaded twice from the scanner. 
*/ private KeyValue value = null; private byte[] startRow = null; private byte[] stopRow = null; private boolean reachedStopRow = false; private long count; private boolean seeked = false; @Override public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException { FileSplit fileSplit = (FileSplit) split; conf = context.getConfiguration(); Path path = fileSplit.getPath(); FileSystem fs = path.getFileSystem(conf); LOG.info("Initialize HFileRecordReader for {}", path); this.in = HFile.createReader(fs, path, new CacheConfig(conf), true, conf); // The file info must be loaded before the scanner can be used. // This seems like a bug in HBase, but it's easily worked around. this.in.loadFileInfo(); this.scanner = in.getScanner(false, false); String startRowStr = conf.get(START_ROW_KEY); if (startRowStr != null) { this.startRow = decodeHexOrDie(startRowStr); } String stopRowStr = conf.get(STOP_ROW_KEY); if (stopRowStr != null) { this.stopRow = decodeHexOrDie(stopRowStr); } } private static byte[] decodeHexOrDie(String s) { try { return Hex.decodeHex(s.toCharArray()); } catch (DecoderException e) { throw new AssertionError("Failed to decode hex string: " + s); } } @Override public boolean nextKeyValue() throws IOException, InterruptedException { if (reachedStopRow) { return false; } boolean hasNext; if (!seeked) { if (startRow != null) { if(LOG.isInfoEnabled()) { LOG.info("Seeking to start row {}", Bytes.toStringBinary(startRow)); } Cell cell = PrivateCellUtil.createFirstOnRow(startRow, 0, (short) startRow.length); hasNext = seekAtOrAfter(scanner, cell); } else { LOG.info("Seeking to start"); hasNext = scanner.seekTo(); } seeked = true; } else { hasNext = scanner.next(); } if (!hasNext) { return false; } value = KeyValueUtil.copyToNewKeyValue(scanner.getCell()); if (stopRow != null && Bytes.compareTo( value.getRowArray(), value.getRowOffset(), value.getRowLength(), stopRow, 0, stopRow.length) >= 0) { if(LOG.isInfoEnabled()) { LOG.info("Reached stop row {}", Bytes.toStringBinary(stopRow)); } reachedStopRow = true; value = null; return false; } count++; return true; } @Override public NullWritable getCurrentKey() throws IOException, InterruptedException { return NullWritable.get(); } @Override public KeyValue getCurrentValue() throws IOException, InterruptedException { return value; } @Override public float getProgress() throws IOException, InterruptedException { // This would be inaccurate if KVs are not uniformly-sized or we have performed a seek to // the start row, but better than nothing anyway. return 1.0f * count / in.getEntries(); } @Override public void close() throws IOException { if (in != null) { in.close(); in = null; } } // This method is copied from o.a.h.hbase.regionserver.StoreFileScanner, as we don't want // to depend on it. private static boolean seekAtOrAfter(HFileScanner s, Cell k) throws IOException { int result = s.seekTo(k); if(result < 0) { // Passed KV is smaller than first KV in file, work from start of file return s.seekTo(); } else if(result > 0) { // Passed KV is larger than current KV in file, if there is a next // it is the "after", if not then this scanner is done. 
return s.next(); } // Seeked to the exact key return true; } } @Override protected List<FileStatus> listStatus(JobContext job) throws IOException { List<FileStatus> result = new ArrayList<FileStatus>(); // Explode out directories that match the original FileInputFormat filters since HFiles are written to directories where the // directory name is the column name for (FileStatus status : super.listStatus(job)) { if (status.isDirectory()) { FileSystem fs = status.getPath().getFileSystem(job.getConfiguration()); for (FileStatus match : fs.listStatus(status.getPath(), HIDDEN_FILE_FILTER)) { result.add(match); } } else{ result.add(status); } } return result; } @Override public RecordReader<NullWritable, KeyValue> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException { return new HFileRecordReader(); } @Override protected boolean isSplitable(JobContext context, Path filename) { // This file isn't splittable. return false; } }
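A hedged sketch of configuring a plain MapReduce job with this input format. The start/stop row configuration strings are the keys defined above (they are package-private constants, so they are spelled out here), and the hex encoding matches what the record reader's decodeHexOrDie expects. The job name, row keys, and input directory are illustrative.

import java.io.IOException;

import org.apache.commons.codec.binary.Hex;
import org.apache.crunch.io.hbase.HFileInputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class HFileInputFormatSketch {
  /** Configures (but does not submit) a job that reads KeyValues from HFiles under hfileDir. */
  public static Job configureJob(Configuration conf, Path hfileDir) throws IOException {
    // Row-key bounds are passed as hex strings; the record reader decodes them with Hex.decodeHex.
    conf.set("crunch.hbase.hfile.input.format.start.row", Hex.encodeHexString(Bytes.toBytes("row_10")));
    conf.set("crunch.hbase.hfile.input.format.stop.row", Hex.encodeHexString(Bytes.toBytes("row_20")));

    Job job = Job.getInstance(conf, "read-hfiles-sketch");
    job.setInputFormatClass(HFileInputFormat.class);
    FileInputFormat.addInputPath(job, hfileDir);
    // The caller still needs to set a mapper and output types before submitting the job.
    return job;
  }
}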
2,354
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HBaseTarget.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import java.io.IOException; import java.util.Map; import java.util.Objects; import com.google.common.collect.Maps; import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.crunch.CrunchRuntimeException; import org.apache.crunch.SourceTarget; import org.apache.crunch.Target; import org.apache.crunch.io.CrunchOutputs; import org.apache.crunch.io.FormatBundle; import org.apache.crunch.io.MapReduceTarget; import org.apache.crunch.io.OutputHandler; import org.apache.crunch.types.Converter; import org.apache.crunch.types.PType; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.MutationSerialization; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.mapreduce.TableOutputFormat; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class HBaseTarget implements MapReduceTarget { private static final Logger LOG = LoggerFactory.getLogger(HBaseTarget.class); protected String table; private transient TableName tableName; private Map<String, String> extraConf = Maps.newHashMap(); public HBaseTarget(String table) { this(TableName.valueOf(table)); } public HBaseTarget(TableName tableName){ this.tableName = tableName; this.table = tableName.getNameAsString(); } @Override public boolean equals(Object other) { if (this == other) return true; if (other == null) return false; if (!other.getClass().equals(getClass())) return false; HBaseTarget o = (HBaseTarget) other; return Objects.equals(table, o.table) && Objects.equals(extraConf, o.extraConf); } @Override public int hashCode() { HashCodeBuilder hcb = new HashCodeBuilder(); return hcb.append(table).append(extraConf).toHashCode(); } @Override public String toString() { return "HBaseTable(" + table + ")"; } @Override public boolean accept(OutputHandler handler, PType<?> ptype) { if (Put.class.equals(ptype.getTypeClass()) || Delete.class.equals(ptype.getTypeClass())) { handler.configure(this, ptype); return true; } return false; } @Override public void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name) { final Configuration conf = job.getConfiguration(); HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); conf.setStrings("io.serializations", conf.get("io.serializations"), 
MutationSerialization.class.getName()); Class<?> typeClass = ptype.getTypeClass(); // Either Put or Delete try { TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.initCredentials(job); FileOutputFormat.setOutputPath(job, outputPath); } catch (IOException e) { throw new CrunchRuntimeException(e); } if (null == name) { job.setOutputFormatClass(TableOutputFormat.class); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(typeClass); conf.set(TableOutputFormat.OUTPUT_TABLE, table); for (Map.Entry<String, String> e : extraConf.entrySet()) { conf.set(e.getKey(), e.getValue()); } } else { FormatBundle<TableOutputFormat> bundle = FormatBundle.forOutput(TableOutputFormat.class); bundle.set(TableOutputFormat.OUTPUT_TABLE, table); for (Map.Entry<String, String> e : extraConf.entrySet()) { bundle.set(e.getKey(), e.getValue()); } CrunchOutputs.addNamedOutput(job, name, bundle, ImmutableBytesWritable.class, typeClass); } } @Override public <T> SourceTarget<T> asSourceTarget(PType<T> ptype) { return null; } @Override public Target outputConf(String key, String value) { extraConf.put(key, value); return this; } @Override public Target fileSystem(FileSystem fileSystem) { // not currently supported/applicable for HBase return this; } @Override public FileSystem getFileSystem() { // not currently supported/applicable for HBase return null; } @Override public boolean handleExisting(WriteMode strategy, long lastModifiedAt, Configuration conf) { LOG.info("HBaseTarget ignores checks for existing outputs..."); return false; } @Override public Converter<?, ?, ?, ?> getConverter(final PType<?> ptype) { if (Put.class.equals(ptype.getTypeClass())) { return new HBaseValueConverter<Put>(Put.class); } else if (Delete.class.equals(ptype.getTypeClass())) { return new HBaseValueConverter<Delete>(Delete.class); } else { throw new IllegalArgumentException("HBaseTarget only supports Put and Delete, not: " + ptype.getTypeClass()); } } protected TableName getTableName(){ if(tableName == null){ tableName = TableName.valueOf(table); } return tableName; } }
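A short write-side sketch, assuming a PCollection of Put objects has already been built elsewhere in the pipeline (for example with HBaseTypes.puts() from this package); the table name is a placeholder.

import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.io.hbase.HBaseTarget;
import org.apache.hadoop.hbase.client.Put;

public class HBaseTargetSketch {
  /** Sends every Put in the collection to the "my_table" HBase table via TableOutputFormat. */
  public static void writePuts(Pipeline pipeline, PCollection<Put> puts) {
    pipeline.write(puts, new HBaseTarget("my_table")); // placeholder table name
    pipeline.done();
  }
}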
2,355
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HBaseSourceTarget.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import java.io.IOException; import org.apache.commons.codec.binary.Base64; import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.crunch.Pair; import org.apache.crunch.ReadableData; import org.apache.crunch.Source; import org.apache.crunch.SourceTarget; import org.apache.crunch.TableSource; import org.apache.crunch.impl.mr.run.CrunchMapper; import org.apache.crunch.io.CrunchInputs; import org.apache.crunch.io.FormatBundle; import org.apache.crunch.io.ReadableSourceTarget; import org.apache.crunch.types.Converter; import org.apache.crunch.types.PTableType; import org.apache.crunch.types.PType; import org.apache.crunch.types.writable.Writables; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.MultiTableInputFormat; import org.apache.hadoop.hbase.mapreduce.MultiTableInputFormatBase; import org.apache.hadoop.hbase.mapreduce.ResultSerialization; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.StringUtils; import com.google.common.collect.ObjectArrays; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class HBaseSourceTarget extends HBaseTarget implements ReadableSourceTarget<Pair<ImmutableBytesWritable, Result>>, TableSource<ImmutableBytesWritable, Result> { private static final Logger LOG = LoggerFactory.getLogger(HBaseSourceTarget.class); private static final PTableType<ImmutableBytesWritable, Result> PTYPE = Writables.tableOf( Writables.writables(ImmutableBytesWritable.class), HBaseTypes.results()); protected Scan[] scans; protected String scansAsString; private FormatBundle<? extends MultiTableInputFormatBase> inputBundle; public HBaseSourceTarget(String table, Scan scan) { this(table, new Scan[] { scan }); } public HBaseSourceTarget(String table, Scan scan, Scan... additionalScans) { this(table, ObjectArrays.concat(scan, additionalScans)); } public HBaseSourceTarget(TableName table, Scan scan, Scan... 
additionalScans) { this(table, ObjectArrays.concat(scan, additionalScans)); } public HBaseSourceTarget(String table, Scan[] scans) { this(table, MultiTableInputFormat.class, scans); } public HBaseSourceTarget(TableName table, Scan[] scans) { this(table, MultiTableInputFormat.class, scans); } public HBaseSourceTarget(String table, Class<? extends MultiTableInputFormatBase> clazz, Scan[] scans) { this(TableName.valueOf(table), clazz, scans); } public HBaseSourceTarget(TableName tableName, Class<? extends MultiTableInputFormatBase> clazz, Scan[] scans) { super(tableName); this.scans = scans; try { byte[] tableNameAsBytes = Bytes.toBytes(table); //Copy scans and enforce that they are for the table specified Scan[] tableScans = new Scan[scans.length]; String[] scanStrings = new String[scans.length]; for(int i = 0; i < scans.length; i++){ tableScans[i] = new Scan(scans[i]); //enforce Scan is for same table tableScans[i].setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, tableNameAsBytes); //Convert the Scan into a String scanStrings[i] = convertScanToString(tableScans[i]); } this.scans = tableScans; this.scansAsString = StringUtils.arrayToString(scanStrings); this.inputBundle = FormatBundle.forInput(clazz) .set(MultiTableInputFormat.SCANS, scansAsString); } catch (IOException e) { throw new RuntimeException(e); } } @Override public Source<Pair<ImmutableBytesWritable, Result>> inputConf(String key, String value) { inputBundle.set(key, value); return this; } @Override public SourceTarget<Pair<ImmutableBytesWritable, Result>> fileSystem(FileSystem fileSystem) { // not currently supported/applicable for HBase return this; } @Override public PType<Pair<ImmutableBytesWritable, Result>> getType() { return PTYPE; } @Override public PTableType<ImmutableBytesWritable, Result> getTableType() { return PTYPE; } @Override public boolean equals(Object other) { if (other == null || !(other instanceof HBaseSourceTarget)) { return false; } HBaseSourceTarget o = (HBaseSourceTarget) other; // XXX scan does not have equals method return inputBundle.equals(o.inputBundle); } @Override public int hashCode() { return new HashCodeBuilder().append(table).append(scansAsString).toHashCode(); } @Override public String toString() { return "HBaseTable(" + table + ")"; } @Override public void configureSource(Job job, int inputId) throws IOException { TableMapReduceUtil.addDependencyJars(job); Configuration conf = job.getConfiguration(); conf.setStrings("io.serializations", conf.get("io.serializations"), ResultSerialization.class.getName()); if (inputId == -1) { job.setMapperClass(CrunchMapper.class); job.setInputFormatClass(inputBundle.getFormatClass()); inputBundle.configure(conf); } else { Path dummy = new Path("/hbase/" + table); CrunchInputs.addInputPath(job, dummy, inputBundle, inputId); } } static String convertScanToString(Scan scan) throws IOException { ClientProtos.Scan proto = ProtobufUtil.toScan(scan); return Base64.encodeBase64String(proto.toByteArray()); } public static Scan convertStringToScan(String string) throws IOException { ClientProtos.Scan proto = ClientProtos.Scan.parseFrom(Base64.decodeBase64(string)); return ProtobufUtil.toScan(proto); } @Override public long getSize(Configuration conf) { // TODO something smarter here. 
return 1000L * 1000L * 1000L; } @Override public long getLastModifiedAt(Configuration configuration) { LOG.warn("Cannot determine last modified time for source: {}", toString()); return -1; } @Override public Converter<?, ?, ?, ?> getConverter() { return new HBasePairConverter<ImmutableBytesWritable, Result>( ImmutableBytesWritable.class, Result.class); } @Override public Iterable<Pair<ImmutableBytesWritable, Result>> read(Configuration conf) throws IOException { Configuration hconf = HBaseConfiguration.create(conf); Connection connection = ConnectionFactory.createConnection(hconf); Table htable = connection.getTable(getTableName()); return new HTableIterable(connection, htable, scans); } @Override public ReadableData<Pair<ImmutableBytesWritable, Result>> asReadable() { return new HBaseData(table, scansAsString, this); } @Override public SourceTarget<Pair<ImmutableBytesWritable, Result>> conf(String key, String value) { inputConf(key, value); outputConf(key, value); return this; } }
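A matching read-side sketch: the source is handed a Scan restricted to one column family and read as a PTable of (rowkey, Result). The table name and family are placeholders.

import org.apache.crunch.PTable;
import org.apache.crunch.Pipeline;
import org.apache.crunch.io.hbase.HBaseSourceTarget;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class HBaseSourceTargetSketch {
  /** Reads (rowkey, Result) pairs from "my_table", limited to the "family1" column family. */
  public static PTable<ImmutableBytesWritable, Result> readRows(Pipeline pipeline) {
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("family1")); // placeholder column family
    return pipeline.read(new HBaseSourceTarget("my_table", scan));
  }
}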
2,356
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/RegionLocationTable.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import javax.annotation.Nullable; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.net.InetSocketAddress; import java.util.List; import java.util.Map; import java.util.NavigableMap; import com.google.common.collect.Maps; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Bytes; /** * Provides lookup functionality for the region server location for row keys in an HBase table. * <p> * This is a helper class to optimize the locality of HFiles created with {@link HFileOutputFormatForCrunch}, by * specifying the name of the region server which is hosting the region of a given row as the preferred HDFS data node * for hosting the written HFile. This is intended to ensure that bulk-created HFiles will be available on the local * filesystem on the region servers using the created HFile, thus allowing short-circuit reads to the local file system * on the bulk-created HFiles. */ class RegionLocationTable { /** * Per-output configuration key which contains the path to a serialized region location table. */ public static final String REGION_LOCATION_TABLE_PATH = "crunch.hfileregionlocation.path"; private final String tableName; private final NavigableMap<byte[], String> regionStartToServerHostName; public static RegionLocationTable create(String tableName, List<HRegionLocation> regionLocationList) { NavigableMap<byte[], String> regionStartToServerHostName = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); for (HRegionLocation regionLocation : regionLocationList) { HRegionInfo regionInfo = regionLocation.getRegionInfo(); if (regionInfo == null) { continue; } byte[] startKey = regionInfo.getStartKey(); if (startKey == null) { startKey = HConstants.EMPTY_START_ROW; } ServerName serverName = regionLocation.getServerName(); if (serverName != null) { regionStartToServerHostName.put(startKey, serverName.getHostname()); } } return new RegionLocationTable(tableName, regionStartToServerHostName); } private RegionLocationTable(String tableName, NavigableMap<byte[], String> regionStartToServerHostName) { this.tableName = tableName; this.regionStartToServerHostName = regionStartToServerHostName; } /** * Returns the name of the HBase table to which this region location table applies. * * @return name of the related HBase table */ public String getTableName() { return tableName; } /** * Returns the optional preferred node for a row. * <p> * The return value of this method is an {@link InetSocketAddress} to be in line with the HFile API (and * underlying HDFS API) which use InetSocketAddress. 
The port number is always 0 on the returned InetSocketAddress, * as it is not known from outside the scope of a region server. The HDFS API is implemented to deal "correctly" * with this, mapping host name to a random data node on the same machine, which is sufficient for the purposes * here. * <p> * The return value will be null if no preferred node is known for the given row. * * @param rowKey row key of the row for which the preferred node is to be calculated * @return socket address of the preferred storage node for the given row, or null */ @Nullable public InetSocketAddress getPreferredNodeForRow(byte[] rowKey) { Map.Entry<byte[], String> matchingEntry = regionStartToServerHostName.floorEntry(rowKey); if (matchingEntry != null) { return InetSocketAddress.createUnresolved(matchingEntry.getValue(), 0); } else { return null; } } /** * Serialize this table to a {@link DataOutput}. The serialized value can be deserialized via the * {@link #deserialize(DataInput)} method. * * @param dataOutput output to which the table is to be serialized */ public void serialize(DataOutput dataOutput) throws IOException { dataOutput.writeUTF(tableName); dataOutput.writeInt(regionStartToServerHostName.size()); for (Map.Entry<byte[], String> regionToHostEntry : regionStartToServerHostName.entrySet()) { byte[] rowKey = regionToHostEntry.getKey(); dataOutput.writeInt(rowKey.length); dataOutput.write(rowKey); dataOutput.writeUTF(regionToHostEntry.getValue()); } } /** * Deserialize a table which was serialized to with the {@link #serialize(DataOutput)} method. * * @param dataInput input containing a serialized instance of this class * @return the deserialized table */ public static RegionLocationTable deserialize(DataInput dataInput) throws IOException { NavigableMap<byte[], String> regionStartToServerHostName = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); String tableName = dataInput.readUTF(); int numEntries = dataInput.readInt(); for (int i = 0; i < numEntries; i++) { int rowKeyLength = dataInput.readInt(); byte[] rowKey = new byte[rowKeyLength]; dataInput.readFully(rowKey, 0, rowKeyLength); String hostName = dataInput.readUTF(); regionStartToServerHostName.put(rowKey, hostName); } return new RegionLocationTable(tableName, regionStartToServerHostName); } }
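A small round-trip sketch of the serialize/deserialize contract described above. The class is package-private, so the snippet assumes it is compiled inside org.apache.crunch.io.hbase; the table name is a placeholder and the empty region list simply produces a table with no preferred nodes.

package org.apache.crunch.io.hbase;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Collections;

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationTableSketch {
  public static void main(String[] args) throws IOException {
    // Build a trivially empty location table and serialize it to bytes.
    RegionLocationTable table =
        RegionLocationTable.create("my_table", Collections.<HRegionLocation>emptyList());
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    table.serialize(new DataOutputStream(baos));

    // Deserialize it back; with no regions, every lookup returns null (no preferred node).
    RegionLocationTable copy =
        RegionLocationTable.deserialize(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));
    System.out.println(copy.getTableName() + " -> " + copy.getPreferredNodeForRow(Bytes.toBytes("row1")));
  }
}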
2,357
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HTableIterator.java
/* * * * * Licensed to the Apache Software Foundation (ASF) under one * * or more contributor license agreements. See the NOTICE file * * distributed with this work for additional information * * regarding copyright ownership. The ASF licenses this file * * to you under the Apache License, Version 2.0 (the * * "License"); you may not use this file except in compliance * * with the License. You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * */ package org.apache.crunch.io.hbase; import org.apache.crunch.Pair; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.Iterator; import java.util.List; class HTableIterator implements Iterator<Pair<ImmutableBytesWritable, Result>> { private static final Logger LOG = LoggerFactory.getLogger(HTableIterator.class); private final Table table; private final Connection connection; private final Iterator<Scan> scans; private ResultScanner scanner; private Iterator<Result> iter; public HTableIterator(Connection connection, Table table, List<Scan> scans) { this.table = table; this.connection = connection; this.scans = scans.iterator(); try{ this.scanner = table.getScanner(this.scans.next()); }catch(IOException ioe){ throw new RuntimeException(ioe); } this.iter = scanner.iterator(); } @Override public boolean hasNext() { boolean hasNext = iter.hasNext(); if (!hasNext) { scanner.close(); hasNext = scans.hasNext(); if(hasNext){ try{ scanner = table.getScanner(this.scans.next()); iter = scanner.iterator(); } catch(IOException ioe){ throw new RuntimeException("Unable to create next scanner from "+ table.getName(), ioe); } } else { try { table.close(); } catch (IOException e) { LOG.error("Exception closing Table: {}", table.getName(), e); } try { connection.close(); } catch (IOException e) { LOG.error("Exception closing Connection for table: {}", table.getName(), e); } } } return hasNext; } @Override public Pair<ImmutableBytesWritable, Result> next() { Result next = iter.next(); return Pair.of(new ImmutableBytesWritable(next.getRow()), next); } @Override public void remove() { throw new UnsupportedOperationException(); } }
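HTableIterator is package-private and is normally reached through HTableIterable (see HBaseSourceTarget.read above). Purely as an in-package illustration, and assuming an already-open Connection to a live cluster, iterating a single full-table Scan might look like this; the table name is a placeholder.

package org.apache.crunch.io.hbase;

import java.io.IOException;
import java.util.Collections;

import org.apache.crunch.Pair;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class HTableIteratorSketch {
  /** Prints every row key of "my_table"; the iterator closes the table and connection once exhausted. */
  public static void dumpRowKeys(Connection connection) throws IOException {
    Table table = connection.getTable(TableName.valueOf("my_table")); // placeholder table name
    HTableIterator it = new HTableIterator(connection, table, Collections.singletonList(new Scan()));
    while (it.hasNext()) {
      Pair<ImmutableBytesWritable, Result> row = it.next();
      System.out.println(Bytes.toStringBinary(row.first().get()));
    }
  }
}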
2,358
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HBaseTypes.java
/* * * * * Licensed to the Apache Software Foundation (ASF) under one * * or more contributor license agreements. See the NOTICE file * * distributed with this work for additional information * * regarding copyright ownership. The ASF licenses this file * * to you under the Apache License, Version 2.0 (the * * "License"); you may not use this file except in compliance * * with the License. You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * */ package org.apache.crunch.io.hbase; import com.google.common.collect.ImmutableList; import org.apache.crunch.CrunchRuntimeException; import org.apache.crunch.MapFn; import org.apache.crunch.types.PType; import org.apache.crunch.types.writable.Writables; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.mapreduce.MutationSerialization; import org.apache.hadoop.hbase.mapreduce.ResultSerialization; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; import org.apache.hadoop.util.ReflectionUtils; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; public final class HBaseTypes { public static final PType<Put> puts() { return Writables.derived(Put.class, new MapInFn<Put>(Put.class, MutationSerialization.class), new MapOutFn<Put>(Put.class, MutationSerialization.class), Writables.bytes()); } public static final PType<Delete> deletes() { return Writables.derived(Delete.class, new MapInFn<Delete>(Delete.class, MutationSerialization.class), new MapOutFn<Delete>(Delete.class, MutationSerialization.class), Writables.bytes()); } public static final PType<Result> results() { return Writables.derived(Result.class, new MapInFn<Result>(Result.class, ResultSerialization.class), new MapOutFn<Result>(Result.class, ResultSerialization.class), Writables.bytes()); } public static final PType<KeyValue> keyValues() { return Writables.derived(KeyValue.class, new MapFn<BytesWritable, KeyValue>() { @Override public KeyValue map(BytesWritable input) { return bytesToKeyValue(input); } }, new MapFn<KeyValue, BytesWritable>() { @Override public BytesWritable map(KeyValue input) { return keyValueToBytes(input); } }, Writables.writables(BytesWritable.class)); } public static final PType<Cell> cells() { return Writables.derived(Cell.class, new MapFn<BytesWritable, Cell>() { @Override public Cell map(BytesWritable input) { return bytesToKeyValue(input); } }, new MapFn<Cell, BytesWritable>() { @Override public BytesWritable map(Cell input) { return keyValueToBytes(input); } }, Writables.writables(BytesWritable.class)); } public static BytesWritable keyValueToBytes(Cell input) { return keyValueToBytes(KeyValueUtil.copyToNewKeyValue(input)); } public static 
BytesWritable keyValueToBytes(KeyValue kv) { ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(baos); try { KeyValue.write(kv, dos); return new BytesWritable(baos.toByteArray()); } catch (Exception e) { throw new CrunchRuntimeException(e); } } public static KeyValue bytesToKeyValue(BytesWritable input) { return bytesToKeyValue(input.getBytes(), 0, input.getLength()); } public static KeyValue bytesToKeyValue(byte[] array, int offset, int limit) { ByteArrayInputStream bais = new ByteArrayInputStream(array, offset, limit); DataInputStream dis = new DataInputStream(bais); try { return KeyValue.create(dis); } catch (IOException e) { throw new CrunchRuntimeException(e); } } private static class MapInFn<T> extends MapFn<ByteBuffer, T> { private Class<T> clazz; private Class<? extends Serialization> serClazz; private transient Deserializer<T> deserializer; public MapInFn(Class<T> clazz, Class<? extends Serialization> serClazz) { this.clazz = clazz; this.serClazz = serClazz; } @Override public void initialize() { this.deserializer = ReflectionUtils.newInstance(serClazz, null).getDeserializer(clazz); if (deserializer == null) { throw new CrunchRuntimeException("No Hadoop deserializer for class: " + clazz); } } @Override public T map(ByteBuffer bb) { if (deserializer == null) { initialize(); } ByteArrayInputStream bais = new ByteArrayInputStream(bb.array(), bb.position(), bb.limit()); try { deserializer.open(bais); T ret = deserializer.deserialize(null); deserializer.close(); return ret; } catch (Exception e) { throw new CrunchRuntimeException("Deserialization error", e); } } } private static class MapOutFn<T> extends MapFn<T, ByteBuffer> { private Class<T> clazz; private Class<? extends Serialization> serClazz; private transient Serializer<T> serializer; public MapOutFn(Class<T> clazz, Class<? extends Serialization> serClazz) { this.clazz = clazz; this.serClazz = serClazz; } @Override public void initialize() { this.serializer = ReflectionUtils.newInstance(serClazz, null).getSerializer(clazz); if (serializer == null) { throw new CrunchRuntimeException("No Hadoop serializer for class: " + clazz); } } @Override public ByteBuffer map(T out) { if (serializer == null) { initialize(); } ByteArrayOutputStream baos = new ByteArrayOutputStream(); try { serializer.open(baos); serializer.serialize(out); serializer.close(); return ByteBuffer.wrap(baos.toByteArray()); } catch (Exception e) { throw new CrunchRuntimeException("Serialization error", e); } } } private HBaseTypes() {} }
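A brief sketch of where these PTypes usually show up: HBaseTypes.puts() typing the output of a parallelDo that builds Put objects. The column family and qualifier are placeholders.

import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.io.hbase.HBaseTypes;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class HBaseTypesSketch {
  /** Turns each input string into a single-cell Put, typed with HBaseTypes.puts(). */
  public static PCollection<Put> toPuts(PCollection<String> lines) {
    return lines.parallelDo(new MapFn<String, Put>() {
      @Override
      public Put map(String input) {
        Put put = new Put(Bytes.toBytes(input)); // row key taken from the input line
        put.addColumn(Bytes.toBytes("family1"), Bytes.toBytes("qualifier1"), Bytes.toBytes(input));
        return put;
      }
    }, HBaseTypes.puts());
  }
}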
2,359
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HFileUtils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import static org.apache.crunch.types.writable.Writables.bytes; import static org.apache.crunch.types.writable.Writables.nulls; import static org.apache.crunch.types.writable.Writables.tableOf; import java.io.IOException; import java.io.Serializable; import java.nio.ByteBuffer; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NavigableSet; import java.util.Set; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.primitives.Longs; import org.apache.crunch.CrunchRuntimeException; import org.apache.crunch.DoFn; import org.apache.crunch.Emitter; import org.apache.crunch.FilterFn; import org.apache.crunch.GroupingOptions; import org.apache.crunch.MapFn; import org.apache.crunch.PCollection; import org.apache.crunch.PTable; import org.apache.crunch.Pair; import org.apache.crunch.Pipeline; import org.apache.crunch.impl.dist.DistributedPipeline; import org.apache.crunch.lib.sort.TotalOrderPartitioner; import org.apache.crunch.types.writable.Writables; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.SequenceFile; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public final class HFileUtils { private static final Logger LOG = LoggerFactory.getLogger(HFileUtils.class); /** Compares {@code KeyValue} by its family, qualifier, timestamp (reversely), type (reversely) and memstoreTS. 
*/ private static final Comparator<KeyValue> KEY_VALUE_COMPARATOR = new Comparator<KeyValue>() { @Override public int compare(KeyValue l, KeyValue r) { int cmp; if ((cmp = compareFamily(l, r)) != 0) { return cmp; } if ((cmp = compareQualifier(l, r)) != 0) { return cmp; } if ((cmp = compareTimestamp(l, r)) != 0) { return cmp; } if ((cmp = compareType(l, r)) != 0) { return cmp; } return 0; } private int compareFamily(KeyValue l, KeyValue r) { return Bytes.compareTo( l.getBuffer(), l.getFamilyOffset(), l.getFamilyLength(), r.getBuffer(), r.getFamilyOffset(), r.getFamilyLength()); } private int compareQualifier(KeyValue l, KeyValue r) { return Bytes.compareTo( l.getBuffer(), l.getQualifierOffset(), l.getQualifierLength(), r.getBuffer(), r.getQualifierOffset(), r.getQualifierLength()); } private int compareTimestamp(KeyValue l, KeyValue r) { // These arguments are intentionally reversed, with r then l, to sort // the timestamps in descending order as is expected by HBase return Longs.compare(r.getTimestamp(), l.getTimestamp()); } private int compareType(KeyValue l, KeyValue r) { return (int) r.getTypeByte() - (int) l.getTypeByte(); } }; private static class FilterByFamilyFn<C extends Cell> extends FilterFn<C> { private final byte[] family; private FilterByFamilyFn(byte[] family) { this.family = family; } @Override public boolean accept(C input) { return Bytes.equals( input.getFamilyArray(), input.getFamilyOffset(), input.getFamilyLength(), family, 0, family.length); } @Override public boolean disableDeepCopy() { return true; } } private static class StartRowFilterFn<C extends Cell> extends FilterFn<C> { private final byte[] startRow; private StartRowFilterFn(byte[] startRow) { this.startRow = startRow; } @Override public boolean accept(C input) { return Bytes.compareTo( input.getRowArray(), input.getRowOffset(), input.getRowLength(), startRow, 0, startRow.length) >= 0; } } private static class StopRowFilterFn<C extends Cell> extends FilterFn<C> { private final byte[] stopRow; private StopRowFilterFn(byte[] stopRow) { this.stopRow = stopRow; } @Override public boolean accept(C input) { return Bytes.compareTo( input.getRowArray(), input.getRowOffset(), input.getRowLength(), stopRow, 0, stopRow.length) < 0; } } private static class FamilyMapFilterFn<C extends Cell> extends FilterFn<C> { private static class Column implements Serializable { private final byte[] family; private final byte[] qualifier; private Column(byte[] family, byte[] qualifier) { this.family = family; this.qualifier = qualifier; } private byte[] getFamily() { return family; } private byte[] getQualifier() { return qualifier; } } private final List<byte[]> families = Lists.newArrayList(); private final List<Column> qualifiers = Lists.newArrayList(); private transient Set<ByteBuffer> familySet; private transient Set<Pair<ByteBuffer, ByteBuffer>> qualifierSet; private FamilyMapFilterFn(Map<byte[], NavigableSet<byte[]>> familyMap) { // Holds good families and qualifiers in Lists, as ByteBuffer is not Serializable. 
for (Map.Entry<byte[], NavigableSet<byte[]>> e : familyMap.entrySet()) { byte[] f = e.getKey(); if (e.getValue() == null) { families.add(f); } else { for (byte[] q : e.getValue()) { qualifiers.add(new Column(f, q)); } } } } @Override public void initialize() { ImmutableSet.Builder<ByteBuffer> familiySetBuilder = ImmutableSet.builder(); ImmutableSet.Builder<Pair<ByteBuffer, ByteBuffer>> qualifierSetBuilder = ImmutableSet.builder(); for (byte[] f : families) { familiySetBuilder.add(ByteBuffer.wrap(f)); } for (Column e : qualifiers) { byte[] f = e.getFamily(); byte[] q = e.getQualifier(); qualifierSetBuilder.add(Pair.of(ByteBuffer.wrap(f), ByteBuffer.wrap(q))); } this.familySet = familiySetBuilder.build(); this.qualifierSet = qualifierSetBuilder.build(); } @Override public boolean accept(C input) { ByteBuffer f = ByteBuffer.wrap(input.getFamilyArray(), input.getFamilyOffset(), input.getFamilyLength()); ByteBuffer q = ByteBuffer.wrap(input.getQualifierArray(), input.getQualifierOffset(), input.getQualifierLength()); return familySet.contains(f) || qualifierSet.contains(Pair.of(f, q)); } } private static class TimeRangeFilterFn<C extends Cell> extends FilterFn<C> { private final long minTimestamp; private final long maxTimestamp; private TimeRangeFilterFn(TimeRange timeRange) { // Can't save TimeRange to member directly, as it is not Serializable. this.minTimestamp = timeRange.getMin(); this.maxTimestamp = timeRange.getMax(); } @Override public boolean accept(C input) { return (minTimestamp <= input.getTimestamp() && input.getTimestamp() < maxTimestamp); } } public static class KeyValueComparator implements RawComparator<BytesWritable> { @Override public int compare(byte[] left, int loffset, int llength, byte[] right, int roffset, int rlength) { // BytesWritable and KeyValue each serialize 4 bytes to indicate length if (llength < 8) { throw new AssertionError("Too small llength: " + llength); } if (rlength < 8) { throw new AssertionError("Too small rlength: " + rlength); } Cell leftKey = new KeyValue(left, loffset + 8, llength - 8); Cell rightKey = new KeyValue(right, roffset + 8, rlength - 8); int rowCmp = Bytes.compareTo( leftKey.getRowArray(), leftKey.getRowOffset(), leftKey.getRowLength(), rightKey.getRowArray(), rightKey.getRowOffset(), rightKey.getRowLength()); if (rowCmp != 0) { return rowCmp; } else { return KeyValue.COMPARATOR.compare(leftKey, rightKey); } } @Override public int compare(BytesWritable left, BytesWritable right) { return KeyValue.COMPARATOR.compare( new KeyValue(left.getBytes(), 4, left.getLength() - 4), new KeyValue(right.getBytes(), 4, right.getLength() - 4)); } } private static class ExtractRowFn<C extends Cell> extends MapFn<C, ByteBuffer> { @Override public ByteBuffer map(Cell input) { // we have to make a copy of row, because the buffer may be changed after this call return ByteBuffer.wrap(CellUtil.cloneRow(input)); } } /** * Scans HFiles. * * @param pipeline the pipeline * @param path path to HFiles * @return {@code Result}s */ public static PCollection<Result> scanHFiles(Pipeline pipeline, Path path) { return scanHFiles(pipeline, path, new Scan()); } /** * Scans HFiles with source filesystem. * * @param pipeline the pipeline * @param path path to HFiles * @param fs filesystem where HFiles are located * @return {@code Result}s */ public static PCollection<Result> scanHFiles(Pipeline pipeline, Path path, FileSystem fs) { return scanHFiles(pipeline, path, new Scan(), fs); } /** * Scans HFiles with filter conditions. 
* * @param pipeline the pipeline * @param path path to HFiles * @param scan filtering conditions * @return {@code Result}s * @see #combineIntoRow(org.apache.crunch.PCollection, org.apache.hadoop.hbase.client.Scan) */ public static PCollection<Result> scanHFiles(Pipeline pipeline, Path path, Scan scan) { return scanHFiles(pipeline, ImmutableList.of(path), scan); } /** * Scans HFiles with filter conditions and source filesystem. * * @param pipeline the pipeline * @param path path to HFiles * @param scan filtering conditions * @param fs filesystem where HFiles are located * @return {@code Result}s * @see #combineIntoRow(org.apache.crunch.PCollection, org.apache.hadoop.hbase.client.Scan) */ public static PCollection<Result> scanHFiles(Pipeline pipeline, Path path, Scan scan, FileSystem fs) { return scanHFiles(pipeline, ImmutableList.of(path), scan, fs); } /** * Scans HFiles with filter conditions. * * @param pipeline the pipeline * @param paths paths to HFiles * @param scan filtering conditions * @return {@code Result}s * @see #combineIntoRow(org.apache.crunch.PCollection, org.apache.hadoop.hbase.client.Scan) */ public static PCollection<Result> scanHFiles(Pipeline pipeline, List<Path> paths, Scan scan) { return scanHFiles(pipeline, paths, scan, null); } /** * Scans HFiles with filter conditions and source filesystem. * * @param pipeline the pipeline * @param paths paths to HFiles * @param scan filtering conditions * @param fs filesystem where HFiles are located * @return {@code Result}s * @see #combineIntoRow(org.apache.crunch.PCollection, org.apache.hadoop.hbase.client.Scan) */ public static PCollection<Result> scanHFiles(Pipeline pipeline, List<Path> paths, Scan scan, FileSystem fs) { PCollection<KeyValue> in = pipeline.read(new HFileSource(paths, scan).fileSystem(fs)); return combineIntoRow(in, scan); } /** * Converts a bunch of {@link Cell}s into {@link Result}. * * All {@code Cell}s belong to the same row are combined. Deletes are dropped and only * the latest version is kept. * * @param cells the input {@code Cell}s * @return {@code Result}s */ public static <C extends Cell> PCollection<Result> combineIntoRow(PCollection<C> cells) { return combineIntoRow(cells, new Scan()); } /** * Converts a bunch of {@link Cell}s into {@link Result}. * * All {@code Cell}s belong to the same row are combined. Users may provide some filter * conditions (specified by {@code scan}). Deletes are dropped and only the number * of versions specified by {@code scan.getMaxVersions()} are kept. 
* * @param cells the input {@code Cell}s * @param scan filter conditions, currently we support start row, stop row, family map, * time range, and max versions * @return {@code Result}s */ public static <C extends Cell> PCollection<Result> combineIntoRow(PCollection<C> cells, Scan scan) { if (!Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW)) { cells = cells.filter(new StartRowFilterFn<C>(scan.getStartRow())); } if (!Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) { cells = cells.filter(new StopRowFilterFn<C>(scan.getStopRow())); } if (scan.hasFamilies()) { cells = cells.filter(new FamilyMapFilterFn<C>(scan.getFamilyMap())); } TimeRange timeRange = scan.getTimeRange(); if (timeRange != null && (timeRange.getMin() > 0 || timeRange.getMax() < Long.MAX_VALUE)) { cells = cells.filter(new TimeRangeFilterFn<C>(timeRange)); } // TODO(chaoshi): support Scan#getFilter PTable<ByteBuffer, C> cellsByRow = cells.by(new ExtractRowFn<C>(), bytes()); final int versions = scan.getMaxVersions(); return cellsByRow.groupByKey().parallelDo("CombineKeyValueIntoRow", new DoFn<Pair<ByteBuffer, Iterable<C>>, Result>() { @Override public void process(Pair<ByteBuffer, Iterable<C>> input, Emitter<Result> emitter) { List<KeyValue> cells = Lists.newArrayList(); for (Cell kv : input.second()) { try { cells.add(KeyValueUtil.copyToNewKeyValue(kv)); // assuming the input fits in memory } catch (Exception e) { throw new RuntimeException(e); } } Result result = doCombineIntoRow(cells, versions); if (result == null) { return; } emitter.emit(result); } }, HBaseTypes.results()); } /** * Writes out cells to HFiles for incremental load. * * @param cells the HBase cells to write * @param connection HBase client connection * @param tableName HBase table name * @param outputPath HFile location */ public static <C extends Cell> void writeToHFilesForIncrementalLoad( PCollection<C> cells, Connection connection, TableName tableName, Path outputPath) throws IOException { writeToHFilesForIncrementalLoad(cells, connection, tableName, outputPath, false); } /** * Writes out cells to HFiles for incremental load. * * @param cells the HBase cells to write * @param connection HBase client connection * @param tableName HBase table name * @param outputPath HFile location * @param fs the filesystem where the HFiles will be written */ public static <C extends Cell> void writeToHFilesForIncrementalLoad( PCollection<C> cells, Connection connection, TableName tableName, Path outputPath, FileSystem fs) throws IOException { writeToHFilesForIncrementalLoad(cells, connection, tableName, outputPath, false, fs); } /** * Writes out cells to HFiles for incremental load. * * @param cells the HBase cells to write * @param connection HBase client connection * @param tableName HBase table name * @param outputPath HFile location * @param limitToAffectedRegions used to indicate that the regions the {@code puts} will be loaded into should be * identified prior to writing HFiles. Identifying the regions ahead of time will reduce the number of reducers needed * when writing. This is beneficial if the data to be loaded only touches a small enough subset of the total regions in * the table. If set to false, the number of reducers will equal the number of regions in the table. 
* @see <a href='https://issues.apache.org/jira/browse/CRUNCH-588'>CRUNCH-588</a> */ public static <C extends Cell> void writeToHFilesForIncrementalLoad( PCollection<C> cells, Connection connection, TableName tableName, Path outputPath, boolean limitToAffectedRegions) throws IOException { writeToHFilesForIncrementalLoad(cells, connection, tableName, outputPath, limitToAffectedRegions, null); } /** * Writes out cells to HFiles for incremental load. * * @param cells the HBase cells to write * @param connection HBase client connection * @param tableName HBase table name * @param outputPath HFile location * @param limitToAffectedRegions used to indicate that the regions the {@code puts} will be loaded into should be * identified prior to writing HFiles. Identifying the regions ahead of time will reduce the number of reducers needed * when writing. This is beneficial if the data to be loaded only touches a small enough subset of the total regions in * the table. If set to false, the number of reducers will equal the number of regions in the table. * @param fs the filesystem where the HFiles will be written * @see <a href='https://issues.apache.org/jira/browse/CRUNCH-588'>CRUNCH-588</a> */ public static <C extends Cell> void writeToHFilesForIncrementalLoad( PCollection<C> cells, Connection connection, TableName tableName, Path outputPath, boolean limitToAffectedRegions, FileSystem fs) throws IOException { Table table = connection.getTable(tableName); RegionLocator regionLocator = connection.getRegionLocator(tableName); HColumnDescriptor[] families = table.getTableDescriptor().getColumnFamilies(); if (families.length == 0) { LOG.warn("{} has no column families", table); return; } PCollection<C> partitioned = sortAndPartition(cells, regionLocator, limitToAffectedRegions); RegionLocationTable regionLocationTable = RegionLocationTable.create( table.getName().getNameAsString(), regionLocator.getAllRegionLocations()); Path regionLocationFilePath = new Path(((DistributedPipeline) cells.getPipeline()).createTempPath(), "regionLocations_" + table.getName().getNameAsString().replace(":", "_")); writeRegionLocationTable(cells.getPipeline().getConfiguration(), regionLocationFilePath, regionLocationTable); for (HColumnDescriptor f : families) { byte[] family = f.getName(); partitioned .filter(new FilterByFamilyFn<C>(family)) .write(new HFileTarget(new Path(outputPath, Bytes.toString(family)), f) .outputConf(RegionLocationTable.REGION_LOCATION_TABLE_PATH, regionLocationFilePath.toString()) .fileSystem(fs)); } } /** * Writes out puts to HFiles for incremental load. * * @param puts the HBase puts to write * @param connection HBase client connection * @param tableName HBase table name * @param outputPath HFile location */ public static void writePutsToHFilesForIncrementalLoad( PCollection<Put> puts, Connection connection, TableName tableName, Path outputPath) throws IOException { writePutsToHFilesForIncrementalLoad(puts, connection, tableName, outputPath, false); } /** * Writes out puts to HFiles for incremental load. 
* * @param puts the HBase puts to write * @param connection HBase client connection * @param tableName HBase table name * @param outputPath HFile location * @param fs the filesystem where the HFiles will be written */ public static void writePutsToHFilesForIncrementalLoad( PCollection<Put> puts, Connection connection, TableName tableName, Path outputPath, FileSystem fs) throws IOException { writePutsToHFilesForIncrementalLoad(puts, connection, tableName, outputPath, false, fs); } /** * Writes out puts to HFiles for incremental load. * * @param puts the HBase puts to write * @param connection HBase client connection * @param tableName HBase table name * @param outputPath HFile location * @param limitToAffectedRegions used to indicate that the regions the {@code puts} will be loaded into should be * identified prior to writing HFiles. Identifying the regions ahead of time will reduce the number of reducers needed * when writing. This is beneficial if the data to be loaded only touches a small enough subset of the total regions in * the table. If set to false, the number of reducers will equal the number of regions in the table. * @see <a href='https://issues.apache.org/jira/browse/CRUNCH-588'>CRUNCH-588</a> */ public static void writePutsToHFilesForIncrementalLoad( PCollection<Put> puts, Connection connection, TableName tableName, Path outputPath, boolean limitToAffectedRegions) throws IOException { writePutsToHFilesForIncrementalLoad(puts, connection, tableName, outputPath, limitToAffectedRegions, null); } /** * Writes out puts to HFiles for incremental load. * * @param puts the HBase puts to write * @param connection HBase client connection * @param tableName HBase table name * @param outputPath HFile location * @param limitToAffectedRegions used to indicate that the regions the {@code puts} will be loaded into should be * identified prior to writing HFiles. Identifying the regions ahead of time will reduce the number of reducers needed * when writing. This is beneficial if the data to be loaded only touches a small enough subset of the total regions in * the table. If set to false, the number of reducers will equal the number of regions in the table. * @param fs the filesystem where the HFiles will be written * @see <a href='https://issues.apache.org/jira/browse/CRUNCH-588'>CRUNCH-588</a> */ public static void writePutsToHFilesForIncrementalLoad( PCollection<Put> puts, Connection connection, TableName tableName, Path outputPath, boolean limitToAffectedRegions, FileSystem fs) throws IOException { PCollection<Cell> cells = puts.parallelDo("ConvertPutToCells", new DoFn<Put, Cell>() { @Override public void process(Put input, Emitter<Cell> emitter) { for (Cell cell : Iterables.concat(input.getFamilyCellMap().values())) { emitter.emit(cell); } } }, HBaseTypes.cells()); writeToHFilesForIncrementalLoad(cells, connection, tableName, outputPath, limitToAffectedRegions, fs); } public static <C extends Cell> PCollection<C> sortAndPartition(PCollection<C> cells, RegionLocator regionLocator) throws IOException { return sortAndPartition(cells, regionLocator, false); } /** * Sorts and partitions the provided <code>cells</code> for the given <code>regionLocator</code> to ensure all elements that belong * in the same region end up in the same reducer. The flag <code>limitToAffectedRegions</code>, when set to true, will identify * the regions the data in <code>cells</code> belongs to and will set the number of reducers equal to the number of identified * affected regions. 
If set to false, then all regions will be used, and the number of reducers will be set to the number * of regions in the table. */ public static <C extends Cell> PCollection<C> sortAndPartition(PCollection<C> cells, RegionLocator regionLocator, boolean limitToAffectedRegions) throws IOException { Configuration conf = cells.getPipeline().getConfiguration(); PTable<C, Void> t = cells.parallelDo( "Pre-partition", new MapFn<C, Pair<C, Void>>() { @Override public Pair<C, Void> map(C input) { return Pair.of(input, (Void) null); } }, tableOf(cells.getPType(), nulls())); List<KeyValue> splitPoints; if(limitToAffectedRegions) { splitPoints = getSplitPoints(regionLocator, t); } else { splitPoints = getSplitPoints(regionLocator); } Path partitionFile = new Path(((DistributedPipeline) cells.getPipeline()).createTempPath(), "partition"); writePartitionInfo(conf, partitionFile, splitPoints); GroupingOptions options = GroupingOptions.builder() .partitionerClass(TotalOrderPartitioner.class) .sortComparatorClass(KeyValueComparator.class) .conf(TotalOrderPartitioner.PARTITIONER_PATH, partitionFile.toString()) .numReducers(splitPoints.size() + 1) .build(); return t.groupByKey(options).ungroup().keys(); } private static List<KeyValue> getSplitPoints(RegionLocator regionLocator) throws IOException { List<byte[]> startKeys = ImmutableList.copyOf(regionLocator.getStartKeys()); if (startKeys.isEmpty()) { throw new AssertionError(regionLocator.getName().getNameAsString() + " has no regions!"); } List<KeyValue> splitPoints = Lists.newArrayList(); for (byte[] startKey : startKeys.subList(1, startKeys.size())) { KeyValue kv = KeyValueUtil.createFirstOnRow(startKey); LOG.debug("split row: " + Bytes.toString(CellUtil.cloneRow(kv))); splitPoints.add(kv); } return splitPoints; } private static <C> List<KeyValue> getSplitPoints(RegionLocator regionLocator, PTable<C, Void> affectedRows) throws IOException { List<byte[]> startKeys; try { startKeys = Lists.newArrayList(regionLocator.getStartKeys()); if (startKeys.isEmpty()) { throw new AssertionError(regionLocator.getName().getNameAsString() + " has no regions!"); } } catch (IOException e) { throw new CrunchRuntimeException(e); } Collections.sort(startKeys, Bytes.BYTES_COMPARATOR); Iterable<ByteBuffer> bufferedStartKeys = affectedRows .parallelDo(new DetermineAffectedRegionsFn(startKeys), Writables.bytes()).materialize(); // set to get rid of the potential duplicate start keys emitted ImmutableSet.Builder<KeyValue> startKeyBldr = ImmutableSet.builder(); for (final ByteBuffer bufferedStartKey : bufferedStartKeys) { startKeyBldr.add(KeyValueUtil.createFirstOnRow(bufferedStartKey.array())); } return ImmutableList.copyOf(startKeyBldr.build()); } /** * Spins through the {@link Cell}s and determines which regions the data * will be loaded into. Searching the regions is done via a binary search. The * region start key should be provided by the caller to cut down on calls to * HMaster to get those start keys. 
*/ public static class DetermineAffectedRegionsFn<C extends Cell> extends DoFn<Pair<C, Void>, ByteBuffer> { private final Set<Cell> startKeysToEmit = new HashSet<>(); List<byte[]> startKeys; TotalOrderPartitioner.Node partitions; List<Cell> regionStartKeys = Lists.newArrayList(); public DetermineAffectedRegionsFn(List<byte[]> startKeys) { this.startKeys = startKeys; } @Override public void initialize() { for (byte[] startKey : startKeys.subList(1, startKeys.size())) { Cell cell = KeyValueUtil.createFirstOnRow(startKey); regionStartKeys.add(cell); } partitions = new TotalOrderPartitioner.BinarySearchNode<>(regionStartKeys.toArray(new Cell[regionStartKeys.size()]), new KeyValue.KVComparator()); } @Override public void process(Pair<C, Void> input, Emitter<ByteBuffer> emitter) { int position = partitions.findPartition(new KeyValue(input.first().getFamilyArray())); // if the position is after the last key, use the last start key // as the split for this key, since it should fall into that region if (position >= regionStartKeys.size() && regionStartKeys.size() > 1) { position = regionStartKeys.size() - 1; } Cell foundCell = regionStartKeys.get(position); if (!startKeysToEmit.contains(foundCell)) { startKeysToEmit.add(foundCell); emitter.emit(ByteBuffer.wrap(CellUtil.cloneRow(foundCell))); } } } private static void writePartitionInfo( Configuration conf, Path path, List<KeyValue> splitPoints) throws IOException { LOG.info("Writing {} split points to {}", splitPoints.size(), path); SequenceFile.Writer writer = SequenceFile.createWriter( path.getFileSystem(conf), conf, path, NullWritable.class, BytesWritable.class); for (KeyValue key : splitPoints) { writer.append(NullWritable.get(), HBaseTypes.keyValueToBytes(key)); } writer.close(); } private static void writeRegionLocationTable(Configuration conf, Path outputPath, RegionLocationTable regionLocationTable) throws IOException { LOG.info("Writing region location table for {} to {}", regionLocationTable.getTableName(), outputPath); try (FSDataOutputStream fsDataOutputStream = outputPath.getFileSystem(conf).create(outputPath)) { regionLocationTable.serialize(fsDataOutputStream); } } private static Result doCombineIntoRow(List<KeyValue> kvs, int versions) { // shortcut for the common case if (kvs.isEmpty()) { return null; } if (kvs.size() == 1 && kvs.get(0).getTypeByte() == KeyValue.Type.Put.getCode()) { return Result.create(Collections.<Cell>singletonList(kvs.get(0))); } kvs = maybeDeleteFamily(kvs); // In-place sort KeyValues by family, qualifier and then timestamp reversely (whenever ties, deletes appear first). Collections.sort(kvs, KEY_VALUE_COMPARATOR); List<Cell> results = Lists.newArrayListWithCapacity(kvs.size()); for (int i = 0, j; i < kvs.size(); i = j) { j = i + 1; while (j < kvs.size() && hasSameFamilyAndQualifier(kvs.get(i), kvs.get(j))) { j++; } results.addAll(getLatestKeyValuesOfColumn(kvs.subList(i, j), versions)); } if (results.isEmpty()) { return null; } return Result.create(results); } /** * In-place removes any {@link KeyValue}s whose timestamp is less than or equal to the * delete family timestamp. Also removes the delete family {@code KeyValue}s. 
*/ private static List<KeyValue> maybeDeleteFamily(List<KeyValue> kvs) { long deleteFamilyCut = -1; for (KeyValue kv : kvs) { if (kv.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()) { deleteFamilyCut = Math.max(deleteFamilyCut, kv.getTimestamp()); } } if (deleteFamilyCut == 0) { return kvs; } List<KeyValue> results = Lists.newArrayList(); for (KeyValue kv : kvs) { if (kv.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()) { continue; } if (kv.getTimestamp() <= deleteFamilyCut) { continue; } results.add(kv); } return results; } private static boolean hasSameFamilyAndQualifier(KeyValue l, KeyValue r) { return Bytes.equals( l.getBuffer(), l.getFamilyOffset(), l.getFamilyLength(), r.getBuffer(), r.getFamilyOffset(), r.getFamilyLength()) && Bytes.equals( l.getBuffer(), l.getQualifierOffset(), l.getQualifierLength(), r.getBuffer(), r.getQualifierOffset(), r.getQualifierLength()); } /** * Goes over the given {@link KeyValue}s and remove {@code Delete}s and {@code DeleteColumn}s. * * @param kvs {@code KeyValue}s that of same row and column and sorted by timestamps in * descending order * @param versions the number of versions to keep * @return the resulting {@code KeyValue}s that contains only {@code Put}s */ private static List<KeyValue> getLatestKeyValuesOfColumn(List<KeyValue> kvs, int versions) { if (kvs.isEmpty()) { return kvs; } if (kvs.get(0).getTypeByte() == KeyValue.Type.Put.getCode()) { return kvs; // shortcut for the common case } List<KeyValue> results = Lists.newArrayListWithCapacity(versions); long previousDeleteTimestamp = -1; for (KeyValue kv : kvs) { if (results.size() >= versions) { break; } if (kv.getTypeByte() == KeyValue.Type.DeleteColumn.getCode()) { break; } else if (kv.getTypeByte() == KeyValue.Type.Put.getCode()) { if (kv.getTimestamp() != previousDeleteTimestamp) { results.add(kv); } } else if (kv.getTypeByte() == KeyValue.Type.Delete.getCode()) { previousDeleteTimestamp = kv.getTimestamp(); } else { throw new AssertionError("Unexpected KeyValue type: " + kv.getTypeByte()); } } return results; } }
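// ---------------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original source. The table name
// "example_table" and the two paths are assumptions; only the public HFileUtils
// methods defined above are used. The sketch reads existing HFiles, combines the
// cells of each row (dropping deletes and older versions), flattens the rows back
// into cells, and writes per-family HFiles laid out for HBase incremental load.
class HFileUtilsUsageSketch {
  static void rewriteHFilesForBulkLoad(Pipeline pipeline, Connection connection) throws IOException {
    // Scan the input HFiles; combineIntoRow is applied internally by scanHFiles.
    PCollection<Result> rows =
        HFileUtils.scanHFiles(pipeline, new Path("/data/hfiles-in"), new Scan());

    // Flatten each combined Result back into its individual cells.
    PCollection<Cell> cells = rows.parallelDo("ResultToCells", new DoFn<Result, Cell>() {
      @Override
      public void process(Result result, Emitter<Cell> emitter) {
        for (Cell cell : result.listCells()) {
          emitter.emit(cell);
        }
      }
    }, HBaseTypes.cells());

    // Write one HFile directory per column family of "example_table", sorted and
    // partitioned to match the table's regions, ready for HBase's incremental-load tooling.
    HFileUtils.writeToHFilesForIncrementalLoad(
        cells, connection, TableName.valueOf("example_table"), new Path("/data/hfiles-out"));

    pipeline.done();
  }
}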
2,360
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HFileSource.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import com.google.common.base.Objects; import com.google.common.collect.ImmutableList; import org.apache.commons.codec.binary.Hex; import org.apache.crunch.impl.mr.run.RuntimeParameters; import org.apache.crunch.io.FormatBundle; import org.apache.crunch.io.ReadableSource; import org.apache.crunch.ReadableData; import org.apache.crunch.io.SourceTargetHelper; import org.apache.crunch.io.impl.FileSourceImpl; import org.apache.crunch.types.Converter; import org.apache.crunch.types.PType; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.mapreduce.KeyValueSerialization; import org.apache.hadoop.hbase.mapreduce.MutationSerialization; import org.apache.hadoop.hbase.mapreduce.ResultSerialization; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.mapreduce.Job; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.List; public class HFileSource extends FileSourceImpl<KeyValue> implements ReadableSource<KeyValue> { private static final Logger LOG = LoggerFactory.getLogger(HFileSource.class); private static final PType<KeyValue> KEY_VALUE_PTYPE = HBaseTypes.keyValues(); public HFileSource(Path path) { this(ImmutableList.of(path)); } public HFileSource(List<Path> paths) { this(paths, new Scan()); } // Package-local. Don't want it to be too open, because we only support limited filters yet // (namely start/stop row). Users who need advanced filters should use HFileUtils#scanHFiles. HFileSource(List<Path> paths, Scan scan) { super(paths, KEY_VALUE_PTYPE, createInputFormatBundle(scan) // "combine file" is not supported by HFileInputFormat, as it overrides listStatus(). 
.set(RuntimeParameters.DISABLE_COMBINE_FILE, "true")); } private static FormatBundle<HFileInputFormat> createInputFormatBundle(Scan scan) { FormatBundle<HFileInputFormat> bundle = FormatBundle.forInput(HFileInputFormat.class); if (!Objects.equal(scan.getStartRow(), HConstants.EMPTY_START_ROW)) { bundle.set(HFileInputFormat.START_ROW_KEY, Hex.encodeHexString(scan.getStartRow())); } if (!Objects.equal(scan.getStopRow(), HConstants.EMPTY_END_ROW)) { bundle.set(HFileInputFormat.STOP_ROW_KEY, Hex.encodeHexString(scan.getStopRow())); } return bundle; } @Override public void configureSource(Job job, int inputId) throws IOException { TableMapReduceUtil.addDependencyJars(job); Configuration conf = job.getConfiguration(); conf.setStrings("io.serializations", conf.get("io.serializations"), MutationSerialization.class.getName(), ResultSerialization.class.getName(), KeyValueSerialization.class.getName()); super.configureSource(job, inputId); } @Override public Iterable<KeyValue> read(Configuration conf) throws IOException { conf = new Configuration(conf); inputBundle.configure(conf); if (conf.get(HFileInputFormat.START_ROW_KEY) != null || conf.get(HFileInputFormat.STOP_ROW_KEY) != null) { throw new IllegalStateException("Cannot filter row ranges in HFileSource.read"); } return read(conf, new HFileReaderFactory()); } @Override public ReadableData<KeyValue> asReadable() { return new HFileReadableData(paths); } public Converter<?, ?, ?, ?> getConverter() { return new HBaseValueConverter<KeyValue>(KeyValue.class); } @Override public String toString() { return "HFile(" + pathsAsString() + ")"; } @Override public long getSize(Configuration conf) { // HFiles are stored into <family>/<file>, but the default implementation does not support this. // This is used for estimating the number of reducers. (Otherwise we will always get 1 reducer.) long sum = 0; for (Path path : getPaths()) { try { sum += getSizeInternal(conf, path); } catch (IOException e) { LOG.warn("Failed to estimate size of {}", path); } LOG.info("Size after read of path = {} = {}", path.toString(), sum); } return sum; } private long getSizeInternal(Configuration conf, Path path) throws IOException { FileSystem fs = path.getFileSystem(conf); FileStatus[] statuses = fs.globStatus(path, HFileInputFormat.HIDDEN_FILE_FILTER); if (statuses == null) { return 0; } long sum = 0; for (FileStatus status : statuses) { if (status.isDirectory()) { sum += SourceTargetHelper.getPathSize(fs, status.getPath()); } else { sum += status.getLen(); } } return sum; } }
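// ---------------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original source; the input path is an
// assumption. HFileSource exposes the raw KeyValues stored in a set of HFiles. Only
// start/stop-row filtering is supported (via the package-local constructor), so
// callers needing richer filtering or row reassembly should go through
// HFileUtils.scanHFiles rather than reading this source directly.
class HFileSourceUsageSketch {
  static long countKeyValues(org.apache.crunch.Pipeline pipeline) {
    org.apache.crunch.PCollection<KeyValue> kvs =
        pipeline.read(new HFileSource(new Path("/data/hfiles-in")));
    // length() forces the pipeline to run and returns the number of KeyValues read.
    return kvs.length().getValue();
  }
}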
2,361
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/AtHBase.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import org.apache.crunch.SourceTarget; import org.apache.hadoop.hbase.client.Scan; /** * Static factory methods for creating HBase {@link SourceTarget} types. */ public class AtHBase { public static HBaseSourceTarget table(String table) { return table(table, new Scan()); } public static HBaseSourceTarget table(String table, Scan scan) { return new HBaseSourceTarget(table, scan); } }
2,362
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HFileTarget.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import org.apache.crunch.io.SequentialFileNamingScheme; import org.apache.crunch.io.impl.FileTargetImpl; import org.apache.crunch.types.Converter; import org.apache.crunch.types.PTableType; import org.apache.crunch.types.PType; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.KeyValueSerialization; import org.apache.hadoop.mapreduce.Job; public class HFileTarget extends FileTargetImpl { public HFileTarget(String path) { this(new Path(path)); } public HFileTarget(Path path) { this(path, null); } public HFileTarget(Path path, HColumnDescriptor hcol) { super(path, HFileOutputFormatForCrunch.class, SequentialFileNamingScheme.getInstance()); if (hcol != null) { outputConf(HFileOutputFormatForCrunch.HCOLUMN_DESCRIPTOR_COMPRESSION_TYPE_KEY, hcol.getCompressionType().getName()); outputConf(HFileOutputFormatForCrunch.HCOLUMN_DESCRIPTOR_DATA_BLOCK_ENCODING_KEY, hcol.getDataBlockEncoding().name()); outputConf(HFileOutputFormatForCrunch.HCOLUMN_DESCRIPTOR_BLOOM_FILTER_TYPE_KEY, hcol.getBloomFilterType().name()); } } @Override public void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name) { Configuration conf = job.getConfiguration(); HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); conf.setStrings("io.serializations", conf.get("io.serializations"), KeyValueSerialization.class.getName()); super.configureForMapReduce(job, ptype, outputPath, name); } @Override public Converter<?, ?, ?, ?> getConverter(PType<?> ptype) { PType<?> valueType = ptype; if (ptype instanceof PTableType) { valueType = ((PTableType) ptype).getValueType(); } if (!Cell.class.isAssignableFrom(valueType.getTypeClass())) { throw new IllegalArgumentException("HFileTarget only supports Cell outputs"); } if (ptype instanceof PTableType) { return new HBasePairConverter<ImmutableBytesWritable, Cell>(ImmutableBytesWritable.class, Cell.class); } return new HBaseValueConverter<Cell>(Cell.class); } @Override public String toString() { return "HFile(" + path + ")"; } }
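// ---------------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original source; the family name, the
// output path, and the compression choice are assumptions. Passing an
// HColumnDescriptor lets the target forward the family's compression, data block
// encoding, and bloom filter settings to HFileOutputFormatForCrunch. The cells
// written to a single HFileTarget must all belong to one column family and must
// already be sorted and partitioned, which HFileUtils.writeToHFilesForIncrementalLoad
// normally arranges.
class HFileTargetUsageSketch {
  static void writeFamilyCells(org.apache.crunch.PCollection<Cell> sortedFamilyCells) {
    HColumnDescriptor family = new HColumnDescriptor("fam");
    // GZ compression chosen here purely as an example setting.
    family.setCompressionType(org.apache.hadoop.hbase.io.compress.Compression.Algorithm.GZ);
    sortedFamilyCells.write(new HFileTarget(new Path("/data/hfiles-out/fam"), family));
  }
}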
2,363
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HFileReadableData.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import org.apache.crunch.io.FileReaderFactory; import org.apache.crunch.io.impl.ReadableDataImpl; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.KeyValue; import java.util.List; public class HFileReadableData extends ReadableDataImpl<KeyValue> { public HFileReadableData(List<Path> paths) { super(paths); } @Override protected FileReaderFactory<KeyValue> getFileReaderFactory() { return new HFileReaderFactory(); } }
2,364
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/ToHBase.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import org.apache.crunch.Target; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; /** * Static factory methods for creating HBase {@link Target} types. */ public class ToHBase { public static Target table(String table) { return table(TableName.valueOf(table)); } public static Target table(TableName table) { return new HBaseTarget(table); } public static Target hfile(String path) { return new HFileTarget(path); } public static Target hfile(Path path) { return new HFileTarget(path); } }
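// ---------------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original source; the table name is an
// assumption. ToHBase.table(...) produces a Target backed by HBaseTarget, which is
// typically given a PCollection of Put mutations to write into the live table.
class ToHBaseUsageSketch {
  static void writePuts(org.apache.crunch.PCollection<org.apache.hadoop.hbase.client.Put> puts) {
    // Write the mutations directly into the live table...
    puts.write(ToHBase.table("example_table"));
    // ...or, for bulk loads, prefer HFileUtils.writePutsToHFilesForIncrementalLoad,
    // which sorts and partitions the data before using the hfile(...) target above.
  }
}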
2,365
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HBaseData.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import com.google.common.collect.ImmutableSet; import org.apache.crunch.Pair; import org.apache.crunch.ReadableData; import org.apache.crunch.SourceTarget; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.TaskInputOutputContext; import org.apache.hadoop.util.StringUtils; import java.io.IOException; import java.util.Set; public class HBaseData implements ReadableData<Pair<ImmutableBytesWritable, Result>> { private final String table; private transient TableName tableName; private final String scansAsString; private transient SourceTarget parent; public HBaseData(String table, String scansAsString, SourceTarget<?> parent) { this.table = table; this.tableName = TableName.valueOf(table); this.scansAsString = scansAsString; this.parent = parent; } @Override public Set<SourceTarget<?>> getSourceTargets() { if (parent != null) { return ImmutableSet.<SourceTarget<?>>of(parent); } else { return ImmutableSet.of(); } } @Override public void configure(Configuration conf) { // No-op } @Override public Iterable<Pair<ImmutableBytesWritable, Result>> read( TaskInputOutputContext<?, ?, ?, ?> ctxt) throws IOException { Configuration hconf = HBaseConfiguration.create(ctxt.getConfiguration()); Connection connection = ConnectionFactory.createConnection(hconf); Table htable = connection.getTable(getTableName()); String[] scanStrings = StringUtils.getStrings(scansAsString); int length = scanStrings == null ? 0 : scanStrings.length; Scan[] scans = new Scan[length]; for(int i = 0; i < length; i++){ scans[i] = HBaseSourceTarget.convertStringToScan(scanStrings[i]); } return new HTableIterable(connection, htable, scans); } private TableName getTableName(){ if(tableName == null){ tableName = TableName.valueOf(table); } return tableName; } }
2,366
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HBaseValueConverter.java
/* * * * * Licensed to the Apache Software Foundation (ASF) under one * * or more contributor license agreements. See the NOTICE file * * distributed with this work for additional information * * regarding copyright ownership. The ASF licenses this file * * to you under the Apache License, Version 2.0 (the * * "License"); you may not use this file except in compliance * * with the License. You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * */ package org.apache.crunch.io.hbase; import org.apache.crunch.types.Converter; import org.apache.hadoop.io.NullWritable; public class HBaseValueConverter<V> implements Converter<Object, V, V, Iterable<V>> { private final Class<V> serializationClass; public HBaseValueConverter(Class<V> serializationClass) { this.serializationClass = serializationClass; } @Override public V convertInput(Object key, V value) { return value; } @Override public Object outputKey(V value) { return NullWritable.get(); } @Override public V outputValue(V value) { return value; } @Override public Class<Object> getKeyClass() { return (Class<Object>) (Class<?>) NullWritable.class; } @Override public Class<V> getValueClass() { return serializationClass; } @Override public boolean applyPTypeTransforms() { return false; } @Override public Iterable<V> convertIterableInput(Object key, Iterable<V> value) { return value; } }
2,367
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/FromHBase.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import java.util.List; import com.google.common.collect.ImmutableList; import org.apache.crunch.Source; import org.apache.crunch.TableSource; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; /** * Static factory methods for creating HBase {@link Source} types. */ public class FromHBase { public static TableSource<ImmutableBytesWritable, Result> table(String table) { return table(table, new Scan()); } public static TableSource<ImmutableBytesWritable, Result> table(String table, Scan scan) { return table(TableName.valueOf(table), scan); } public static TableSource<ImmutableBytesWritable, Result> table(String table, List<Scan> scans) { return table(TableName.valueOf(table), scans); } public static TableSource<ImmutableBytesWritable, Result> table(TableName table) { return table(table, new Scan()); } public static TableSource<ImmutableBytesWritable, Result> table(TableName table, Scan scan) { return table(table, ImmutableList.of(scan)); } public static TableSource<ImmutableBytesWritable, Result> table(TableName table, List<Scan> scans) { if (scans.isEmpty()) { throw new IllegalArgumentException("Must supply at least one scan"); } return new HBaseSourceTarget(table, scans.toArray(new Scan[scans.size()])); } public static Source<KeyValue> hfile(String path) { return hfile(new Path(path)); } public static Source<KeyValue> hfile(Path path) { return new HFileSource(path); } }
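// ---------------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original source; the table name, the
// column family, and the caching value are assumptions. Reading a TableSource yields
// a PTable keyed by the row key, with the full Result as the value.
class FromHBaseUsageSketch {
  static org.apache.crunch.PTable<ImmutableBytesWritable, Result> readFamily(
      org.apache.crunch.Pipeline pipeline) {
    Scan scan = new Scan();
    scan.addFamily(org.apache.hadoop.hbase.util.Bytes.toBytes("fam"));
    scan.setCaching(500);
    return pipeline.read(FromHBase.table("example_table", scan));
  }
}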
2,368
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HFileReaderFactory.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import com.google.common.collect.ImmutableList; import org.apache.crunch.io.FileReaderFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import java.io.IOException; import java.util.Iterator; public class HFileReaderFactory implements FileReaderFactory<KeyValue> { public static final String HFILE_SCANNER_CACHE_BLOCKS = "crunch.hfile.scanner.cache.blocks"; public static final String HFILE_SCANNER_PREAD = "crunch.hfile.scanner.pread"; @Override public Iterator<KeyValue> read(FileSystem fs, Path path) { Configuration conf = fs.getConf(); CacheConfig cacheConfig = new CacheConfig(conf); try { HFile.Reader hfr = HFile.createReader(fs, path, cacheConfig, true, conf); HFileScanner scanner = hfr.getScanner( conf.getBoolean(HFILE_SCANNER_CACHE_BLOCKS, false), conf.getBoolean(HFILE_SCANNER_PREAD, false)); scanner.seekTo(); return new HFileIterator(scanner); } catch (IOException e) { throw new RuntimeException(e); } } private static class HFileIterator implements Iterator<KeyValue> { private final HFileScanner scanner; private KeyValue curr; public HFileIterator(HFileScanner scanner) { this.scanner = scanner; this.curr = KeyValueUtil.copyToNewKeyValue(scanner.getCell()); } @Override public boolean hasNext() { return curr != null; } @Override public KeyValue next() { KeyValue ret = curr; try { if (scanner.next()) { curr = KeyValueUtil.copyToNewKeyValue(scanner.getCell()); } else { curr = null; } } catch (IOException e) { throw new RuntimeException(e); } return ret; } @Override public void remove() { throw new UnsupportedOperationException("HFileIterator is read-only"); } } }
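// ---------------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original source; the flag values chosen
// here are assumptions. The two keys above control how the underlying HFileScanner is
// opened when HFiles are read locally (e.g. via HFileSource#read or HFileReadableData);
// the factory reads them from the FileSystem's Configuration.
class HFileReaderFactoryUsageSketch {
  static Iterator<KeyValue> openScanner(FileSystem fs, Path hfile) {
    Configuration conf = fs.getConf();
    conf.setBoolean(HFileReaderFactory.HFILE_SCANNER_CACHE_BLOCKS, false); // skip the block cache
    conf.setBoolean(HFileReaderFactory.HFILE_SCANNER_PREAD, true);         // use positional reads
    return new HFileReaderFactory().read(fs, hfile);
  }
}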
2,369
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HTableIterable.java
/* * * * * Licensed to the Apache Software Foundation (ASF) under one * * or more contributor license agreements. See the NOTICE file * * distributed with this work for additional information * * regarding copyright ownership. The ASF licenses this file * * to you under the Apache License, Version 2.0 (the * * "License"); you may not use this file except in compliance * * with the License. You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * */ package org.apache.crunch.io.hbase; import org.apache.crunch.Pair; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import java.io.IOException; import java.util.Arrays; import java.util.Iterator; class HTableIterable implements Iterable<Pair<ImmutableBytesWritable, Result>> { private final Table table; private final Scan[] scans; private final Connection connection; public HTableIterable(Connection connection, Table table, Scan... scans) { this.table = table; this.connection = connection; this.scans = scans; } @Override public Iterator<Pair<ImmutableBytesWritable, Result>> iterator() { return new HTableIterator(connection, table, Arrays.asList(scans)); } }
2,370
0
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hbase/src/main/java/org/apache/crunch/io/hbase/HFileOutputFormatForCrunch.java
/** * Copyright 2009 The Apache Software Foundation * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hbase; import com.google.common.collect.ImmutableList; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.mapreduce.RecordWriter; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; /** * This is a thin wrapper of {@link HFile.Writer}. It only calls {@link HFile.Writer#append} * when records are emitted. It only supports writing data into a single column family. Records MUST be sorted * by their column qualifier, then timestamp reversely. All data are written into a single HFile. * * HBase's official {@code HFileOutputFormat} is not used, because it shuffles on row-key only and * does in-memory sort at reducer side (so the size of output HFile is limited to reducer's memory). * As crunch supports more complex and flexible MapReduce pipeline, we would prefer thin and pure * {@code OutputFormat} here. 
*/ public class HFileOutputFormatForCrunch extends FileOutputFormat<Object, Cell> { // HCOLUMN_DESCRIPTOR_KEY is no longer used, but left for binary compatibility public static final String HCOLUMN_DESCRIPTOR_KEY = "hbase.hfileoutputformat.column.descriptor"; public static final String HCOLUMN_DESCRIPTOR_COMPRESSION_TYPE_KEY = "hbase.hfileoutputformat.column.descriptor.compressiontype"; public static final String HCOLUMN_DESCRIPTOR_DATA_BLOCK_ENCODING_KEY = "hbase.hfileoutputformat.column.descriptor.datablockencoding"; public static final String HCOLUMN_DESCRIPTOR_BLOOM_FILTER_TYPE_KEY = "hbase.hfileoutputformat.column.descriptor.bloomfiltertype"; private static final String COMPACTION_EXCLUDE_CONF_KEY = "hbase.mapreduce.hfileoutputformat.compaction.exclude"; private static final Logger LOG = LoggerFactory.getLogger(HFileOutputFormatForCrunch.class); private final byte [] now = Bytes.toBytes(System.currentTimeMillis()); @Override public RecordWriter<Object, Cell> getRecordWriter(final TaskAttemptContext context) throws IOException, InterruptedException { Path outputPath = getDefaultWorkFile(context, ""); Configuration conf = context.getConfiguration(); FileSystem fs = outputPath.getFileSystem(conf); final boolean compactionExclude = conf.getBoolean( COMPACTION_EXCLUDE_CONF_KEY, false); LOG.info("Output path: {}", outputPath); Configuration noCacheConf = new Configuration(conf); noCacheConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f); StoreFileWriter.Builder writerBuilder = new StoreFileWriter.Builder(conf, new CacheConfig(noCacheConf), fs) .withComparator(CellComparatorImpl.COMPARATOR) .withFilePath(outputPath) .withFileContext(getContext(conf)); String bloomFilterType = conf.get(HCOLUMN_DESCRIPTOR_BLOOM_FILTER_TYPE_KEY); if (bloomFilterType != null) { writerBuilder.withBloomType(BloomType.valueOf(bloomFilterType)); } final StoreFileWriter writer = writerBuilder.build(); return new RecordWriter<Object, Cell>() { long maxSeqId = 0L; @Override public void write(Object row, Cell cell) throws IOException { KeyValue copy = KeyValueUtil.copyToNewKeyValue(cell); if (copy.getTimestamp() == HConstants.LATEST_TIMESTAMP) { copy.updateLatestStamp(now); } writer.append(copy); long seqId = cell.getSequenceId(); if (seqId > maxSeqId) { maxSeqId = seqId; } } @Override public void close(TaskAttemptContext c) throws IOException { // true => product of major compaction writer.appendMetadata(maxSeqId, true); writer.appendFileInfo(HStoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis())); writer.appendFileInfo(HStoreFile.BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString())); writer.appendFileInfo(HStoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude)); writer.close(); } }; } private HFileContext getContext(Configuration conf) { HFileContextBuilder contextBuilder = new HFileContextBuilder(); String compressionType = conf.get(HCOLUMN_DESCRIPTOR_COMPRESSION_TYPE_KEY); if (compressionType != null) { contextBuilder.withCompression(HFileWriterImpl.compressionByName(compressionType)); } String dataBlockEncoding = conf.get(HCOLUMN_DESCRIPTOR_DATA_BLOCK_ENCODING_KEY); if (dataBlockEncoding != null) { contextBuilder.withDataBlockEncoding(DataBlockEncoding.valueOf(dataBlockEncoding)); } return contextBuilder.build(); } }
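// ---------------------------------------------------------------------------------
// Illustrative follow-up sketch, not part of the original source; the table name and
// the HFile directory are assumptions. Once per-family HFiles have been produced by
// this output format (normally via HFileUtils.writeToHFilesForIncrementalLoad), they
// can be handed to HBase's incremental-load tool. The tool's package differs across
// HBase releases (org.apache.hadoop.hbase.tool in the 2.x line used here,
// org.apache.hadoop.hbase.mapreduce in earlier lines), so adjust to the client version.
class HFileBulkLoadSketch {
  static void bulkLoad(Configuration conf, Path hfileDir) throws Exception {
    try (org.apache.hadoop.hbase.client.Connection connection =
             org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(conf)) {
      org.apache.hadoop.hbase.TableName tableName =
          org.apache.hadoop.hbase.TableName.valueOf("example_table");
      // Moves the generated HFiles into the table's regions atomically.
      new org.apache.hadoop.hbase.tool.LoadIncrementalHFiles(conf).doBulkLoad(
          hfileDir,
          connection.getAdmin(),
          connection.getTable(tableName),
          connection.getRegionLocator(tableName));
    }
  }
}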
2,371
0
Create_ds/crunch/crunch-hcatalog/src/it/java/org/apache/crunch/io
Create_ds/crunch/crunch-hcatalog/src/it/java/org/apache/crunch/io/hcatalog/HCatTargetITSpec.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hcatalog; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterators; import com.google.common.collect.Lists; import org.apache.commons.io.IOUtils; import org.apache.crunch.PCollection; import org.apache.crunch.PGroupedTable; import org.apache.crunch.Pair; import org.apache.crunch.Pipeline; import org.apache.crunch.impl.mr.MRPipeline; import org.apache.crunch.test.CrunchTestSupport; import org.apache.crunch.test.TemporaryPath; import org.apache.crunch.types.avro.Avros; import org.apache.crunch.types.writable.Writables; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.hcatalog.data.DefaultHCatRecord; import org.apache.hive.hcatalog.data.HCatRecord; import org.apache.hive.hcatalog.data.schema.HCatSchema; import org.apache.thrift.TException; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestName; import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; import static junit.framework.Assert.assertEquals; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; public class HCatTargetITSpec extends CrunchTestSupport { private static IMetaStoreClient client; private static Configuration conf; private static TemporaryPath tempDir; @Rule public TestName testName = new TestName(); @BeforeClass public static void setUp() throws Throwable { HCatTestSuiteIT.startTest(); client = HCatTestSuiteIT.getClient(); conf = HCatTestSuiteIT.getConf(); tempDir = HCatTestSuiteIT.getRootPath(); } @AfterClass public static void tearDown() throws Exception { HCatTestSuiteIT.endTest(); } @Test 
public void test_successfulWriteToHCatTarget() throws IOException, HiveException, TException { String tableName = testName.getMethodName(); Path tableRootLocation = tempDir.getPath(tableName); String data = "17,josh\n29,indiana\n"; writeDataToHdfs(data, tableRootLocation, conf); FieldSchema partitionSchema = new FieldSchema(); partitionSchema.setName("timestamp"); partitionSchema.setType("string"); HCatTestUtils.createTable(client, "default", tableName, TableType.EXTERNAL_TABLE, tableRootLocation, Lists.newArrayList(partitionSchema)); Pipeline pipeline = new MRPipeline(HCatSourceITSpec.class, conf); PCollection<String> contents = pipeline.readTextFile(tableRootLocation.toString()); PCollection<HCatRecord> hcatRecords = contents.parallelDo(new HCatTestUtils.Fns.MapHCatRecordFn(), Writables.writables(HCatRecord.class)); Map<String, String> partitions = new HashMap<String, String>() { { { put("timestamp", "1234"); } } }; pipeline.write(hcatRecords, ToHCat.table("default", tableName, partitions)); pipeline.run(); // ensure partition was created List<Partition> partitionList = client.listPartitions("default", tableName, (short) 5); assertThat(partitionList.size(), is(1)); Partition newPartition = Iterators.getOnlyElement(partitionList.iterator()); assertThat(newPartition.getValuesIterator().next(), is("1234")); // read data from table to ensure it was written correctly HCatSourceTarget source = (HCatSourceTarget) FromHCat.table("default", tableName, "timestamp='1234'"); PCollection<HCatRecord> read = pipeline.read(source); HCatSchema schema = source.getTableSchema(pipeline.getConfiguration()); ArrayList<Pair<Integer, String>> mat = Lists.newArrayList( read.parallelDo(new HCatTestUtils.Fns.MapPairFn(schema), Avros.tableOf(Avros.ints(), Avros.strings())) .materialize()); assertEquals(ImmutableList.of(Pair.of(17, "josh"), Pair.of(29, "indiana")), mat); partitions = new HashMap<String, String>() { { { put("timestamp", "5678"); } } }; pipeline.write(read, ToHCat.table("default", tableName, partitions)); pipeline.done(); } @Test public void test_successfulWriteToHCatTarget_GroupByKey() throws IOException, HiveException, TException { String tableName = testName.getMethodName(); Path tableRootLocation = tempDir.getPath(tableName); String data = "17,josh\n29,indiana\n"; writeDataToHdfs(data, tableRootLocation, conf); FieldSchema partitionSchema = new FieldSchema(); partitionSchema.setName("timestamp"); partitionSchema.setType("string"); HCatTestUtils.createTable(client, "default", tableName, TableType.EXTERNAL_TABLE, tableRootLocation, Lists.newArrayList(partitionSchema)); Pipeline pipeline = new MRPipeline(HCatSourceITSpec.class, conf); PCollection<String> contents = pipeline.readTextFile(tableRootLocation.toString()); PCollection<HCatRecord> hcatRecords = contents.parallelDo(new HCatTestUtils.Fns.MapHCatRecordFn(), Writables.writables(HCatRecord.class)); Map<String, String> partitions = new HashMap<String, String>() { { { put("timestamp", "1234"); } } }; HCatTarget target = new HCatTarget(tableName, partitions); pipeline.write(hcatRecords, target); pipeline.run(); // ensure partition was created List<Partition> partitionList = client.listPartitions("default", tableName, (short) 5); assertThat(partitionList.size(), is(1)); Partition newPartition = Iterators.getOnlyElement(partitionList.iterator()); assertThat(newPartition.getValuesIterator().next(), is("1234")); // read data from table to ensure it was written correctly HCatSourceTarget source = (HCatSourceTarget) FromHCat.table("default", tableName, 
"timestamp='1234'"); PCollection<HCatRecord> read = pipeline.read(source); HCatSchema schema = source.getTableSchema(pipeline.getConfiguration()); PGroupedTable<String, DefaultHCatRecord> table = read.parallelDo(new HCatTestUtils.Fns.GroupByHCatRecordFn(), Writables.tableOf(Writables.strings(), Writables.writables(DefaultHCatRecord.class))).groupByKey(); Iterable<Pair<Integer, String>> mat = table .parallelDo(new HCatTestUtils.Fns.IterableToHCatRecordMapFn(), Writables.writables(HCatRecord.class)) .parallelDo(new HCatTestUtils.Fns.MapPairFn(schema), Avros.tableOf(Avros.ints(), Avros.strings())) .materialize(); assertEquals(ImmutableList.of(Pair.of(29, "indiana"), Pair.of(17, "josh")), ImmutableList.copyOf(mat)); pipeline.done(); } @Test public void test_HCatTarget_WriteToNonNativeTable_HBase() throws Exception { HBaseTestingUtility hbaseTestUtil = null; try { String db = "default"; String sourceHiveTable = "source_table"; String destinationHiveTable = "dest_table"; Configuration configuration = HBaseConfiguration.create(conf); hbaseTestUtil = new HBaseTestingUtility(configuration); hbaseTestUtil.startMiniZKCluster(); hbaseTestUtil.startMiniHBaseCluster(1, 1); org.apache.hadoop.hbase.client.Table sourceTable = hbaseTestUtil.createTable(TableName.valueOf(sourceHiveTable), "fam"); String key1 = "this-is-a-key"; Put put = new Put(Bytes.toBytes(key1)); put.addColumn("fam".getBytes(), "foo".getBytes(), "17".getBytes()); sourceTable.put(put); String key2 = "this-is-a-key-too"; Put put2 = new Put(Bytes.toBytes(key2)); put2.addColumn("fam".getBytes(), "foo".getBytes(), "29".getBytes()); sourceTable.put(put2); sourceTable.close(); // create Hive Table for source table org.apache.hadoop.hive.ql.metadata.Table tbl = new org.apache.hadoop.hive.ql.metadata.Table(db, sourceHiveTable); tbl.setOwner(UserGroupInformation.getCurrentUser().getShortUserName()); tbl.setTableType(TableType.EXTERNAL_TABLE); FieldSchema f1 = new FieldSchema(); f1.setName("foo"); f1.setType("int"); FieldSchema f2 = new FieldSchema(); f2.setName("key"); f2.setType("string"); tbl.setProperty("storage_handler", "org.apache.hadoop.hive.hbase.HBaseStorageHandler"); tbl.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"); tbl.setFields(ImmutableList.of(f1, f2)); tbl.setSerdeParam("hbase.columns.mapping", "fam:foo,:key"); this.client.createTable(tbl.getTTable()); // creates destination table hbaseTestUtil.createTable(TableName.valueOf(destinationHiveTable), "fam"); org.apache.hadoop.hive.ql.metadata.Table destTable = new org.apache.hadoop.hive.ql.metadata.Table(db, destinationHiveTable); destTable.setOwner(UserGroupInformation.getCurrentUser().getShortUserName()); destTable.setTableType(TableType.EXTERNAL_TABLE); destTable.setProperty("storage_handler", "org.apache.hadoop.hive.hbase.HBaseStorageHandler"); destTable.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"); destTable.setFields(ImmutableList.of(f1, f2)); destTable.setSerdeParam("hbase.columns.mapping", "fam:foo,:key"); this.client.createTable(destTable.getTTable()); Pipeline p = new MRPipeline(HCatSourceITSpec.class, configuration); PCollection<HCatRecord> records = p.read(FromHCat.table(sourceHiveTable)); p.write(records, ToHCat.table(destinationHiveTable)); p.done(); Connection connection = null; try { Scan scan = new Scan(); connection = ConnectionFactory.createConnection(configuration); org.apache.hadoop.hbase.client.Table table = connection.getTable(TableName.valueOf(destinationHiveTable)); ResultScanner scanner = 
table.getScanner(scan); Result result = null; List<Pair<String, Integer>> actual = new ArrayList<>(); while ((result = scanner.next()) != null) { String value = Bytes.toString(result.getValue("fam".getBytes(), "foo".getBytes())); actual.add(Pair.of(Bytes.toString(result.getRow()), Integer.parseInt(value))); } Assert.assertEquals(ImmutableList.of(Pair.of(key1, 17), Pair.of(key2, 29)), actual); } finally { IOUtils.closeQuietly(connection); } } finally { if (hbaseTestUtil != null) { hbaseTestUtil.shutdownMiniHBaseCluster(); hbaseTestUtil.shutdownMiniZKCluster(); } } } // writes data to the specified location and ensures the directory exists // prior to writing private Path writeDataToHdfs(String data, Path location, Configuration conf) throws IOException { FileSystem fs = location.getFileSystem(conf); Path writeLocation = new Path(location, UUID.randomUUID().toString()); fs.mkdirs(location); ByteArrayInputStream baos = new ByteArrayInputStream(data.getBytes("UTF-8")); try (FSDataOutputStream fos = fs.create(writeLocation)) { IOUtils.copy(baos, fos); } return writeLocation; } }
2,372
0
Create_ds/crunch/crunch-hcatalog/src/it/java/org/apache/crunch/io
Create_ds/crunch/crunch-hcatalog/src/it/java/org/apache/crunch/io/hcatalog/HCatTestSuiteIT.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hcatalog; import org.apache.crunch.test.TemporaryPath; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hive.hcatalog.common.HCatUtil; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.runner.RunWith; import org.junit.runners.Suite; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.UUID; /** * Test suite to re-use the same hive metastore instance for all tests in the * suite */ @RunWith(Suite.class) @Suite.SuiteClasses({ HCatSourceITSpec.class, HCatTargetITSpec.class }) public class HCatTestSuiteIT { private static final Logger LOGGER = LoggerFactory.getLogger(HCatTestSuiteIT.class); private static boolean runAsSuite = false; public static TemporaryPath hadoopTempDir = new TemporaryPath("crunch.tmp.dir", "hadoop.tmp.dir"); static HiveConf hconf; static IMetaStoreClient client; static Configuration conf = null; @BeforeClass public static void startSuite() throws Exception { runAsSuite = true; setupFileSystem(); setupMetaStore(); } @AfterClass public static void endSuite() throws Exception { cleanup(); } public static Configuration getConf() { return conf; } public static TemporaryPath getRootPath() { return hadoopTempDir; } public static IMetaStoreClient getClient() { return client; } private static void setupMetaStore() throws Exception { conf = hadoopTempDir.getDefaultConfiguration(); // set the warehouse location to the location of the temp dir, so managed // tables return a size estimate of the table String databaseLocation = hadoopTempDir.getPath("metastore_db").toString(); String derbyLocation = hadoopTempDir.getPath("derby.log").toString(); String jdbcUrl = "jdbc:derby:;databaseName=" + databaseLocation + ";create=true"; conf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, jdbcUrl); conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.toString(), hadoopTempDir.getRootPath().toString()); // allow HMS to create any tables necessary conf.set("datanucleus.schema.autoCreateTables", "true"); // disable verification as the tables won't exist at startup conf.set("hive.metastore.schema.verification", "false"); // write derby logs to the temp directory to be cleaned up automagically after the test runs System.setProperty("derby.stream.error.file", derbyLocation); hconf = HCatUtil.getHiveConf(conf); client = HCatUtil.getHiveMetastoreClient(hconf); } private static void setupFileSystem() throws Exception { try { hadoopTempDir.create(); } catch (Throwable throwable) { throw (Exception) throwable; } } public static void startTest() throws Exception { if (!runAsSuite) { setupFileSystem(); setupMetaStore(); } } 
public static void endTest() throws Exception { if (!runAsSuite) { cleanup(); } } private static void cleanup() throws IOException { hadoopTempDir.delete(); client.close(); } }
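A minimal sketch of how an additional integration test could hook into this suite's shared metastore lifecycle, mirroring the pattern the existing specs use; the class name HCatExampleITSpec is hypothetical, and such a class would also need to be listed in @Suite.SuiteClasses to actually share the suite-wide metastore.

package org.apache.crunch.io.hcatalog;

import org.apache.crunch.test.TemporaryPath;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class HCatExampleITSpec {

  private static IMetaStoreClient client;
  private static Configuration conf;
  private static TemporaryPath tempDir;

  @BeforeClass
  public static void setUp() throws Throwable {
    // startTest() creates the filesystem and metastore only when the class
    // runs outside HCatTestSuiteIT; inside the suite they are shared
    HCatTestSuiteIT.startTest();
    client = HCatTestSuiteIT.getClient();
    conf = HCatTestSuiteIT.getConf();
    tempDir = HCatTestSuiteIT.getRootPath();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // endTest() only cleans up when running standalone
    HCatTestSuiteIT.endTest();
  }
}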
2,373
0
Create_ds/crunch/crunch-hcatalog/src/it/java/org/apache/crunch/io
Create_ds/crunch/crunch-hcatalog/src/it/java/org/apache/crunch/io/hcatalog/HCatSourceITSpec.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hcatalog; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.apache.commons.io.IOUtils; import org.apache.crunch.PCollection; import org.apache.crunch.PTable; import org.apache.crunch.Pair; import org.apache.crunch.Pipeline; import org.apache.crunch.ReadableData; import org.apache.crunch.impl.mr.MRPipeline; import org.apache.crunch.test.CrunchTestSupport; import org.apache.crunch.test.TemporaryPath; import org.apache.crunch.types.avro.Avros; import org.apache.crunch.types.writable.Writables; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.mapreduce.TaskInputOutputContext; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.hcatalog.data.DefaultHCatRecord; import org.apache.hive.hcatalog.data.HCatRecord; import org.apache.hive.hcatalog.data.schema.HCatSchema; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestName; import org.mockito.Mockito; import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.UUID; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.when; public class HCatSourceITSpec extends CrunchTestSupport { private static IMetaStoreClient client; private static TemporaryPath temporaryPath; private static Configuration conf; @Rule public TestName testName = new TestName(); @BeforeClass public static void setUp() throws Throwable { HCatTestSuiteIT.startTest(); client = HCatTestSuiteIT.getClient(); temporaryPath = HCatTestSuiteIT.getRootPath(); conf = HCatTestSuiteIT.getConf(); } @AfterClass public static void tearDown() throws Exception { HCatTestSuiteIT.endTest(); } @Test public void testBasic() throws Exception { String tableName = testName.getMethodName(); Path tableRootLocation = temporaryPath.getPath(tableName); String data = "17,josh\n29,indiana\n"; writeDataToHdfs(data, tableRootLocation, conf); HCatTestUtils.createUnpartitionedTable(client, tableName, 
TableType.MANAGED_TABLE, tableRootLocation); Pipeline p = new MRPipeline(HCatSourceITSpec.class, conf); HCatSourceTarget src = (HCatSourceTarget) FromHCat.table(tableName); HCatSchema schema = src.getTableSchema(p.getConfiguration()); PCollection<HCatRecord> records = p.read(src); List<Pair<Integer, String>> mat = Lists.newArrayList( records.parallelDo(new HCatTestUtils.Fns.MapPairFn(schema), Avros.tableOf(Avros.ints(), Avros.strings())) .materialize()); p.done(); assertEquals(ImmutableList.of(Pair.of(17, "josh"), Pair.of(29, "indiana")), mat); } @Test public void testReadable() throws Exception { String tableName = testName.getMethodName(); Path tableRootLocation = temporaryPath.getPath(tableName); String data = "17,josh\n29,indiana\n"; writeDataToHdfs(data, tableRootLocation, conf); HCatTestUtils.createUnpartitionedTable(client, tableName, TableType.MANAGED_TABLE, tableRootLocation); Pipeline p = new MRPipeline(HCatSourceITSpec.class, conf); HCatSourceTarget src = (HCatSourceTarget) FromHCat.table(tableName); HCatSchema schema = src.getTableSchema(p.getConfiguration()); PCollection<HCatRecord> records = p.read(src); ReadableData<HCatRecord> readable = records.asReadable(true); TaskInputOutputContext mockTIOC = Mockito.mock(TaskInputOutputContext.class); when(mockTIOC.getConfiguration()).thenReturn(conf); readable.configure(conf); Iterator<HCatRecord> iterator = readable.read(mockTIOC).iterator(); HCatTestUtils.Fns.MapPairFn fn = new HCatTestUtils.Fns.MapPairFn(schema); List<Pair<Integer, String>> results = new ArrayList<>(); while (iterator.hasNext()) { results.add(fn.map(iterator.next())); } p.done(); assertEquals(ImmutableList.of(Pair.of(17, "josh"), Pair.of(29, "indiana")), results); } @Test public void testmaterialize() throws Exception { String tableName = testName.getMethodName(); Path tableRootLocation = temporaryPath.getPath(tableName); String data = "17,josh\n29,indiana\n"; writeDataToHdfs(data, tableRootLocation, conf); HCatTestUtils.createUnpartitionedTable(client, tableName, TableType.MANAGED_TABLE, tableRootLocation); Pipeline p = new MRPipeline(HCatSourceITSpec.class, conf); HCatSourceTarget src = (HCatSourceTarget) FromHCat.table(tableName); HCatSchema schema = src.getTableSchema(p.getConfiguration()); PCollection<HCatRecord> records = p.read(src); // force the materialize here on the HCatRecords themselves ... 
then // transform Iterable<HCatRecord> materialize = records.materialize(); HCatTestUtils.Fns.MapPairFn fn = new HCatTestUtils.Fns.MapPairFn(schema); List<Pair<Integer, String>> results = new ArrayList<>(); for (final HCatRecord record : materialize) { results.add(fn.map(record)); } p.done(); assertEquals(ImmutableList.of(Pair.of(17, "josh"), Pair.of(29, "indiana")), results); } @Test public void testMaterialize_partitionedTable_multiplePartitionsRequested() throws Exception { String tableName = testName.getMethodName(); Path tableRootLocation = temporaryPath.getPath(tableName); String part1Data = "17,josh\n29,indiana\n"; String part1Value = "1234"; Path partition1Location = new Path(tableRootLocation, part1Value); String part2Data = "42,jackie\n17,ohio\n"; String part2Value = "5678"; Path partition2Location = new Path(tableRootLocation, part2Value); writeDataToHdfs(part1Data, partition1Location, conf); writeDataToHdfs(part2Data, partition2Location, conf); FieldSchema partitionSchema = new FieldSchema(); partitionSchema.setName("timestamp"); partitionSchema.setType("string"); Table table = HCatTestUtils.createTable(client, "default", tableName, TableType.EXTERNAL_TABLE, tableRootLocation, Collections.singletonList(partitionSchema)); client .add_partition(HCatTestUtils.createPartition(table, partition1Location, Collections.singletonList(part1Value))); client .add_partition(HCatTestUtils.createPartition(table, partition2Location, Collections.singletonList(part2Value))); Pipeline p = new MRPipeline(HCatSourceITSpec.class, conf); String filter = "timestamp=\"" + part1Value + "\" or timestamp=\"" + part2Value + "\""; // HCatSource src = new HCatSource("default", tableName, filter); HCatSourceTarget src = (HCatSourceTarget) FromHCat.table("default", tableName, filter); HCatSchema schema = src.getTableSchema(p.getConfiguration()); PCollection<HCatRecord> records = p.read(src); // force the materialize here on the HCatRecords themselves ... then // transform Iterable<HCatRecord> materialize = records.materialize(); HCatTestUtils.Fns.MapPairFn fn = new HCatTestUtils.Fns.MapPairFn(schema); List<Pair<Integer, String>> results = new ArrayList<>(); for (final HCatRecord record : materialize) { results.add(fn.map(record)); } p.done(); assertEquals( ImmutableList.of(Pair.of(17, "josh"), Pair.of(29, "indiana"), Pair.of(42, "jackie"), Pair.of(17, "ohio")), results); } @Test public void testGroupBy() throws Exception { String tableName = testName.getMethodName(); Path tableRootLocation = temporaryPath.getPath(tableName); String data = "17,josh\n29,indiana\n"; writeDataToHdfs(data, tableRootLocation, conf); HCatTestUtils.createUnpartitionedTable(client, tableName, TableType.MANAGED_TABLE, tableRootLocation); Pipeline p = new MRPipeline(HCatSourceITSpec.class, conf); HCatSourceTarget src = (HCatSourceTarget) FromHCat.table(tableName); HCatSchema schema = src.getTableSchema(p.getConfiguration()); PCollection<HCatRecord> records = p.read(src); // can't use HCatRecord here as the intermediate output is written out by // hadoop, and there is // an explicit check to ensure that the type being written out matches the // defined output type. // e.g. 
DefaultHCatRecord != HCatRecord, therefore an exception is thrown PTable<String, DefaultHCatRecord> table = records.parallelDo(new HCatTestUtils.Fns.GroupByHCatRecordFn(), Writables.tableOf(Writables.strings(), Writables.writables(DefaultHCatRecord.class))); PTable<Integer, String> finaltable = table.groupByKey().parallelDo(new HCatTestUtils.Fns.HCatRecordMapFn(schema), Avros.tableOf(Avros.ints(), Avros.strings())); List<Pair<Integer, String>> results = new ArrayList<>(); for (final Map.Entry<Integer, String> entry : finaltable.materializeToMap().entrySet()) { results.add(Pair.of(entry.getKey(), entry.getValue())); } p.done(); assertEquals(ImmutableList.of(Pair.of(17, "josh"), Pair.of(29, "indiana")), results); } @Test public void test_HCatRead_NonNativeTable_HBase() throws Exception { HBaseTestingUtility hbaseTestUtil = null; try { String db = "default"; String hiveTable = "test"; Configuration hbaseConf = HBaseConfiguration.create(conf); hbaseTestUtil = new HBaseTestingUtility(hbaseConf); hbaseTestUtil.startMiniZKCluster(); hbaseTestUtil.startMiniHBaseCluster(1, 1); org.apache.hadoop.hbase.client.Table table = hbaseTestUtil.createTable(TableName.valueOf("test-table"), "fam"); String key1 = "this-is-a-key"; Put put = new Put(Bytes.toBytes(key1)); put.addColumn("fam".getBytes(), "foo".getBytes(), "17".getBytes()); table.put(put); String key2 = "this-is-a-key-too"; Put put2 = new Put(Bytes.toBytes(key2)); put2.addColumn("fam".getBytes(), "foo".getBytes(), "29".getBytes()); table.put(put2); table.close(); org.apache.hadoop.hive.ql.metadata.Table tbl = new org.apache.hadoop.hive.ql.metadata.Table(db, hiveTable); tbl.setOwner(UserGroupInformation.getCurrentUser().getShortUserName()); tbl.setTableType(TableType.EXTERNAL_TABLE); FieldSchema f1 = new FieldSchema(); f1.setName("foo"); f1.setType("int"); FieldSchema f2 = new FieldSchema(); f2.setName("key"); f2.setType("string"); tbl.setProperty("hbase.table.name", "test-table"); tbl.setProperty("hbase.mapred.output.outputtable", "test-table"); tbl.setProperty("storage_handler", "org.apache.hadoop.hive.hbase.HBaseStorageHandler"); tbl.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"); tbl.setFields(ImmutableList.of(f1, f2)); tbl.setSerdeParam("hbase.columns.mapping", "fam:foo,:key"); this.client.createTable(tbl.getTTable()); Pipeline p = new MRPipeline(HCatSourceITSpec.class, hbaseConf); HCatSourceTarget src = (HCatSourceTarget) FromHCat.table(hiveTable); HCatSchema schema = src.getTableSchema(p.getConfiguration()); PCollection<HCatRecord> records = p.read(src); List<Pair<String, Integer>> mat = Lists.newArrayList( records.parallelDo(new HCatTestUtils.Fns.KeyMapPairFn(schema), Avros.tableOf(Avros.strings(), Avros.ints())) .materialize()); p.done(); assertEquals(ImmutableList.of(Pair.of(key1, 17), Pair.of(key2, 29)), mat); } finally { if (hbaseTestUtil != null) { hbaseTestUtil.shutdownMiniHBaseCluster(); hbaseTestUtil.shutdownMiniZKCluster(); } } } // writes data to the specified location and ensures the directory exists // prior to writing private Path writeDataToHdfs(String data, Path location, Configuration conf) throws IOException { FileSystem fs = location.getFileSystem(conf); Path writeLocation = new Path(location, UUID.randomUUID().toString()); fs.mkdirs(location); fs.create(writeLocation); ByteArrayInputStream baos = new ByteArrayInputStream(data.getBytes("UTF-8")); try (FSDataOutputStream fos = fs.create(writeLocation)) { IOUtils.copy(baos, fos); } return writeLocation; } }
2,374
0
Create_ds/crunch/crunch-hcatalog/src/it/java/org/apache/crunch/io
Create_ds/crunch/crunch-hcatalog/src/it/java/org/apache/crunch/io/hcatalog/HCatTestUtils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hcatalog; import com.google.common.collect.ImmutableList; import org.apache.crunch.CrunchRuntimeException; import org.apache.crunch.DoFn; import org.apache.crunch.Emitter; import org.apache.crunch.MapFn; import org.apache.crunch.Pair; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.hcatalog.common.HCatException; import org.apache.hive.hcatalog.data.DefaultHCatRecord; import org.apache.hive.hcatalog.data.HCatRecord; import org.apache.hive.hcatalog.data.schema.HCatSchema; import org.apache.hive.hcatalog.mapreduce.HCatOutputFormat; import org.apache.thrift.TException; import java.io.IOException; import java.util.Collections; import java.util.List; import javax.annotation.Nullable; public class HCatTestUtils { public static class Fns { /** * Maps an HCatRecord with a Key to a pair of the Key and the value of the * column "foo" */ public static class KeyMapPairFn extends MapFn<HCatRecord, Pair<String, Integer>> { private HCatSchema schema; public KeyMapPairFn(HCatSchema schema) { this.schema = schema; } @Override public Pair<String, Integer> map(HCatRecord input) { try { return Pair.of(input.getString("key", schema), input.getInteger("foo", schema)); } catch (HCatException e) { throw new CrunchRuntimeException(e); } } } /** * Takes an HCatRecord and emits a Pair<Integer, String>. assumes the * columns in the record are "foo" (int) and "bar" (string) */ public static class MapPairFn extends MapFn<HCatRecord, Pair<Integer, String>> { private HCatSchema schema; public MapPairFn(HCatSchema schema) { this.schema = schema; } @Override public Pair<Integer, String> map(HCatRecord input) { try { return Pair.of(input.getInteger("foo", schema), input.getString("bar", schema)); } catch (HCatException e) { throw new CrunchRuntimeException(e); } } } /** * Simple MapFn that emits the input record and emits a Pair, with the first * element being "record". 
Useful for when testing group by with the value * being HCatRecord */ public static class GroupByHCatRecordFn extends MapFn<HCatRecord, Pair<String, DefaultHCatRecord>> { @Override public Pair<String, DefaultHCatRecord> map(HCatRecord input) { return Pair.of("record", (DefaultHCatRecord) input); } } /** * Takes the input iterable of DefaultHCatRecords and emits Pairs that * contain the value of the columns "foo" and "bar" */ public static class HCatRecordMapFn extends DoFn<Pair<String, Iterable<DefaultHCatRecord>>, Pair<Integer, String>> { private HCatSchema schema; public HCatRecordMapFn(HCatSchema schema) { this.schema = schema; } @Override public void process(Pair<String, Iterable<DefaultHCatRecord>> input, Emitter<Pair<Integer, String>> emitter) { for (final HCatRecord record : input.second()) { try { emitter.emit(Pair.of(record.getInteger("foo", schema), record.getString("bar", schema))); } catch (HCatException e) { throw new CrunchRuntimeException(e); } } } } /** * Takes a CSV line and maps it into an HCatRecord */ public static class MapHCatRecordFn extends MapFn<String, HCatRecord> { static HCatSchema dataSchema; @Override public void initialize() { try { dataSchema = HCatOutputFormat.getTableSchema(getConfiguration()); } catch (IOException e) { throw new CrunchRuntimeException(e); } } @Override public HCatRecord map(String input) { try { return getHCatRecord(input.split(",")); } catch (HCatException e) { throw new CrunchRuntimeException(e); } } private static HCatRecord getHCatRecord(String[] csvParts) throws HCatException { // must be set, or all subsequent sets on HCatRecord will fail. setting // the size // initializes the initial backing array DefaultHCatRecord hcatRecord = new DefaultHCatRecord(dataSchema.size()); hcatRecord.set("foo", dataSchema, Integer.parseInt(csvParts[0])); hcatRecord.set("bar", dataSchema, csvParts[1]); return hcatRecord; } } /** * Takes an iterable of HCatRecords and emits each HCatRecord (turns a * PTable into a PCollection) */ public static class IterableToHCatRecordMapFn extends DoFn<Pair<String, Iterable<DefaultHCatRecord>>, HCatRecord> { @Override public void process(Pair<String, Iterable<DefaultHCatRecord>> input, Emitter<HCatRecord> emitter) { for (final HCatRecord record : input.second()) { emitter.emit(record); } } } } public static Table createUnpartitionedTable(IMetaStoreClient client, String tableName, TableType type) throws IOException, HiveException, TException { return createTable(client, "default", tableName, type, null, Collections.<FieldSchema> emptyList()); } public static Table createUnpartitionedTable(IMetaStoreClient client, String tableName, TableType type, @Nullable Path datalocation) throws IOException, HiveException, TException { return createTable(client, "default", tableName, type, datalocation, Collections.<FieldSchema> emptyList()); } public static Table createTable(IMetaStoreClient client, String db, String tableName, TableType type, @Nullable Path datalocation, List<FieldSchema> partCols) throws IOException, HiveException, TException { org.apache.hadoop.hive.ql.metadata.Table tbl = new org.apache.hadoop.hive.ql.metadata.Table(db, tableName); tbl.setOwner(UserGroupInformation.getCurrentUser().getShortUserName()); tbl.setTableType(type); if (datalocation != null) tbl.setDataLocation(datalocation); FieldSchema f1 = new FieldSchema(); f1.setName("foo"); f1.setType("int"); FieldSchema f2 = new FieldSchema(); f2.setName("bar"); f2.setType("string"); if (partCols != null && !partCols.isEmpty()) tbl.setPartCols(partCols); 
tbl.setFields(ImmutableList.of(f1, f2)); tbl.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"); tbl.setSerdeParam("field.delim", ","); tbl.setSerdeParam("serialization.format", ","); tbl.setInputFormatClass("org.apache.hadoop.mapred.TextInputFormat"); tbl.setOutputFormatClass("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"); client.createTable(tbl.getTTable()); return client.getTable(db, tableName); } public static Partition createPartition(Table table, Path partLocation, List<String> partValues) { Partition partition = new Partition(); partition.setDbName(table.getDbName()); partition.setTableName(table.getTableName()); partition.setSd(new StorageDescriptor(table.getSd())); partition.setValues(partValues); partition.getSd().setLocation(partLocation.toString()); return partition; } }
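A minimal sketch of driving these helpers to set up a partitioned external table, following the same steps the partitioned integration test performs; the PartitionedTableSetup class, table name, and partition value are hypothetical, while the foo:int/bar:string columns come from createTable itself.

package org.apache.crunch.io.hcatalog;

import java.util.Collections;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;

class PartitionedTableSetup {

  // Creates "default.example_table" (columns foo:int and bar:string, as defined
  // by HCatTestUtils.createTable) partitioned by a string "timestamp" column,
  // then registers one partition whose data directory is <tableRoot>/1234.
  static Table createWithOnePartition(IMetaStoreClient client, Path tableRoot) throws Exception {
    FieldSchema partCol = new FieldSchema();
    partCol.setName("timestamp");
    partCol.setType("string");

    Table table = HCatTestUtils.createTable(client, "default", "example_table",
        TableType.EXTERNAL_TABLE, tableRoot, Collections.singletonList(partCol));

    client.add_partition(HCatTestUtils.createPartition(
        table, new Path(tableRoot, "1234"), Collections.singletonList("1234")));
    return table;
  }
}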
2,375
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/crunch/io/hcatalog/HCatTarget.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hcatalog; import com.google.common.base.Objects; import com.google.common.base.Strings; import com.google.common.collect.Maps; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.builder.ToStringBuilder; import org.apache.crunch.CrunchRuntimeException; import org.apache.crunch.SourceTarget; import org.apache.crunch.Target; import org.apache.crunch.io.CrunchOutputs; import org.apache.crunch.io.FormatBundle; import org.apache.crunch.io.MapReduceTarget; import org.apache.crunch.io.OutputHandler; import org.apache.crunch.types.Converter; import org.apache.crunch.types.PType; import org.apache.crunch.types.writable.Writables; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.mapreduce.Job; import org.apache.hive.hcatalog.common.HCatUtil; import org.apache.hive.hcatalog.data.DefaultHCatRecord; import org.apache.hive.hcatalog.data.HCatRecord; import org.apache.hive.hcatalog.mapreduce.CrunchHCatOutputFormat; import org.apache.hive.hcatalog.mapreduce.OutputJobInfo; import org.apache.thrift.TException; import java.io.IOException; import java.util.Map; import javax.annotation.Nullable; public class HCatTarget implements MapReduceTarget { private static final PType<HCatRecord> PTYPE = Writables.writables(HCatRecord.class); private static final PType<DefaultHCatRecord> DEFAULT_PTYPE = Writables.writables(DefaultHCatRecord.class); private final OutputJobInfo info; private final FormatBundle bundle = FormatBundle.forOutput(CrunchHCatOutputFormat.class); private Table hiveTableCached; /** * Constructs a new instance to write to the provided hive {@code table} name. * Writes to the "default" database. * * Note: if the destination table is partitioned, this constructor should not * be used. It will only be usable by unpartitioned tables * * @param table * the hive table to write to */ public HCatTarget(String table) { this(null, table, null); } /** * Constructs a new instance to write to the provided hive {@code table} name, * using the provided {@code database}. If null, uses "default" database. * * Note: if the destination table is partitioned, this constructor should not * be used. 
It will only be usable by unpartitioned tables * * @param database * the hive database to use for table namespacing * @param table * the hive table to write to */ public HCatTarget(@Nullable String database, String table) { this(database, table, null); } /** * Constructs a new instance to write to the provided hive {@code table} name * and {@code partitionValues}. Writes to the "default" database. * * Note: partitionValues will be assembled into a single directory path. * * For example, if the partition values are: * * <pre> * [year, 2017], * [month,11], * [day, 10] * * The constructed directory path will be * "[dataLocationRoot]/year=2017/month=11/day=10" * </pre> * * @param table * the hive table to write to * @param partitionValues * the partition within the table it should be written */ public HCatTarget(String table, Map<String, String> partitionValues) { this(null, table, partitionValues); } /** * Constructs a new instance to write to the provided {@code database}, * {@code table}, and to the specified {@code partitionValues}. If * {@code database} isn't specified, the "default" database is used * * Note: partitionValues will be assembled into a single directory path. * * For example, if the partition values are: * * <pre> * [year, 2017], * [month,11], * [day, 10] * * The constructed directory path will be * "[dataLocationRoot]/year=2017/month=11/day=10" * </pre> * * @param database * the hive database to use for table namespacing * @param table * the hive table to write to * @param partitionValues * the partition within the table it should be written */ public HCatTarget(@Nullable String database, String table, @Nullable Map<String, String> partitionValues) { this.info = OutputJobInfo.create(database, table, partitionValues); } @Override public void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name) { if (Strings.isNullOrEmpty(name)) { throw new AssertionError("Named output wasn't generated. This shouldn't happen"); } CrunchOutputs.addNamedOutput(job, name, bundle, NullWritable.class, HCatRecord.class); try { CrunchHCatOutputFormat.setOutput(job, info); // set the schema into config. this would be necessary if any downstream // tasks need the schema translated between a format (e.g. 
avro) and // HCatRecord for the destination table Table table = getHiveTable(job.getConfiguration()); CrunchHCatOutputFormat.setSchema(job, HCatUtil.extractSchema(table)); } catch (TException | IOException e) { throw new CrunchRuntimeException(e); } } @Override public Target outputConf(String key, String value) { bundle.set(key, value); return this; } @Override public Target fileSystem(FileSystem fileSystem) { // not currently supported/applicable for HCatalog return this; } @Override public FileSystem getFileSystem() { // not currently supported/applicable for HCatalog return null; } @Override public boolean handleExisting(WriteMode writeMode, long lastModifiedAt, Configuration conf) { return writeMode == WriteMode.DEFAULT; } @Override public boolean accept(OutputHandler handler, PType<?> ptype) { if (!acceptType(ptype)) { return false; } handler.configure(this, ptype); return true; } @Override public Converter<?, ?, ?, ?> getConverter(PType<?> ptype) { return ptype.getConverter(); } @Override public <T> SourceTarget<T> asSourceTarget(PType<T> ptype) { if (acceptType(ptype)) return (SourceTarget<T>) new HCatSourceTarget(info.getDatabaseName(), info.getTableName()); return null; } @Override public String toString() { return new ToStringBuilder(this) .append("database", info.getDatabaseName()) .append("table", info.getTableName()) .append("partition", info.getPartitionValues()) .toString(); } @Override public int hashCode() { return Objects.hashCode(info.getDatabaseName(), info.getTableName(), info.getPartitionValues()); } @Override public boolean equals(Object o) { if (o == null || !getClass().equals(o.getClass())) { return false; } HCatTarget that = (HCatTarget) o; return Objects.equal(this.info.getDatabaseName(), that.info.getDatabaseName()) && Objects.equal(this.info.getTableName(), that.info.getTableName()) && Objects.equal(this.info.getPartitionValues(), that.info.getPartitionValues()); } private boolean acceptType(PType<?> ptype) { return Objects.equal(ptype, PTYPE) || Objects.equal(ptype, DEFAULT_PTYPE); } private Table getHiveTable(Configuration conf) throws IOException, TException { if (hiveTableCached != null) { return hiveTableCached; } IMetaStoreClient hiveMetastoreClient = HCatUtil.getHiveMetastoreClient(new HiveConf(conf, HCatTarget.class)); hiveTableCached = HCatUtil.getTable(hiveMetastoreClient, info.getDatabaseName(), info.getTableName()); return hiveTableCached; } }
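A minimal sketch of wiring an HCatTarget directly into a pipeline (the ToHCat factory below is the usual entry point); the database, table, and partition names are hypothetical, and the Configuration is assumed to point at a reachable Hive metastore.

package org.apache.crunch.io.hcatalog;

import java.util.HashMap;
import java.util.Map;

import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.hadoop.conf.Configuration;
import org.apache.hive.hcatalog.data.HCatRecord;

public class HCatTargetUsage {
  public static void main(String[] args) {
    Pipeline pipeline = new MRPipeline(HCatTargetUsage.class, new Configuration());

    // read from a source table and copy it into a single partition of a
    // destination table; both tables must already exist in the metastore
    PCollection<HCatRecord> records = pipeline.read(FromHCat.table("default", "source_table"));

    Map<String, String> partition = new HashMap<>();
    partition.put("timestamp", "1234");

    // acceptType() only admits collections of HCatRecord/DefaultHCatRecord
    // backed by the Writables type family
    pipeline.write(records, new HCatTarget("default", "dest_table", partition));
    pipeline.done();
  }
}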
2,376
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/crunch/io/hcatalog/HCatRecordDataIterable.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hcatalog; import com.google.common.base.Function; import com.google.common.collect.Iterators; import com.google.common.collect.Lists; import org.apache.crunch.CrunchRuntimeException; import org.apache.crunch.io.FormatBundle; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hive.hcatalog.data.HCatRecord; import org.apache.hive.hcatalog.mapreduce.HCatInputFormat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.Iterator; public class HCatRecordDataIterable implements Iterable<HCatRecord> { private static final Logger LOG = LoggerFactory.getLogger(HCatRecordDataIterable.class); private final FormatBundle<HCatInputFormat> bundle; private final Configuration conf; public HCatRecordDataIterable(FormatBundle<HCatInputFormat> bundle, Configuration configuration) { this.bundle = bundle; this.conf = configuration; } @Override public Iterator<HCatRecord> iterator() { try { Job job = Job.getInstance(bundle.configure(conf)); final InputFormat fmt = ReflectionUtils.newInstance(bundle.getFormatClass(), conf); final TaskAttemptContext ctxt = new TaskAttemptContextImpl(conf, new TaskAttemptID()); return Iterators.concat(Lists.transform(fmt.getSplits(job), new Function<InputSplit, Iterator<HCatRecord>>() { @Override public Iterator<HCatRecord> apply(InputSplit split) { RecordReader reader = null; try { reader = fmt.createRecordReader(split, ctxt); reader.initialize(split, ctxt); } catch (IOException | InterruptedException e) { throw new CrunchRuntimeException(e); } return new HCatRecordReaderIterator(reader); } }).iterator()); } catch (Exception e) { throw new CrunchRuntimeException(e); } } private static class HCatRecordReaderIterator<T> implements Iterator<T> { private final RecordReader<WritableComparable, T> reader; private boolean hasNext; private T current; public HCatRecordReaderIterator(RecordReader reader) { this.reader = reader; try { hasNext = reader.nextKeyValue(); if (hasNext) current = this.reader.getCurrentValue(); } catch (IOException | InterruptedException e) { throw new CrunchRuntimeException(e); } } @Override public boolean hasNext() { return hasNext; } @Override public T next() { T ret = current; try { hasNext = reader.nextKeyValue(); if (hasNext) { current = reader.getCurrentValue(); 
} } catch (IOException | InterruptedException e) { throw new CrunchRuntimeException(e); } return ret; } @Override public void remove() { throw new UnsupportedOperationException("Removing elements is not supported"); } } }
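A minimal sketch of using this iterable for a client-side read, mirroring what HCatSourceTarget.read(Configuration) does: the bundle is first populated through the package-private configureHCatFormat helper and then handed to the iterable. The database and table names are hypothetical, and the Configuration is assumed to reach the metastore.

package org.apache.crunch.io.hcatalog;

import org.apache.crunch.io.FormatBundle;
import org.apache.hadoop.conf.Configuration;
import org.apache.hive.hcatalog.data.HCatRecord;
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat;

public class IterableReadSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    FormatBundle<HCatInputFormat> bundle = FormatBundle.forInput(HCatInputFormat.class);

    // push the HCatInputFormat settings (partitions, schema) into the bundle,
    // exactly as HCatSourceTarget does before constructing the iterable
    Configuration hcatConf =
        HCatSourceTarget.configureHCatFormat(conf, bundle, "default", "example_table", null);

    // each iterator() call resolves the input splits and streams the records
    // through the format's RecordReader on the client
    for (HCatRecord record : new HCatRecordDataIterable(bundle, hcatConf)) {
      System.out.println(record);
    }
  }
}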
2,377
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/crunch/io/hcatalog/FromHCat.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hcatalog; import org.apache.crunch.Source; import org.apache.hive.hcatalog.data.HCatRecord; import javax.annotation.Nullable; /** * Static factory methods for creating sources to read from HCatalog. * * Access examples: * <pre> * {@code * * Pipeline pipeline = new MRPipeline(this.getClass()); * * PCollection<HCatRecord> hcatRecords = pipeline.read(FromHCat.table("my-table")); * } * </pre> */ public final class FromHCat { private FromHCat() { } /** * Creates a {@code Source<HCatRecord>} instance from a hive table in the * default database instance "default". * * @param table * table name * @throws IllegalArgumentException if table is null or empty */ public static Source<HCatRecord> table(String table) { return new HCatSourceTarget(table); } /** * Creates a {@code Source<HCatRecord>} instance from a hive table. * * @param database * database name * @param table * table name * @throws IllegalArgumentException if table is null or empty */ public static Source<HCatRecord> table(String database, String table) { return new HCatSourceTarget(database, table); } /** * Creates a {@code Source<HCatRecord>} instance from a hive table with custom * filter criteria. If {@code database} is null, uses the default * database instance "default" * * @param database * database name * @param table * table name * @param filter * custom filter criteria, e.g. specify partitions by * {@code 'date= "20140424"'} or {@code 'date < "20140424"'} * @throws IllegalArgumentException if table is null or empty */ public static Source<HCatRecord> table(@Nullable String database, String table, String filter) { return new HCatSourceTarget(database, table, filter); } }
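A minimal sketch exercising the three factory overloads; the database, table, and filter values are hypothetical, and the resulting collections are left unconsumed purely for illustration.

package org.apache.crunch.io.hcatalog;

import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.hadoop.conf.Configuration;
import org.apache.hive.hcatalog.data.HCatRecord;

public class FromHCatUsage {
  public static void main(String[] args) {
    Pipeline pipeline = new MRPipeline(FromHCatUsage.class, new Configuration());

    // default database
    PCollection<HCatRecord> a = pipeline.read(FromHCat.table("my_table"));

    // explicit database
    PCollection<HCatRecord> b = pipeline.read(FromHCat.table("my_db", "my_table"));

    // explicit database plus a partition filter
    PCollection<HCatRecord> c =
        pipeline.read(FromHCat.table("my_db", "my_table", "date=\"20140424\""));

    pipeline.done();
  }
}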
2,378
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/crunch/io/hcatalog/HCatRecordDataReadable.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hcatalog; import com.google.common.base.Objects; import com.google.common.collect.ImmutableSet; import org.apache.crunch.CrunchRuntimeException; import org.apache.crunch.ReadableData; import org.apache.crunch.SourceTarget; import org.apache.crunch.io.FormatBundle; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.TaskInputOutputContext; import org.apache.hive.hcatalog.data.HCatRecord; import org.apache.hive.hcatalog.mapreduce.HCatInputFormat; import java.io.IOException; import java.util.Map; import java.util.Set; public class HCatRecordDataReadable implements ReadableData<HCatRecord> { private final FormatBundle<HCatInputFormat> bundle; private final String database; private final String table; private final String filter; public HCatRecordDataReadable(FormatBundle<HCatInputFormat> bundle, String database, String table, String filter) { this.bundle = bundle; this.database = database; this.table = table; this.filter = filter; } @Override public Set<SourceTarget<?>> getSourceTargets() { return ImmutableSet.of(); } @Override public void configure(Configuration conf) { // need to configure the input format, so the JobInputInfo is populated with // the partitions to be processed. the partitions are needed to derive the // input splits and to get a size estimate for the HCatSource. HCatSourceTarget.configureHCatFormat(conf, bundle, database, table, filter); } @Override public Iterable<HCatRecord> read(TaskInputOutputContext<?, ?, ?, ?> context) throws IOException { return new HCatRecordDataIterable(bundle, context.getConfiguration()); } }
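A minimal sketch of how this readable is typically obtained and consumed, following the asReadable(true) path the integration spec exercises; the ReadableSketch class is hypothetical, and the TaskInputOutputContext is assumed to be supplied by the running task (the spec substitutes a Mockito mock).

package org.apache.crunch.io.hcatalog;

import java.io.IOException;

import org.apache.crunch.PCollection;
import org.apache.crunch.ReadableData;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hive.hcatalog.data.HCatRecord;

class ReadableSketch {

  // configure() pushes the HCatInputFormat settings (partitions, schema) into
  // the configuration; read() then streams the records through
  // HCatRecordDataIterable on the consuming task.
  static Iterable<HCatRecord> sideInput(PCollection<HCatRecord> records,
      Configuration conf, TaskInputOutputContext<?, ?, ?, ?> context) throws IOException {
    ReadableData<HCatRecord> readable = records.asReadable(true);
    readable.configure(conf);
    return readable.read(context);
  }
}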
2,379
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/crunch/io/hcatalog/HCatSourceTarget.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hcatalog; import com.google.common.base.Objects; import com.google.common.base.Preconditions; import com.google.common.base.Strings; import org.apache.commons.lang.builder.ToStringBuilder; import org.apache.commons.lang3.StringUtils; import org.apache.crunch.CrunchRuntimeException; import org.apache.crunch.ReadableData; import org.apache.crunch.Source; import org.apache.crunch.SourceTarget; import org.apache.crunch.impl.mr.run.CrunchMapper; import org.apache.crunch.io.CrunchInputs; import org.apache.crunch.io.FormatBundle; import org.apache.crunch.io.ReadableSourceTarget; import org.apache.crunch.io.SourceTargetHelper; import org.apache.crunch.types.Converter; import org.apache.crunch.types.PType; import org.apache.crunch.types.writable.Writables; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.mapreduce.Job; import org.apache.hive.hcatalog.common.HCatConstants; import org.apache.hive.hcatalog.common.HCatUtil; import org.apache.hive.hcatalog.data.HCatRecord; import org.apache.hive.hcatalog.data.schema.HCatSchema; import org.apache.hive.hcatalog.mapreduce.HCatInputFormat; import org.apache.hive.hcatalog.mapreduce.InputJobInfo; import org.apache.hive.hcatalog.mapreduce.PartInfo; import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; import java.io.IOException; import java.util.List; import java.util.Map; import javax.annotation.Nullable; public class HCatSourceTarget extends HCatTarget implements ReadableSourceTarget<HCatRecord> { private static final Logger LOGGER = LoggerFactory.getLogger(HCatSourceTarget.class); private static final PType<HCatRecord> PTYPE = Writables.writables(HCatRecord.class); private Configuration hcatConf; private final FormatBundle<HCatInputFormat> bundle = FormatBundle.forInput(HCatInputFormat.class); private final String database; private final String table; private final String filter; private Table hiveTableCached; // Default guess at the size of the data to materialize private static final long DEFAULT_ESTIMATE = 1024 * 1024 * 1024; /** * Creates a new instance to read from the specified {@code table} and the * {@link org.apache.hadoop.hive.metastore.MetaStoreUtils#DEFAULT_DATABASE_NAME * default} database * * @param table * @throw IllegalArgumentException if table is null or empty */ public HCatSourceTarget(String table) { 
this(DEFAULT_DATABASE_NAME, table); } /** * Creates a new instance to read from the specified {@code database} and * {@code table} * * @param database * the database to read from * @param table * the table to read from * @throw IllegalArgumentException if table is null or empty */ public HCatSourceTarget(String database, String table) { this(database, table, null); } /** * Creates a new instance to read from the specified {@code database} and * {@code table}, restricting partitions by the specified {@code filter}. If * the database isn't specified it will default to the * {@link org.apache.hadoop.hive.metastore.MetaStoreUtils#DEFAULT_DATABASE_NAME * default} database. * * @param database * the database to read from * @param table * the table to read from * @param filter * the filter to apply to find partitions * @throw IllegalArgumentException if table is null or empty */ public HCatSourceTarget(@Nullable String database, String table, String filter) { super(database, table); this.database = Strings.isNullOrEmpty(database) ? DEFAULT_DATABASE_NAME : database; Preconditions.checkArgument(!StringUtils.isEmpty(table), "table cannot be null or empty"); this.table = table; this.filter = filter; } @Override public SourceTarget<HCatRecord> conf(String key, String value) { return null; } @Override public Source<HCatRecord> inputConf(String key, String value) { bundle.set(key, value); return this; } @Override public SourceTarget<HCatRecord> fileSystem(FileSystem fileSystem) { // not currently supported/applicable for HCatalog return this; } @Override public FileSystem getFileSystem() { // not currently supported/applicable for HCatalog return null; } @Override public PType<HCatRecord> getType() { return PTYPE; } @Override public Converter<?, ?, ?, ?> getConverter() { return PTYPE.getConverter(); } @Override public void configureSource(Job job, int inputId) throws IOException { Configuration jobConf = job.getConfiguration(); if (hcatConf == null) { hcatConf = configureHCatFormat(jobConf, bundle, database, table, filter); } if (inputId == -1) { job.setMapperClass(CrunchMapper.class); job.setInputFormatClass(bundle.getFormatClass()); bundle.configure(jobConf); } else { Path dummy = new Path("/hcat/" + database + "/" + table); CrunchInputs.addInputPath(job, dummy, bundle, inputId); } } static Configuration configureHCatFormat(Configuration conf, FormatBundle<HCatInputFormat> bundle, String database, String table, String filter) { // It is tricky to get the HCatInputFormat configured correctly. // // The first parameter of setInput() is for both input and output. // It reads Hive MetaStore's JDBC URL or HCatalog server's Thrift address, // and saves the schema into the configuration for runtime needs // (e.g. data location). // // Our solution is to create another configuration object, and // compares with the original one to see what has been added. Configuration newConf = new Configuration(conf); try { HCatInputFormat.setInput(newConf, database, table, filter); } catch (IOException e) { throw new CrunchRuntimeException(e); } for (Map.Entry<String, String> e : newConf) { String key = e.getKey(); String value = e.getValue(); if (!Objects.equal(value, conf.get(key))) { bundle.set(key, value); } } return newConf; } @Override public long getSize(Configuration conf) { // this is tricky. we want to derive the size by the partitions being // retrieved. these aren't known until after the HCatInputFormat has // been initialized (see #configureHCatFormat). 
preferably, the input // format shouldn't be configured twice to cut down on the number of calls // to hive. getSize can be called before configureSource is called when the // collection is being materialized or a groupby has been performed. so, the // InputJobInfo, which has the partitions, won't be present when this // happens. so, configure here or in configureSource just once. if (hcatConf == null) { hcatConf = configureHCatFormat(conf, bundle, database, table, filter); } try { InputJobInfo inputJobInfo = (InputJobInfo) HCatUtil.deserialize(hcatConf.get(HCatConstants.HCAT_KEY_JOB_INFO)); List<PartInfo> partitions = inputJobInfo.getPartitions(); if (partitions.size() > 0) { LOGGER.debug("Found [{}] partitions to read", partitions.size()); long size = 0; for (final PartInfo partition : partitions) { String totalSize = partition.getInputStorageHandlerProperties().getProperty(StatsSetupConst.TOTAL_SIZE); if (StringUtils.isEmpty(totalSize)) { long pathSize = SourceTargetHelper.getPathSize(conf, new Path(partition.getLocation())); if (pathSize == -1) { LOGGER.info("Unable to locate directory [{}]; skipping", partition.getLocation()); // could be an hbase table, in which there won't be a size // estimate if this is a valid native table partition, but no // data, materialize won't find anything } else if (pathSize == 0) { size += DEFAULT_ESTIMATE; } else { size += pathSize; } } else { size += Long.parseLong(totalSize); } } return size; } else { Table hiveTable = getHiveTable(conf); LOGGER.debug("Attempting to get table size from table properties for table [{}]", table); // managed table will have the size on it, but should be caught as a // partition.size == 1 if the table isn't partitioned String totalSize = hiveTable.getParameters().get(StatsSetupConst.TOTAL_SIZE); if (!StringUtils.isEmpty(totalSize)) return Long.parseLong(totalSize); // not likely to be hit. the totalSize should have been available on the // partitions returned (for unpartitioned tables one partition will be // returned, referring to the entire table), or on the table metadata // (only there for managed tables). if neither existed, then check // against the data location as backup. note: external tables can be // somewhere other than the root location as defined by the table, // as partitions can exist elsewhere. ideally this scenario is caught // by the if statement with partitions > 0 LOGGER.debug("Unable to find size on table properties [{}], attempting to get it from table data location [{}]", hiveTable.getTableName(), hiveTable.getDataLocation()); return SourceTargetHelper.getPathSize(conf, hiveTable.getDataLocation()); } } catch (IOException | TException e) { LOGGER.info("Unable to determine an estimate for requested table [{}], using default", table, e); return DEFAULT_ESTIMATE; } } /** * Extracts the {@link HCatSchema} from the specified {@code conf}. 
* * @param conf * the conf containing the table schema * @return the HCatSchema * * @throws TException * if there was an issue communicating with the metastore * @throws IOException * if there was an issue connecting to the metastore */ public HCatSchema getTableSchema(Configuration conf) throws TException, IOException { Table hiveTable = getHiveTable(conf); return HCatUtil.extractSchema(hiveTable); } @Override public long getLastModifiedAt(Configuration conf) { LOGGER.warn("Unable to determine the last modified time for db [{}] and table [{}]", database, table); return -1; } @Override public boolean equals(Object o) { if (o == null || !getClass().equals(o.getClass())) { return false; } HCatSourceTarget that = (HCatSourceTarget) o; return Objects.equal(this.database, that.database) && Objects.equal(this.table, that.table) && Objects.equal(this.filter, that.filter); } @Override public int hashCode() { return Objects.hashCode(table, database, filter); } @Override public String toString() { return new ToStringBuilder(this).append("database", database).append("table", table).append("filter", filter) .toString(); } private Table getHiveTable(Configuration conf) throws IOException, TException { if (hiveTableCached != null) { return hiveTableCached; } IMetaStoreClient hiveMetastoreClient = HCatUtil.getHiveMetastoreClient(new HiveConf(conf, HCatSourceTarget.class)); hiveTableCached = HCatUtil.getTable(hiveMetastoreClient, database, table); return hiveTableCached; } @Override public Iterable<HCatRecord> read(Configuration conf) throws IOException { if (hcatConf == null) { hcatConf = configureHCatFormat(conf, bundle, database, table, filter); } return new HCatRecordDataIterable(bundle, hcatConf); } @Override public ReadableData<HCatRecord> asReadable() { return new HCatRecordDataReadable(bundle, database, table, filter); } }
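A minimal sketch of the client-side read path this class provides (getTableSchema plus read(Configuration)), which avoids launching a MapReduce job; the table name, partition filter, and the foo/bar column names are hypothetical and simply match the fixtures used in the integration specs.

package org.apache.crunch.io.hcatalog;

import org.apache.hadoop.conf.Configuration;
import org.apache.hive.hcatalog.data.HCatRecord;
import org.apache.hive.hcatalog.data.schema.HCatSchema;

public class DirectReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // FromHCat.table(...) returns this class; cast as the integration specs do
    // to reach getTableSchema() and read()
    HCatSourceTarget source =
        (HCatSourceTarget) FromHCat.table("default", "example_table", "timestamp='1234'");

    // schema lookup goes through the metastore client for the table
    HCatSchema schema = source.getTableSchema(conf);

    // read(conf) configures HCatInputFormat once and iterates records
    // client-side
    for (HCatRecord record : source.read(conf)) {
      // columns "foo" (int) and "bar" (string) are assumptions about the
      // hypothetical table
      System.out.println(record.getInteger("foo", schema) + "," + record.getString("bar", schema));
    }
  }
}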
2,380
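For orientation, here is a minimal read-side sketch of the HCatSourceTarget described above. It is a sketch only: the single-argument FromHCat.table factory appears in the ToHCat javadoc below, while the database/filter overload shown in the comment and the table names used here are assumptions that mirror this class's database, table and filter fields.

import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.io.hcatalog.FromHCat;
import org.apache.hive.hcatalog.data.HCatRecord;

public class ReadHCatSketch {
  public static void main(String[] args) {
    Pipeline pipeline = new MRPipeline(ReadHCatSketch.class);
    // reads every partition; getSize() above estimates the input size from partition
    // stats, falling back to path sizes or DEFAULT_ESTIMATE when stats are missing
    PCollection<HCatRecord> records = pipeline.read(FromHCat.table("my_table"));
    // hypothetical filter overload restricting the read to a single partition:
    // PCollection<HCatRecord> oneDay = pipeline.read(FromHCat.table("default", "my_table", "ds='2017-11-10'"));
    pipeline.done();
  }
}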
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/crunch/io
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/crunch/io/hcatalog/ToHCat.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.io.hcatalog; import org.apache.crunch.Target; import java.util.Map; /** * Static factory helper methods for writing data to HCatalog * * <pre> * {@code * Pipeline pipeline = new MRPipeline(this.getClass()); * * PCollection<HCatRecord> hcatRecords = pipeline.read(FromHCat.table("this-table")); * * pipeline.write(hcatRecords, ToHCat.table("that-table")); * } * </pre> */ public class ToHCat { /** * Constructs a new instance to write to the provided hive {@code tableName}. * Writes to the "default" database. * * Note: if the destination table is partitioned, this constructor should not * be used. It will only be usable by unpartitioned tables. * * @param tableName * the hive table to write to */ public static Target table(String tableName) { return new HCatTarget(tableName); } /** * Constructs a new instance to write to the provided hive {@code tableName}, * using the provided {@code database}. If null, uses "default" database. * * Note: if the destination table is partitioned, this constructor should not * be used. It will only be usable by unpartitioned tables. * * @param database * the hive database to use for table namespacing * @param tableName * the hive table to write to */ public static Target table(String database, String tableName) { return new HCatTarget(database, tableName); } /** * Constructs a new instance to write to the provided hive {@code tableName} * and {@code partitionValues}. Writes to the "default" database. * * Note: partitionValues will be assembled into a single directory path. * * For example, if the partition values are: * * <pre> * [year, 2017], * [month,11], * [day, 10] * * The constructed directory path will be * "[dataLocationRoot]/year=2017/month=11/day=10" * </pre> * * @param tableName * the hive table to write to * @param partition * the partition within the table it should be written */ public static Target table(String tableName, Map<String, String> partition) { return new HCatTarget(tableName, partition); } /** * Constructs a new instance to write to the provided {@code database}, * {@code tableName}, and to the specified {@code partition}. If * {@code database} isn't specified, the "default" database is used * * Note: partitionValues will be assembled into a single directory path.
* * For example, if the partition values are: * * <pre> * [year, 2017], * [month,11], * [day, 10] * * The constructed directory path will be * "[dataLocationRoot]/year=2017/month=11/day=10" * </pre> * * @param database * the hive database to use for table namespacing * @param tableName * the hive table to write to * @param partition * the partition within the table it should be written */ public static Target table(String database, String tableName, Map<String, String> partition) { return new HCatTarget(database, tableName, partition); } }
2,381
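And a matching write-side sketch for the partitioned ToHCat.table variant documented above; the table names and partition keys are hypothetical, and the read source is reused from the previous sketch.

import com.google.common.collect.ImmutableMap;
import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.io.hcatalog.FromHCat;
import org.apache.crunch.io.hcatalog.ToHCat;
import org.apache.hive.hcatalog.data.HCatRecord;

import java.util.Map;

public class WritePartitionedHCatSketch {
  public static void main(String[] args) {
    Pipeline pipeline = new MRPipeline(WritePartitionedHCatSketch.class);
    PCollection<HCatRecord> records = pipeline.read(FromHCat.table("source_table"));
    // the partition map is assembled into .../year=2017/month=11/day=10 under the
    // table's data location, as described in the javadoc above
    Map<String, String> partition = ImmutableMap.of("year", "2017", "month", "11", "day", "10");
    pipeline.write(records, ToHCat.table("default", "target_table", partition));
    pipeline.done();
  }
}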
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatMapRedUtils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hive.hcatalog.mapreduce; import com.google.common.collect.Lists; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.TaskAttemptContextImpl; import org.apache.hadoop.mapred.TaskAttemptID; import org.apache.hadoop.mapreduce.TaskAttemptContext; import java.util.List; /** * Common helper methods for translating between v1 and v2 of map reduce */ public class HCatMapRedUtils { public static org.apache.hadoop.mapred.TaskAttemptContext getOldTaskAttemptContext(TaskAttemptContext context) { return new TaskAttemptContextImpl(new JobConf(context.getConfiguration()), getTaskAttemptID(context)); } /** * Creates a {@code TaskAttemptID} from the provided TaskAttemptContext. This * also performs logic to strip the crunch named output from the TaskAttemptID * already associated with the TaskAttemptContext. The TaskAttemptID requires * there to be six parts, separated by "_". With the crunch named output embedded in the * JobID, the full TaskAttemptID string has 7 parts. The named output needs to be stripped * away before a new TaskAttemptID can be constructed. * * @param context * The TaskAttemptContext * @return A TaskAttemptID with the crunch named output removed */ public static TaskAttemptID getTaskAttemptID(TaskAttemptContext context) { String taskAttemptId = context.getTaskAttemptID().toString(); List<String> taskAttemptIDParts = Lists.newArrayList(taskAttemptId.split("_")); if (taskAttemptIDParts.size() < 7) return TaskAttemptID.forName(taskAttemptId); // index 2 is the 3rd element in the task attempt id, which will be the // named output taskAttemptIDParts.remove(2); String reducedTaskAttemptId = StringUtils.join(taskAttemptIDParts, "_"); return TaskAttemptID.forName(reducedTaskAttemptId); } }
2,382
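The named-output stripping that getTaskAttemptID performs is plain string surgery, so it can be sanity-checked without any Hadoop dependencies; a minimal sketch (the example attempt id is borrowed from the package-info further down in this listing):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class StripNamedOutputSketch {
  public static void main(String[] args) {
    // 7-part id: crunch has appended the named output ("out0") to the job id portion
    String withNamedOutput = "attempt_1508401628996_out0_16350_m_000000_0";
    List<String> parts = new ArrayList<>(Arrays.asList(withNamedOutput.split("_")));
    if (parts.size() >= 7) {
      parts.remove(2); // drop the named output so the 6-part form expected by TaskAttemptID remains
    }
    // prints: attempt_1508401628996_16350_m_000000_0
    System.out.println(String.join("_", parts));
  }
}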
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog/mapreduce/CrunchDefaultOutputCommitterContainer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hive.hcatalog.mapreduce; import org.apache.hadoop.mapred.OutputCommitter; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.TaskAttemptContext; import java.io.IOException; /** * A thin extension of the Hive {@link DefaultOutputCommitterContainer}. This is * to insert crunch specific logic to strip the named output from the * TaskAttemptID. */ class CrunchDefaultOutputCommitterContainer extends DefaultOutputCommitterContainer { /** * @param context * current JobContext * @param baseCommitter * OutputCommitter to contain * @throws IOException */ public CrunchDefaultOutputCommitterContainer(JobContext context, OutputCommitter baseCommitter) throws IOException { super(context, baseCommitter); } @Override public void setupTask(TaskAttemptContext context) throws IOException { getBaseOutputCommitter().setupTask(HCatMapRedUtils.getOldTaskAttemptContext(context)); } @Override public void abortTask(TaskAttemptContext context) throws IOException { getBaseOutputCommitter().abortTask(HCatMapRedUtils.getOldTaskAttemptContext(context)); } @Override public void commitTask(TaskAttemptContext context) throws IOException { getBaseOutputCommitter().commitTask(HCatMapRedUtils.getOldTaskAttemptContext(context)); } @Override public boolean needsTaskCommit(TaskAttemptContext context) throws IOException { return getBaseOutputCommitter().needsTaskCommit(HCatMapRedUtils.getOldTaskAttemptContext(context)); } }
2,383
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog/mapreduce/CrunchDefaultOutputFormatContainer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hive.hcatalog.mapreduce; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.TaskAttemptContext; import java.io.IOException; class CrunchDefaultOutputFormatContainer extends DefaultOutputFormatContainer { public CrunchDefaultOutputFormatContainer(org.apache.hadoop.mapred.OutputFormat<WritableComparable<?>, Writable> of) { super(of); } @Override public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException, InterruptedException { return new CrunchDefaultOutputCommitterContainer(context, new JobConf(context.getConfiguration()).getOutputCommitter()); } }
2,384
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog/mapreduce/CrunchFileOutputCommitterContainer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hive.hcatalog.mapreduce; import org.apache.hadoop.mapred.OutputCommitter; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.TaskAttemptContext; import java.io.IOException; /** * Thin extension to construct valid * {@link org.apache.hadoop.mapred.TaskAttemptID}, and remove the crunch named * output from the {@link org.apache.hadoop.mapreduce.TaskAttemptID}. */ public class CrunchFileOutputCommitterContainer extends FileOutputCommitterContainer { private final boolean dynamicPartitioningUsed; /** * @param context * current JobContext * @param baseCommitter * OutputCommitter to contain * @throws IOException */ public CrunchFileOutputCommitterContainer(JobContext context, OutputCommitter baseCommitter) throws IOException { super(context, baseCommitter); dynamicPartitioningUsed = HCatOutputFormat.getJobInfo(context.getConfiguration()).isDynamicPartitioningUsed(); } @Override public void setupTask(TaskAttemptContext context) throws IOException { if (!dynamicPartitioningUsed) { getBaseOutputCommitter().setupTask(HCatMapRedUtils.getOldTaskAttemptContext(context)); } } @Override public boolean needsTaskCommit(TaskAttemptContext context) throws IOException { if (!dynamicPartitioningUsed) { return getBaseOutputCommitter().needsTaskCommit(HCatMapRedUtils.getOldTaskAttemptContext(context)); } else { // called explicitly through FileRecordWriterContainer.close() if dynamic // - return false by default return true; } } @Override public void commitTask(TaskAttemptContext context) throws IOException { if (!dynamicPartitioningUsed) { // See HCATALOG-499 FileOutputFormatContainer.setWorkOutputPath(context); getBaseOutputCommitter().commitTask(HCatMapRedUtils.getOldTaskAttemptContext(context)); } else { try { TaskCommitContextRegistry.getInstance().commitTask(context); } finally { TaskCommitContextRegistry.getInstance().discardCleanupFor(context); } } } @Override public void abortTask(TaskAttemptContext context) throws IOException { if (!dynamicPartitioningUsed) { getBaseOutputCommitter().abortTask(HCatMapRedUtils.getOldTaskAttemptContext(context)); } else { try { TaskCommitContextRegistry.getInstance().abortTask(context); } finally { TaskCommitContextRegistry.getInstance().discardCleanupFor(context); } } } }
2,385
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog/mapreduce/CrunchFileOutputFormatContainer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hive.hcatalog.mapreduce; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.OutputFormat; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.TaskAttemptContext; import java.io.IOException; /** * Thin extension to return an {@link OutputCommitter} that carries out crunch * specific logic */ class CrunchFileOutputFormatContainer extends FileOutputFormatContainer { /** * @param of * base OutputFormat to contain */ public CrunchFileOutputFormatContainer(OutputFormat<? super WritableComparable<?>, ? super Writable> of) { super(of); } @Override public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException, InterruptedException { // this needs to be manually set, under normal circumstances MR Task does // this setWorkOutputPath(context); return new CrunchFileOutputCommitterContainer(context, HCatBaseOutputFormat.getJobInfo(context.getConfiguration()).isDynamicPartitioningUsed() ? null : new JobConf(context.getConfiguration()).getOutputCommitter()); } }
2,386
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog/mapreduce/CrunchHCatOutputFormat.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hive.hcatalog.mapreduce; import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.OutputFormat; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hive.hcatalog.common.HCatUtil; import org.apache.hive.hcatalog.data.HCatRecord; import java.io.IOException; /** * Thin extension to supply {@link OutputFormat}s that carry out crunch * specific logic */ public class CrunchHCatOutputFormat extends HCatOutputFormat { @Override protected OutputFormat<WritableComparable<?>, HCatRecord> getOutputFormat(JobContext context) throws IOException { OutputJobInfo jobInfo = getJobInfo(context.getConfiguration()); HiveStorageHandler storageHandler = HCatUtil.getStorageHandler(context.getConfiguration(), jobInfo.getTableInfo().getStorerInfo()); // Always configure storage handler with jobproperties/jobconf before // calling any methods on it configureOutputStorageHandler(context); if (storageHandler instanceof FosterStorageHandler) { return new CrunchFileOutputFormatContainer( ReflectionUtils.newInstance(storageHandler.getOutputFormatClass(), context.getConfiguration())); } else { return new CrunchDefaultOutputFormatContainer( ReflectionUtils.newInstance(storageHandler.getOutputFormatClass(), context.getConfiguration())); } } }
2,387
0
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog
Create_ds/crunch/crunch-hcatalog/src/main/java/org/apache/hive/hcatalog/mapreduce/package-info.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * The package of classes here is needed to extend the default classes provided * by Hive. The classes in that package are package private, and therefore could * not be overridden outside of that package scope. Crunch needs to extend the * classes to override the behavior of creating * {@link org.apache.hadoop.mapred.TaskAttemptID}'s. * * {@link org.apache.hadoop.mapred.TaskAttemptID#forName(java.lang.String)} is * used by default in * {@link org.apache.hive.hcatalog.mapreduce.DefaultOutputCommitterContainer} * and {@link org.apache.hive.hcatalog.mapreduce.FileOutputFormatContainer} to * translate between MR v1 and MR v2. This causes issues because a TaskAttemptID * requires the string representation to be 6 elements, separated by underscores * ('_'). Crunch adds the named output to the JobID (which is used when creating * the TaskAttemptID) which gives the TaskAttemptID 7 elements. * * e.g. * * <pre> * attempt_1508401628996_out0_16350_m_000000_0 * </pre> * * So, the crunch classes in this package change the logic when creating * TaskAttemptID's to strip the named output before creating the TaskAttemptID. * * e.g * * <pre> * attempt_1508401628996_out0_16350_m_000000_0 -> attempt_1508401628996_16350_m_000000_0 * </pre> */ package org.apache.hive.hcatalog.mapreduce;
2,388
0
Create_ds/crunch/crunch-lambda/src/test/java/org/apache/crunch
Create_ds/crunch/crunch-lambda/src/test/java/org/apache/crunch/lambda/LCollectionTest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.lambda; import com.google.common.collect.ImmutableMap; import org.apache.crunch.Pair; import org.apache.crunch.impl.mem.MemPipeline; import org.apache.crunch.types.avro.Avros; import org.junit.Test; import java.util.Arrays; import java.util.Map; import java.util.Optional; import static org.apache.crunch.lambda.TestCommon.*; import static org.apache.crunch.lambda.TypedRecord.rec; import static org.apache.crunch.types.avro.Avros.*; import static org.junit.Assert.*; public class LCollectionTest { private LCollection<TypedRecord> lc() { return Lambda.wrap(MemPipeline.typedCollectionOf(Avros.reflects(TypedRecord.class), rec(14, "Alice", 101L), rec(25, "Bo B", 102L), rec(21, "Char Lotte", 103L), rec(28, "David", 104L), rec(31, "Erik", 105L))); } @Test public void testParallelDo() throws Exception { LCollection<String> result = lc().parallelDo(ctx -> { if (ctx.element().key > 26) ctx.emit(ctx.element().name); }, strings()); assertCollectionOf(result, "David", "Erik"); } @Test public void testParallelDoPair() throws Exception { LTable<Integer, String> result = lc().parallelDo(ctx -> { if (ctx.element().key > 26) ctx.emit(Pair.of(ctx.element().key, ctx.element().name)); }, tableOf(ints(), strings())); assertCollectionOf(result, Pair.of(28, "David"), Pair.of(31, "Erik")); } @Test public void testMap() throws Exception { assertCollectionOf(lc().map(r -> r.key, ints()), 14, 25, 21, 28, 31); } @Test public void testMapPair() throws Exception { assertCollectionOf(lc().map(r -> Pair.of(r.key, r.value), tableOf(ints(), longs())), Pair.of(14, 101L), Pair.of(25, 102L), Pair.of(21, 103L), Pair.of(28, 104L), Pair.of(31, 105L)); } @Test public void testFlatMap() throws Exception { assertCollectionOf( lc().flatMap(s -> Arrays.stream(s.name.split(" ")), strings()), "Alice", "Bo", "B", "Char", "Lotte", "David", "Erik"); } @Test public void testFilterMap() throws Exception { Map<String, String> lookupMap = ImmutableMap.of("Erik", "BOOM", "Alice", "POW"); assertCollectionOf( lc().filterMap(r -> lookupMap.containsKey(r.name) ? 
Optional.of(lookupMap.get(r.name)) : Optional.empty(), strings()), "BOOM", "POW" ); } @Test public void testFilter() throws Exception { assertCollectionOf(lc().filter(r -> r.key == 21), rec(21, "Char Lotte", 103L)); } @Test public void testIncrement() throws Exception { lc().increment("hello", "world"); long value = MemPipeline.getCounters().findCounter("hello", "world").getValue(); assertEquals(5L, value); } @Test public void testIncrementIf() throws Exception { lc().incrementIf("hello", "conditional_world", r -> r.key < 25); long value = MemPipeline.getCounters().findCounter("hello", "conditional_world").getValue(); assertEquals(2L, value); } @Test public void testBy() throws Exception { assertCollectionOf( lc().filter(r -> r.key == 21).by(r -> r.key, ints()), Pair.of(21, rec(21, "Char Lotte", 103L))); } @Test public void testCount() throws Exception { assertCollectionOf( Lambda.wrap(MemPipeline.typedCollectionOf(strings(), "a", "a", "a", "b", "b")).count(), Pair.of("a", 3L), Pair.of("b", 2L) ); } }
2,389
0
Create_ds/crunch/crunch-lambda/src/test/java/org/apache/crunch
Create_ds/crunch/crunch-lambda/src/test/java/org/apache/crunch/lambda/LGroupedTableTest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.lambda; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.apache.crunch.Pair; import org.apache.crunch.fn.Aggregators; import org.apache.crunch.impl.mem.MemPipeline; import org.junit.Test; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import static org.apache.crunch.lambda.TestCommon.assertCollectionOf; import static org.apache.crunch.types.avro.Avros.*; public class LGroupedTableTest { LGroupedTable<String, Integer> lgt = Lambda.wrap(MemPipeline.typedTableOf(tableOf(strings(), ints()), "a", 2, "a", 3, "b", 5, "c", 7, "c", 11, "c", 13, "c", 13)) .groupByKey(); @Test public void testCombineValues() throws Exception { assertCollectionOf(lgt.combineValues(Aggregators.MAX_INTS()), Pair.of("a", 3), Pair.of("b", 5), Pair.of("c", 13)); } @Test public void testCombineValues1() throws Exception { assertCollectionOf(lgt.combineValues(() -> Integer.MIN_VALUE, Integer::max, Collections::singleton), Pair.of("a", 3), Pair.of("b", 5), Pair.of("c", 13)); } @Test public void testMapValues() throws Exception { assertCollectionOf(lgt.mapValues(vs -> vs.map(i -> i.toString()).reduce((a, b) -> a + "," + b).get(), strings()), Pair.of("a", "2,3"), Pair.of("b", "5"), Pair.of("c", "7,11,13,13")); } @Test public void testCollectValues() throws Exception { assertCollectionOf(lgt.collectValues(ArrayList::new, Collection::add, collections(ints())), Pair.of("a", ImmutableList.of(2,3)), Pair.of("b", ImmutableList.of(5)), Pair.of("c", ImmutableList.of(7, 11, 13, 13))); } @Test public void testCollectAllValues() throws Exception { assertCollectionOf(lgt.collectAllValues(), Pair.of("a", ImmutableList.of(2,3)), Pair.of("b", ImmutableList.of(5)), Pair.of("c", ImmutableList.of(7, 11, 13, 13))); } @Test public void testCollectUniqueValues() throws Exception { assertCollectionOf(lgt.collectUniqueValues(), Pair.of("a", ImmutableSet.of(2, 3)), Pair.of("b", ImmutableSet.of(5)), Pair.of("c", ImmutableSet.of(7, 11, 13))); } @Test public void testReduceValues() throws Exception { assertCollectionOf(lgt.reduceValues((a, b) -> a * b), Pair.of("a", 6), Pair.of("b", 5), Pair.of("c", 7 * 11 * 13 * 13) ); } }
2,390
0
Create_ds/crunch/crunch-lambda/src/test/java/org/apache/crunch
Create_ds/crunch/crunch-lambda/src/test/java/org/apache/crunch/lambda/LTableTest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.lambda; import com.google.common.collect.ImmutableList; import org.apache.crunch.Pair; import org.apache.crunch.impl.mem.MemPipeline; import org.junit.Test; import static org.apache.crunch.lambda.TestCommon.assertCollectionOf; import static org.apache.crunch.types.avro.Avros.*; public class LTableTest { private LTable<String, Integer> lt1 = Lambda.wrap(MemPipeline.typedTableOf(tableOf(strings(), ints()), "a", 2, "a", 3, "b", 5, "c", 7, "c", 11, "c", 13, "c", 13)); private LTable<String, Long> lt2 = Lambda.wrap(MemPipeline.typedTableOf(tableOf(strings(), longs()), "a", 101L, "b", 102L, "c", 103L )); @Test public void testKeys() throws Exception { assertCollectionOf(lt1.keys(), "a", "a", "b", "c", "c", "c", "c"); } @Test public void testValues() throws Exception { assertCollectionOf(lt2.values(), 101L, 102L, 103L); } @Test public void testMapKeys() throws Exception { assertCollectionOf(lt2.mapKeys(String::toUpperCase, strings()), Pair.of("A", 101L), Pair.of("B", 102L), Pair.of("C", 103L) ); } @Test public void testMapValues() throws Exception { assertCollectionOf(lt2.mapValues(v -> v * 2, longs()), Pair.of("a", 202L), Pair.of("b", 204L), Pair.of("c", 206L) ); } @Test public void testJoin() throws Exception { assertCollectionOf(lt1.join(lt2).values(), Pair.of(2, 101L), Pair.of(3, 101L), Pair.of(5, 102L), Pair.of(7, 103L), Pair.of(11, 103L), Pair.of(13, 103L), Pair.of(13, 103L)); } @Test public void testCogroup() throws Exception { assertCollectionOf(lt1.cogroup(lt2).values(), Pair.of(ImmutableList.of(2, 3), ImmutableList.of(101L)), Pair.of(ImmutableList.of(5), ImmutableList.of(102L)), Pair.of(ImmutableList.of(7, 11, 13, 13), ImmutableList.of(103L)) ); } }
2,391
0
Create_ds/crunch/crunch-lambda/src/test/java/org/apache/crunch
Create_ds/crunch/crunch-lambda/src/test/java/org/apache/crunch/lambda/TestCommon.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.lambda; import com.google.common.collect.Sets; import java.util.Set; import java.util.stream.Collectors; import static org.junit.Assert.assertEquals; public class TestCommon { @SafeVarargs public static <T> void assertCollectionOf(LCollection<T> actual, T... expected) { Set<T> actualSet = actual.materialize().collect(Collectors.toSet()); Set<T> expectedSet = Sets.newHashSet(expected); assertEquals(expectedSet, actualSet); } }
2,392
0
Create_ds/crunch/crunch-lambda/src/test/java/org/apache/crunch
Create_ds/crunch/crunch-lambda/src/test/java/org/apache/crunch/lambda/TypedRecord.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.lambda; public class TypedRecord { public int key; public String name; public long value; public static TypedRecord rec(int key, String name, long value) { TypedRecord record = new TypedRecord(); record.key = key; record.name = name; record.value = value; return record; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TypedRecord that = (TypedRecord) o; if (key != that.key) return false; if (value != that.value) return false; return name != null ? name.equals(that.name) : that.name == null; } @Override public int hashCode() { int result = key; result = 31 * result + (name != null ? name.hashCode() : 0); result = 31 * result + (int) (value ^ (value >>> 32)); return result; } }
2,393
0
Create_ds/crunch/crunch-lambda/src/main/java/org/apache/crunch
Create_ds/crunch/crunch-lambda/src/main/java/org/apache/crunch/lambda/LDoFnWrapper.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.lambda; import org.apache.crunch.DoFn; import org.apache.crunch.Emitter; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.TaskInputOutputContext; class LDoFnWrapper<S, T> extends DoFn<S, T> { private final LDoFn<S, T> fn; private transient Context<S, T> ctxt; public LDoFnWrapper(LDoFn<S, T> fn) { this.fn = fn; } @Override public void initialize() { super.initialize(); if (getContext() == null) { this.ctxt = new Context<>(getConfiguration()); } else { this.ctxt = new Context<>(getContext()); } } @Override public void process(S input, Emitter<T> emitter) { fn.process(ctxt.update(input, emitter)); } static class Context<S, T> implements LDoFnContext<S, T> { private S element; private Emitter<T> emitter; private TaskInputOutputContext context; private Configuration conf; public Context(TaskInputOutputContext context) { this.context = context; this.conf = context.getConfiguration(); } public Context(Configuration conf) { this.context = null; this.conf = conf; } public Context<S, T> update(S element, Emitter<T> emitter) { this.element = element; this.emitter = emitter; return this; } public S element() { return element; } public void emit(T t) { emitter.emit(t); } public TaskInputOutputContext getContext() { return context; } public Configuration getConfiguration() { return conf; } public void increment(String groupName, String counterName) { increment(groupName, counterName, 1); } public void increment(String groupName, String counterName, long value) { if (context != null) { context.getCounter(groupName, counterName).increment(value); } } public void increment(Enum<?> counterName) { increment(counterName, 1); } public void increment(Enum<?> counterName, long value) { if (context != null) { context.getCounter(counterName).increment(value); } } } }
2,394
0
Create_ds/crunch/crunch-lambda/src/main/java/org/apache/crunch
Create_ds/crunch/crunch-lambda/src/main/java/org/apache/crunch/lambda/Lambda.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.lambda; import org.apache.crunch.PCollection; import org.apache.crunch.PGroupedTable; import org.apache.crunch.PTable; /** * Entry point for the crunch-lambda API. Use this to create {@link LCollection}, {@link LTable} and * {@link LGroupedTable} objects from their corresponding {@link PCollection}, {@link PTable} and {@link PGroupedTable} * types. * * <p>The crunch-lambda API allows you to write Crunch pipelines using lambda expressions and method references instead * of creating classes (anonymous, inner, or top level) for each operation that needs to be completed. Many pipelines * are composed of a large number of simple operations, rather than a small number of complex operations, making this * strategy much more efficient to code and easy to read for those able to use Java 8 in their distributed computation * environments.</p> * * <p>You use the API by wrapping your Crunch type into an L-type object. This class provides static methods for that. * You can then use the lambda API methods on the L-type object, yielding more L-type objects. If at any point you need * to go back to the standard Crunch world (for compatibility with existing code or complex use cases), you can at any * time call underlying() on an L-type object to get a Crunch object</p> * * <p>Example (the obligatory wordcount):</p> * * <pre>{@code * Pipeline pipeline = new MRPipeline(getClass()); * LCollection<String> inputText = Lambda.wrap(pipeline.readTextFile("/path/to/input/file")); * inputText.flatMap(line -> Arrays.stream(line.split(" ")), Writables.strings()) * .count() * .map(wordCountPair -> wordCountPair.first() + ": " + wordCountPair.second(), strings()) * .write(To.textFile("/path/to/output/file")); * pipeline.run(); * }</pre> * */ public class Lambda { private static LCollectionFactory INSTANCE = new LCollectionFactoryImpl(); public static <S> LCollection<S> wrap(PCollection<S> collection) { return INSTANCE.wrap(collection); } public static <K, V> LTable<K, V> wrap(PTable<K, V> collection) { return INSTANCE.wrap(collection); } public static <K, V> LGroupedTable<K, V> wrap(PGroupedTable<K, V> collection) { return INSTANCE.wrap(collection); } }
2,395
0
Create_ds/crunch/crunch-lambda/src/main/java/org/apache/crunch
Create_ds/crunch/crunch-lambda/src/main/java/org/apache/crunch/lambda/LCollection.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.lambda; import org.apache.crunch.*; import org.apache.crunch.fn.Aggregators; import org.apache.crunch.lambda.fn.SFunction; import org.apache.crunch.lambda.fn.SPredicate; import org.apache.crunch.types.PTableType; import org.apache.crunch.types.PType; import org.apache.crunch.types.PTypeFamily; import java.util.Optional; import java.util.stream.Stream; import java.util.stream.StreamSupport; /** * Java 8 friendly version of the {@link PCollection} interface, allowing distributed operations to be expressed in * terms of lambda expressions and method references, instead of creating a new class implementation for each operation. * @param <S> The type of the elements in this collection */ public interface LCollection<S> { /** * Get the underlying {@link PCollection} for this LCollection */ PCollection<S> underlying(); /** * Get the {@link LCollectionFactory} which can be used to create new Ltype instances */ LCollectionFactory factory(); /** * Transform this LCollection using a standard Crunch {@link DoFn} */ default <T> LCollection<T> parallelDo(DoFn<S, T> fn, PType<T> pType) { return factory().wrap(underlying().parallelDo(fn, pType)); } /** * Transform this LCollection to an {@link LTable} using a standard Crunch {@link DoFn} */ default <K, V> LTable<K, V> parallelDo(DoFn<S, Pair<K, V>> fn, PTableType<K, V> pType) { return factory().wrap(underlying().parallelDo(fn, pType)); } /** * Transform this LCollection using a Lambda-friendly {@link LDoFn}. */ default <T> LCollection<T> parallelDo(LDoFn<S, T> fn, PType<T> pType) { return parallelDo(new LDoFnWrapper<>(fn), pType); } /** * Transform this LCollection using a Lambda-friendly {@link LDoFn}. */ default <K, V> LTable<K, V> parallelDo(LDoFn<S, Pair<K, V>> fn, PTableType<K, V> pType) { return parallelDo(new LDoFnWrapper<>(fn), pType); } /** * Map the elements of this collection 1-1 through the supplied function. */ default <T> LCollection<T> map(SFunction<S, T> fn, PType<T> pType) { return parallelDo(ctx -> ctx.emit(fn.apply(ctx.element())), pType); } /** * Map the elements of this collection 1-1 through the supplied function to yield an {@link LTable} */ default <K, V> LTable<K, V> map(SFunction<S, Pair<K, V>> fn, PTableType<K, V> pType) { return parallelDo(ctx -> ctx.emit(fn.apply(ctx.element())), pType); } /** * Map each element to zero or more output elements using the provided stream-returning function. 
*/ default <T> LCollection<T> flatMap(SFunction<S, Stream<T>> fn, PType<T> pType) { return parallelDo(ctx -> fn.apply(ctx.element()).forEach(ctx::emit), pType); } /** * Map each element to zero or more output elements using the provided stream-returning function to yield an * {@link LTable} */ default <K, V> LTable<K, V> flatMap(SFunction<S, Stream<Pair<K, V>>> fn, PTableType<K, V> pType) { return parallelDo(ctx -> fn.apply(ctx.element()).forEach(ctx::emit), pType); } /** * Combination of a filter and map operation by using a function with {@link Optional} return type. */ default <T> LCollection<T> filterMap(SFunction<S, Optional<T>> fn, PType<T> pType) { return parallelDo(ctx -> fn.apply(ctx.element()).ifPresent(ctx::emit), pType); } /** * Combination of a filter and map operation by using a function with {@link Optional} return type. */ default <K, V> LTable<K, V> filterMap(SFunction<S, Optional<Pair<K, V>>> fn, PTableType<K, V> pType) { return parallelDo(ctx -> fn.apply(ctx.element()).ifPresent(ctx::emit), pType); } /** * Filter the collection using the supplied predicate. */ default LCollection<S> filter(SPredicate<S> predicate) { return parallelDo(ctx -> { if (predicate.test(ctx.element())) ctx.emit(ctx.element());}, pType()); } /** * Union this LCollection with another LCollection of the same type */ default LCollection<S> union(LCollection<S> other) { return factory().wrap(underlying().union(other.underlying())); } /** * Union this LCollection with a {@link PCollection} of the same type */ default LCollection<S> union(PCollection<S> other) { return factory().wrap(underlying().union(other)); } /** * Increment a counter for every element in the collection */ default LCollection<S> increment(Enum<?> counter) { return parallelDo(ctx -> { ctx.increment(counter); ctx.emit(ctx.element()); }, pType()); } /** * Increment a counter for every element in the collection */ default LCollection<S> increment(String counterGroup, String counterName) { return parallelDo(ctx -> { ctx.increment(counterGroup, counterName); ctx.emit(ctx.element()); }, pType()); } /** * Increment a counter for every element satisfying the conditional predicate supplied. */ default LCollection<S> incrementIf(Enum<?> counter, SPredicate<S> condition) { return parallelDo(ctx -> { if (condition.test(ctx.element())) ctx.increment(counter); ctx.emit(ctx.element()); }, pType()); } /** * Increment a counter for every element satisfying the conditional predicate supplied. */ default LCollection<S> incrementIf(String counterGroup, String counterName, SPredicate<S> condition) { return parallelDo(ctx -> { if (condition.test(ctx.element())) ctx.increment(counterGroup, counterName); ctx.emit(ctx.element()); }, pType()); } /** * Cache the underlying {@link PCollection} */ default LCollection<S> cache() { underlying().cache(); return this; } /** * Cache the underlying {@link PCollection} */ default LCollection<S> cache(CachingOptions options) { underlying().cache(options); return this; } /** * Key this LCollection by a key extracted from the element to yield a {@link LTable} mapping the key to the whole * element. */ default <K> LTable<K, S> by(SFunction<S, K> extractFn, PType<K> pType) { return parallelDo( ctx -> ctx.emit(Pair.of(extractFn.apply(ctx.element()), ctx.element())), ptf().tableOf(pType, pType())); } /** * Count distinct values in this LCollection, yielding an {@link LTable} mapping each value to the number * of occurrences in the collection.
*/ default LTable<S, Long> count() { return map(a -> Pair.of(a, 1L), ptf().tableOf(pType(), ptf().longs())) .groupByKey() .combineValues(Aggregators.SUM_LONGS()); } /** * Obtain the contents of this LCollection as a {@link Stream} that can be processed locally. Note, this may trigger * your job to execute in a distributed environment if the pipeline has not yet been run. */ default Stream<S> materialize() { return StreamSupport.stream(underlying().materialize().spliterator(), false); } /** * Get the {@link PTypeFamily} representing how elements of this collection may be serialized. */ default PTypeFamily ptf() { return underlying().getPType().getFamily(); } /** * Get the {@link PType} representing how elements of this collection may be serialized. */ default PType<S> pType() { return underlying().getPType(); } /** * Write this collection to the specified {@link Target} */ default LCollection<S> write(Target target) { underlying().write(target); return this; } /** * Write this collection to the specified {@link Target} with the given {@link org.apache.crunch.Target.WriteMode} */ default LCollection<S> write(Target target, Target.WriteMode writeMode) { underlying().write(target, writeMode); return this; } }
2,396
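As a small, self-contained illustration of the filterMap contract defined above (a hypothetical snippet reusing the MemPipeline and Avros helpers already seen in the tests earlier in this listing):

import org.apache.crunch.impl.mem.MemPipeline;
import org.apache.crunch.lambda.LCollection;
import org.apache.crunch.lambda.Lambda;
import org.apache.crunch.types.avro.Avros;

import java.util.Optional;

public class FilterMapSketch {
  public static void main(String[] args) {
    LCollection<String> raw = Lambda.wrap(
        MemPipeline.typedCollectionOf(Avros.strings(), "1", "oops", "2", "3x"));
    // keep only the values that parse as integers, dropping the rest in a single pass
    LCollection<Integer> parsed = raw.filterMap(s -> {
      try {
        return Optional.of(Integer.parseInt(s));
      } catch (NumberFormatException e) {
        return Optional.empty();
      }
    }, Avros.ints());
    parsed.materialize().forEach(System.out::println); // prints 1 and 2
  }
}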
0
Create_ds/crunch/crunch-lambda/src/main/java/org/apache/crunch
Create_ds/crunch/crunch-lambda/src/main/java/org/apache/crunch/lambda/LAggregator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.lambda; import org.apache.crunch.fn.Aggregators; import org.apache.crunch.lambda.fn.SBiFunction; import org.apache.crunch.lambda.fn.SFunction; import org.apache.crunch.lambda.fn.SSupplier; /** * Crunch Aggregator expressed as a composition of functional interface implementations * @param <V> Type of values to be aggregated * @param <A> Type of object which stores objects as they are being aggregated */ public class LAggregator<V, A> extends Aggregators.SimpleAggregator<V> { private final SSupplier<A> initialSupplier; private final SBiFunction<A, V, A> combineFn; private final SFunction<A, Iterable<V>> outputFn; private A a; public LAggregator(SSupplier<A> initialSupplier, SBiFunction<A, V, A> combineFn, SFunction<A, Iterable<V>> outputFn) { this.initialSupplier = initialSupplier; this.combineFn = combineFn; this.outputFn = outputFn; } @Override public void reset() { a = initialSupplier.get(); } @Override public void update(V v) { a = combineFn.apply(a, v); } @Override public Iterable<V> results() { return outputFn.apply(a); } }
2,397
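A brief sketch of using LAggregator directly, equivalent to the supplier/combine/output triple exercised in the combineValues tests above; the values fed in are arbitrary:

import org.apache.crunch.lambda.LAggregator;

import java.util.Collections;

public class MaxAggregatorSketch {
  public static void main(String[] args) {
    // seeds with Integer.MIN_VALUE, keeps the running maximum, and emits it as a single result
    LAggregator<Integer, Integer> max =
        new LAggregator<>(() -> Integer.MIN_VALUE, Integer::max, Collections::singleton);
    max.reset();
    max.update(3);
    max.update(11);
    max.update(7);
    System.out.println(max.results()); // [11]
  }
}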
0
Create_ds/crunch/crunch-lambda/src/main/java/org/apache/crunch
Create_ds/crunch/crunch-lambda/src/main/java/org/apache/crunch/lambda/LCollectionFactory.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.lambda; import org.apache.crunch.PCollection; import org.apache.crunch.PGroupedTable; import org.apache.crunch.PTable; /** * Factory for creating {@link LCollection}, {@link LTable} and {@link LGroupedTable} objects from their corresponding * {@link PCollection}, {@link PTable} and {@link PGroupedTable} types. You probably don't want to use or implement this * interface directly. You should start with the {@link Lambda} class instead. */ public interface LCollectionFactory { /** * Wrap a PCollection into an LCollection */ <S> LCollection<S> wrap(PCollection<S> collection); /** * Wrap a PTable into an LTable */ <K, V> LTable<K, V> wrap(PTable<K, V> collection); /** * Wrap a PGroupedTable into an LGroupedTable */ <K, V> LGroupedTable<K, V> wrap(PGroupedTable<K, V> collection); }
2,398
0
Create_ds/crunch/crunch-lambda/src/main/java/org/apache/crunch
Create_ds/crunch/crunch-lambda/src/main/java/org/apache/crunch/lambda/LGroupedTable.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.crunch.lambda; import org.apache.crunch.Aggregator; import org.apache.crunch.PGroupedTable; import org.apache.crunch.Pair; import org.apache.crunch.lambda.fn.SBiConsumer; import org.apache.crunch.lambda.fn.SBiFunction; import org.apache.crunch.lambda.fn.SBinaryOperator; import org.apache.crunch.lambda.fn.SFunction; import org.apache.crunch.lambda.fn.SSupplier; import org.apache.crunch.types.PType; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.stream.Stream; import java.util.stream.StreamSupport; /** * Java 8 friendly version of the {@link PGroupedTable} interface, allowing distributed operations to be expressed in * terms of lambda expressions and method references, instead of creating a new class implementation for each operation. * @param <K> key type for this table * @param <V> value type for this table */ public interface LGroupedTable<K, V> extends LCollection<Pair<K, Iterable<V>>> { /** * Get the underlying {@link PGroupedTable} for this LGroupedTable */ PGroupedTable<K, V> underlying(); /** * Combine the value part of the table using the provided Crunch {@link Aggregator}. This will be optimised into * both a combine and reduce in the MapReduce implementation, with similar optimisations available for other * implementations. */ default LTable<K, V> combineValues(Aggregator<V> aggregator) { return factory().wrap(underlying().combineValues(aggregator)); } /** * Combine the value part of the table using the given functions. The supplier is used to create a new aggregating * type, the combineFn adds a value into the aggregate, and the output function transforms the aggregate into * an iterable of the original value type. For example, summation can be expressed as follows: * * <pre>{@code myGroupedTable.combineValues(() -> 0, (sum, value) -> sum + value, Collections::singleton) }</pre> * * <p>This will be optimised into both a combine and reduce in the MapReduce implementation, with similar * optimizations *available for other implementations.</p> */ default <A> LTable<K, V> combineValues( SSupplier<A> initialSupplier, SBiFunction<A, V, A> combineFn, SFunction<A, Iterable<V>> outputFn) { return combineValues(new LAggregator<>(initialSupplier, combineFn, outputFn)); } /** * Map the values in this LGroupedTable using a custom function. This function operates over a stream which can * be consumed only once. 
* * <p>Note that in serialization systems which heavily reuse objects (such as Avro), you may * in fact get given the same object multiple times with different data as you consume the stream, meaning it may * be necessary to detach values.</p> */ default <T> LTable<K, T> mapValues(SFunction<Stream<V>, T> fn, PType<T> pType) { return parallelDo( ctx -> ctx.emit(Pair.of( ctx.element().first(), fn.apply(StreamSupport.stream(ctx.element().second().spliterator(), false))) ), ptf().tableOf(keyType(), pType)); } /** * Collect the values into an aggregate type. This differs from combineValues in that it outputs the aggregate type * rather than the value type, and is designed to happen in one step (rather than being optimised into multiple * levels). This makes it much more suitable for assembling collections than computing simple numeric aggregates. * * <p>The supplier provides an "empty" object, then the consumer is called with each value. For example, to collect * all values into a {@link Collection}, one can do this:</p> * <pre>{@code * lgt.collectValues(ArrayList::new, Collection::add, lgt.ptf().collections(lgt.valueType())) * }</pre> * * <p>This is in fact the default implementation for the collectAllValues() method.</p> * * <p>Note that in serialization systems which heavily reuse objects (such as Avro), you may * in fact get given the same object multiple times with different data as you consume the stream, meaning it may * be necessary to detach values.</p> */ default <C> LTable<K, C> collectValues(SSupplier<C> emptySupplier, SBiConsumer<C, V> addFn, PType<C> pType) { return parallelDo(ctx -> { C coll = emptySupplier.get(); ctx.element().second().forEach(v -> addFn.accept(coll, v)); ctx.emit(Pair.of(ctx.element().first(), coll)); }, ptf().tableOf(keyType(), pType)); } /** * Collect all values for each key into a {@link Collection} */ default LTable<K, Collection<V>> collectAllValues() { return collectValues(ArrayList::new, Collection::add, ptf().collections(valueType())); } /** * Collect all unique values for each key into a {@link Collection} (note that the value type must have a correctly- * defined equals() and hashCode()). */ default LTable<K, Collection<V>> collectUniqueValues() { return collectValues(HashSet::new, Collection::add, ptf().collections(valueType())); } /** * Reduce the values for each key using an associative binary operator. * For example {@code reduceValues((a, b) -> a + b)} for summation, {@code reduceValues((a, b) -> a + ", " + b)} * for comma-separated string concatenation and {@code reduceValues((a, b) -> a > b ? a : b)} for maximum value. */ default LTable<K, V> reduceValues(SBinaryOperator<V> operator) { return combineValues(() -> (V)null, (a, b) -> a == null ? b : operator.apply(a, b), Collections::singleton); } /** * Ungroup this LGroupedTable back into an {@link LTable}. This will still trigger a "reduce" operation, so is * usually only used in special cases like producing a globally-ordered list by feeding everything through * a single reducer. */ default LTable<K, V> ungroup() { return factory().wrap(underlying().ungroup()); } /** * Get a {@link PType} which can be used to serialize the key part of this grouped table */ default PType<K> keyType() { return underlying().getGroupedTableType().getTableType().getKeyType(); } /** * Get a {@link PType} which can be used to serialize the value part of this grouped table */ default PType<V> valueType() { return underlying().getGroupedTableType().getTableType().getValueType(); } }
2,399
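Finally, to complement the combineValues summation example in the javadoc above, a hypothetical variant that keeps the longest string value per key, built with the same MemPipeline helpers used in the tests earlier in this listing:

import org.apache.crunch.impl.mem.MemPipeline;
import org.apache.crunch.lambda.LGroupedTable;
import org.apache.crunch.lambda.LTable;
import org.apache.crunch.lambda.Lambda;

import java.util.Collections;

import static org.apache.crunch.types.avro.Avros.strings;
import static org.apache.crunch.types.avro.Avros.tableOf;

public class LongestValueSketch {
  public static void main(String[] args) {
    LGroupedTable<String, String> grouped = Lambda.wrap(MemPipeline.typedTableOf(
        tableOf(strings(), strings()),
        "a", "x", "a", "xyz", "b", "hello")).groupByKey();
    // the supplier seeds an empty accumulator, the combine function keeps the longer
    // string, and the output function emits the accumulator as the single result
    LTable<String, String> longestPerKey = grouped.combineValues(
        () -> "",
        (best, next) -> next.length() >= best.length() ? next : best,
        Collections::singleton);
    longestPerKey.materialize().forEach(System.out::println); // e.g. [a,xyz] and [b,hello]
  }
}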