index (int64) | repo_id (string, 9–205 chars) | file_path (string, 31–246 chars) | content (string, 1–12.2M chars) | __index_level_0__ (int64, 0–10k)
---|---|---|---|---|
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/SourceTargetHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
/**
 * Helper functions for computing the size and last-modified time of the files
 * referenced by the inputs/outputs of MapReduce jobs.
 */
public class SourceTargetHelper {
public static long getPathSize(Configuration conf, Path path) throws IOException {
return getPathSize(path.getFileSystem(conf), path);
}
public static long getPathSize(FileSystem fs, Path path) throws IOException {
FileStatus[] stati = fs.globStatus(path);
if (stati == null || stati.length == 0) {
return -1L;
}
long size = 0;
for (FileStatus status : stati) {
if (status.isDir()) {
for (RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(status.getPath(), true); iterator.hasNext();) {
size += iterator.next().getLen();
}
} else {
size += status.getLen();
}
}
return size;
}
public static long getLastModifiedAt(FileSystem fs, Path path) throws IOException {
FileStatus[] stati = fs.globStatus(path);
if (stati == null || stati.length == 0) {
return -1L;
}
long lastMod = -1;
for (FileStatus status : stati) {
if (lastMod < status.getModificationTime()) {
lastMod = status.getModificationTime();
}
}
return lastMod;
}
}
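// Hypothetical usage sketch, not part of the original Crunch source: shows how a client
// might use SourceTargetHelper to check the size of an input glob before launching a
// pipeline. The path below is illustrative only.
class SourceTargetHelperUsageSketch {
  static void logInputSize(Configuration conf) throws IOException {
    Path input = new Path("/data/events/*.avro"); // hypothetical input glob
    long bytes = SourceTargetHelper.getPathSize(conf, input); // returns -1 if nothing matches
    System.out.println("Matched input size in bytes: " + bytes);
  }
}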
| 2,800 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/ReadableSourceTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import org.apache.crunch.SourceTarget;
/**
* An interface that indicates that a {@code SourceTarget} instance can be read
* into the local client.
*
* @param <T>
* The type of data read.
*/
public interface ReadableSourceTarget<T> extends ReadableSource<T>, SourceTarget<T> {
}
| 2,801 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/To.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import org.apache.crunch.Target;
import org.apache.crunch.io.avro.AvroFileTarget;
import org.apache.crunch.io.impl.FileTargetImpl;
import org.apache.crunch.io.seq.SeqFileTarget;
import org.apache.crunch.io.text.TextFileTarget;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
* <p>Static factory methods for creating common {@link Target} types.</p>
*
* <p>The {@code To} class is intended to be used as part of a literate API
* for writing the output of Crunch pipelines to common file types. We can use
* the {@code Target} objects created by the factory methods in the {@code To}
* class with either the {@code write} method on the {@code Pipeline} class or
* the convenience {@code write} method on {@code PCollection} and {@code PTable}
* instances.
*
* <pre>
* {@code
*
* Pipeline pipeline = new MRPipeline(this.getClass());
* ...
* // Write a PCollection<String> to a text file:
* PCollection<String> words = ...;
* pipeline.write(words, To.textFile("/put/my/words/here"));
*
* // Write a PTable<Text, Text> to a sequence file:
* PTable<Text, Text> textToText = ...;
* textToText.write(To.sequenceFile("/words/to/words"));
*
* // Write a PCollection<MyAvroObject> to an Avro data file:
* PCollection<MyAvroObject> objects = ...;
* objects.write(To.avroFile("/my/avro/files"));
*
* // Write a PTable to a custom FileOutputFormat:
* PTable<KeyWritable, ValueWritable> custom = ...;
* pipeline.write(custom, To.formattedFile("/custom", MyFileFormat.class));
* }
* </pre>
*/
public class To {
/**
* Creates a {@code Target} at the given path name that writes data to
* a custom {@code FileOutputFormat}.
*
* @param pathName The name of the path to write the data to on the filesystem
* @param formatClass The {@code FileOutputFormat<K, V>} to write the data to
* @return A new {@code Target} instance
*/
public static <K extends Writable, V extends Writable> Target formattedFile(
String pathName, Class<? extends FileOutputFormat<K, V>> formatClass) {
return formattedFile(new Path(pathName), formatClass);
}
/**
* Creates a {@code Target} at the given {@code Path} that writes data to
* a custom {@code FileOutputFormat}.
*
* @param path The {@code Path} to write the data to
* @param formatClass The {@code FileOutputFormat} to write the data to
* @return A new {@code Target} instance
*/
public static <K extends Writable, V extends Writable> Target formattedFile(
Path path, Class<? extends FileOutputFormat<K, V>> formatClass) {
return new FileTargetImpl(path, formatClass, SequentialFileNamingScheme.getInstance());
}
/**
* Creates a {@code Target} at the given path name that writes data to
* Avro files. The {@code PType} for the written data must be for Avro records.
*
* @param pathName The name of the path to write the data to on the filesystem
* @return A new {@code Target} instance
*/
public static Target avroFile(String pathName) {
return avroFile(new Path(pathName));
}
/**
* Creates a {@code Target} at the given {@code Path} that writes data to
* Avro files. The {@code PType} for the written data must be for Avro records.
*
* @param path The {@code Path} to write the data to
* @return A new {@code Target} instance
*/
public static Target avroFile(Path path) {
return new AvroFileTarget(path);
}
/**
* Creates a {@code Target} at the given path name that writes data to
* SequenceFiles.
*
* @param pathName The name of the path to write the data to on the filesystem
* @return A new {@code Target} instance
*/
public static Target sequenceFile(String pathName) {
return sequenceFile(new Path(pathName));
}
/**
* Creates a {@code Target} at the given {@code Path} that writes data to
* SequenceFiles.
*
* @param path The {@code Path} to write the data to
* @return A new {@code Target} instance
*/
public static Target sequenceFile(Path path) {
return new SeqFileTarget(path);
}
/**
* Creates a {@code Target} at the given path name that writes data to
* text files.
*
* @param pathName The name of the path to write the data to on the filesystem
* @return A new {@code Target} instance
*/
public static Target textFile(String pathName) {
return textFile(new Path(pathName));
}
/**
* Creates a {@code Target} at the given {@code Path} that writes data to
* text files.
*
* @param path The {@code Path} to write the data to
* @return A new {@code Target} instance
*/
public static Target textFile(Path path) {
return new TextFileTarget(path);
}
}
| 2,802 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/FileReaderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import java.util.Iterator;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
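/**
 * A factory for creating {@code Iterator}s over the records of type {@code T} stored
 * in a file at a given {@code Path} on a {@code FileSystem}.
 */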
public interface FileReaderFactory<T> {
Iterator<T> read(FileSystem fs, Path path);
}
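// Hypothetical implementation sketch, not part of the original Crunch source: a minimal
// FileReaderFactory that yields the lines of a text file. The class name is illustrative
// and error handling is simplified.
class TextLineReaderFactorySketch implements FileReaderFactory<String> {
  @Override
  public Iterator<String> read(FileSystem fs, Path path) {
    try {
      java.io.BufferedReader reader = new java.io.BufferedReader(
          new java.io.InputStreamReader(fs.open(path), java.nio.charset.StandardCharsets.UTF_8));
      // BufferedReader.lines() is lazy; the underlying stream stays open until fully consumed.
      return reader.lines().iterator();
    } catch (java.io.IOException e) {
      throw new RuntimeException("Unable to open " + path, e);
    }
  }
}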
| 2,803 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/Compress.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import org.apache.avro.file.DataFileConstants;
import org.apache.avro.mapred.AvroJob;
import org.apache.crunch.Target;
import org.apache.crunch.io.parquet.AvroParquetFileSourceTarget;
import org.apache.crunch.io.parquet.AvroParquetFileTarget;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
/**
* Helper functions for compressing output data.
*/
public class Compress {
/**
* Configure the given output target to be compressed using the given codec.
*/
public static <T extends Target> T compress(T target, Class<? extends CompressionCodec> codecClass) {
return (T) target.outputConf("mapred.output.compress", "true")
.outputConf("mapred.output.compression.codec", codecClass.getCanonicalName());
}
/**
* Configure the given output target to be compressed using Gzip.
*/
public static <T extends Target> T gzip(T target) {
return (T) compress(target, GzipCodec.class)
.outputConf(AvroJob.OUTPUT_CODEC, DataFileConstants.DEFLATE_CODEC);
}
/**
* Configure the given output target to be compressed using Snappy. If the Target is one of the AvroParquet targets
* contained in Crunch, the Parquet-specific SnappyCodec will be used instead of the default Hadoop one.
*/
public static <T extends Target> T snappy(T target) {
Class<? extends CompressionCodec> snappyCodec = org.apache.hadoop.io.compress.SnappyCodec.class;
if (target instanceof AvroParquetFileTarget || target instanceof AvroParquetFileSourceTarget) {
snappyCodec = org.apache.parquet.hadoop.codec.SnappyCodec.class;
}
return (T) compress(target, snappyCodec)
.outputConf(AvroJob.OUTPUT_CODEC, DataFileConstants.SNAPPY_CODEC);
}
}
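// Hypothetical usage sketch, not part of the original Crunch source: wraps a Target
// created by the To factory with gzip compression before handing it to a pipeline write.
// The output path is illustrative only.
class CompressUsageSketch {
  static Target gzippedAvroTarget() {
    // Compress.gzip sets the MapReduce output compression settings and the Avro output codec.
    return Compress.gzip(To.avroFile("/output/events-compressed"));
  }
}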
| 2,804 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/PathTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* A target whose output goes to a given path on a file system.
*/
public interface PathTarget extends MapReduceTarget {
Path getPath();
/**
* Get the naming scheme to be used for outputs being written to an output
* path.
*
* @return the naming scheme to be used
*/
FileNamingScheme getFileNamingScheme();
/**
* Handles moving the output data for this target from a temporary location on the
* filesystem to its target path at the end of a MapReduce job.
*
* @param conf The job {@code Configuration}
* @param workingPath The temp directory that contains the output of the job
* @param index The index of this target for jobs that write multiple output files to a single directory
* @throws IOException
*/
void handleOutputs(Configuration conf, Path workingPath, int index) throws IOException;
}
| 2,805 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/CrunchInputs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
/**
* Helper functions for configuring multiple {@code InputFormat} instances within a single
* Crunch MapReduce job.
*/
public class CrunchInputs {
public static final String CRUNCH_INPUTS = "crunch.inputs.dir";
private static final char RECORD_SEP = ',';
private static final char FIELD_SEP = ';';
private static final char PATH_SEP = '|';
private static final Joiner JOINER = Joiner.on(FIELD_SEP);
private static final Splitter SPLITTER = Splitter.on(FIELD_SEP);
public static void addInputPath(Job job, Path path, FormatBundle inputBundle, int nodeIndex) {
addInputPaths(job, Collections.singleton(path), inputBundle, nodeIndex);
}
public static void addInputPaths(Job job, Collection<Path> paths, FormatBundle inputBundle, int nodeIndex) {
Configuration conf = job.getConfiguration();
List<String> encodedPathStrs = Lists.newArrayListWithExpectedSize(paths.size());
for (Path path : paths) {
String pathStr = encodePath(path);
Preconditions.checkArgument(pathStr.indexOf(RECORD_SEP) == -1 && pathStr.indexOf(FIELD_SEP) == -1 && pathStr.indexOf(PATH_SEP) == -1);
encodedPathStrs.add(pathStr);
}
String inputs = JOINER.join(inputBundle.serialize(), String.valueOf(nodeIndex), Joiner.on(PATH_SEP).join(encodedPathStrs));
String existing = conf.get(CRUNCH_INPUTS);
conf.set(CRUNCH_INPUTS, existing == null ? inputs : existing + RECORD_SEP + inputs);
}
public static Map<FormatBundle, Map<Integer, List<Path>>> getFormatNodeMap(JobContext job) {
Map<FormatBundle, Map<Integer, List<Path>>> formatNodeMap = Maps.newHashMap();
Configuration conf = job.getConfiguration();
String crunchInputs = conf.get(CRUNCH_INPUTS);
if (crunchInputs == null || crunchInputs.isEmpty()) {
return ImmutableMap.of();
}
for (String input : Splitter.on(RECORD_SEP).split(crunchInputs)) {
List<String> fields = Lists.newArrayList(SPLITTER.split(input));
FormatBundle<InputFormat> inputBundle = FormatBundle.fromSerialized(fields.get(0), job.getConfiguration());
if (!formatNodeMap.containsKey(inputBundle)) {
formatNodeMap.put(inputBundle, Maps.<Integer, List<Path>>newHashMap());
}
Integer nodeIndex = Integer.valueOf(fields.get(1));
if (!formatNodeMap.get(inputBundle).containsKey(nodeIndex)) {
formatNodeMap.get(inputBundle).put(nodeIndex, Lists.<Path>newLinkedList());
}
List<Path> formatNodePaths = formatNodeMap.get(inputBundle).get(nodeIndex);
String encodedPaths = fields.get(2);
for (String encodedPath : Splitter.on(PATH_SEP).split(encodedPaths)) {
formatNodePaths.add(decodePath(encodedPath));
}
}
return formatNodeMap;
}
private static String encodePath(Path path) {
try {
return URLEncoder.encode(path.toString(), "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
}
private static Path decodePath(String encodedPath) {
try {
return new Path(URLDecoder.decode(encodedPath, "UTF-8"));
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
}
}
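// Hypothetical usage sketch, not part of the original Crunch source: registers a single
// text input path under node index 0. FormatBundle.forInput is assumed here to mirror the
// FormatBundle.forOutput factory used in CrunchOutputs; the path is illustrative only.
class CrunchInputsUsageSketch {
  static void registerTextInput(Job job) {
    FormatBundle<org.apache.hadoop.mapreduce.lib.input.TextInputFormat> bundle =
        FormatBundle.forInput(org.apache.hadoop.mapreduce.lib.input.TextInputFormat.class);
    CrunchInputs.addInputPath(job, new Path("/data/raw/part-00000"), bundle, 0);
  }
}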
| 2,806 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/From.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import java.io.IOException;
import java.util.List;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.mapred.FsInput;
import org.apache.avro.specific.SpecificRecord;
import org.apache.crunch.Pair;
import org.apache.crunch.Source;
import org.apache.crunch.TableSource;
import org.apache.crunch.io.avro.AvroFileSource;
import org.apache.crunch.io.avro.AvroTableFileSource;
import org.apache.crunch.io.impl.FileTableSourceImpl;
import org.apache.crunch.io.seq.SeqFileSource;
import org.apache.crunch.io.seq.SeqFileTableSource;
import org.apache.crunch.io.text.TextFileSource;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.avro.Avros;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
/**
* <p>Static factory methods for creating common {@link Source} types.</p>
*
* <p>The {@code From} class is intended to provide a literate API for creating
* Crunch pipelines from common input file types.
*
* <pre>
* {@code
*
* Pipeline pipeline = new MRPipeline(this.getClass());
*
* // Reference the lines of a text file by wrapping the TextInputFormat class.
* PCollection<String> lines = pipeline.read(From.textFile("/path/to/myfiles"));
*
* // Reference entries from a sequence file where the key is a LongWritable and the
* // value is a custom Writable class.
* PTable<LongWritable, MyWritable> table = pipeline.read(From.sequenceFile(
* "/path/to/seqfiles", LongWritable.class, MyWritable.class));
*
* // Reference the records from an Avro file, where MyAvroObject implements Avro's
* // SpecificRecord interface.
* PCollection<MyAvroObject> myObjects = pipeline.read(From.avroFile("/path/to/avrofiles",
* MyAvroObject.class));
*
* // References the key-value pairs from a custom extension of FileInputFormat:
* PTable<KeyWritable, ValueWritable> custom = pipeline.read(From.formattedFile(
* "/custom", MyFileInputFormat.class, KeyWritable.class, ValueWritable.class));
* }
* </pre>
*/
public class From {
/**
* Creates a {@code TableSource<K, V>} for reading data from files that have custom
* {@code FileInputFormat<K, V>} implementations not covered by the provided {@code TableSource}
* and {@code Source} factory methods.
*
* @param pathName The name of the path to the data on the filesystem
* @param formatClass The {@code FileInputFormat} implementation
* @param keyClass The {@code Writable} to use for the key
* @param valueClass The {@code Writable} to use for the value
* @return A new {@code TableSource<K, V>} instance
*/
public static <K extends Writable, V extends Writable> TableSource<K, V> formattedFile(
String pathName, Class<? extends FileInputFormat<K, V>> formatClass,
Class<K> keyClass, Class<V> valueClass) {
return formattedFile(new Path(pathName), formatClass, keyClass, valueClass);
}
/**
* Creates a {@code TableSource<K, V>} for reading data from files that have custom
* {@code FileInputFormat<K, V>} implementations not covered by the provided {@code TableSource}
* and {@code Source} factory methods.
*
* @param path The {@code Path} to the data
* @param formatClass The {@code FileInputFormat} implementation
* @param keyClass The {@code Writable} to use for the key
* @param valueClass The {@code Writable} to use for the value
* @return A new {@code TableSource<K, V>} instance
*/
public static <K extends Writable, V extends Writable> TableSource<K, V> formattedFile(
Path path, Class<? extends FileInputFormat<K, V>> formatClass,
Class<K> keyClass, Class<V> valueClass) {
return formattedFile(path, formatClass, Writables.writables(keyClass),
Writables.writables(valueClass));
}
/**
* Creates a {@code TableSource<K, V>} for reading data from files that have custom
* {@code FileInputFormat<K, V>} implementations not covered by the provided {@code TableSource}
* and {@code Source} factory methods.
*
* @param paths A list of {@code Path}s to the data
* @param formatClass The {@code FileInputFormat} implementation
* @param keyClass The {@code Writable} to use for the key
* @param valueClass The {@code Writable} to use for the value
* @return A new {@code TableSource<K, V>} instance
*/
public static <K extends Writable, V extends Writable> TableSource<K, V> formattedFile(
List<Path> paths, Class<? extends FileInputFormat<K, V>> formatClass,
Class<K> keyClass, Class<V> valueClass) {
return formattedFile(paths, formatClass, Writables.writables(keyClass),
Writables.writables(valueClass));
}
/**
* Creates a {@code TableSource<K, V>} for reading data from files that have custom
* {@code FileInputFormat} implementations not covered by the provided {@code TableSource}
* and {@code Source} factory methods.
*
* @param pathName The name of the path to the data on the filesystem
* @param formatClass The {@code FileInputFormat} implementation
* @param keyType The {@code PType} to use for the key
* @param valueType The {@code PType} to use for the value
* @return A new {@code TableSource<K, V>} instance
*/
public static <K, V> TableSource<K, V> formattedFile(String pathName,
Class<? extends FileInputFormat<?, ?>> formatClass,
PType<K> keyType, PType<V> valueType) {
return formattedFile(new Path(pathName), formatClass, keyType, valueType);
}
/**
* Creates a {@code TableSource<K, V>} for reading data from files that have custom
* {@code FileInputFormat} implementations not covered by the provided {@code TableSource}
* and {@code Source} factory methods.
*
* @param path The {@code Path} to the data
* @param formatClass The {@code FileInputFormat} implementation
* @param keyType The {@code PType} to use for the key
* @param valueType The {@code PType} to use for the value
* @return A new {@code TableSource<K, V>} instance
*/
public static <K, V> TableSource<K, V> formattedFile(Path path,
Class<? extends FileInputFormat<?, ?>> formatClass,
PType<K> keyType, PType<V> valueType) {
PTableType<K, V> tableType = keyType.getFamily().tableOf(keyType, valueType);
return new FileTableSourceImpl<K, V>(path, tableType, formatClass);
}
/**
* Creates a {@code TableSource<K, V>} for reading data from files that have custom
* {@code FileInputFormat} implementations not covered by the provided {@code TableSource}
* and {@code Source} factory methods.
*
* @param paths A list of {@code Path}s to the data
* @param formatClass The {@code FileInputFormat} implementation
* @param keyType The {@code PType} to use for the key
* @param valueType The {@code PType} to use for the value
* @return A new {@code TableSource<K, V>} instance
*/
public static <K, V> TableSource<K, V> formattedFile(List<Path> paths,
Class<? extends FileInputFormat<?, ?>> formatClass,
PType<K> keyType, PType<V> valueType) {
PTableType<K, V> tableType = keyType.getFamily().tableOf(keyType, valueType);
return new FileTableSourceImpl<K, V>(paths, tableType, formatClass);
}
/**
* Creates a {@code Source<T>} instance from the Avro file(s) at the given path name.
*
* @param pathName The name of the path to the data on the filesystem
* @param avroClass The subclass of {@code SpecificRecord} to use for the Avro file
* @return A new {@code Source<T>} instance
*/
public static <T extends SpecificRecord> Source<T> avroFile(String pathName, Class<T> avroClass) {
return avroFile(new Path(pathName), avroClass);
}
/**
* Creates a {@code Source<T>} instance from the Avro file(s) at the given {@code Path}.
*
* @param path The {@code Path} to the data
* @param avroClass The subclass of {@code SpecificRecord} to use for the Avro file
* @return A new {@code Source<T>} instance
*/
public static <T extends SpecificRecord> Source<T> avroFile(Path path, Class<T> avroClass) {
return avroFile(path, Avros.specifics(avroClass));
}
/**
* Creates a {@code Source<T>} instance from the Avro file(s) at the given {@code Path}s.
*
* @param paths A list of {@code Path}s to the data
* @param avroClass The subclass of {@code SpecificRecord} to use for the Avro file
* @return A new {@code Source<T>} instance
*/
public static <T extends SpecificRecord> Source<T> avroFile(List<Path> paths, Class<T> avroClass) {
return avroFile(paths, Avros.specifics(avroClass));
}
/**
* Creates a {@code Source<T>} instance from the Avro file(s) at the given path name.
*
* @param pathName The name of the path to the data on the filesystem
* @param ptype The {@code AvroType} for the Avro records
* @return A new {@code Source<T>} instance
*/
public static <T> Source<T> avroFile(String pathName, PType<T> ptype) {
return avroFile(new Path(pathName), ptype);
}
/**
* Creates a {@code Source<T>} instance from the Avro file(s) at the given {@code Path}.
*
* @param path The {@code Path} to the data
* @param ptype The {@code AvroType} for the Avro records
* @return A new {@code Source<T>} instance
*/
public static <T> Source<T> avroFile(Path path, PType<T> ptype) {
return new AvroFileSource<T>(path, (AvroType<T>) ptype);
}
/**
* Creates a {@code Source<T>} instance from the Avro file(s) at the given {@code Path}s.
*
* @param paths A list of {@code Path}s to the data
* @param ptype The {@code PType} for the Avro records
* @return A new {@code Source<T>} instance
*/
public static <T> Source<T> avroFile(List<Path> paths, PType<T> ptype) {
return new AvroFileSource<T>(paths, (AvroType<T>) ptype);
}
/**
* Creates a {@code Source<GenericData.Record>} by reading the schema of the Avro file
* at the given path. If the path is a directory, the schema of a file in the directory
* will be used to determine the schema to use.
*
* @param pathName The name of the path to the data on the filesystem
* @return A new {@code Source<GenericData.Record>} instance
*/
public static Source<GenericData.Record> avroFile(String pathName) {
return avroFile(new Path(pathName));
}
/**
* Creates a {@code Source<GenericData.Record>} by reading the schema of the Avro file
* at the given path. If the path is a directory, the schema of a file in the directory
* will be used to determine the schema to use.
*
* @param path The path to the data on the filesystem
* @return A new {@code Source<GenericData.Record>} instance
*/
public static Source<GenericData.Record> avroFile(Path path) {
return avroFile(path, new Configuration());
}
/**
* Creates a {@code Source<GenericData.Record>} by reading the schema of the Avro file
 * at the given paths. If the first path is a directory, the schema of a file in the directory
* will be used to determine the schema to use.
*
* @param paths A list of paths to the data on the filesystem
* @return A new {@code Source<GenericData.Record>} instance
*/
public static Source<GenericData.Record> avroFile(List<Path> paths) {
return avroFile(paths, new Configuration());
}
/**
* Creates a {@code Source<GenericData.Record>} by reading the schema of the Avro file
* at the given path using the {@code FileSystem} information contained in the given
* {@code Configuration} instance. If the path is a directory, the schema of a file in
* the directory will be used to determine the schema to use.
*
* @param path The path to the data on the filesystem
* @param conf The configuration information
* @return A new {@code Source<GenericData.Record>} instance
*/
public static Source<GenericData.Record> avroFile(Path path, Configuration conf) {
return avroFile(path, Avros.generics(getSchemaFromPath(path, conf)));
}
/**
* Creates a {@code Source<GenericData.Record>} by reading the schema of the Avro file
* at the given paths using the {@code FileSystem} information contained in the given
* {@code Configuration} instance. If the first path is a directory, the schema of a file in
* the directory will be used to determine the schema to use.
*
 * @param paths A list of paths to the data on the filesystem
* @param conf The configuration information
* @return A new {@code Source<GenericData.Record>} instance
*/
public static Source<GenericData.Record> avroFile(List<Path> paths, Configuration conf) {
Preconditions.checkArgument(!paths.isEmpty(), "At least one path must be supplied");
return avroFile(paths, Avros.generics(getSchemaFromPath(paths.get(0), conf)));
}
/**
* Creates a {@code TableSource<K,V>} for reading an Avro key/value file at the given path.
*
* @param path The path to the data on the filesystem
* @param tableType Avro table type for deserializing the table data
* @return a new {@code TableSource<K,V>} instance for reading Avro key/value data
*/
public static <K, V> TableSource<K, V> avroTableFile(Path path, PTableType<K, V> tableType) {
return avroTableFile(ImmutableList.of(path), tableType);
}
/**
* Creates a {@code TableSource<K,V>} for reading an Avro key/value file at the given paths.
*
* @param paths list of paths to be read by the returned source
* @param tableType Avro table type for deserializing the table data
* @return a new {@code TableSource<K,V>} instance for reading Avro key/value data
*/
public static <K, V> TableSource<K, V> avroTableFile(List<Path> paths, PTableType<K, V> tableType) {
return new AvroTableFileSource<K, V>(paths, (AvroType<Pair<K,V>>)tableType);
}
static Schema getSchemaFromPath(Path path, Configuration conf) {
DataFileReader reader = null;
try {
FileSystem fs = path.getFileSystem(conf);
if (!fs.isFile(path)) {
PathFilter ignoreHidden = new PathFilter() {
@Override
public boolean accept(Path path) {
String name = path.getName();
return !name.startsWith("_") && !name.startsWith(".");
}
};
FileStatus[] globStatus = fs.globStatus(path, ignoreHidden);
if (globStatus == null) {
throw new IllegalArgumentException("No valid files found in directory: " + path);
}
Path newPath = null;
for (FileStatus status : globStatus) {
if (status.isFile()) {
newPath = status.getPath();
break;
} else {
FileStatus[] listStatus = fs.listStatus(path, ignoreHidden);
if (listStatus != null && listStatus.length > 0) {
newPath = listStatus[0].getPath();
break;
}
}
}
if (newPath == null) {
throw new IllegalArgumentException("No valid files found in directory: " + path);
}
path = newPath;
}
reader = new DataFileReader(new FsInput(path, conf), new GenericDatumReader<GenericRecord>());
return reader.getSchema();
} catch (IOException e) {
throw new RuntimeException("Error reading schema from path: " + path, e);
} finally {
if (reader != null) {
try {
reader.close();
} catch (IOException e) {
// ignored
}
}
}
}
/**
 * Creates a {@code Source<T>} instance that reads the value field of each key-value
 * pair in the SequenceFile(s) at the given path name.
*
* @param pathName The name of the path to the data on the filesystem
* @param valueClass The {@code Writable} type for the value of the SequenceFile entry
* @return A new {@code Source<T>} instance
*/
public static <T extends Writable> Source<T> sequenceFile(String pathName, Class<T> valueClass) {
return sequenceFile(new Path(pathName), valueClass);
}
/**
 * Creates a {@code Source<T>} instance that reads the value field of each key-value
 * pair in the SequenceFile(s) at the given {@code Path}.
*
* @param path The {@code Path} to the data
* @param valueClass The {@code Writable} type for the value of the SequenceFile entry
* @return A new {@code Source<T>} instance
*/
public static <T extends Writable> Source<T> sequenceFile(Path path, Class<T> valueClass) {
return sequenceFile(path, Writables.writables(valueClass));
}
/**
 * Creates a {@code Source<T>} instance that reads the value field of each key-value
 * pair in the SequenceFile(s) at the given {@code Path}s.
*
* @param paths A list of {@code Path}s to the data
* @param valueClass The {@code Writable} type for the value of the SequenceFile entry
* @return A new {@code Source<T>} instance
*/
public static <T extends Writable> Source<T> sequenceFile(List<Path> paths, Class<T> valueClass) {
return sequenceFile(paths, Writables.writables(valueClass));
}
/**
 * Creates a {@code Source<T>} instance that reads the value field of each key-value
 * pair in the SequenceFile(s) at the given path name.
*
* @param pathName The name of the path to the data on the filesystem
* @param ptype The {@code PType} for the value of the SequenceFile entry
* @return A new {@code Source<T>} instance
*/
public static <T> Source<T> sequenceFile(String pathName, PType<T> ptype) {
return sequenceFile(new Path(pathName), ptype);
}
/**
 * Creates a {@code Source<T>} instance that reads the value field of each key-value
 * pair in the SequenceFile(s) at the given {@code Path}.
*
* @param path The {@code Path} to the data
* @param ptype The {@code PType} for the value of the SequenceFile entry
* @return A new {@code Source<T>} instance
*/
public static <T> Source<T> sequenceFile(Path path, PType<T> ptype) {
return new SeqFileSource<T>(path, ptype);
}
/**
 * Creates a {@code Source<T>} instance that reads the value field of each key-value
 * pair in the SequenceFile(s) at the given {@code Path}s.
*
* @param paths A list of {@code Path}s to the data
* @param ptype The {@code PType} for the value of the SequenceFile entry
* @return A new {@code Source<T>} instance
*/
public static <T> Source<T> sequenceFile(List<Path> paths, PType<T> ptype) {
return new SeqFileSource<T>(paths, ptype);
}
/**
* Creates a {@code TableSource<K, V>} instance for the SequenceFile(s) at the given path name.
*
* @param pathName The name of the path to the data on the filesystem
* @param keyClass The {@code Writable} subclass for the key of the SequenceFile entry
* @param valueClass The {@code Writable} subclass for the value of the SequenceFile entry
 * @return A new {@code TableSource<K, V>} instance
*/
public static <K extends Writable, V extends Writable> TableSource<K, V> sequenceFile(
String pathName, Class<K> keyClass, Class<V> valueClass) {
return sequenceFile(new Path(pathName), keyClass, valueClass);
}
/**
* Creates a {@code TableSource<K, V>} instance for the SequenceFile(s) at the given {@code Path}.
*
* @param path The {@code Path} to the data
* @param keyClass The {@code Writable} subclass for the key of the SequenceFile entry
* @param valueClass The {@code Writable} subclass for the value of the SequenceFile entry
 * @return A new {@code TableSource<K, V>} instance
*/
public static <K extends Writable, V extends Writable> TableSource<K, V> sequenceFile(
Path path, Class<K> keyClass, Class<V> valueClass) {
return sequenceFile(path, Writables.writables(keyClass), Writables.writables(valueClass));
}
/**
* Creates a {@code TableSource<K, V>} instance for the SequenceFile(s) at the given {@code Path}s.
*
* @param paths A list of {@code Path}s to the data
* @param keyClass The {@code Writable} subclass for the key of the SequenceFile entry
* @param valueClass The {@code Writable} subclass for the value of the SequenceFile entry
 * @return A new {@code TableSource<K, V>} instance
*/
public static <K extends Writable, V extends Writable> TableSource<K, V> sequenceFile(
List<Path> paths, Class<K> keyClass, Class<V> valueClass) {
return sequenceFile(paths, Writables.writables(keyClass), Writables.writables(valueClass));
}
/**
* Creates a {@code TableSource<K, V>} instance for the SequenceFile(s) at the given path name.
*
* @param pathName The name of the path to the data on the filesystem
* @param keyType The {@code PType} for the key of the SequenceFile entry
* @param valueType The {@code PType} for the value of the SequenceFile entry
 * @return A new {@code TableSource<K, V>} instance
*/
public static <K, V> TableSource<K, V> sequenceFile(String pathName, PType<K> keyType, PType<V> valueType) {
return sequenceFile(new Path(pathName), keyType, valueType);
}
/**
* Creates a {@code TableSource<K, V>} instance for the SequenceFile(s) at the given {@code Path}.
*
* @param path The {@code Path} to the data
* @param keyType The {@code PType} for the key of the SequenceFile entry
* @param valueType The {@code PType} for the value of the SequenceFile entry
 * @return A new {@code TableSource<K, V>} instance
*/
public static <K, V> TableSource<K, V> sequenceFile(Path path, PType<K> keyType, PType<V> valueType) {
PTypeFamily ptf = keyType.getFamily();
return new SeqFileTableSource<K, V>(path, ptf.tableOf(keyType, valueType));
}
/**
* Creates a {@code TableSource<K, V>} instance for the SequenceFile(s) at the given {@code Path}s.
*
* @param paths A list of {@code Path}s to the data
* @param keyType The {@code PType} for the key of the SequenceFile entry
* @param valueType The {@code PType} for the value of the SequenceFile entry
 * @return A new {@code TableSource<K, V>} instance
*/
public static <K, V> TableSource<K, V> sequenceFile(List<Path> paths, PType<K> keyType, PType<V> valueType) {
PTypeFamily ptf = keyType.getFamily();
return new SeqFileTableSource<K, V>(paths, ptf.tableOf(keyType, valueType));
}
/**
* Creates a {@code Source<String>} instance for the text file(s) at the given path name.
*
* @param pathName The name of the path to the data on the filesystem
* @return A new {@code Source<String>} instance
*/
public static Source<String> textFile(String pathName) {
return textFile(new Path(pathName));
}
/**
* Creates a {@code Source<String>} instance for the text file(s) at the given {@code Path}.
*
* @param path The {@code Path} to the data
* @return A new {@code Source<String>} instance
*/
public static Source<String> textFile(Path path) {
return textFile(path, Writables.strings());
}
/**
* Creates a {@code Source<String>} instance for the text file(s) at the given {@code Path}s.
*
* @param paths A list of {@code Path}s to the data
* @return A new {@code Source<String>} instance
*/
public static Source<String> textFile(List<Path> paths) {
return textFile(paths, Writables.strings());
}
/**
* Creates a {@code Source<T>} instance for the text file(s) at the given path name using
* the provided {@code PType<T>} to convert the input text.
*
* @param pathName The name of the path to the data on the filesystem
* @param ptype The {@code PType<T>} to use to process the input text
* @return A new {@code Source<T>} instance
*/
public static <T> Source<T> textFile(String pathName, PType<T> ptype) {
return textFile(new Path(pathName), ptype);
}
/**
* Creates a {@code Source<T>} instance for the text file(s) at the given {@code Path} using
* the provided {@code PType<T>} to convert the input text.
*
* @param path The {@code Path} to the data
* @param ptype The {@code PType<T>} to use to process the input text
* @return A new {@code Source<T>} instance
*/
public static <T> Source<T> textFile(Path path, PType<T> ptype) {
return new TextFileSource<T>(path, ptype);
}
/**
* Creates a {@code Source<T>} instance for the text file(s) at the given {@code Path}s using
* the provided {@code PType<T>} to convert the input text.
*
* @param paths A list of {@code Path}s to the data
* @param ptype The {@code PType<T>} to use to process the input text
* @return A new {@code Source<T>} instance
*/
public static <T> Source<T> textFile(List<Path> paths, PType<T> ptype) {
return new TextFileSource<T>(paths, ptype);
}
}
| 2,807 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/OutputHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import org.apache.crunch.Target;
import org.apache.crunch.types.PType;
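/**
 * Callback interface for configuring a {@code Target} to write records of a given
 * {@code PType}, returning {@code true} if the handler was able to configure the
 * target for that type.
 */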
public interface OutputHandler {
boolean configure(Target target, PType<?> ptype);
}
| 2,808 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/CrunchOutputs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import com.google.common.collect.Sets;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* An analogue of {@link CrunchInputs} for handling multiple {@code OutputFormat} instances
* writing to multiple files within a single MapReduce job.
*/
public class CrunchOutputs<K, V> {
public static final String CRUNCH_OUTPUTS = "crunch.outputs.dir";
public static final String CRUNCH_DISABLE_OUTPUT_COUNTERS = "crunch.disable.output.counters";
private static final char RECORD_SEP = ',';
private static final char FIELD_SEP = ';';
private static final Joiner JOINER = Joiner.on(FIELD_SEP);
private static final Splitter SPLITTER = Splitter.on(FIELD_SEP);
public static void addNamedOutput(Job job, String name,
Class<? extends OutputFormat> outputFormatClass,
Class keyClass, Class valueClass) {
addNamedOutput(job, name, FormatBundle.forOutput(outputFormatClass), keyClass, valueClass);
}
public static void addNamedOutput(Job job, String name,
FormatBundle<? extends OutputFormat> outputBundle,
Class keyClass, Class valueClass) {
Configuration conf = job.getConfiguration();
String inputs = JOINER.join(name, outputBundle.serialize(), keyClass.getName(), valueClass.getName());
String existing = conf.get(CRUNCH_OUTPUTS);
conf.set(CRUNCH_OUTPUTS, existing == null ? inputs : existing + RECORD_SEP + inputs);
}
public static void checkOutputSpecs(JobContext jc) throws IOException, InterruptedException {
Map<String, OutputConfig> outputs = getNamedOutputs(jc.getConfiguration());
for (Map.Entry<String, OutputConfig> e : outputs.entrySet()) {
String namedOutput = e.getKey();
Job job = getJob(jc.getJobID(), e.getKey(), jc.getConfiguration());
OutputFormat fmt = getOutputFormat(namedOutput, job, e.getValue());
fmt.checkOutputSpecs(job);
}
}
public static OutputCommitter getOutputCommitter(TaskAttemptContext tac) throws IOException, InterruptedException {
Map<String, OutputConfig> outputs = getNamedOutputs(tac.getConfiguration());
Map<String, OutputCommitter> committers = Maps.newHashMap();
for (Map.Entry<String, OutputConfig> e : outputs.entrySet()) {
String namedOutput = e.getKey();
Job job = getJob(tac.getJobID(), e.getKey(), tac.getConfiguration());
OutputFormat fmt = getOutputFormat(namedOutput, job, e.getValue());
TaskAttemptContext taskContext = getTaskContext(tac, job);
OutputCommitter oc = fmt.getOutputCommitter(taskContext);
committers.put(namedOutput, oc);
}
return new CompositeOutputCommitter(outputs, committers);
}
public static class OutputConfig<K, V> {
public FormatBundle<OutputFormat<K, V>> bundle;
public Class<K> keyClass;
public Class<V> valueClass;
public OutputConfig(FormatBundle<OutputFormat<K, V>> bundle,
Class<K> keyClass, Class<V> valueClass) {
this.bundle = bundle;
this.keyClass = keyClass;
this.valueClass = valueClass;
}
}
private static Map<String, OutputConfig> getNamedOutputs(
TaskInputOutputContext<?, ?, ?, ?> context) {
return getNamedOutputs(context.getConfiguration());
}
public static Map<String, OutputConfig> getNamedOutputs(Configuration conf) {
Map<String, OutputConfig> out = Maps.newHashMap();
String serOut = conf.get(CRUNCH_OUTPUTS);
if (serOut == null || serOut.isEmpty()) {
return out;
}
for (String input : Splitter.on(RECORD_SEP).split(conf.get(CRUNCH_OUTPUTS))) {
List<String> fields = Lists.newArrayList(SPLITTER.split(input));
String name = fields.get(0);
FormatBundle<OutputFormat> bundle = FormatBundle.fromSerialized(fields.get(1), conf);
try {
Class<?> keyClass = Class.forName(fields.get(2));
Class<?> valueClass = Class.forName(fields.get(3));
out.put(name, new OutputConfig(bundle, keyClass, valueClass));
} catch (ClassNotFoundException e) {
throw new CrunchRuntimeException(e);
}
}
return out;
}
private static final String BASE_OUTPUT_NAME = "mapreduce.output.basename";
private static final String COUNTERS_GROUP = CrunchOutputs.class.getName();
private TaskInputOutputContext<?, ?, K, V> baseContext;
private Configuration baseConf;
private final Map<String, OutputConfig> namedOutputs;
private final Map<String, OutputState<K, V>> outputStates;
private final boolean disableOutputCounters;
/**
 * Creates and initializes multiple-outputs support; it should be instantiated
 * in the Mapper/Reducer setup method.
*
* @param context the TaskInputOutputContext object
*/
public CrunchOutputs(TaskInputOutputContext<?, ?, K, V> context) {
this(context.getConfiguration());
this.baseContext = context;
}
public CrunchOutputs(Configuration conf) {
this.baseConf = conf;
this.namedOutputs = getNamedOutputs(conf);
this.outputStates = Maps.newHashMap();
this.disableOutputCounters = conf.getBoolean(CRUNCH_DISABLE_OUTPUT_COUNTERS, false);
}
@SuppressWarnings("unchecked")
public void write(String namedOutput, K key, V value)
throws IOException, InterruptedException {
if (!namedOutputs.containsKey(namedOutput)) {
throw new IllegalArgumentException("Undefined named output '" +
namedOutput + "'");
}
if (!disableOutputCounters) {
baseContext.getCounter(COUNTERS_GROUP, namedOutput).increment(1);
}
getOutputState(namedOutput).write(key, value);
}
public void close() throws IOException, InterruptedException {
for (OutputState<?, ?> out : outputStates.values()) {
out.close();
}
}
private OutputState<K, V> getOutputState(String namedOutput) throws IOException, InterruptedException {
OutputState<?, ?> out = outputStates.get(namedOutput);
if (out != null) {
return (OutputState<K, V>) out;
}
// The following trick leverages the instantiation of a record writer via
// the job, thus supporting arbitrary output formats.
Job job = getJob(baseContext.getJobID(), namedOutput, baseConf);
// Get a job with the expected named output.
job = getJob(job.getJobID(), namedOutput, baseConf);
OutputFormat<K, V> fmt = getOutputFormat(namedOutput, job, namedOutputs.get(namedOutput));
TaskAttemptContext taskContext = getTaskContext(baseContext, job);
RecordWriter<K, V> recordWriter = fmt.getRecordWriter(taskContext);
OutputState<K, V> outputState = new OutputState(taskContext, recordWriter);
this.outputStates.put(namedOutput, outputState);
return outputState;
}
private static Job getJob(JobID jobID, String namedOutput, Configuration baseConf)
throws IOException {
Job job = new Job(new JobConf(baseConf));
job.getConfiguration().set("crunch.namedoutput", namedOutput);
setJobID(job, jobID, namedOutput);
return job;
}
private static TaskAttemptContext getTaskContext(TaskAttemptContext baseContext, Job job) {
org.apache.hadoop.mapreduce.TaskAttemptID baseTaskId = baseContext.getTaskAttemptID();
// Create a task ID context with our specialized job ID.
org.apache.hadoop.mapreduce.TaskAttemptID taskId;
taskId = new org.apache.hadoop.mapreduce.TaskAttemptID(job.getJobID().getJtIdentifier(),
job.getJobID().getId(),
baseTaskId.isMap(),
baseTaskId.getTaskID().getId(),
baseTaskId.getId());
return new TaskAttemptContextWrapper(baseContext, job.getConfiguration(), taskId);
}
private static void setJobID(Job job, JobID jobID, String namedOutput) {
JobID newJobID = jobID == null || jobID.getJtIdentifier().contains(namedOutput) ?
jobID :
new JobID(jobID.getJtIdentifier() + "_" + namedOutput, jobID.getId());
job.setJobID(newJobID);
}
private static void configureJob(
String namedOutput,
Job job,
OutputConfig outConfig) throws IOException {
job.getConfiguration().set(BASE_OUTPUT_NAME, namedOutput);
job.setOutputFormatClass(outConfig.bundle.getFormatClass());
job.setOutputKeyClass(outConfig.keyClass);
job.setOutputValueClass(outConfig.valueClass);
outConfig.bundle.configure(job.getConfiguration());
}
private static OutputFormat getOutputFormat(
String namedOutput,
Job job,
OutputConfig outConfig) throws IOException {
configureJob(namedOutput, job, outConfig);
try {
return ReflectionUtils.newInstance(
job.getOutputFormatClass(),
job.getConfiguration());
} catch (ClassNotFoundException e) {
throw new IOException(e);
}
}
private static class OutputState<K, V> {
private final TaskAttemptContext context;
private final RecordWriter<K, V> recordWriter;
public OutputState(TaskAttemptContext context, RecordWriter<K, V> recordWriter) {
this.context = context;
this.recordWriter = recordWriter;
}
public void write(K key, V value) throws IOException, InterruptedException {
recordWriter.write(key, value);
}
public void close() throws IOException, InterruptedException {
recordWriter.close(context);
}
}
private static class CompositeOutputCommitter extends OutputCommitter {
private final Map<String, OutputConfig> outputs;
private final Map<String, OutputCommitter> committers;
public CompositeOutputCommitter(Map<String, OutputConfig> outputs, Map<String, OutputCommitter> committers) {
this.outputs = outputs;
this.committers = committers;
}
private TaskAttemptContext getContext(String namedOutput, TaskAttemptContext baseContext) throws IOException {
Job job = getJob(baseContext.getJobID(), namedOutput, baseContext.getConfiguration());
configureJob(namedOutput, job, outputs.get(namedOutput));
return getTaskContext(baseContext, job);
}
@Override
public void setupJob(JobContext jobContext) throws IOException {
Configuration conf = jobContext.getConfiguration();
for (Map.Entry<String, OutputCommitter> e : committers.entrySet()) {
Job job = getJob(jobContext.getJobID(), e.getKey(), conf);
configureJob(e.getKey(), job, outputs.get(e.getKey()));
e.getValue().setupJob(job);
}
}
@Override
public void setupTask(TaskAttemptContext taskAttemptContext) throws IOException {
for (Map.Entry<String, OutputCommitter> e : committers.entrySet()) {
e.getValue().setupTask(getContext(e.getKey(), taskAttemptContext));
}
}
@Override
public boolean needsTaskCommit(TaskAttemptContext taskAttemptContext) throws IOException {
for (Map.Entry<String, OutputCommitter> e : committers.entrySet()) {
if (e.getValue().needsTaskCommit(getContext(e.getKey(), taskAttemptContext))) {
return true;
}
}
return false;
}
@Override
public void commitTask(TaskAttemptContext taskAttemptContext) throws IOException {
for (Map.Entry<String, OutputCommitter> e : committers.entrySet()) {
e.getValue().commitTask(getContext(e.getKey(), taskAttemptContext));
}
}
@Override
public void abortTask(TaskAttemptContext taskAttemptContext) throws IOException {
for (Map.Entry<String, OutputCommitter> e : committers.entrySet()) {
e.getValue().abortTask(getContext(e.getKey(), taskAttemptContext));
}
}
@Override
public void commitJob(JobContext jobContext) throws IOException {
Configuration conf = jobContext.getConfiguration();
Set<Path> handledPaths = Sets.newHashSet();
for (Map.Entry<String, OutputCommitter> e : committers.entrySet()) {
OutputCommitter oc = e.getValue();
Job job = getJob(jobContext.getJobID(), e.getKey(), conf);
configureJob(e.getKey(), job, outputs.get(e.getKey()));
if (oc instanceof FileOutputCommitter) {
Path outputPath = ((FileOutputCommitter) oc).getWorkPath().getParent();
if (handledPaths.contains(outputPath)) {
continue;
} else {
handledPaths.add(outputPath);
}
}
oc.commitJob(job);
}
}
@Override
public void abortJob(JobContext jobContext, JobStatus.State state) throws IOException {
Configuration conf = jobContext.getConfiguration();
for (Map.Entry<String, OutputCommitter> e : committers.entrySet()) {
Job job = getJob(jobContext.getJobID(), e.getKey(), conf);
configureJob(e.getKey(), job, outputs.get(e.getKey()));
e.getValue().abortJob(job, state);
}
}
}
private static class TaskAttemptContextWrapper extends TaskAttemptContextImpl {
private final TaskAttemptContext baseContext;
public TaskAttemptContextWrapper(TaskAttemptContext baseContext, Configuration config, TaskAttemptID taskId){
super(config, taskId);
this.baseContext = baseContext;
}
@Override
public Counter getCounter(Enum<?> counterName) {
return baseContext.getCounter(counterName);
}
@Override
public Counter getCounter(String groupName, String counterName) {
return baseContext.getCounter(groupName, counterName);
}
}
}
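/*
 * A minimal sketch, outside this file, of how a named output is typically registered
 * so that the helpers above (getOutputFormat, CompositeOutputCommitter) can later
 * rebuild its per-output configuration. The output format and key/value classes are
 * illustrative assumptions, not requirements of CrunchOutputs.
 *
 *   Job job = Job.getInstance(new Configuration());
 *   FormatBundle<SequenceFileOutputFormat> bundle =
 *       FormatBundle.forOutput(SequenceFileOutputFormat.class);
 *   CrunchOutputs.addNamedOutput(job, "events", bundle, Text.class, LongWritable.class);
 */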
| 2,809 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/package-info.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Data input and output for Pipelines.
*/
package org.apache.crunch.io;
| 2,810 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/FileNamingScheme.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* Encapsulates rules for naming output files. It is the responsibility of
* implementors to avoid file name collisions.
*/
public interface FileNamingScheme {
/**
* Get the output file name for a map task. Note that the implementation is
* responsible for avoiding naming collisions.
*
* @param configuration The configuration of the job for which the map output
* is being written
* @param outputDirectory The directory where the output will be written
* @return The filename for the output of the map task
* @throws IOException if an exception occurs while accessing the output file
* system
*/
String getMapOutputName(Configuration configuration, Path outputDirectory) throws IOException;
/**
* Get the output file name for a reduce task. Note that the implementation is
* responsible for avoiding naming collisions.
*
* @param configuration The configuration of the job for which output is being
* written
* @param outputDirectory The directory where the file will be written
* @param partitionId The partition of the reduce task being output
* @return The filename for the output of the reduce task
* @throws IOException if an exception occurs while accessing output file
* system
*/
String getReduceOutputName(Configuration configuration, Path outputDirectory, int partitionId) throws IOException;
}
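/*
 * A minimal, hypothetical implementation sketch (not part of Crunch) showing how the
 * two methods might be satisfied while mimicking Hadoop's familiar part-file names.
 * Collision avoidance here relies on callers passing distinct partition ids.
 *
 *   public class PartFileNamingScheme implements FileNamingScheme {
 *     @Override
 *     public String getMapOutputName(Configuration configuration, Path outputDirectory) {
 *       return "part-m-00000"; // assumes at most one map output lands in the directory
 *     }
 *     @Override
 *     public String getReduceOutputName(Configuration configuration, Path outputDirectory, int partitionId) {
 *       return String.format("part-r-%05d", partitionId);
 *     }
 *   }
 */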
| 2,811 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/FileTargetImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.Target;
import org.apache.crunch.impl.mr.plan.PlanningParameters;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.io.CrunchOutputs;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.OutputHandler;
import org.apache.crunch.io.PathTarget;
import org.apache.crunch.io.SourceTargetHelper;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PType;
import org.apache.crunch.util.CrunchRenameCopyListing;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.tools.CopyListing;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.tools.DistCpConstants;
import org.apache.hadoop.tools.DistCpOptions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class FileTargetImpl implements PathTarget {
private static final Logger LOG = LoggerFactory.getLogger(FileTargetImpl.class);
protected Path path;
private final FormatBundle<? extends FileOutputFormat> formatBundle;
private final FileNamingScheme fileNamingScheme;
public FileTargetImpl(Path path, Class<? extends FileOutputFormat> outputFormatClass,
FileNamingScheme fileNamingScheme) {
this(path, outputFormatClass, fileNamingScheme, ImmutableMap.<String, String>of());
}
public FileTargetImpl(Path path, Class<? extends FileOutputFormat> outputFormatClass,
FileNamingScheme fileNamingScheme, Map<String, String> extraConf) {
this.path = path;
this.formatBundle = FormatBundle.forOutput(outputFormatClass);
this.fileNamingScheme = fileNamingScheme;
if (extraConf != null && !extraConf.isEmpty()) {
for (Map.Entry<String, String> e : extraConf.entrySet()) {
formatBundle.set(e.getKey(), e.getValue());
}
}
}
@Override
public Target outputConf(String key, String value) {
formatBundle.set(key, value);
return this;
}
@Override
public Target fileSystem(FileSystem fileSystem) {
if (formatBundle.getFileSystem() != null) {
throw new IllegalStateException("Filesystem already set. Change is not supported.");
}
if (fileSystem != null) {
path = fileSystem.makeQualified(path);
formatBundle.setFileSystem(fileSystem);
}
return this;
}
@Override
public FileSystem getFileSystem() {
return formatBundle.getFileSystem();
}
@Override
public void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name) {
Converter converter = getConverter(ptype);
Class keyClass = converter.getKeyClass();
Class valueClass = converter.getValueClass();
configureForMapReduce(job, keyClass, valueClass, formatBundle, outputPath, name);
}
@Deprecated
protected void configureForMapReduce(Job job, Class keyClass, Class valueClass,
Class outputFormatClass, Path outputPath, String name) {
configureForMapReduce(job, keyClass, valueClass, FormatBundle.forOutput(outputFormatClass), outputPath, name);
}
protected void configureForMapReduce(Job job, Class keyClass, Class valueClass,
FormatBundle formatBundle, Path outputPath, String name) {
try {
FileOutputFormat.setOutputPath(job, outputPath);
} catch (Exception e) {
throw new RuntimeException(e);
}
if (name == null) {
job.setOutputFormatClass(formatBundle.getFormatClass());
formatBundle.configure(job.getConfiguration());
job.setOutputKeyClass(keyClass);
job.setOutputValueClass(valueClass);
} else {
CrunchOutputs.addNamedOutput(job, name, formatBundle, keyClass, valueClass);
}
}
@Override
public boolean accept(OutputHandler handler, PType<?> ptype) {
handler.configure(this, ptype);
return true;
}
@Override
public Converter<?, ?, ?, ?> getConverter(PType<?> ptype) {
return ptype.getConverter();
}
private class WorkingPathFileMover implements Callable<Boolean> {
private Configuration conf;
private Path src;
private Path dst;
private FileSystem srcFs;
private FileSystem dstFs;
private boolean sameFs;
public WorkingPathFileMover(Configuration conf, Path src, Path dst,
FileSystem srcFs, FileSystem dstFs, boolean sameFs) {
this.conf = conf;
this.src = src;
this.dst = dst;
this.srcFs = srcFs;
this.dstFs = dstFs;
this.sameFs = sameFs;
}
@Override
public Boolean call() throws IOException {
if (sameFs) {
return srcFs.rename(src, dst);
} else {
return FileUtil.copy(srcFs, src, dstFs, dst, true, true, conf);
}
}
}
@Override
public void handleOutputs(Configuration conf, Path workingPath, int index) throws IOException {
FileSystem srcFs = workingPath.getFileSystem(conf);
Configuration dstFsConf = getEffectiveBundleConfig(conf);
FileSystem dstFs = path.getFileSystem(dstFsConf);
if (!dstFs.exists(path)) {
dstFs.mkdirs(path);
}
Path srcPattern = getSourcePattern(workingPath, index);
boolean sameFs = isCompatible(srcFs, path);
boolean useDistributedCopy = conf.getBoolean(RuntimeParameters.FILE_TARGET_USE_DISTCP, true);
int maxDistributedCopyTasks = conf.getInt(RuntimeParameters.FILE_TARGET_MAX_DISTCP_TASKS, 100);
int maxDistributedCopyTaskBandwidthMB = conf.getInt(RuntimeParameters.FILE_TARGET_MAX_DISTCP_TASK_BANDWIDTH_MB,
DistCpConstants.DEFAULT_BANDWIDTH_MB);
int maxThreads = conf.getInt(RuntimeParameters.FILE_TARGET_MAX_THREADS, 1);
if (!sameFs) {
if (useDistributedCopy) {
LOG.info("Source and destination are in different file systems, performing distributed copy from {} to {}", srcPattern,
path);
handleOutputsDistributedCopy(conf, srcPattern, srcFs, dstFs, maxDistributedCopyTasks,
maxDistributedCopyTaskBandwidthMB);
} else {
LOG.info("Source and destination are in different file systems, performing asynch copies from {} to {}", srcPattern, path);
handleOutputsAsynchronously(conf, srcPattern, srcFs, dstFs, sameFs, maxThreads);
}
} else {
LOG.info("Source and destination are in the same file system, performing asynch renames from {} to {}", srcPattern, path);
handleOutputsAsynchronously(conf, srcPattern, srcFs, dstFs, sameFs, maxThreads);
}
}
private void handleOutputsAsynchronously(Configuration conf, Path srcPattern, FileSystem srcFs, FileSystem dstFs,
boolean sameFs, int maxThreads) throws IOException {
Configuration dstFsConf = getEffectiveBundleConfig(conf);
Path[] srcs = FileUtil.stat2Paths(srcFs.globStatus(srcPattern), srcPattern);
List<ListenableFuture<Boolean>> renameFutures = Lists.newArrayList();
ListeningExecutorService executorService =
MoreExecutors.listeningDecorator(
Executors.newFixedThreadPool(
maxThreads));
for (Path s : srcs) {
Path d = getDestFile(dstFsConf, s, path, s.getName().contains("-m-"));
renameFutures.add(
executorService.submit(
new WorkingPathFileMover(conf, s, d, srcFs, dstFs, sameFs)));
}
if (sameFs) {
LOG.info("Renaming {} files using at most {} threads.", renameFutures.size(), maxThreads);
} else {
LOG.info("Copying {} files using at most {} threads.", renameFutures.size(), maxThreads);
}
ListenableFuture<List<Boolean>> future =
Futures.successfulAsList(renameFutures);
List<Boolean> renameResults = null;
try {
renameResults = future.get();
} catch (InterruptedException | ExecutionException e) {
Throwables.propagate(e);
} finally {
executorService.shutdownNow();
}
if (renameResults != null && !renameResults.contains(false)) {
if (sameFs) {
LOG.info("Renamed {} files.", renameFutures.size());
} else {
LOG.info("Copied {} files.", renameFutures.size());
}
dstFs.create(getSuccessIndicator(), true).close();
LOG.info("Created success indicator file");
}
}
private void handleOutputsDistributedCopy(Configuration conf, Path srcPattern, FileSystem srcFs, FileSystem dstFs,
int maxTasks, int maxBandwidthMB) throws IOException {
Configuration dstFsConf = getEffectiveBundleConfig(conf);
Path[] srcs = FileUtil.stat2Paths(srcFs.globStatus(srcPattern), srcPattern);
if (srcs.length > 0) {
try {
DistCp distCp = createDistCp(srcs, maxTasks, maxBandwidthMB, dstFsConf);
if (!distCp.execute().isSuccessful()) {
throw new CrunchRuntimeException("Distributed copy failed from " + srcPattern + " to " + path);
}
LOG.info("Distributed copy completed for {} files", srcs.length);
} catch (Exception e) {
throw new CrunchRuntimeException("Distributed copy failed from " + srcPattern + " to " + path, e);
}
} else {
LOG.info("No files found to distributed copy at {}", srcPattern);
}
dstFs.create(getSuccessIndicator(), true).close();
LOG.info("Created success indicator file");
}
protected Path getSuccessIndicator() {
return new Path(path, "_SUCCESS");
}
protected Path getSourcePattern(Path workingPath, int index) {
if (index < 0) {
return new Path(workingPath, "part-*");
} else {
return new Path(workingPath, PlanningParameters.MULTI_OUTPUT_PREFIX + index + "-*");
}
}
@Override
public Path getPath() {
return path;
}
protected static boolean isCompatible(FileSystem fs, Path path) {
try {
fs.makeQualified(path);
return true;
} catch (IllegalArgumentException e) {
return false;
}
}
protected Path getDestFile(Configuration conf, Path src, Path dir, boolean mapOnlyJob)
throws IOException {
String outputFilename = null;
String sourceFilename = src.getName();
if (mapOnlyJob) {
outputFilename = getFileNamingScheme().getMapOutputName(conf, dir);
} else {
outputFilename = getFileNamingScheme().getReduceOutputName(conf, dir, extractPartitionNumber(sourceFilename));
}
if (sourceFilename.contains(".")) {
outputFilename += sourceFilename.substring(sourceFilename.indexOf("."));
}
return new Path(dir, outputFilename);
}
protected DistCp createDistCp(Path[] srcs, int maxTasks, int maxBandwidthMB, Configuration conf) throws Exception {
LOG.info("Distributed copying {} files using at most {} tasks and bandwidth limit of {} MB/s per task",
new Object[]{srcs.length, maxTasks, maxBandwidthMB});
Configuration distCpConf = new Configuration(conf);
// Remove unnecessary and problematic properties from the DistCp configuration. This is necessary since
// files referenced by these properties may have already been deleted by the time the DistCp job starts.
distCpConf.unset("mapreduce.job.cache.files");
distCpConf.unset("mapreduce.job.classpath.files");
distCpConf.unset("tmpjars");
// Setup renaming for part files
List<String> renames = Lists.newArrayList();
for (Path s : srcs) {
Path d = getDestFile(conf, s, path, s.getName().contains("-m-"));
renames.add(s.getName() + ":" + d.getName());
}
distCpConf.setStrings(CrunchRenameCopyListing.DISTCP_PATH_RENAMES, renames.toArray(new String[renames.size()]));
distCpConf.setClass(DistCpConstants.CONF_LABEL_COPY_LISTING_CLASS, CrunchRenameCopyListing.class, CopyListing.class);
// Once https://issues.apache.org/jira/browse/HADOOP-15281 is available, we can use the direct write
// distcp optimization if the target path is in S3
DistCpOptions options = new DistCpOptions(Arrays.asList(srcs), path);
options.setMaxMaps(maxTasks);
options.setMapBandwidth(maxBandwidthMB);
options.setBlocking(true);
return new DistCp(distCpConf, options);
}
/**
* Extract the partition number from a raw reducer output filename.
*
* @param reduceOutputFileName The raw reducer output file name
* @return The partition number encoded in the filename
*/
public static int extractPartitionNumber(String reduceOutputFileName) {
Matcher matcher = Pattern.compile(".*-r-(\\d{5})").matcher(reduceOutputFileName);
if (matcher.find()) {
return Integer.parseInt(matcher.group(1), 10);
} else {
throw new IllegalArgumentException("Reducer output name '" + reduceOutputFileName + "' cannot be parsed");
}
}
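// Illustrative behaviour with assumed file names: "part-r-00002" and "part-r-00002.avro"
// both yield partition 2, while a map-side name such as "part-m-00000" has no "-r-" group
// and triggers the IllegalArgumentException above.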
@Override
public FileNamingScheme getFileNamingScheme() {
return fileNamingScheme;
}
@Override
public boolean equals(Object other) {
if (other == null || !getClass().equals(other.getClass())) {
return false;
}
FileTargetImpl o = (FileTargetImpl) other;
return Objects.equals(path, o.path) && Objects.equals(formatBundle, o.formatBundle);
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(path).append(formatBundle).toHashCode();
}
@Override
public String toString() {
return new StringBuilder()
.append(formatBundle.getFormatClass().getSimpleName())
.append("(")
.append(path)
.append(")")
.toString();
}
@Override
public <T> SourceTarget<T> asSourceTarget(PType<T> ptype) {
// By default, assume that we cannot do this.
return null;
}
private Configuration getEffectiveBundleConfig(Configuration configuration) {
// overlay the bundle config on top of a copy of the supplied config
return formatBundle.configure(new Configuration(configuration));
}
@Override
public boolean handleExisting(WriteMode strategy, long lastModForSource, Configuration conf) {
FileSystem fs = null;
try {
fs = path.getFileSystem(getEffectiveBundleConfig(conf));
} catch (IOException e) {
LOG.error("Could not retrieve FileSystem object to check for existing path", e);
throw new CrunchRuntimeException(e);
}
boolean exists = false;
boolean successful = false;
long lastModForTarget = -1;
try {
exists = fs.exists(path);
if (exists) {
successful = fs.exists(getSuccessIndicator());
// Last modified time is only relevant when the path exists and the
// write mode is checkpoint
if (successful && strategy == WriteMode.CHECKPOINT) {
lastModForTarget = SourceTargetHelper.getLastModifiedAt(fs, path);
}
}
} catch (IOException e) {
LOG.error("Exception checking existence of path: {}", path, e);
throw new CrunchRuntimeException(e);
}
if (exists) {
switch (strategy) {
case DEFAULT:
LOG.error("Path {} already exists!", path);
throw new CrunchRuntimeException("Path already exists: " + path);
case OVERWRITE:
LOG.info("Removing data at existing path: {}", path);
try {
fs.delete(path, true);
} catch (IOException e) {
LOG.error("Exception thrown removing data at path: {}", path, e);
}
break;
case APPEND:
LOG.info("Adding output files to existing path: {}", path);
break;
case CHECKPOINT:
if (successful && lastModForTarget > lastModForSource) {
LOG.info("Re-starting pipeline from checkpoint path: {}", path);
break;
} else {
if (!successful) {
LOG.info("_SUCCESS file not found, Removing data at existing checkpoint path: {}", path);
} else {
LOG.info("Source data has recent updates. Removing data at existing checkpoint path: {}", path);
}
try {
fs.delete(path, true);
} catch (IOException e) {
LOG.error("Exception thrown removing data at checkpoint path: {}", path, e);
}
return false;
}
default:
throw new CrunchRuntimeException("Unknown WriteMode: " + strategy);
}
} else {
LOG.info("Will write output files to new path: {}", path);
}
return exists;
}
}
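/*
 * A minimal sketch of how the WriteMode handling in handleExisting() is usually driven
 * from a pipeline; the target path, PCollection and pipeline variables are assumptions.
 *
 *   Target target = To.sequenceFile("/data/output");
 *   pipeline.write(collection, target, Target.WriteMode.CHECKPOINT);
 *   // If /data/output already exists with a _SUCCESS marker newer than the source data,
 *   // handleExisting() keeps the checkpoint; otherwise the stale output is deleted and
 *   // the collection is recomputed.
 */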
| 2,812 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/FileReadableData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputFormat;
import java.util.List;
class FileReadableData<T> extends ReadableDataImpl<T> {
private final FormatBundle<? extends InputFormat> bundle;
private final PType<T> ptype;
public FileReadableData(List<Path> paths, FormatBundle<? extends InputFormat> bundle, PType<T> ptype) {
super(paths);
this.bundle = bundle;
this.ptype = ptype;
}
@Override
protected FileReaderFactory<T> getFileReaderFactory() {
return new DefaultFileReaderFactory<T>(bundle, ptype);
}
}
| 2,813 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/FileSourceImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import java.util.Map.Entry;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.crunch.ReadableData;
import org.apache.crunch.Source;
import org.apache.crunch.impl.mr.run.CrunchInputFormat;
import org.apache.crunch.io.CompositePathIterable;
import org.apache.crunch.io.CrunchInputs;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.SourceTargetHelper;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class FileSourceImpl<T> implements ReadableSource<T> {
private static final Logger LOG = LoggerFactory.getLogger(FileSourceImpl.class);
@Deprecated
protected Path path;
protected List<Path> paths;
protected final PType<T> ptype;
protected final FormatBundle<? extends InputFormat> inputBundle;
public FileSourceImpl(Path path, PType<T> ptype, Class<? extends InputFormat> inputFormatClass) {
this(path, ptype, FormatBundle.forInput(inputFormatClass));
}
public FileSourceImpl(Path path, PType<T> ptype, FormatBundle<? extends InputFormat> inputBundle) {
this(Lists.newArrayList(path), ptype, inputBundle);
}
public FileSourceImpl(List<Path> paths, PType<T> ptype, Class<? extends InputFormat> inputFormatClass) {
this(paths, ptype, FormatBundle.forInput(inputFormatClass));
}
public FileSourceImpl(List<Path> paths, PType<T> ptype, FormatBundle<? extends InputFormat> inputBundle) {
Preconditions.checkArgument(!paths.isEmpty(), "Must supply at least one input path");
this.path = paths.get(0);
this.paths = paths;
this.ptype = ptype;
this.inputBundle = inputBundle;
}
@Deprecated
public Path getPath() {
if (paths.size() > 1) {
LOG.warn("getPath() called for source with multiple paths, only returning first. Source: {}", this);
}
return paths.get(0);
}
public List<Path> getPaths() {
return paths;
}
@Override
public FileSystem getFileSystem() {
return inputBundle.getFileSystem();
}
@Override
public Source<T> inputConf(String key, String value) {
inputBundle.set(key, value);
return this;
}
@Override
public Source<T> fileSystem(FileSystem fileSystem) {
if (inputBundle.getFileSystem() != null) {
throw new IllegalStateException("Filesystem already set. Change is not supported.");
}
if (fileSystem != null) {
List<Path> qualifiedPaths = new ArrayList<>(paths.size());
for (Path path : paths) {
qualifiedPaths.add(fileSystem.makeQualified(path));
}
paths = qualifiedPaths;
inputBundle.setFileSystem(fileSystem);
}
return this;
}
@Override
public Converter<?, ?, ?, ?> getConverter() {
return ptype.getConverter();
}
@Override
public void configureSource(Job job, int inputId) throws IOException {
// Use Crunch to handle the combined input splits
job.setInputFormatClass(CrunchInputFormat.class);
CrunchInputs.addInputPaths(job, paths, inputBundle, inputId);
}
public FormatBundle<? extends InputFormat> getBundle() {
return inputBundle;
}
@Override
public PType<T> getType() {
return ptype;
}
private Configuration getEffectiveBundleConfig(Configuration configuration) {
// overlay the bundle config on top of a copy of the supplied config
return getBundle().configure(new Configuration(configuration));
}
@Override
public long getSize(Configuration configuration) {
long size = 0;
Configuration bundleConfig = getEffectiveBundleConfig(configuration);
for (Path path : paths) {
try {
size += SourceTargetHelper.getPathSize(bundleConfig, path);
} catch (IOException e) {
LOG.warn("Exception thrown looking up size of: {}", path, e);
throw new IllegalStateException("Failed to get the file size of:" + path, e);
}
}
return size;
}
protected Iterable<T> read(Configuration conf, FileReaderFactory<T> readerFactory)
throws IOException {
List<Iterable<T>> iterables = Lists.newArrayList();
Configuration bundleConfig = getEffectiveBundleConfig(conf);
for (Path path : paths) {
FileSystem fs = path.getFileSystem(bundleConfig);
iterables.add(CompositePathIterable.create(fs, path, readerFactory));
}
return Iterables.concat(iterables);
}
/* Retain string format for single-path sources */
protected String pathsAsString() {
if (paths.size() == 1) {
return paths.get(0).toString();
}
return paths.toString();
}
@Override
public long getLastModifiedAt(Configuration conf) {
long lastMod = -1;
Configuration bundleConfig = getEffectiveBundleConfig(conf);
for (Path path : paths) {
try {
FileSystem fs = path.getFileSystem(bundleConfig);
long lm = SourceTargetHelper.getLastModifiedAt(fs, path);
if (lm > lastMod) {
lastMod = lm;
}
} catch (IOException e) {
LOG.error("Could not determine last modification time for source: {}", toString(), e);
}
}
return lastMod;
}
@Override
public boolean equals(Object other) {
if (other == null || !getClass().equals(other.getClass())) {
return false;
}
FileSourceImpl o = (FileSourceImpl) other;
return ptype.equals(o.ptype) && paths.equals(o.paths) && inputBundle.equals(o.inputBundle);
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(ptype).append(paths).append(inputBundle).toHashCode();
}
@Override
public String toString() {
return new StringBuilder().append(inputBundle.getName()).append("(").append(pathsAsString()).append(")").toString();
}
@Override
public Iterable<T> read(Configuration conf) throws IOException {
return read(conf, new DefaultFileReaderFactory<T>(inputBundle, ptype));
}
@Override
public ReadableData<T> asReadable() {
return new FileReadableData<T>(paths, inputBundle, ptype);
}
}
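/*
 * A minimal sketch of reading a file-backed source outside of a running job via the
 * read(Configuration) method above; the concrete subclass and input path are
 * illustrative assumptions.
 *
 *   ReadableSource<String> source =
 *       new SeqFileSource<String>(new Path("/data/strings"), Writables.strings());
 *   for (String value : source.read(new Configuration())) {
 *     // process the value locally
 *   }
 */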
| 2,814 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/TableSourcePathTargetImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import org.apache.crunch.Pair;
import org.apache.crunch.TableSource;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.PathTarget;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.types.PTableType;
public class TableSourcePathTargetImpl<K, V> extends SourcePathTargetImpl<Pair<K, V>> implements TableSource<K, V> {
public TableSourcePathTargetImpl(TableSource<K, V> source, PathTarget target) {
this(source, target, SequentialFileNamingScheme.getInstance());
}
public TableSourcePathTargetImpl(TableSource<K, V> source, PathTarget target, FileNamingScheme fileNamingScheme) {
super(source, target, fileNamingScheme);
}
@Override
public PTableType<K, V> getTableType() {
return ((TableSource<K, V>) source).getTableType();
}
}
| 2,815 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/SourceTargetImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import java.io.IOException;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.crunch.Source;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.Target;
import org.apache.crunch.io.OutputHandler;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapreduce.Job;
class SourceTargetImpl<T> implements SourceTarget<T> {
protected final Source<T> source;
protected final Target target;
public SourceTargetImpl(Source<T> source, Target target) {
this.source = source;
this.target = target;
}
@Override
public Source<T> inputConf(String key, String value) {
source.inputConf(key, value);
return this;
}
@Override
public SourceTarget<T> fileSystem(FileSystem fileSystem) {
source.fileSystem(fileSystem);
target.fileSystem(fileSystem);
return this;
}
@Override
public FileSystem getFileSystem() {
// could either return the source or target file system as they are the same
return source.getFileSystem();
}
@Override
public PType<T> getType() {
return source.getType();
}
@Override
public void configureSource(Job job, int inputId) throws IOException {
source.configureSource(job, inputId);
}
@Override
public long getSize(Configuration configuration) {
return source.getSize(configuration);
}
@Override
public boolean accept(OutputHandler handler, PType<?> ptype) {
return target.accept(handler, ptype);
}
@Override
public <S> SourceTarget<S> asSourceTarget(PType<S> ptype) {
if (ptype != null && ptype.equals(source.getType())) {
return (SourceTarget<S>) this;
}
return target.asSourceTarget(ptype);
}
@Override
public boolean equals(Object other) {
if (other == null || !(other.getClass().equals(getClass()))) {
return false;
}
SourceTargetImpl sti = (SourceTargetImpl) other;
return source.equals(sti.source) && target.equals(sti.target);
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(source).append(target).toHashCode();
}
@Override
public String toString() {
return source.toString();
}
@Override
public Target outputConf(String key, String value) {
target.outputConf(key, value);
return this;
}
@Override
public boolean handleExisting(WriteMode strategy, long lastModifiedAt, Configuration conf) {
return target.handleExisting(strategy, lastModifiedAt, conf);
}
@Override
public long getLastModifiedAt(Configuration configuration) {
return source.getLastModifiedAt(configuration);
}
@Override
public Converter<?, ?, ?, ?> getConverter() {
return source.getConverter();
}
@Override
public Converter<?, ?, ?, ?> getConverter(PType<?> ptype) {
return target.getConverter(ptype);
}
@Override
public SourceTarget<T> conf(String key, String value) {
source.inputConf(key, value);
target.outputConf(key, value);
return this;
}
}
| 2,816 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/TableSourceTargetImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import org.apache.crunch.Pair;
import org.apache.crunch.TableSource;
import org.apache.crunch.Target;
import org.apache.crunch.types.PTableType;
public class TableSourceTargetImpl<K, V> extends SourceTargetImpl<Pair<K, V>> implements TableSource<K, V> {
public TableSourceTargetImpl(TableSource<K, V> source, Target target) {
super(source, target);
}
@Override
public PTableType<K, V> getTableType() {
return ((TableSource<K, V>) source).getTableType();
}
}
| 2,817 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/DefaultFileReaderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import com.google.common.base.Function;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.UnmodifiableIterator;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Iterator;
class DefaultFileReaderFactory<T> implements FileReaderFactory<T> {
private static final Logger LOG = LoggerFactory.getLogger(DefaultFileReaderFactory.class);
private final FormatBundle<? extends InputFormat> bundle;
private final PType<T> ptype;
public DefaultFileReaderFactory(FormatBundle<? extends InputFormat> bundle, PType<T> ptype) {
this.bundle = bundle;
this.ptype = ptype;
}
@Override
public Iterator<T> read(FileSystem fs, Path path) {
final Configuration conf = new Configuration(fs.getConf());
bundle.configure(conf);
ptype.initialize(conf);
final InputFormat fmt = ReflectionUtils.newInstance(bundle.getFormatClass(), conf);
final TaskAttemptContext ctxt = new TaskAttemptContextImpl(conf, new TaskAttemptID());
try {
Job job = new Job(conf);
FileInputFormat.addInputPath(job, path);
return Iterators.concat(Lists.transform(fmt.getSplits(job), new Function<InputSplit, Iterator<T>>() {
@Override
public Iterator<T> apply(InputSplit split) {
try {
RecordReader reader = fmt.createRecordReader(split, ctxt);
reader.initialize(split, ctxt);
return new RecordReaderIterator<T>(reader, ptype);
} catch (Exception e) {
LOG.error("Error reading split: " + split, e);
throw new CrunchRuntimeException(e);
}
}
}).iterator());
} catch (Exception e) {
LOG.error("Error reading path: " + path, e);
throw new CrunchRuntimeException(e);
}
}
private static class RecordReaderIterator<T> extends UnmodifiableIterator<T> {
private final RecordReader reader;
private final PType<T> ptype;
private T cur;
private boolean hasNext;
public RecordReaderIterator(RecordReader reader, PType<T> ptype) {
this.reader = reader;
this.ptype = ptype;
try {
this.hasNext = reader.nextKeyValue();
if (hasNext) {
Object converted = ptype.getConverter().convertInput(
reader.getCurrentKey(), reader.getCurrentValue());
this.cur = ptype.getInputMapFn().map(converted);
}
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
@Override
public boolean hasNext() {
return hasNext;
}
@Override
public T next() {
T ret = cur;
try {
hasNext = reader.nextKeyValue();
if (hasNext) {
Object converted = ptype.getConverter().convertInput(
reader.getCurrentKey(), reader.getCurrentValue());
this.cur = ptype.getInputMapFn().map(converted);
}
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
return ret;
}
}
}
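/*
 * A minimal sketch of how this factory is consumed elsewhere in this package (see
 * FileSourceImpl.read and ReadableDataImpl.read): it is handed to CompositePathIterable,
 * which walks a path and concatenates the per-file iterators. The bundle, ptype, conf
 * and path variables are assumptions here.
 *
 *   FileReaderFactory<T> readerFactory = new DefaultFileReaderFactory<T>(bundle, ptype);
 *   FileSystem fs = path.getFileSystem(conf);
 *   Iterable<T> records = CompositePathIterable.create(fs, path, readerFactory);
 */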
| 2,818 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/AutoClosingIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import com.google.common.collect.UnmodifiableIterator;
import org.apache.commons.io.IOUtils;
/**
* Closes the wrapped {@code Closeable} when {@link #hasNext()} returns false. As long as a client loops through to
* completion (doesn't abort early due to an exception, short circuit, etc.) resources will be closed automatically.
*/
public class AutoClosingIterator<T> extends UnmodifiableIterator<T> implements Closeable {
private final Iterator<T> iter;
private Closeable closeable;
public AutoClosingIterator(Closeable closeable, Iterator<T> iter) {
this.closeable = closeable;
this.iter = iter;
}
@Override
public boolean hasNext() {
if (iter.hasNext()) {
return true;
} else {
IOUtils.closeQuietly(this);
return false;
}
}
@Override
public T next() {
return iter.next();
}
@Override
public void close() throws IOException {
if (closeable != null) {
closeable.close();
closeable = null;
}
}
}
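/*
 * A minimal usage sketch: pair a closeable resource with an iterator derived from it so
 * that fully draining the iterator releases the resource, as SeqFileReaderFactory in this
 * package does with a SequenceFile.Reader. The BufferedReader example below is an
 * illustrative assumption (requires Java 8 for BufferedReader.lines()).
 *
 *   BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(path)));
 *   Iterator<String> lines =
 *       new AutoClosingIterator<String>(reader, reader.lines().iterator());
 *   // Once hasNext() returns false, the reader has already been closed.
 */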
| 2,819 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/FileTableSourceImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import java.util.List;
import org.apache.crunch.Pair;
import org.apache.crunch.TableSource;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.types.PTableType;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
public class FileTableSourceImpl<K, V> extends FileSourceImpl<Pair<K, V>> implements TableSource<K, V> {
public FileTableSourceImpl(Path path, PTableType<K, V> tableType, Class<? extends FileInputFormat> formatClass) {
super(path, tableType, formatClass);
}
public FileTableSourceImpl(List<Path> paths, PTableType<K, V> tableType, Class<? extends FileInputFormat> formatClass) {
super(paths, tableType, formatClass);
}
public FileTableSourceImpl(Path path, PTableType<K, V> tableType, FormatBundle bundle) {
super(path, tableType, bundle);
}
public FileTableSourceImpl(List<Path> paths, PTableType<K, V> tableType, FormatBundle bundle) {
super(paths, tableType, bundle);
}
@Override
public PTableType<K, V> getTableType() {
return (PTableType<K, V>) getType();
}
}
| 2,820 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/ReadableSourceTargetImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import java.io.IOException;
import org.apache.crunch.ReadableData;
import org.apache.crunch.Target;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.ReadableSourceTarget;
import org.apache.hadoop.conf.Configuration;
public class ReadableSourceTargetImpl<T> extends SourceTargetImpl<T> implements ReadableSourceTarget<T> {
public ReadableSourceTargetImpl(ReadableSource<T> source, Target target) {
super(source, target);
}
@Override
public Iterable<T> read(Configuration conf) throws IOException {
return ((ReadableSource<T>) source).read(conf);
}
@Override
public ReadableData<T> asReadable() {
ReadableData<T> rd = ((ReadableSource<T>) source).asReadable();
if (rd instanceof ReadableDataImpl) {
((ReadableDataImpl<T>) rd).setParent(this);
}
return rd;
}
}
| 2,821 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/SourcePathTargetImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import java.io.IOException;
import org.apache.crunch.Source;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.PathTarget;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
public class SourcePathTargetImpl<T> extends SourceTargetImpl<T> implements PathTarget {
private final FileNamingScheme fileNamingScheme;
public SourcePathTargetImpl(Source<T> source, PathTarget target, FileNamingScheme fileNamingScheme) {
super(source, target);
this.fileNamingScheme = fileNamingScheme;
}
@Override
public void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name) {
((PathTarget) target).configureForMapReduce(job, ptype, outputPath, name);
}
@Override
public Path getPath() {
return ((PathTarget) target).getPath();
}
@Override
public FileNamingScheme getFileNamingScheme() {
return fileNamingScheme;
}
@Override
public void handleOutputs(Configuration conf, Path workingPath, int index)
throws IOException {
((PathTarget) target).handleOutputs(conf, workingPath, index);
}
}
| 2,822 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/ReadableSourcePathTargetImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import java.io.IOException;
import org.apache.crunch.ReadableData;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.PathTarget;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.ReadableSourceTarget;
import org.apache.hadoop.conf.Configuration;
public class ReadableSourcePathTargetImpl<T> extends SourcePathTargetImpl<T> implements ReadableSourceTarget<T> {
public ReadableSourcePathTargetImpl(ReadableSource<T> source, PathTarget target, FileNamingScheme fileNamingScheme) {
super(source, target, fileNamingScheme);
}
@Override
public Iterable<T> read(Configuration conf) throws IOException {
return ((ReadableSource<T>) source).read(conf);
}
@Override
public ReadableData<T> asReadable() {
ReadableData<T> rd = ((ReadableSource<T>) source).asReadable();
if (rd instanceof ReadableDataImpl) {
((ReadableDataImpl<T>) rd).setParent(this);
}
return rd;
}
}
| 2,823 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/impl/ReadableDataImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.impl;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.DoFn;
import org.apache.crunch.ReadableData;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.io.CompositePathIterable;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.util.DistCache;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import javax.annotation.Nullable;
import java.io.IOException;
import java.util.List;
import java.util.Set;
public abstract class ReadableDataImpl<T> implements ReadableData<T> {
private List<String> paths;
private transient SourceTarget parent;
protected ReadableDataImpl(List<Path> paths) {
this.paths = Lists.newArrayList();
for (Path p : paths) {
this.paths.add(p.toString());
}
}
public ReadableData<T> setParent(SourceTarget<?> parent) {
this.parent = parent;
return this;
}
@Override
public Set<SourceTarget<?>> getSourceTargets() {
if (parent != null) {
return ImmutableSet.<SourceTarget<?>>of(parent);
} else {
return ImmutableSet.of();
}
}
@Override
public void configure(Configuration conf) {
for (String path : paths) {
DistCache.addCacheFile(new Path(path), conf);
}
}
protected abstract FileReaderFactory<T> getFileReaderFactory();
private Path getCacheFilePath(String input, Configuration conf) {
Path local = DistCache.getPathToCacheFile(new Path(input), conf);
if (local == null) {
throw new CrunchRuntimeException("Can't find local cache file for '" + input + "'");
}
return local;
}
@Override
public Iterable<T> read(TaskInputOutputContext<?, ?, ?, ?> ctxt) throws IOException {
final Configuration conf = ctxt.getConfiguration();
final FileReaderFactory<T> readerFactory = getFileReaderFactory();
return Iterables.concat(Lists.transform(paths, new Function<String, Iterable<T>>() {
@Override
public Iterable<T> apply(@Nullable String input) {
Path path = getCacheFilePath(input, conf);
try {
FileSystem fs = path.getFileSystem(conf);
return CompositePathIterable.create(fs, path, readerFactory);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}));
}
}
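/*
 * A minimal lifecycle sketch (the surrounding driver and task code are assumptions):
 * configure() is called on the client to push each backing file into the distributed
 * cache, and read() is called inside a task to resolve the localized copies.
 *
 *   ReadableData<T> data = readableSource.asReadable();
 *   data.configure(job.getConfiguration());      // client side, before job submission
 *   // ... later, inside a map or reduce task:
 *   for (T value : data.read(taskContext)) {
 *     // use the side-input value
 *   }
 */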
| 2,824 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/seq/SeqFileReaderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.seq;
import java.io.IOException;
import java.util.Iterator;
import org.apache.crunch.MapFn;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.impl.AutoClosingIterator;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.collect.Iterators;
import com.google.common.collect.UnmodifiableIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SeqFileReaderFactory<T> implements FileReaderFactory<T> {
private static final Logger LOG = LoggerFactory.getLogger(SeqFileReaderFactory.class);
private final Converter converter;
private final MapFn<Object, T> mapFn;
private final Writable key;
private final Writable value;
public SeqFileReaderFactory(PType<T> ptype) {
this.converter = ptype.getConverter();
this.mapFn = ptype.getInputMapFn();
if (ptype instanceof PTableType) {
PTableType ptt = (PTableType) ptype;
this.key = SeqFileHelper.newInstance(ptt.getKeyType(), null);
this.value = SeqFileHelper.newInstance(ptt.getValueType(), null);
} else {
this.key = NullWritable.get();
this.value = SeqFileHelper.newInstance(ptype, null);
}
}
public SeqFileReaderFactory(Class clazz) {
PType<T> ptype = Writables.writables(clazz);
this.converter = ptype.getConverter();
this.mapFn = ptype.getInputMapFn();
this.key = NullWritable.get();
this.value = (Writable) ReflectionUtils.newInstance(clazz, null);
}
@Override
public Iterator<T> read(FileSystem fs, final Path path) {
mapFn.initialize();
try {
final SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, fs.getConf());
return new AutoClosingIterator<T>(reader, new UnmodifiableIterator<T>() {
boolean nextChecked = false;
boolean hasNext = false;
@Override
public boolean hasNext() {
if (nextChecked) {
return hasNext;
}
try {
hasNext = reader.next(key, value);
nextChecked = true;
return hasNext;
} catch (IOException e) {
LOG.info("Error reading from path: {}", path, e);
return false;
}
}
@Override
public T next() {
if (!nextChecked && !hasNext()) {
return null;
}
nextChecked = false;
return mapFn.map(converter.convertInput(key, value));
}
});
} catch (IOException e) {
LOG.info("Could not read seqfile at path: {}", path, e);
return Iterators.emptyIterator();
}
}
}
| 2,825 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/seq/SeqFileSourceTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.seq;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.ReadableSourcePathTargetImpl;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.Path;
public class SeqFileSourceTarget<T> extends ReadableSourcePathTargetImpl<T> {
public SeqFileSourceTarget(String path, PType<T> ptype) {
this(new Path(path), ptype);
}
public SeqFileSourceTarget(Path path, PType<T> ptype) {
this(path, ptype, SequentialFileNamingScheme.getInstance());
}
public SeqFileSourceTarget(Path path, PType<T> ptype, FileNamingScheme fileNamingScheme) {
super(new SeqFileSource<T>(path, ptype), new SeqFileTarget(path), fileNamingScheme);
}
@Override
public String toString() {
return target.toString();
}
}
| 2,826 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/seq/SeqFileHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.seq;
import org.apache.crunch.MapFn;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.writable.WritableType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;
class SeqFileHelper {
static <T> Writable newInstance(PType<T> ptype, Configuration conf) {
return (Writable) ReflectionUtils.newInstance(((WritableType) ptype).getSerializationClass(), conf);
}
static <T> MapFn<Object, T> getInputMapFn(PType<T> ptype) {
return ptype.getInputMapFn();
}
}
| 2,827 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/seq/SeqFileSource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.seq;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.ReadableData;
import org.apache.crunch.io.impl.FileSourceImpl;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
public class SeqFileSource<T> extends FileSourceImpl<T> implements ReadableSource<T> {
public SeqFileSource(Path path, PType<T> ptype) {
this(Collections.<Path>singletonList(path), ptype);
}
public SeqFileSource(List<Path> paths, PType<T> ptype) {
super(paths, ptype, SequenceFileInputFormat.class);
inputBundle.set(RuntimeParameters.DISABLE_COMBINE_FILE, Boolean.FALSE.toString());
}
@Override
public Iterable<T> read(Configuration conf) throws IOException {
return read(conf, new SeqFileReaderFactory<T>(ptype));
}
@Override
public ReadableData<T> asReadable() {
return new SeqFileReadableData<T>(paths, ptype);
}
@Override
public String toString() {
return "SeqFile(" + pathsAsString() + ")";
}
}
| 2,828 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/seq/SeqFileTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.seq;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.FileTargetImpl;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
public class SeqFileTarget extends FileTargetImpl {
public SeqFileTarget(String path) {
this(new Path(path));
}
public SeqFileTarget(Path path) {
this(path, SequentialFileNamingScheme.getInstance());
}
public SeqFileTarget(Path path, FileNamingScheme fileNamingScheme) {
super(path, SequenceFileOutputFormat.class, fileNamingScheme);
}
@Override
public String toString() {
return "SeqFile(" + path.toString() + ")";
}
@Override
public <T> SourceTarget<T> asSourceTarget(PType<T> ptype) {
if (ptype instanceof PTableType) {
return new SeqFileTableSourceTarget(path, (PTableType) ptype).fileSystem(getFileSystem());
} else {
return new SeqFileSourceTarget(path, ptype).fileSystem(getFileSystem());
}
}
}
| 2,829 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/seq/SeqFileReadableData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.seq;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.impl.ReadableDataImpl;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.Path;
import java.util.List;
public class SeqFileReadableData<T> extends ReadableDataImpl {
private final PType<T> ptype;
public SeqFileReadableData(List<Path> paths, PType<T> ptype) {
super(paths);
this.ptype = ptype;
}
@Override
protected FileReaderFactory<T> getFileReaderFactory() {
return new SeqFileReaderFactory<T>(ptype);
}
}
| 2,830 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/seq/SeqFileTableSource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.seq;
import java.io.IOException;
import java.util.List;
import org.apache.crunch.Pair;
import org.apache.crunch.ReadableData;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.impl.FileTableSourceImpl;
import org.apache.crunch.types.PTableType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
/**
* A {@code TableSource} that uses {@code SequenceFileInputFormat} to read the input
* file.
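*
* <p>A minimal usage sketch; the path, the key/value types and the {@code pipeline} instance
* below are illustrative assumptions rather than part of this class:
*
* <pre>{@code
* PTableType<Text, LongWritable> type = Writables.tableOf(
*     Writables.writables(Text.class), Writables.writables(LongWritable.class));
* PTable<Text, LongWritable> counts = pipeline.read(
*     new SeqFileTableSource<Text, LongWritable>("/data/counts", type));
* }</pre>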
*/
public class SeqFileTableSource<K, V> extends FileTableSourceImpl<K, V> implements ReadableSource<Pair<K, V>> {
public SeqFileTableSource(String path, PTableType<K, V> ptype) {
this(new Path(path), ptype);
}
public SeqFileTableSource(Path path, PTableType<K, V> ptype) {
super(path, ptype, SequenceFileInputFormat.class);
}
public SeqFileTableSource(List<Path> paths, PTableType<K, V> ptype) {
super(paths, ptype, SequenceFileInputFormat.class);
}
@Override
public Iterable<Pair<K, V>> read(Configuration conf) throws IOException {
return read(conf, new SeqFileReaderFactory<Pair<K, V>>(getTableType()));
}
@Override
public ReadableData<Pair<K, V>> asReadable() {
return new SeqFileReadableData<Pair<K, V>>(paths, getTableType());
}
@Override
public String toString() {
return "SeqFile(" + pathsAsString() + ")";
}
}
| 2,831 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/seq/SeqFileTableSourceTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.seq;
import org.apache.crunch.Pair;
import org.apache.crunch.TableSourceTarget;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.ReadableSourcePathTargetImpl;
import org.apache.crunch.types.PTableType;
import org.apache.hadoop.fs.Path;
public class SeqFileTableSourceTarget<K, V> extends ReadableSourcePathTargetImpl<Pair<K, V>> implements
TableSourceTarget<K, V> {
private final PTableType<K, V> tableType;
public SeqFileTableSourceTarget(String path, PTableType<K, V> tableType) {
this(new Path(path), tableType);
}
public SeqFileTableSourceTarget(Path path, PTableType<K, V> tableType) {
this(path, tableType, SequentialFileNamingScheme.getInstance());
}
public SeqFileTableSourceTarget(Path path, PTableType<K, V> tableType, FileNamingScheme fileNamingScheme) {
super(new SeqFileTableSource<K, V>(path, tableType), new SeqFileTarget(path), fileNamingScheme);
this.tableType = tableType;
}
@Override
public PTableType<K, V> getTableType() {
return tableType;
}
@Override
public String toString() {
return target.toString();
}
}
| 2,832 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/avro/AvroFileSourceTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.avro;
import org.apache.avro.io.DatumReader;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.ReadableSourcePathTargetImpl;
import org.apache.crunch.types.avro.AvroType;
import org.apache.hadoop.fs.Path;
public class AvroFileSourceTarget<T> extends ReadableSourcePathTargetImpl<T> {
public AvroFileSourceTarget(Path path, AvroType<T> atype) {
this(path, atype, SequentialFileNamingScheme.getInstance());
}
public AvroFileSourceTarget(Path path, AvroType<T> atype, DatumReader<T> reader) {
this(path, atype, reader, SequentialFileNamingScheme.getInstance());
}
public AvroFileSourceTarget(Path path, AvroType<T> atype, FileNamingScheme fileNamingScheme) {
super(new AvroFileSource<T>(path, atype), new AvroFileTarget(path), fileNamingScheme);
}
public AvroFileSourceTarget(Path path, AvroType<T> atype, DatumReader<T> reader, FileNamingScheme fileNamingScheme) {
super(new AvroFileSource<T>(path, atype, reader), new AvroFileTarget(path), fileNamingScheme);
}
@Override
public String toString() {
return target.toString();
}
}
| 2,833 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/avro/AvroFileTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.avro;
import com.google.common.collect.Maps;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.Target;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.OutputHandler;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.FileTargetImpl;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.avro.AvroMode;
import org.apache.crunch.types.avro.AvroOutputFormat;
import org.apache.crunch.types.avro.AvroType;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import java.util.Map;
public class AvroFileTarget extends FileTargetImpl {
private Map<String, String> extraConf = Maps.newHashMap();
public AvroFileTarget(String path) {
this(new Path(path));
}
public AvroFileTarget(Path path) {
this(path, SequentialFileNamingScheme.getInstance());
}
public AvroFileTarget(Path path, FileNamingScheme fileNamingScheme) {
super(path, AvroOutputFormat.class, fileNamingScheme);
}
@Override
public String toString() {
return "Avro(" + path.toString() + ")";
}
@Override
public boolean accept(OutputHandler handler, PType<?> ptype) {
if (!(ptype instanceof AvroType)) {
return false;
}
handler.configure(this, ptype);
return true;
}
@Override
public Target outputConf(String key, String value) {
extraConf.put(key, value);
return this;
}
@Override
public void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name) {
AvroType<?> atype = (AvroType<?>) ptype;
FormatBundle bundle = FormatBundle.forOutput(AvroOutputFormat.class);
for (Map.Entry<String, String> e : extraConf.entrySet()) {
bundle.set(e.getKey(), e.getValue());
}
bundle.set("avro.output.schema", atype.getSchema().toString());
AvroMode.fromType(atype).configure(bundle);
configureForMapReduce(job, AvroWrapper.class, NullWritable.class, bundle,
outputPath, name);
}
@Override
public <T> SourceTarget<T> asSourceTarget(PType<T> ptype) {
if (ptype instanceof AvroType) {
return new AvroFileSourceTarget<T>(path, (AvroType<T>) ptype).fileSystem(getFileSystem());
}
return null;
}
}
| 2,834 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/avro/AvroPathPerKeyTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.avro;
import java.io.IOException;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.crunch.impl.mr.plan.PlanningParameters;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.OutputHandler;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.FileTargetImpl;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.avro.AvroMode;
import org.apache.crunch.types.avro.AvroPathPerKeyOutputFormat;
import org.apache.crunch.types.avro.AvroType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link org.apache.crunch.Target} that wraps {@link org.apache.crunch.types.avro.AvroPathPerKeyOutputFormat} to allow one file
* per key to be written as the output of a {@code PTable<String, T>}.
*
* <p>Note the restrictions that apply to the {@code AvroPathPerKeyOutputFormat}; in particular, it's a good
* idea to write out all of the records for the same key together within each partition of the data.
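*
* <p>A minimal usage sketch; the table contents, the value type and the output path below are
* illustrative assumptions rather than part of this class:
*
* <pre>{@code
* // An Avro-typed table, arranged so that records for the same key sit together in each partition.
* PTable<String, MyAvroRecord> byKey = ...;
* byKey.write(new AvroPathPerKeyTarget("/output/by-key"));
* }</pre>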
*/
public class AvroPathPerKeyTarget extends FileTargetImpl {
private static final Logger LOG = LoggerFactory.getLogger(AvroPathPerKeyTarget.class);
public AvroPathPerKeyTarget(String path) {
this(new Path(path));
}
public AvroPathPerKeyTarget(Path path) {
this(path, SequentialFileNamingScheme.getInstance());
}
public AvroPathPerKeyTarget(Path path, FileNamingScheme fileNamingScheme) {
this(path, AvroPathPerKeyOutputFormat.class, fileNamingScheme);
}
protected AvroPathPerKeyTarget(Path path, Class<? extends FileOutputFormat>
outputFormatClass,
FileNamingScheme fileNamingScheme) {
super(path, outputFormatClass, fileNamingScheme);
}
@Override
public boolean accept(OutputHandler handler, PType<?> ptype) {
if (ptype instanceof PTableType && ptype instanceof AvroType) {
if (String.class.equals(((PTableType) ptype).getKeyType().getTypeClass())) {
handler.configure(this, ptype);
return true;
}
}
return false;
}
@Override
public void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name) {
AvroType<?> atype = (AvroType) ((PTableType) ptype).getValueType();
FormatBundle bundle = FormatBundle.forOutput(AvroPathPerKeyOutputFormat.class);
bundle.set("avro.output.schema", atype.getSchema().toString());
AvroMode.fromType(atype).configure(bundle);
configureForMapReduce(job, AvroWrapper.class, NullWritable.class, bundle, outputPath, name);
}
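/**
* Moves the per-key output directories from the job's working path into this target's path,
* then creates the success indicator file in the destination directory.
*/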
@Override
public void handleOutputs(Configuration conf, Path workingPath, int index) throws IOException {
FileSystem srcFs = workingPath.getFileSystem(conf);
if (index == -1) {
// Map the -1 index from the SparkRuntime to the (correct) out0 value that
// the AvroPathPerKeyTarget expects.
index = 0;
}
Path base = new Path(workingPath, PlanningParameters.MULTI_OUTPUT_PREFIX + index);
if (!srcFs.exists(base)) {
LOG.warn("Nothing to copy from {}", base);
return;
}
FileSystem dstFs = path.getFileSystem(conf);
if (!dstFs.exists(path)) {
dstFs.mkdirs(path);
}
boolean sameFs = isCompatible(srcFs, path);
move(conf, base, srcFs, path, dstFs, sameFs);
dstFs.create(getSuccessIndicator(), true).close();
}
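/**
* Recursively moves each key directory (and any nested subdirectories) from the working path
* to the destination, using a rename when source and destination are on the same file system
* and a copy otherwise.
*/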
private void move(Configuration conf, Path srcBase, FileSystem srcFs, Path dstBase, FileSystem dstFs, boolean sameFs)
throws IOException {
Path[] keys = FileUtil.stat2Paths(srcFs.listStatus(srcBase));
if (!dstFs.exists(dstBase)) {
dstFs.mkdirs(dstBase);
}
for (Path key : keys) {
Path[] srcs = FileUtil.stat2Paths(srcFs.listStatus(key), key);
Path targetPath = new Path(dstBase, key.getName());
dstFs.mkdirs(targetPath);
for (Path s : srcs) {
if (srcFs.isDirectory(s)) {
Path nextBase = new Path(targetPath, s.getName());
dstFs.mkdirs(nextBase);
move(conf, s, srcFs, nextBase, dstFs, sameFs);
} else {
Path d = getDestFile(conf, s, targetPath, s.getName().contains("-m-"));
if (sameFs) {
srcFs.rename(s, d);
} else {
FileUtil.copy(srcFs, s, dstFs, d, true, true, conf);
}
}
}
}
}
@Override
public String toString() {
return "AvroFilePerKey(" + path + ")";
}
}
| 2,835 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/avro/AvroFileSource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.avro;
import java.io.IOException;
import java.util.List;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.AvroJob;
import org.apache.crunch.ReadableData;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.impl.FileSourceImpl;
import org.apache.crunch.types.avro.AvroInputFormat;
import org.apache.crunch.types.avro.AvroMode;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.avro.Avros;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
public class AvroFileSource<T> extends FileSourceImpl<T> implements ReadableSource<T> {
private static <S> FormatBundle getBundle(AvroType<S> ptype, AvroMode mode) {
FormatBundle bundle = FormatBundle.forInput(AvroInputFormat.class)
.set(AvroJob.INPUT_IS_REFLECT, String.valueOf(ptype.hasReflect()))
.set(AvroJob.INPUT_SCHEMA, ptype.getSchema().toString())
.set(Avros.REFLECT_DATA_FACTORY_CLASS, Avros.REFLECT_DATA_FACTORY.getClass().getName())
.set(RuntimeParameters.DISABLE_COMBINE_FILE, Boolean.FALSE.toString());
mode.configure(bundle);
return bundle;
}
private static <S> FormatBundle getBundle(AvroType<S> ptype) {
return getBundle(ptype, AvroMode.fromType(ptype));
}
private DatumReader<T> reader;
public AvroFileSource(Path path, AvroType<T> ptype) {
super(path, ptype, getBundle(ptype));
}
public AvroFileSource(Path path, AvroType<T> ptype, AvroMode mode) {
super(path, ptype, getBundle(ptype, mode));
}
public AvroFileSource(Path path, AvroType<T> ptype, DatumReader<T> reader) {
super(path, ptype, getBundle(ptype));
this.reader = reader;
}
public AvroFileSource(List<Path> paths, AvroType<T> ptype) {
super(paths, ptype, getBundle(ptype));
}
public AvroFileSource(List<Path> paths, AvroType<T> ptype, AvroMode mode) {
super(paths, ptype, getBundle(ptype, mode));
}
public AvroFileSource(List<Path> paths, AvroType<T> ptype, DatumReader<T> reader) {
super(paths, ptype, getBundle(ptype));
this.reader = reader;
}
@Override
public String toString() {
return "Avro(" + pathsAsString() + ")";
}
@Override
public Iterable<T> read(Configuration conf) throws IOException {
return read(conf, getFileReaderFactory((AvroType<T>) ptype));
}
@Override
public ReadableData<T> asReadable() {
return new AvroReadableData<T>(this.paths, (AvroType<T>) ptype);
}
protected AvroFileReaderFactory<T> getFileReaderFactory(AvroType<T> ptype) {
return new AvroFileReaderFactory<T>(reader, ptype);
}
}
| 2,836 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/avro/AvroReadableData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.avro;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.impl.ReadableDataImpl;
import org.apache.crunch.types.avro.AvroType;
import org.apache.hadoop.fs.Path;
import java.util.List;
public class AvroReadableData<T> extends ReadableDataImpl<T> {
private final AvroType<T> avroType;
public AvroReadableData(List<Path> paths, AvroType<T> avroType) {
super(paths);
this.avroType = avroType;
}
@Override
protected FileReaderFactory<T> getFileReaderFactory() {
return new AvroFileReaderFactory<T>(avroType);
}
}
| 2,837 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/avro/AvroFileReaderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.avro;
import java.io.IOException;
import java.util.Iterator;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.FsInput;
import org.apache.crunch.MapFn;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.impl.AutoClosingIterator;
import org.apache.crunch.types.avro.AvroMode;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.avro.Avros;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Iterators;
import com.google.common.collect.UnmodifiableIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
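/**
* Reads Avro data files into instances of {@code T}, using the supplied {@code DatumReader}
* if one was given, or a reader derived from the {@code AvroType}'s {@code AvroMode} otherwise.
*/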
public class AvroFileReaderFactory<T> implements FileReaderFactory<T> {
private static final Logger LOG = LoggerFactory.getLogger(AvroFileReaderFactory.class);
private DatumReader<T> reader;
private final AvroType<?> atype;
private final MapFn<T, T> mapFn;
public AvroFileReaderFactory(Schema schema) {
this(null, Avros.generics(schema));
}
public AvroFileReaderFactory(AvroType<?> atype) {
this(null, atype);
}
public AvroFileReaderFactory(DatumReader<T> reader, AvroType<?> atype) {
this.reader = reader;
this.atype = atype;
this.mapFn = (MapFn<T, T>) atype.getInputMapFn();
}
static <T> DatumReader<T> createDatumReader(AvroType<T> atype) {
return Avros.newReader(atype);
}
@Override
public Iterator<T> read(FileSystem fs, final Path path) {
AvroMode mode = AvroMode.fromType(atype).withFactoryFromConfiguration(fs.getConf());
final DatumReader recordReader = reader == null ? mode.getReader(atype.getSchema()) : reader;
this.mapFn.initialize();
try {
FsInput fsi = new FsInput(path, fs.getConf());
final DataFileReader<T> reader = new DataFileReader<T>(fsi, recordReader);
return new AutoClosingIterator<T>(reader, new UnmodifiableIterator<T>() {
@Override
public boolean hasNext() {
return reader.hasNext();
}
@Override
public T next() {
return mapFn.map(reader.next());
}
});
} catch (IOException e) {
LOG.info("Could not read avro file at path: {}", path, e);
return Iterators.emptyIterator();
}
}
}
| 2,838 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/avro/AvroTableFileSource.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.avro;
import java.util.List;
import org.apache.crunch.Pair;
import org.apache.crunch.TableSource;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.avro.AvroType;
import org.apache.hadoop.fs.Path;
/**
* A file source for reading a table of Avro keys and values.
*
* This file source can be used for reading and writing tables compatible with
* the {@code org.apache.avro.mapred.AvroJob} and {@code org.apache.avro.mapreduce.AvroJob} classes (in addition to
* tables created by Crunch).
*
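* <p>A minimal usage sketch; the path, the table type and the {@code pipeline} instance below
* are illustrative assumptions rather than part of this class:
*
* <pre>{@code
* AvroType<Pair<String, Long>> tableType = ...; // an Avro table type, e.g. built via Avros.tableOf
* PTable<String, Long> table = pipeline.read(new AvroTableFileSource<String, Long>(
*     Collections.singletonList(new Path("/data/table")), tableType));
* }</pre>
*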
* @see org.apache.crunch.types.avro.Avros#tableOf(org.apache.crunch.types.PType, org.apache.crunch.types.PType)
* @see org.apache.crunch.types.avro.Avros#keyValueTableOf(org.apache.crunch.types.PType, org.apache.crunch.types.PType)
*/
public class AvroTableFileSource<K, V> extends AvroFileSource<Pair<K, V>> implements TableSource<K,V> {
public AvroTableFileSource(List<Path> paths, AvroType<Pair<K, V>> tableType) {
super(paths, tableType);
}
@Override
public PTableType<K, V> getTableType() {
return (PTableType<K,V>)super.getType();
}
}
| 2,839 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/NLineFileSource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text;
import java.io.IOException;
import java.util.List;
import org.apache.crunch.ReadableData;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.impl.FileSourceImpl;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
/**
* A {@code Source} instance that uses the {@code NLineInputFormat}, which gives each map
* task a fraction of the lines in a text file as input. This is most useful when running
* simulations on Hadoop, where each line represents the configuration for a single
* simulation run.
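*
* <p>A minimal usage sketch; the path, the lines-per-task value and the {@code pipeline}
* instance below are illustrative assumptions rather than part of this class:
*
* <pre>{@code
* // Each map task will receive 10 lines of the configuration file.
* PCollection<String> configs = pipeline.read(
*     new NLineFileSource<String>("/data/sim-configs.txt", Writables.strings(), 10));
* }</pre>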
*/
public class NLineFileSource<T> extends FileSourceImpl<T> implements ReadableSource<T> {
private static FormatBundle getBundle(int linesPerTask) {
FormatBundle bundle = FormatBundle.forInput(NLineInputFormat.class);
bundle.set(NLineInputFormat.LINES_PER_MAP, String.valueOf(linesPerTask));
bundle.set(RuntimeParameters.DISABLE_COMBINE_FILE, "true");
return bundle;
}
/**
* Create a new {@code NLineFileSource} instance.
*
* @param path The path to the input data, as a String
* @param ptype The PType to use for processing the data
* @param linesPerTask The number of lines from the input each map task will process
*/
public NLineFileSource(String path, PType<T> ptype, int linesPerTask) {
this(new Path(path), ptype, linesPerTask);
}
/**
* Create a new {@code NLineFileSource} instance.
*
* @param path The {@code Path} to the input data
* @param ptype The PType to use for processing the data
* @param linesPerTask The number of lines from the input each map task will process
*/
public NLineFileSource(Path path, PType<T> ptype, int linesPerTask) {
super(path, ptype, getBundle(linesPerTask));
}
/**
* Create a new {@code NLineFileSource} instance.
*
* @param paths The {@code Path}s to the input data
* @param ptype The PType to use for processing the data
* @param linesPerTask The number of lines from the input each map task will process
*/
public NLineFileSource(List<Path> paths, PType<T> ptype, int linesPerTask) {
super(paths, ptype, getBundle(linesPerTask));
}
@Override
public String toString() {
return "NLine(" + pathsAsString() + ")";
}
@Override
public Iterable<T> read(Configuration conf) throws IOException {
return read(conf, new TextFileReaderFactory<T>(LineParser.forType(ptype)));
}
@Override
public ReadableData<T> asReadable() {
return new TextReadableData<T>(paths, ptype);
}
}
| 2,840 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/TextFileSource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.crunch.ReadableData;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.impl.FileSourceImpl;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.avro.AvroTypeFamily;
import org.apache.crunch.types.avro.AvroUtf8InputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
public class TextFileSource<T> extends FileSourceImpl<T> implements ReadableSource<T> {
private static <S> Class<? extends FileInputFormat<?, ?>> getInputFormat(PType<S> ptype) {
if (ptype.getFamily().equals(AvroTypeFamily.getInstance())) {
return AvroUtf8InputFormat.class;
} else {
return TextInputFormat.class;
}
}
public TextFileSource(Path path, PType<T> ptype) {
this(Collections.singletonList(path), ptype);
}
public TextFileSource(List<Path> paths, PType<T> ptype) {
super(paths, ptype, getInputFormat(ptype));
inputBundle.set(RuntimeParameters.DISABLE_COMBINE_FILE, Boolean.FALSE.toString());
}
@Override
public String toString() {
return "Text(" + pathsAsString() + ")";
}
@Override
public Iterable<T> read(Configuration conf) throws IOException {
return read(conf, new TextFileReaderFactory<T>(LineParser.forType(ptype)));
}
@Override
public ReadableData<T> asReadable() {
return new TextReadableData<T>(paths, ptype);
}
}
| 2,841 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/TextFileTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text;
import com.google.common.collect.Maps;
import org.apache.avro.Schema;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.Target;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.FileTargetImpl;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.avro.AvroTextOutputFormat;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.avro.AvroTypeFamily;
import org.apache.crunch.types.writable.WritableType;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import java.util.Map;
public class TextFileTarget extends FileTargetImpl {
private static Class<? extends FileOutputFormat> getOutputFormat(PType<?> ptype) {
if (ptype.getFamily().equals(AvroTypeFamily.getInstance())) {
return AvroTextOutputFormat.class;
} else {
return TextOutputFormat.class;
}
}
private final Map<String, String> extraConf = Maps.newHashMap();
public <T> TextFileTarget(String path) {
this(new Path(path));
}
public <T> TextFileTarget(Path path) {
this(path, SequentialFileNamingScheme.getInstance());
}
public <T> TextFileTarget(Path path, FileNamingScheme fileNamingScheme) {
super(path, null, fileNamingScheme);
}
@Override
public Path getPath() {
return path;
}
@Override
public String toString() {
return "Text(" + path + ")";
}
@Override
public Target outputConf(String key, String value) {
extraConf.put(key, value);
return this;
}
@Override
public void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name) {
Converter converter = ptype.getConverter();
Class keyClass = converter.getKeyClass();
Class valueClass = converter.getValueClass();
FormatBundle fb = FormatBundle.forOutput(getOutputFormat(ptype));
for (Map.Entry<String, String> e : extraConf.entrySet()) {
fb.set(e.getKey(), e.getValue());
}
configureForMapReduce(job, keyClass, valueClass, fb, outputPath, name);
}
@Override
public <T> SourceTarget<T> asSourceTarget(PType<T> ptype) {
if (!isTextCompatible(ptype)) {
return null;
}
if (ptype instanceof PTableType) {
return new TextFileTableSourceTarget(path, (PTableType) ptype).fileSystem(getFileSystem());
}
return new TextFileSourceTarget<T>(path, ptype).fileSystem(getFileSystem());
}
private <T> boolean isTextCompatible(PType<T> ptype) {
if (AvroTypeFamily.getInstance().equals(ptype.getFamily())) {
AvroType<T> at = (AvroType<T>) ptype;
if (at.getSchema().equals(Schema.create(Schema.Type.STRING))) {
return true;
}
} else if (WritableTypeFamily.getInstance().equals(ptype.getFamily())) {
if (ptype instanceof PTableType) {
PTableType ptt = (PTableType) ptype;
return isText(ptt.getKeyType()) && isText(ptt.getValueType());
} else {
return isText(ptype);
}
}
return false;
}
private <T> boolean isText(PType<T> wtype) {
return Text.class.equals(((WritableType) wtype).getSerializationClass());
}
}
| 2,842 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/TextReadableData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.impl.ReadableDataImpl;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.Path;
import java.util.List;
public class TextReadableData<T> extends ReadableDataImpl<T> {
private final PType<T> ptype;
private final String sep;
public TextReadableData(List<Path> paths, PType<T> ptype) {
this(paths, ptype, null);
}
public TextReadableData(List<Path> paths, PType<T> ptype, String sep) {
super(paths);
this.ptype = ptype;
this.sep = sep;
}
@Override
protected FileReaderFactory<T> getFileReaderFactory() {
if (sep == null) {
return new TextFileReaderFactory<T>(ptype);
} else {
return new TextFileReaderFactory<T>(LineParser.forTableType(((PTableType) ptype), sep));
}
}
}
| 2,843 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/TextFileTableSourceTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text;
import org.apache.crunch.Pair;
import org.apache.crunch.TableSourceTarget;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.ReadableSourcePathTargetImpl;
import org.apache.crunch.types.PTableType;
import org.apache.hadoop.fs.Path;
/**
* A {@code TableSource} and {@code SourceTarget} implementation that uses the
* {@code KeyValueTextInputFormat} and {@code TextOutputFormat} to support reading
* and writing text files as {@code PTable} instances using a tab separator for
* the keys and the values.
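*
* <p>A minimal usage sketch; the path, the key/value types and the {@code pipeline} instance
* below are illustrative assumptions rather than part of this class:
*
* <pre>{@code
* TableSourceTarget<String, String> kvText = new TextFileTableSourceTarget<String, String>(
*     "/data/kv.txt", Writables.tableOf(Writables.strings(), Writables.strings()));
* PTable<String, String> table = pipeline.read(kvText);
* }</pre>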
*/
public class TextFileTableSourceTarget<K, V> extends ReadableSourcePathTargetImpl<Pair<K, V>> implements
TableSourceTarget<K, V> {
private final PTableType<K, V> tableType;
public TextFileTableSourceTarget(String path, PTableType<K, V> tableType) {
this(new Path(path), tableType);
}
public TextFileTableSourceTarget(Path path, PTableType<K, V> tableType) {
this(path, tableType, SequentialFileNamingScheme.getInstance());
}
public TextFileTableSourceTarget(Path path, PTableType<K, V> tableType,
FileNamingScheme fileNamingScheme) {
super(new TextFileTableSource<K, V>(path, tableType), new TextFileTarget(path),
fileNamingScheme);
this.tableType = tableType;
}
@Override
public PTableType<K, V> getTableType() {
return tableType;
}
@Override
public String toString() {
return target.toString();
}
}
| 2,844 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/LineParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text;
import java.util.Iterator;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.crunch.MapFn;
import org.apache.crunch.Pair;
import org.apache.crunch.fn.CompositeMapFn;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
/**
* An abstraction for parsing the lines of a text file using a {@code PType<T>} to
* convert the lines of text into a given data type.
*
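* <p>A minimal usage sketch; the parsed line and the use of {@code Writables.strings()} below
* are illustrative assumptions:
*
* <pre>{@code
* LineParser<String> parser = LineParser.forType(Writables.strings());
* parser.initialize();
* String value = parser.parse("a single line of text");
* }</pre>
*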
* @param <T> The type returned by the text parsing
*/
abstract class LineParser<T> {
public static <S> LineParser<S> forType(PType<S> ptype) {
return new SimpleLineParser<S>(ptype);
}
public static <K, V> LineParser<Pair<K, V>> forTableType(PTableType<K, V> ptt, String sep) {
return new KeyValueLineParser<K, V>(ptt, sep);
}
private MapFn<String, T> mapFn;
public void initialize() {
mapFn = getMapFn();
mapFn.initialize();
}
public T parse(String line) {
return mapFn.map(line);
}
protected abstract MapFn<String, T> getMapFn();
private static <T> MapFn<String, T> getMapFnForPType(PType<T> ptype) {
MapFn ret = null;
if (String.class.equals(ptype.getTypeClass())) {
ret = (MapFn) IdentityFn.getInstance();
} else {
// Check for a composite MapFn for the PType.
// Note that this won't work for Avro-- need to solve that.
ret = ptype.getInputMapFn();
if (ret instanceof CompositeMapFn) {
ret = ((CompositeMapFn) ret).getSecond();
}
}
return ret;
}
private static class SimpleLineParser<S> extends LineParser<S> {
private final PType<S> ptype;
public SimpleLineParser(PType<S> ptype) {
this.ptype = ptype;
}
@Override
protected MapFn<String, S> getMapFn() {
return getMapFnForPType(ptype);
}
}
private static class KeyValueLineParser<K, V> extends LineParser<Pair<K, V>> {
private final PTableType<K, V> ptt;
private final String sep;
public KeyValueLineParser(PTableType<K, V> ptt, String sep) {
this.ptt = ptt;
this.sep = sep;
}
@Override
protected MapFn<String, Pair<K, V>> getMapFn() {
final MapFn<String, K> keyMapFn = getMapFnForPType(ptt.getKeyType());
final MapFn<String, V> valueMapFn = getMapFnForPType(ptt.getValueType());
return new MapFn<String, Pair<K, V>>() {
@Override
public void initialize() {
keyMapFn.initialize();
valueMapFn.initialize();
}
@Override
public Pair<K, V> map(String input) {
// Split on the first occurrence of the separator only, so the value may itself contain the separator.
List<String> kv = ImmutableList.copyOf(Splitter.on(sep).limit(2).split(input));
if (kv.size() != 2) {
throw new RuntimeException("Invalid input string: " + input);
}
return Pair.of(keyMapFn.map(kv.get(0)), valueMapFn.map(kv.get(1)));
}
};
}
}
}
| 2,845 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/TextFileTableSource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text;
import java.io.IOException;
import java.util.List;
import org.apache.crunch.Pair;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.ReadableData;
import org.apache.crunch.io.impl.FileTableSourceImpl;
import org.apache.crunch.types.PTableType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
/**
* A {@code Source} that uses the {@code KeyValueTextInputFormat} to process
* input text. If a separator for the keys and values in the text file is not specified,
* a tab character is used.
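*
* <p>A minimal usage sketch reading comma-separated keys and values; the path, the types and
* the {@code pipeline} instance below are illustrative assumptions rather than part of this class:
*
* <pre>{@code
* PTable<String, String> pairs = pipeline.read(new TextFileTableSource<String, String>(
*     "/data/pairs.csv", Writables.tableOf(Writables.strings(), Writables.strings()), ","));
* }</pre>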
*/
public class TextFileTableSource<K, V> extends FileTableSourceImpl<K, V>
implements ReadableSource<Pair<K, V>> {
// CRUNCH-125: Maintain compatibility with both versions of the KeyValueTextInputFormat's
// configuration field for specifying the separator character.
private static final String OLD_KV_SEP = "key.value.separator.in.input.line";
private static final String NEW_KV_SEP = "mapreduce.input.keyvaluelinerecordreader.key.value.separator";
private static FormatBundle getBundle(String sep) {
FormatBundle bundle = FormatBundle.forInput(KeyValueTextInputFormat.class);
bundle.set(OLD_KV_SEP, sep);
bundle.set(NEW_KV_SEP, sep);
return bundle;
}
private final String separator;
public TextFileTableSource(String path, PTableType<K, V> tableType) {
this(new Path(path), tableType);
}
public TextFileTableSource(Path path, PTableType<K, V> tableType) {
this(path, tableType, "\t");
}
public TextFileTableSource(List<Path> paths, PTableType<K, V> tableType) {
this(paths, tableType, "\t");
}
public TextFileTableSource(String path, PTableType<K, V> tableType, String separator) {
this(new Path(path), tableType, separator);
}
public TextFileTableSource(Path path, PTableType<K, V> tableType, String separator) {
super(path, tableType, getBundle(separator));
this.separator = separator;
}
public TextFileTableSource(List<Path> paths, PTableType<K, V> tableType, String separator) {
super(paths, tableType, getBundle(separator));
this.separator = separator;
}
@Override
public String toString() {
return "KeyValueText(" + pathsAsString() + ")";
}
@Override
public Iterable<Pair<K, V>> read(Configuration conf) throws IOException {
return read(conf,
new TextFileReaderFactory<Pair<K, V>>(LineParser.forTableType(getTableType(),
separator)));
}
@Override
public ReadableData<Pair<K, V>> asReadable() {
return new TextReadableData<Pair<K, V>>(paths, getTableType(), separator);
}
}
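// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source). It shows how a
// TextFileTableSource might be read into a PTable; the input path, separator,
// and pipeline variable are assumptions made for the example.
// ---------------------------------------------------------------------------
class TextFileTableSourceUsageSketch {
static org.apache.crunch.PTable<String, String> readTabSeparated(org.apache.crunch.Pipeline pipeline) {
// Reads "/data/kv.tsv" (hypothetical path) as a String-to-String table,
// splitting each line on the first tab character.
return pipeline.read(new TextFileTableSource<String, String>(
"/data/kv.tsv",
org.apache.crunch.types.writable.Writables.tableOf(
org.apache.crunch.types.writable.Writables.strings(),
org.apache.crunch.types.writable.Writables.strings()),
"\t"));
}
}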
| 2,846 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/TextFileSourceTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.ReadableSourcePathTargetImpl;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.Path;
public class TextFileSourceTarget<T> extends ReadableSourcePathTargetImpl<T> {
public TextFileSourceTarget(String path, PType<T> ptype) {
this(new Path(path), ptype);
}
public TextFileSourceTarget(Path path, PType<T> ptype) {
this(path, ptype, SequentialFileNamingScheme.getInstance());
}
public TextFileSourceTarget(Path path, PType<T> ptype, FileNamingScheme fileNamingScheme) {
super(new TextFileSource<T>(path, ptype), new TextFileTarget(path), fileNamingScheme);
}
@Override
public String toString() {
return target.toString();
}
}
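// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source). A
// TextFileSourceTarget can act as both a Source and a Target; the path and
// pipeline variable below are assumptions made for the example.
// ---------------------------------------------------------------------------
class TextFileSourceTargetUsageSketch {
static org.apache.crunch.PCollection<String> readLines(org.apache.crunch.Pipeline pipeline) {
// Reads "/data/lines.txt" (hypothetical path) as a PCollection of lines.
return pipeline.read(new TextFileSourceTarget<String>(
"/data/lines.txt", org.apache.crunch.types.writable.Writables.strings()));
}
}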
| 2,847 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/TextPathPerKeyOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import java.io.DataOutputStream;
import java.io.IOException;
public class TextPathPerKeyOutputFormat<V> extends TextOutputFormat<Text, V> {
@Override
public RecordWriter<Text, V> getRecordWriter(TaskAttemptContext taskAttemptContext) throws IOException {
Configuration conf = taskAttemptContext.getConfiguration();
FileOutputCommitter outputCommitter = (FileOutputCommitter) getOutputCommitter(taskAttemptContext);
Path basePath = new Path(outputCommitter.getWorkPath(), conf.get("mapreduce.output.basename", "part"));
boolean isCompressed = FileOutputFormat.getCompressOutput(taskAttemptContext);
CompressionCodec codec = null;
String extension = "";
if (isCompressed) {
Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(taskAttemptContext, GzipCodec.class);
codec = ReflectionUtils.newInstance(codecClass, conf);
extension = codec.getDefaultExtension();
}
return new TextPathPerKeyRecordWriter<>(basePath, getUniqueFile(taskAttemptContext, "part", extension),
isCompressed, codec, taskAttemptContext);
}
private class TextPathPerKeyRecordWriter<V> extends RecordWriter<Text, V> {
private final Path basePath;
private final String uniqueFileName;
private final Configuration conf;
private String currentKey;
private RecordWriter<V, NullWritable> currentWriter;
private CompressionCodec compressionCodec;
private boolean isCompressed;
private TaskAttemptContext taskAttemptContext;
public TextPathPerKeyRecordWriter(Path basePath, String uniqueFileName, boolean isCompressed,
CompressionCodec codec, TaskAttemptContext context) {
this.basePath = basePath;
this.uniqueFileName = uniqueFileName;
this.conf = context.getConfiguration();
this.isCompressed = isCompressed;
this.compressionCodec = codec;
this.taskAttemptContext = context;
}
@Override
public void write(Text record, V n) throws IOException, InterruptedException {
String key = record.toString();
if (!key.equals(currentKey)) {
if (currentWriter != null) {
currentWriter.close(taskAttemptContext);
}
currentKey = key;
Path dir = new Path(basePath, key);
FileSystem fs = dir.getFileSystem(conf);
if (!fs.exists(dir)) {
fs.mkdirs(dir);
}
Path filePath = new Path(dir, uniqueFileName);
DataOutputStream dataOutputStream;
if (fs.exists(filePath)) {
dataOutputStream = fs.append(filePath);
} else {
dataOutputStream = fs.create(filePath);
}
if (isCompressed && compressionCodec != null) {
dataOutputStream = new DataOutputStream(compressionCodec.createOutputStream(dataOutputStream));
}
String keyValueSeparator = conf.get(SEPERATOR, "\t");
currentWriter = new LineRecordWriter<>(dataOutputStream, keyValueSeparator);
}
currentWriter.write(n, NullWritable.get());
}
@Override
public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
if (currentWriter != null) {
currentWriter.close(taskAttemptContext);
currentKey = null;
currentWriter = null;
}
}
}
}
| 2,848 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/TextPathPerKeyTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text;
import com.google.common.collect.Maps;
import org.apache.crunch.Target;
import org.apache.crunch.impl.mr.plan.PlanningParameters;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.OutputHandler;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.FileTargetImpl;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Map;
public class TextPathPerKeyTarget extends FileTargetImpl {
private Map<String, String> extraConf = Maps.newHashMap();
private static final Logger LOG = LoggerFactory.getLogger(TextPathPerKeyTarget.class);
public TextPathPerKeyTarget(String path) {
this(new Path(path));
}
public TextPathPerKeyTarget(Path path) {
this(path, SequentialFileNamingScheme.getInstance());
}
public TextPathPerKeyTarget(Path path, FileNamingScheme fileNamingScheme) {
super(path, TextPathPerKeyOutputFormat.class, fileNamingScheme);
}
@Override
public boolean accept(OutputHandler handler, PType<?> ptype) {
if (ptype instanceof PTableType && ptype.getFamily() == WritableTypeFamily.getInstance()) {
if (String.class.equals(((PTableType) ptype).getKeyType().getTypeClass())) {
handler.configure(this, ptype);
return true;
}
}
return false;
}
@Override
public Target outputConf(String key, String value) {
extraConf.put(key, value);
return this;
}
@Override
public void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name) {
FormatBundle bundle = FormatBundle.forOutput(TextPathPerKeyOutputFormat.class);
for (Map.Entry<String, String> e : extraConf.entrySet()) {
bundle.set(e.getKey(), e.getValue());
}
Converter converter = ((PTableType) ptype).getValueType().getConverter();
Class valueClass = converter.getValueClass();
configureForMapReduce(job, valueClass, NullWritable.class, bundle, outputPath, name);
}
@Override
public void handleOutputs(Configuration conf, Path workingPath, int index) throws IOException {
FileSystem srcFs = workingPath.getFileSystem(conf);
Path base = new Path(workingPath, PlanningParameters.MULTI_OUTPUT_PREFIX + index);
if (!srcFs.exists(base)) {
LOG.warn("Nothing to copy from {}", base);
return;
}
FileSystem dstFs = path.getFileSystem(conf);
if (!dstFs.exists(path)) {
dstFs.mkdirs(path);
}
boolean sameFs = isCompatible(srcFs, path);
move(conf, base, srcFs, path, dstFs, sameFs);
dstFs.create(getSuccessIndicator(), true).close();
}
private void move(Configuration conf, Path srcBase, FileSystem srcFs, Path dstBase, FileSystem dstFs, boolean sameFs)
throws IOException {
Path[] keys = FileUtil.stat2Paths(srcFs.listStatus(srcBase));
if (!dstFs.exists(dstBase)) {
dstFs.mkdirs(dstBase);
}
for (Path key : keys) {
Path[] srcs = FileUtil.stat2Paths(srcFs.listStatus(key), key);
Path targetPath = new Path(dstBase, key.getName());
dstFs.mkdirs(targetPath);
for (Path s : srcs) {
if (srcFs.isDirectory(s)) {
move(conf, key, srcFs, targetPath, dstFs, sameFs);
} else {
Path d = getDestFile(conf, s, targetPath, s.getName().contains("-m-"));
if (sameFs) {
srcFs.rename(s, d);
} else {
FileUtil.copy(srcFs, s, dstFs, d, true, true, conf);
}
}
}
}
}
@Override
public String toString() {
return "TextFilePerKey(" + path + ")";
}
}
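// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source). The target only
// accepts PTables with String keys from the Writable type family; each distinct
// key becomes a sub-directory under the output path. The output path and the
// pipeline/table variables are assumptions made for the example.
// ---------------------------------------------------------------------------
class TextPathPerKeyTargetUsageSketch {
static void writeByKey(org.apache.crunch.Pipeline pipeline,
org.apache.crunch.PTable<String, String> table) {
// Values for key "foo" end up under "/out/by-key/foo" (hypothetical path).
pipeline.write(table, new TextPathPerKeyTarget("/out/by-key"));
}
}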
| 2,849 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/TextFileReaderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.util.Iterator;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.impl.AutoClosingIterator;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Iterators;
import com.google.common.collect.UnmodifiableIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TextFileReaderFactory<T> implements FileReaderFactory<T> {
private static final Logger LOG = LoggerFactory.getLogger(TextFileReaderFactory.class);
private final LineParser<T> parser;
public TextFileReaderFactory(PType<T> ptype) {
this(LineParser.forType(ptype));
}
public TextFileReaderFactory(LineParser<T> parser) {
this.parser = parser;
}
@Override
public Iterator<T> read(FileSystem fs, Path path) {
parser.initialize();
FSDataInputStream is;
try {
is = fs.open(path);
} catch (IOException e) {
LOG.info("Could not read path: {}", path, e);
return Iterators.emptyIterator();
}
final BufferedReader reader = new BufferedReader(new InputStreamReader(is, Charset.forName("UTF-8")));
return new AutoClosingIterator<T>(reader, new UnmodifiableIterator<T>() {
boolean nextChecked = false;
private String nextLine;
@Override
public boolean hasNext() {
if (nextChecked) {
return nextLine != null;
}
try {
nextChecked = true;
return (nextLine = reader.readLine()) != null;
} catch (IOException e) {
LOG.info("Exception reading text file stream", e);
return false;
}
}
@Override
public T next() {
if (!nextChecked && !hasNext()) {
return null;
}
nextChecked = false;
return parser.parse(nextLine);
}
});
}
}
| 2,850 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/xml/XmlSource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text.xml;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.impl.FileSourceImpl;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Charsets;
/**
* Large XML documents composed of repetitive XML elements can be broken into chunks delimited by each element's start
* and end tag. The {@link XmlSource} processes XML files and extracts the XML between the pre-configured start/end
* tags. The developer is responsible for parsing the content between the tags.
*
* The {@link XmlSource} does not parse the input XML files and is not aware of the XML semantics. It just splits the
* input file into chunks defined by the start/end tags. Nested XML elements are not supported.
*/
public class XmlSource extends FileSourceImpl<String> {
/**
* Create new XML data loader using the UTF-8 encoding.
*
* @param inputPath
* Input XML file location
* @param tagStart
* Element's start tag
* @param tagEnd
* Element's end tag
*/
public XmlSource(String inputPath, String tagStart, String tagEnd) {
this(inputPath, tagStart, tagEnd, Charsets.UTF_8.name());
}
/**
* Create new XML data loader using the specified encoding.
*
* @param inputPath
* Input XML file location
* @param tagStart
* Element's start tag
* @param tagEnd
* Element's end tag
* @param encoding
* Input file encoding
*/
public XmlSource(String inputPath, String tagStart, String tagEnd, String encoding) {
super(new Path(inputPath),
Writables.strings(),
FormatBundle.forInput(XmlInputFormat.class)
.set(XmlInputFormat.START_TAG_KEY, tagStart)
.set(XmlInputFormat.END_TAG_KEY, tagEnd)
.set(XmlInputFormat.ENCODING, encoding));
}
}
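// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source). Each element of
// the resulting PCollection is the raw XML text between one start tag and its
// matching end tag. The tags, path, and pipeline variable are assumptions made
// for the example.
// ---------------------------------------------------------------------------
class XmlSourceUsageSketch {
static org.apache.crunch.PCollection<String> readBooks(org.apache.crunch.Pipeline pipeline) {
return pipeline.read(new XmlSource("/data/books.xml", "<book>", "</book>"));
}
}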
| 2,851 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/xml/XmlInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text.xml;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.UnsupportedEncodingException;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetEncoder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
/**
* Reads records that are delimited by a specific begin/end tag.
*
* The {@link XmlInputFormat} extends Mahout's XmlInputFormat implementation, adding support for configurable input encodings.
*/
public class XmlInputFormat extends TextInputFormat {
private static final Logger log = LoggerFactory.getLogger(XmlInputFormat.class);
public static final String START_TAG_KEY = "xmlinput.start";
public static final String END_TAG_KEY = "xmlinput.end";
public static final String ENCODING = "xml.encoding";
@Override
public RecordReader<LongWritable, Text> createRecordReader(InputSplit split, TaskAttemptContext context) {
try {
return new XmlRecordReader((FileSplit) split, context.getConfiguration());
} catch (IOException ioe) {
log.warn("Error while creating XmlRecordReader", ioe);
return null;
}
}
/**
* XmlRecordReader class that reads through a given XML document and outputs XML blocks as records, as delimited by
* the start tag and end tag.
*/
public static class XmlRecordReader extends RecordReader<LongWritable, Text> {
private static final String DEFAULT_ENCODING = Charsets.UTF_8.name();
private final char[] startTag;
private final char[] endTag;
private final long start;
private final long end;
private LongWritable currentKey;
private Text currentValue;
private final DataOutputBuffer outBuffer;
private final BufferedReader inReader;
private final OutputStreamWriter outWriter;
private final String inputEncoding;
private long readByteCounter;
private CharsetEncoder charsetEncoder;
public XmlRecordReader(FileSplit split, Configuration conf) throws IOException {
inputEncoding = conf.get(ENCODING, DEFAULT_ENCODING);
startTag = new String(conf.get(START_TAG_KEY).getBytes(inputEncoding), inputEncoding).toCharArray();
endTag = new String(conf.get(END_TAG_KEY).getBytes(inputEncoding), inputEncoding).toCharArray();
// open the file and seek to the start of the split
start = split.getStart();
end = start + split.getLength();
Path file = split.getPath();
FileSystem fs = file.getFileSystem(conf);
FSDataInputStream fsin = fs.open(split.getPath());
fsin.seek(start);
readByteCounter = start;
inReader = new BufferedReader(new InputStreamReader(fsin, Charset.forName(inputEncoding)));
outBuffer = new DataOutputBuffer();
outWriter = new OutputStreamWriter(outBuffer, inputEncoding);
charsetEncoder = Charset.forName(inputEncoding).newEncoder();
}
private boolean next(LongWritable key, Text value) throws IOException {
if (readByteCounter < end && readUntilMatch(startTag, false)) {
try {
outWriter.write(startTag);
if (readUntilMatch(endTag, true)) {
key.set(readByteCounter);
outWriter.flush();
value.set(toUTF8(outBuffer.getData()), 0, outBuffer.getLength());
return true;
}
} finally {
outWriter.flush();
outBuffer.reset();
}
}
return false;
}
private byte[] toUTF8(byte[] in) throws UnsupportedEncodingException {
return new String(in, inputEncoding).getBytes(Charsets.UTF_8);
}
@Override
public void close() throws IOException {
inReader.close();
}
@Override
public float getProgress() throws IOException {
return (readByteCounter - start) / (float) (end - start);
}
private boolean readUntilMatch(char[] match, boolean withinBlock) throws IOException {
int i = 0;
while (true) {
int nextInCharacter = inReader.read();
readByteCounter = readByteCounter + calculateCharacterByteLength((char) nextInCharacter);
// end of file:
if (nextInCharacter == -1) {
return false;
}
// save to buffer:
if (withinBlock) {
outWriter.write(nextInCharacter);
}
// check if we're matching:
if (nextInCharacter == match[i]) {
i++;
if (i >= match.length) {
return true;
}
} else {
i = 0;
}
// see if we've passed the stop point
if (!withinBlock && i == 0 && readByteCounter >= end) {
return false;
}
}
}
@Override
public LongWritable getCurrentKey() throws IOException, InterruptedException {
return currentKey;
}
@Override
public Text getCurrentValue() throws IOException, InterruptedException {
return currentValue;
}
@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
currentKey = new LongWritable();
currentValue = new Text();
return next(currentKey, currentValue);
}
private int calculateCharacterByteLength(final char character) {
try {
return charsetEncoder.encode(CharBuffer.wrap(new char[] { character })).limit();
} catch (final CharacterCodingException e) {
throw new RuntimeException("The character attempting to be read (" + character + ") could not be encoded with "
+ inputEncoding);
}
}
}
}
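// ---------------------------------------------------------------------------
// Illustrative configuration sketch (not part of the original source). It shows
// how the input format might be wired into a plain MapReduce job; the tag
// values are assumptions made for the example.
// ---------------------------------------------------------------------------
class XmlInputFormatUsageSketch {
static void configure(org.apache.hadoop.mapreduce.Job job) {
job.getConfiguration().set(XmlInputFormat.START_TAG_KEY, "<entry>");
job.getConfiguration().set(XmlInputFormat.END_TAG_KEY, "</entry>");
job.getConfiguration().set(XmlInputFormat.ENCODING, "UTF-8");
job.setInputFormatClass(XmlInputFormat.class);
}
}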
| 2,852 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/csv/CSVFileSource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text.csv;
import java.io.IOException;
import java.util.List;
import org.apache.crunch.ReadableData;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.impl.FileSourceImpl;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* A {@code Source} instance that uses the {@code CSVInputFormat}, which presents
* each CSV record to the map task as a single value, regardless of how many
* lines the record may span.
*/
public class CSVFileSource extends FileSourceImpl<String> implements ReadableSource<String> {
/**
* The key used in the {@code CSVInputFormat}'s {@code FormatBundle} to set
* the underlying {@code CSVLineReader}'s buffer size
*/
public static final String CSV_BUFFER_SIZE = "csv.buffersize";
/**
* The key used in the {@code CSVInputFormat}'s {@code FormatBundle} to set
* the underlying {@code CSVLineReader}'s input file encoding
*/
public static final String CSV_INPUT_FILE_ENCODING = "csv.inputfileencoding";
/**
* The key used in the {@code CSVInputFormat}'s {@code FormatBundle} to set
* the underlying {@code CSVLineReader}'s open quote character
*/
public static final String CSV_OPEN_QUOTE_CHAR = "csv.openquotechar";
/**
* The key used in the {@code CSVInputFormat}'s {@code FormatBundle} to set
* the underlying {@code CSVLineReader}'s close quote character
*/
public static final String CSV_CLOSE_QUOTE_CHAR = "csv.closequotechar";
/**
* The key used in the {@code CSVInputFormat}'s {@code FormatBundle} to set
* the underlying {@code CSVLineReader}'s escape character
*/
public static final String CSV_ESCAPE_CHAR = "csv.escapechar";
/**
* The key used in the {@code CSVInputFormat}'s {@code FormatBundle} to set
* the underlying {@code CSVLineReader}'s maximum record size. If this is not
* set, INPUT_SPLIT_SIZE will be checked first, and if that is not set, 64mb
* will be assumed.
*/
public static final String MAXIMUM_RECORD_SIZE = "csv.maximumrecordsize";
/**
* The key used in the {@code CSVInputFormat}'s {@code FormatBundle} to set
* the underlying {@code CSVLineReader}'s input split size. If it is not set,
* 64mb will be assumed.
*/
public static final String INPUT_SPLIT_SIZE = "csv.inputsplitsize";
private int bufferSize;
private String inputFileEncoding;
private char openQuoteChar;
private char closeQuoteChar;
private char escapeChar;
private int maximumRecordSize;
/**
* Create a new CSVFileSource instance
*
* @param paths
* The {@code Path} to the input data
*/
public CSVFileSource(final List<Path> paths) {
this(paths, CSVLineReader.DEFAULT_BUFFER_SIZE, CSVLineReader.DEFAULT_INPUT_FILE_ENCODING,
CSVLineReader.DEFAULT_QUOTE_CHARACTER, CSVLineReader.DEFAULT_QUOTE_CHARACTER,
CSVLineReader.DEFAULT_ESCAPE_CHARACTER, CSVLineReader.DEFAULT_MAXIMUM_RECORD_SIZE);
}
/**
* Create a new CSVFileSource instance
*
* @param path
* The {@code Path} to the input data
*/
public CSVFileSource(final Path path) {
this(path, CSVLineReader.DEFAULT_BUFFER_SIZE, CSVLineReader.DEFAULT_INPUT_FILE_ENCODING,
CSVLineReader.DEFAULT_QUOTE_CHARACTER, CSVLineReader.DEFAULT_QUOTE_CHARACTER,
CSVLineReader.DEFAULT_ESCAPE_CHARACTER, CSVLineReader.DEFAULT_MAXIMUM_RECORD_SIZE);
}
/**
* Create a new CSVFileSource instance with all configurable options.
*
* @param paths
* A list of {@code Path}s to be used as input data.
* @param bufferSize
* The size of the buffer to be used in the underlying
* {@code CSVLineReader}
* @param inputFileEncoding
* The encoding of the input file to be read by the underlying
* {@code CSVLineReader}
* @param openQuoteChar
* The character representing the open quote character to be used in the
* underlying {@code CSVLineReader}
* @param closeQuoteChar
* The character representing the close quote character to be used in the
* underlying {@code CSVLineReader}
* @param escapeChar
* The character representing the escape character to be used in the
* underlying {@code CSVLineReader}
* @param maximumRecordSize
* The maximum acceptable size of one CSV record. Beyond this limit,
* {@code CSVLineReader} will stop parsing and an exception will be
* thrown.
*/
public CSVFileSource(final List<Path> paths, final int bufferSize, final String inputFileEncoding,
final char openQuoteChar, final char closeQuoteChar, final char escapeChar, final int maximumRecordSize) {
super(paths, Writables.strings(), getCSVBundle(bufferSize, inputFileEncoding, openQuoteChar, closeQuoteChar,
escapeChar, maximumRecordSize));
setPrivateVariables(bufferSize, inputFileEncoding, openQuoteChar, closeQuoteChar, escapeChar, maximumRecordSize);
}
/**
* Create a new CSVFileSource instance with all configurable options.
*
* @param path
* The {@code Path} to the input data
* @param bufferSize
* The size of the buffer to be used in the underlying
* {@code CSVLineReader}
* @param inputFileEncoding
* The encoding of the input file to be read by the underlying
* {@code CSVLineReader}
* @param openQuoteChar
* The character representing the open quote character to be used in the
* underlying {@code CSVLineReader}
* @param closeQuoteChar
* The character representing the close quote character to be used in the
* underlying {@code CSVLineReader}
* @param escapeChar
* The character representing the escape character to be used in the
* underlying {@code CSVLineReader}
* @param maximumRecordSize
* The maximum acceptable size of one CSV record. Beyond this limit,
* {@code CSVLineReader} will stop parsing and an exception will be
* thrown.
*/
public CSVFileSource(final Path path, final int bufferSize, final String inputFileEncoding, final char openQuoteChar,
final char closeQuoteChar, final char escapeChar, final int maximumRecordSize) {
super(path, Writables.strings(), getCSVBundle(bufferSize, inputFileEncoding, openQuoteChar, closeQuoteChar,
escapeChar, maximumRecordSize));
setPrivateVariables(bufferSize, inputFileEncoding, openQuoteChar, closeQuoteChar, escapeChar, maximumRecordSize);
}
@Override
public Iterable<String> read(final Configuration conf) throws IOException {
return read(conf, new CSVFileReaderFactory(bufferSize, inputFileEncoding, openQuoteChar, closeQuoteChar,
escapeChar, maximumRecordSize));
}
@Override
public ReadableData<String> asReadable() {
return new CSVReadableData(paths, bufferSize, inputFileEncoding, openQuoteChar, closeQuoteChar, escapeChar,
maximumRecordSize);
}
@Override
public String toString() {
return "CSV(" + pathsAsString() + ")";
}
/**
* Configures the job with any custom options. These will be retrieved later
* by {@code CSVInputFormat}
*/
private static FormatBundle<CSVInputFormat> getCSVBundle(final int bufferSize, final String inputFileEncoding,
final char openQuoteChar, final char closeQuoteChar, final char escapeChar, final int maximumRecordSize) {
final FormatBundle<CSVInputFormat> bundle = FormatBundle.forInput(CSVInputFormat.class);
bundle.set(RuntimeParameters.DISABLE_COMBINE_FILE, "true");
bundle.set(CSV_BUFFER_SIZE, String.valueOf(bufferSize));
bundle.set(CSV_INPUT_FILE_ENCODING, String.valueOf(inputFileEncoding));
bundle.set(CSV_OPEN_QUOTE_CHAR, String.valueOf(openQuoteChar));
bundle.set(CSV_CLOSE_QUOTE_CHAR, String.valueOf(closeQuoteChar));
bundle.set(CSV_ESCAPE_CHAR, String.valueOf(escapeChar));
bundle.set(MAXIMUM_RECORD_SIZE, String.valueOf(maximumRecordSize));
return bundle;
}
private void setPrivateVariables(final int bufferSize, final String inputFileEncoding, final char openQuoteChar,
final char closeQuoteChar, final char escapeChar, final int maximumRecordSize) {
if (isSameCharacter(openQuoteChar, escapeChar)) {
throw new IllegalArgumentException("The open quote (" + openQuoteChar + ") and escape (" + escapeChar
+ ") characters must be different!");
}
if (isSameCharacter(closeQuoteChar, escapeChar)) {
throw new IllegalArgumentException("The close quote (" + closeQuoteChar + ") and escape (" + escapeChar
+ ") characters must be different!");
}
this.bufferSize = bufferSize;
this.inputFileEncoding = inputFileEncoding;
this.openQuoteChar = openQuoteChar;
this.closeQuoteChar = closeQuoteChar;
this.escapeChar = escapeChar;
this.maximumRecordSize = maximumRecordSize;
}
private boolean isSameCharacter(final char c1, final char c2) {
return c2 == c1;
}
}
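// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source). Each element of
// the resulting PCollection is one complete CSV record, even when the record
// spans several physical lines. The path and pipeline variable are assumptions
// made for the example.
// ---------------------------------------------------------------------------
class CSVFileSourceUsageSketch {
static org.apache.crunch.PCollection<String> readRecords(org.apache.crunch.Pipeline pipeline) {
return pipeline.read(new CSVFileSource(new org.apache.hadoop.fs.Path("/data/records.csv")));
}
}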
| 2,853 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/csv/CSVLineReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text.csv;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetEncoder;
import javax.annotation.ParametersAreNonnullByDefault;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
/**
* A record reader written specifically to read individual lines from CSV files.
* Most notably, it can read CSV records which span multiple lines.
*/
@ParametersAreNonnullByDefault
public class CSVLineReader {
private static final Logger LOGGER = LoggerFactory.getLogger(CSVLineReader.class);
// InputStream related variables
/**
* The default buffer size (64k) to be used when reading from the InputStream
*/
public static final int DEFAULT_BUFFER_SIZE = 64 * 1024;
private final InputStreamReader inputStreamReader;
private final String inputFileEncoding;
private final CharsetEncoder charsetEncoder;
private char[] buffer;
private final int bufferSize;
private int bufferLength = 0;
private int bufferPosition = 0;
private boolean bufferIsPadded = false;
private static final char CR = '\r';
private static final char LF = '\n';
private boolean endOfFile = false;
// CSV parsing related variables
/**
* The default character to represent quotation marks, '"'
*/
public static final char DEFAULT_QUOTE_CHARACTER = '"';
/**
* The default character to represent an escape used before a control
* character that should be displayed, '\'
*/
public static final char DEFAULT_ESCAPE_CHARACTER = '\\';
/**
* The default character to represent a null character, '\0'
*/
public static final char NULL_CHARACTER = '\0';
/**
* The default input file encoding to read with, UTF-8
*/
public static final String DEFAULT_INPUT_FILE_ENCODING = "UTF-8";
/**
* The default input maximum record size
*/
public static final int DEFAULT_MAXIMUM_RECORD_SIZE = 67108864;
private final int maximumRecordSize;
private final char openQuoteChar;
private final char closeQuoteChar;
private final char escape;
private boolean inMultiLine = false;
private boolean currentlyInQuotes = false;
private boolean endOfLineReached = false;
private Text inputText = new Text();
/**
* This constructor will use default values for buffer size and control
* characters.
*
* @param inputStream
* The {@link InputStream} to read from. Note that this input stream
* should start at the very beginning of the CSV file to be read OR
* at the very beginning of a CSV entry. If the input stream starts
* at any other position (such as in the middle of a line) this
* reader will not work properly.
* @throws UnsupportedEncodingException
*/
public CSVLineReader(final InputStream inputStream) throws UnsupportedEncodingException {
this(inputStream, DEFAULT_BUFFER_SIZE, DEFAULT_INPUT_FILE_ENCODING, DEFAULT_QUOTE_CHARACTER,
DEFAULT_QUOTE_CHARACTER, DEFAULT_ESCAPE_CHARACTER, DEFAULT_MAXIMUM_RECORD_SIZE);
}
/**
* The fully customizable constructor for CSVLineReader
*
* @param inputStream
* The {@link InputStream} to read from. Note that this input stream
* should start at the very beginning of the CSV file to be read OR
* at the very beginning of a CSV entry. If the input stream starts
* at any other position (such as in the middle of a line) this
* reader will not work properly.
* @param bufferSize
* The size of the buffer used when reading the input stream
* @param inputFileEncoding
* The encoding of the file to read from.
* @param openQuoteChar
* Used to specify a custom open quote character
* @param closeQuoteChar
* Used to specify a custom close quote character
* @param escapeChar
* Used to specify a custom escape character
* @param maximumRecordSize
* The maximum acceptable size of one CSV record. Beyond this limit,
* parsing will stop and an exception will be thrown.
* @throws UnsupportedEncodingException
*/
public CSVLineReader(final InputStream inputStream, final int bufferSize, final String inputFileEncoding,
final char openQuoteChar, final char closeQuoteChar, final char escapeChar, final int maximumRecordSize) {
Preconditions.checkNotNull(inputStream, "inputStream may not be null");
Preconditions.checkNotNull(inputFileEncoding, "inputFileEncoding may not be null");
if (bufferSize <= 0) {
throw new IllegalArgumentException("The buffer (" + bufferSize + ")cannot be <= 0");
}
// Input Stream related variables
try {
this.inputStreamReader = new InputStreamReader(inputStream, inputFileEncoding);
} catch (final UnsupportedEncodingException uee) {
throw new RuntimeException(inputFileEncoding + " is not a supported encoding.", uee);
}
this.bufferSize = bufferSize;
this.buffer = new char[this.bufferSize];
// CSV parsing related variables
if (isSameCharacter(openQuoteChar, escapeChar)) {
throw new IllegalArgumentException("The open quote (" + openQuoteChar + ") and escape (" + escapeChar
+ ") characters must be different!");
}
if (isSameCharacter(closeQuoteChar, escapeChar)) {
throw new IllegalArgumentException("The close quote (" + closeQuoteChar + ") and escape (" + escapeChar
+ ") characters must be different!");
}
this.openQuoteChar = openQuoteChar;
this.closeQuoteChar = closeQuoteChar;
this.escape = escapeChar;
this.inputFileEncoding = inputFileEncoding;
this.charsetEncoder = Charset.forName(inputFileEncoding).newEncoder();
this.maximumRecordSize = maximumRecordSize;
}
/**
* This method will read through one full CSV record, place its content into
* the input Text and return the number of bytes (including newline
* characters) that were consumed.
*
* @param input
* a mutable {@link Text} object into which the text of the CSV
* record will be stored, without any line feeds or carriage returns
* @return the number of bytes that were read, including any control
* characters, line feeds, or carriage returns.
* @throws IOException
* if an IOException occurs while handling the file to be read
*/
public int readCSVLine(final Text input) throws IOException {
Preconditions.checkNotNull(input, "inputText may not be null");
inputText = new Text(input);
long totalBytesConsumed = 0;
if (endOfFile) {
return 0;
}
if (inMultiLine) {
throw new RuntimeException("Cannot begin reading a CSV record while inside of a multi-line CSV record.");
}
final StringBuilder stringBuilder = new StringBuilder();
do {
// Read a line from the file and add it to the builder
inputText.clear();
totalBytesConsumed += readFileLine(inputText);
stringBuilder.append(inputText.toString());
if (currentlyInQuotes && !endOfFile) {
// If we end up in a multi-line record, we need append a newline
stringBuilder.append('\n');
// Do a check on the total bytes consumed to see if something has gone
// wrong.
if (totalBytesConsumed > maximumRecordSize || totalBytesConsumed > Integer.MAX_VALUE) {
final String record = stringBuilder.toString();
LOGGER.error("Possibly malformed file encountered. First line of record: {}",
record.substring(0, record.indexOf('\n')));
throw new IOException("Possibly malformed file encountered. Check log statements for more information");
}
}
} while (currentlyInQuotes && !endOfFile);
// Set the input to the multi-line record
input.set(stringBuilder.toString());
return (int) totalBytesConsumed;
}
/**
* A method for reading through one single line in the CSV file, that is, it
* will read until the first line feed, carriage return, or set of both is
* found. The CSV parsing logic markers are maintained outside of this method
* to enable manipulation of that logic in order to find the beginning of a
* CSV record. Use {@link CSVLineReader#isInMultiLine()} and
* {@link CSVLineReader#resetMultiLine()} to do so. See
* {@link CSVInputFormat#getSplitsForFile(long, long, org.apache.hadoop.fs.Path, org.apache.hadoop.fs.FSDataInputStream)}
* for an example.
*
* @param input
* a mutable {@link Text} object into which the text of the line will
* be stored, without any line feeds or carriage returns
* @return the number of bytes that were read, including any control
* characters, line feeds, or carriage returns.
* @throws IOException
* if an IOException occurs while handling the file to be read
*/
public int readFileLine(final Text input) throws IOException {
Preconditions.checkNotNull(input, "inputText may not be null");
if (endOfFile) {
return 0;
}
// This integer keeps track of the number of newline characters used to
// terminate the line being read. This could be 1, in the case of LF or CR,
// or 2, in the case of CRLF.
int newlineLength = 0;
int inputTextLength = 0;
long bytesConsumed = 0;
int readTextLength = 0;
int startPosition = bufferPosition;
endOfLineReached = false;
inputText = new Text(input);
do {
boolean checkForLF = false;
// Figure out where we are in the buffer and fill it if necessary.
if (bufferPosition >= bufferLength) {
refillBuffer();
startPosition = bufferPosition;
if (endOfFile) {
break;
}
}
newlineLength = 0;
// Iterate through the buffer looking for newline characters while keeping
// track of if we're in a field and/or in quotes.
for (; bufferPosition < bufferLength; ++bufferPosition) {
bytesConsumed += calculateCharacterByteLength(buffer[bufferPosition]);
if (buffer[bufferPosition] == this.escape) {
if (isNextCharacterEscapable(currentlyInQuotes, bufferPosition)) {
// checks to see if we are in quotes and if the next character is a
// quote or an escape
// character. If so, that's fine. Record the next character's size
// and skip it.
++bufferPosition;
bytesConsumed += calculateCharacterByteLength(buffer[bufferPosition]);
}
} else if (buffer[bufferPosition] == openQuoteChar || buffer[bufferPosition] == closeQuoteChar) {
// toggle currentlyInQuotes if we've hit a non-escaped quote character
currentlyInQuotes = !currentlyInQuotes;
} else if (buffer[bufferPosition] == LF || buffer[bufferPosition] == CR) {
boolean lastCharWasCR = buffer[bufferPosition] == CR;
// Line is over, make note and increment the size of the newlinelength
// counter.
endOfLineReached = true;
++newlineLength;
++bufferPosition;
if (lastCharWasCR && buffer[bufferPosition] == LF) {
lastCharWasCR = false;
// Check for LF (in case of CRLF line endings) and increment the
// counter, skip it by moving the buffer position, then record the
// length of the LF.
++newlineLength;
++bufferPosition;
bytesConsumed += calculateCharacterByteLength(buffer[bufferPosition]);
} else if (lastCharWasCR && bufferPosition >= bufferLength) {
// We just read a CR at the very end of the buffer. If this is a
// file with CRLF line endings, there will be a LF next that we need
// to check for and account for in bytesRead before we count this
// line as "read".
checkForLF = true;
}
break;
}
}
// This is the length of the actual text and important stuff in the line.
readTextLength = bufferPosition - startPosition - newlineLength;
// Append the results.
if (readTextLength > Integer.MAX_VALUE - inputTextLength) {
readTextLength = Integer.MAX_VALUE - inputTextLength;
}
if (readTextLength > 0) {
// This will append the portion of the buffer containing only the
// important text, omitting any newline characters
inputText.set(new StringBuilder().append(inputText.toString())
.append(new String(buffer, startPosition, readTextLength)).toString());
inputTextLength += readTextLength;
}
// If the last character we read was a CR at the end of the buffer, we
// need to check for an LF after a buffer refill.
if (checkForLF) {
refillBuffer();
if (endOfFile) {
break;
}
if (buffer[bufferPosition] == LF) {
bytesConsumed += calculateCharacterByteLength(buffer[bufferPosition]);
++bufferPosition;
++newlineLength;
}
}
} while (newlineLength == 0 && bytesConsumed < Integer.MAX_VALUE);
if (endOfLineReached) {
inMultiLine = currentlyInQuotes;
}
if (bytesConsumed > Integer.MAX_VALUE) {
throw new IOException("Too many bytes consumed before newline: " + Integer.MAX_VALUE);
}
input.set(inputText);
return (int) bytesConsumed;
}
/**
* For use with {@link CSVLineReader#readFileLine(Text)}. Returns current
* multi-line CSV status.
*
* @return a boolean signifying if the last
* {@link CSVLineReader#readFileLine(Text)} call ended in the middle
* of a multi-line CSV record
*/
public boolean isInMultiLine() {
return inMultiLine;
}
/**
* For use with {@link CSVLineReader#readFileLine(Text)}. Resets current
* multi-line CSV status.
*/
public void resetMultiLine() {
inMultiLine = false;
currentlyInQuotes = false;
}
private boolean isSameCharacter(final char c1, final char c2) {
return c1 != NULL_CHARACTER && c1 == c2;
}
private boolean isNextCharacterEscapable(final boolean inQuotes, final int i) {
return inQuotes // we are in quotes, therefore there can be escaped quotes
// in here.
&& buffer.length > (i + 1) // there is indeed another character to
// check.
&& (buffer[i + 1] == closeQuoteChar || buffer[i + 1] == openQuoteChar || buffer[i + 1] == this.escape);
}
private void refillBuffer() throws IOException {
bufferPosition = 0;
// Undo the buffer padding
if (bufferIsPadded) {
buffer = new char[bufferLength];
bufferIsPadded = false;
}
bufferLength = inputStreamReader.read(buffer, 0, buffer.length);
// if bufferLength < bufferSize, this buffer will contain the end of the
// file. However, our line logic needs to be able to see what's a few spots
// past the current position. This will cause an index out of bounds
// exception if the buffer is full. So, my solution is to add a few extra
// spaces to the buffer so that the logic can still read ahead.
if (buffer.length == bufferLength) {
final char[] biggerBuffer = new char[bufferLength + 3];
for (int i = 0; i < bufferLength; i++) {
biggerBuffer[i] = buffer[i];
}
buffer = biggerBuffer;
bufferIsPadded = true;
}
if (bufferLength <= 0) {
endOfFile = true;
}
}
private int calculateCharacterByteLength(final char character) {
try {
return charsetEncoder.encode(CharBuffer.wrap(new char[] { character })).limit();
} catch (final CharacterCodingException e) {
throw new RuntimeException("The character attempting to be read (" + character + ") could not be encoded with "
+ inputFileEncoding);
}
}
}
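// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source). It reads every
// CSV record from a stream using the default quote/escape configuration;
// readCSVLine returns the number of bytes consumed and 0 at end of file. The
// stream handed in is an assumption made for the example.
// ---------------------------------------------------------------------------
class CSVLineReaderUsageSketch {
static java.util.List<String> readAllRecords(java.io.InputStream in) throws java.io.IOException {
CSVLineReader reader = new CSVLineReader(in);
java.util.List<String> records = new java.util.ArrayList<String>();
org.apache.hadoop.io.Text record = new org.apache.hadoop.io.Text();
while (reader.readCSVLine(record) > 0) {
records.add(record.toString());
}
return records;
}
}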
| 2,854 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/csv/CSVRecordIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text.csv;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.util.Iterator;
/**
* An {@code Iterator} for an internally created {@code CSVLineReader}
*/
public class CSVRecordIterator implements Iterator<String>, Closeable {
private static final Log LOG = LogFactory.getLog(CSVRecordIterator.class);
private final CSVLineReader csvLineReader;
private InputStream inputStream;
private String currentLine;
/**
* Creates an instance of {@code CSVRecordIterator} with default configuration
*
* @param inputStream
* The {@code InputStream} for the CSV file to iterate over
* @throws UnsupportedEncodingException
*/
public CSVRecordIterator(final InputStream inputStream) throws UnsupportedEncodingException {
this(inputStream, CSVLineReader.DEFAULT_BUFFER_SIZE, CSVLineReader.DEFAULT_INPUT_FILE_ENCODING,
CSVLineReader.DEFAULT_QUOTE_CHARACTER, CSVLineReader.DEFAULT_QUOTE_CHARACTER,
CSVLineReader.DEFAULT_ESCAPE_CHARACTER, CSVLineReader.DEFAULT_MAXIMUM_RECORD_SIZE);
}
/**
* Creates an instance of {@code CSVRecordIterator} with custom configuration
*
* @param inputStream
* The {@code InputStream} for the CSV file to iterate over
* @param bufferSize
* The size of the buffer used when reading the input stream
* @param inputFileEncoding
* the encoding for the input file
* @param openQuoteChar
* the character to use to open quote blocks
* @param closeQuoteChar
* the character to use to close quote blocks
* @param escapeChar
* the character to use for escaping control characters and quotes
* @param maximumRecordSize
* The maximum acceptable size of one CSV record. Beyond this limit,
* {@code CSVLineReader} will stop parsing and an exception will be
* thrown.
* @throws UnsupportedEncodingException
*/
public CSVRecordIterator(final InputStream inputStream, final int bufferSize, final String inputFileEncoding,
final char openQuoteChar, final char closeQuoteChar, final char escapeChar, final int maximumRecordSize)
throws UnsupportedEncodingException {
csvLineReader = new CSVLineReader(inputStream, bufferSize, inputFileEncoding, openQuoteChar, closeQuoteChar,
escapeChar, maximumRecordSize);
this.inputStream = inputStream;
incrementValue();
}
@Override
public boolean hasNext() {
if (currentLine != null) {
return true;
}
try {
this.close();
} catch (IOException e) {
LOG.error("Failed to close CSVRecordIterator", e);
}
return false;
}
@Override
public String next() {
final String result = currentLine;
incrementValue();
return result;
}
@Override
public void remove() {
incrementValue();
}
private void incrementValue() {
final Text tempText = new Text();
try {
csvLineReader.readCSVLine(tempText);
} catch (final IOException e) {
throw new RuntimeException("A problem occurred accessing the underlying CSV file stream.", e);
}
final String tempTextAsString = tempText.toString();
if ("".equals(tempTextAsString)) {
currentLine = null;
} else {
currentLine = tempTextAsString;
}
}
@Override
public void close() throws IOException {
if (inputStream != null) {
inputStream.close();
inputStream = null;
}
}
}
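// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source). The iterator
// yields one complete CSV record per call to next(), including multi-line
// records, and closes the underlying stream once it is exhausted. The stream
// handed in is an assumption made for the example.
// ---------------------------------------------------------------------------
class CSVRecordIteratorUsageSketch {
static void printRecords(java.io.InputStream in) throws java.io.UnsupportedEncodingException {
CSVRecordIterator records = new CSVRecordIterator(in);
while (records.hasNext()) {
System.out.println(records.next());
}
}
}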
| 2,855 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/csv/CSVRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text.csv;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
/**
* An extension of {@link RecordReader} used to intelligently read CSV files
*/
public class CSVRecordReader extends RecordReader<LongWritable, Text> {
private static final Logger LOGGER = LoggerFactory.getLogger(CSVRecordReader.class);
private long start;
private long pos;
private long end;
private LongWritable key = null;
private Text value = null;
private InputStream fileIn;
private CSVLineReader csvLineReader;
private final char openQuote;
private final char closeQuote;
private final char escape;
private final String inputFileEncoding;
private final int fileStreamBufferSize;
private final int maximumRecordSize;
private int totalRecordsRead = 0;
/**
* Default constructor, specifies default values for the {@link CSVLineReader}
*/
public CSVRecordReader() {
this(CSVLineReader.DEFAULT_BUFFER_SIZE, CSVLineReader.DEFAULT_INPUT_FILE_ENCODING,
CSVLineReader.DEFAULT_QUOTE_CHARACTER, CSVLineReader.DEFAULT_QUOTE_CHARACTER,
CSVLineReader.DEFAULT_ESCAPE_CHARACTER, CSVLineReader.DEFAULT_MAXIMUM_RECORD_SIZE);
}
/**
* Customizable constructor used to specify all input parameters for the
* {@link CSVLineReader}
*
* @param bufferSize
* the size of the buffer to use while parsing through the input file
* @param inputFileEncoding
* the encoding for the input file
* @param openQuote
* the character to use to open quote blocks
* @param closeQuote
* the character to use to close quote blocks
* @param escape
* the character to use for escaping control characters and quotes
* @param maximumRecordSize
* The maximum acceptable size of one CSV record. Beyond this limit,
* {@code CSVLineReader} will stop parsing and an exception will be
* thrown.
*/
public CSVRecordReader(final int bufferSize, final String inputFileEncoding, final char openQuote,
final char closeQuote, final char escape, final int maximumRecordSize) {
Preconditions.checkNotNull(openQuote, "quote cannot be null.");
Preconditions.checkNotNull(closeQuote, "quote cannot be null.");
Preconditions.checkNotNull(escape, "escape cannot be null.");
this.fileStreamBufferSize = bufferSize;
this.inputFileEncoding = inputFileEncoding;
this.openQuote = openQuote;
this.closeQuote = closeQuote;
this.escape = escape;
this.maximumRecordSize = maximumRecordSize;
}
/**
* Initializes the record reader
*
* @param genericSplit
* the split assigned to this record reader
* @param context
* the job context for this record reader
* @throws IOException
* if an IOException occurs while handling the file to be read
*/
@Override
public void initialize(final InputSplit genericSplit, final TaskAttemptContext context) throws IOException {
final FileSplit split = (FileSplit) genericSplit;
final Configuration job = context.getConfiguration();
start = split.getStart();
end = start + split.getLength();
this.pos = start;
final Path file = split.getPath();
CompressionCodecFactory codecFactory = new CompressionCodecFactory(context.getConfiguration());
CompressionCodec compressionCodec = codecFactory.getCodec(file);
LOGGER.info("Initializing processing of split for file: {}", file);
LOGGER.info("File size is: {}", file.getFileSystem(job).getFileStatus(file).getLen());
LOGGER.info("Split starts at: {}", start);
LOGGER.info("Split will end at: {}", end);
LOGGER.info("File is compressed: {}", (compressionCodec != null));
// Open the file, seek to the start of the split
// then wrap it in a CSVLineReader
if (compressionCodec == null) {
FSDataInputStream in = file.getFileSystem(job).open(file);
in.seek(start);
fileIn = in;
} else {
fileIn = compressionCodec.createInputStream(file.getFileSystem(job).open(file));
}
csvLineReader = new CSVLineReader(fileIn, this.fileStreamBufferSize, inputFileEncoding, this.openQuote,
this.closeQuote, this.escape, this.maximumRecordSize);
}
/**
* Increments the key and value pair for this reader
*
* @return true if there is another key/value to be read, false if not.
* @throws IOException
* if an IOException occurs while handling the file to be read
*/
@Override
public boolean nextKeyValue() throws IOException {
if (key == null) {
key = new LongWritable();
}
key.set(pos);
if (value == null) {
value = new Text();
}
if (pos >= end) {
key = null;
value = null;
LOGGER.info("End of split reached, ending processing. Total records read for this split: {}", totalRecordsRead);
close();
return false;
}
final int newSize = csvLineReader.readCSVLine(value);
if (newSize == 0) {
LOGGER.info("End of file reached. Ending processing. Total records read for this split: {}", totalRecordsRead);
return false;
}
pos += newSize;
totalRecordsRead++;
return true;
}
/**
* Returns the current key
*
* @return the key corresponding to the current value
*/
@Override
public LongWritable getCurrentKey() {
return key;
}
/**
* Returns the current value
*
* @return the value corresponding to the current key
*/
@Override
public Text getCurrentValue() {
return value;
}
/**
* Get the progress within the split
*/
@Override
public float getProgress() {
if (start == end) {
return 0.0f;
}
return Math.min(1.0f, (pos - start) / (float) (end - start));
}
/**
* Closes the file input stream for this record reader
*/
@Override
public synchronized void close() throws IOException {
fileIn.close();
}
}
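/*
 * A standalone, illustrative sketch (not part of the class above) of how this reader is
 * driven once the framework or a test harness supplies an InputSplit and a
 * TaskAttemptContext; the driver class, the printing, and the assumption that both
 * arguments are provided by the caller are hypothetical.
 */
import java.io.IOException;
import org.apache.crunch.io.text.csv.CSVRecordReader;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class CSVRecordReaderSketch {
  public static void dumpSplit(InputSplit split, TaskAttemptContext context) throws IOException {
    // Default buffer, encoding, quote, and escape settings.
    CSVRecordReader reader = new CSVRecordReader();
    reader.initialize(split, context);
    try {
      while (reader.nextKeyValue()) {
        // The key is the byte offset of the record; the value may span multiple physical lines.
        System.out.println(reader.getCurrentKey() + "\t" + reader.getCurrentValue());
      }
    } finally {
      reader.close(); // the reader also closes itself once the end of the split is reached
    }
  }
}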
| 2,856 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/csv/CSVInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text.csv;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import com.google.common.annotations.VisibleForTesting;
/**
* A {@link FileInputFormat} for use specifically with CSV files. This input
* format handles the fact that CSV fields may contain embedded line breaks, so
* a record that spans multiple lines must still be treated as a single record.
*/
public class CSVInputFormat extends FileInputFormat<LongWritable, Text> implements Configurable {
@VisibleForTesting
protected int bufferSize;
@VisibleForTesting
protected String inputFileEncoding;
@VisibleForTesting
protected char openQuoteChar;
@VisibleForTesting
protected char closeQuoteChar;
@VisibleForTesting
protected char escapeChar;
@VisibleForTesting
protected int maximumRecordSize;
private Configuration configuration;
/**
* This method is used by Crunch to get an instance of {@link CSVRecordReader}
*
* @param split
* the {@link InputSplit} that will be assigned to the record reader
* @param context
* the {@link TaskAttemptContext} for the job
* @return an instance of {@link CSVRecordReader} created using configured
* separator, quote, escape, and maximum record size.
*/
@Override
public RecordReader<LongWritable, Text> createRecordReader(final InputSplit split, final TaskAttemptContext context) {
return new CSVRecordReader(this.bufferSize, this.inputFileEncoding, this.openQuoteChar, this.closeQuoteChar,
this.escapeChar, this.maximumRecordSize);
}
/**
* A method used by Crunch to calculate the splits for each file. This will
* split each CSV file at the end of a valid CSV record. The default split
* size is 64MB, but this can be reconfigured by setting the
* "csv.inputsplitsize" option in the job configuration.
*
* @param job
* the {@link JobContext} for the current job.
* @return a List containing all of the calculated splits for a single file.
* @throws IOException
* if an error occurs while accessing HDFS
*/
@Override
public List<InputSplit> getSplits(final JobContext job) throws IOException {
final long splitSize = job.getConfiguration().getLong(CSVFileSource.INPUT_SPLIT_SIZE, 67108864);
final List<InputSplit> splits = new ArrayList<InputSplit>();
final Path[] paths = FileUtil.stat2Paths(listStatus(job).toArray(new FileStatus[0]));
FSDataInputStream inputStream = null;
Configuration config = job.getConfiguration();
CompressionCodecFactory compressionCodecFactory = new CompressionCodecFactory(config);
try {
for (final Path path : paths) {
FileSystem fileSystem = path.getFileSystem(config);
CompressionCodec codec = compressionCodecFactory.getCodec(path);
if (codec == null) {
// if the file is not compressed then split it up.
inputStream = fileSystem.open(path);
splits.addAll(getSplitsForFile(splitSize, fileSystem.getFileStatus(path).getLen(), path, inputStream));
} else {
// the file is compressed, so don't split it
splits.add(new FileSplit(path, 0, Long.MAX_VALUE, new String[0]));
}
}
return splits;
} finally {
if (inputStream != null) {
inputStream.close();
}
}
}
@Override
public Configuration getConf() {
return configuration;
}
@Override
public void setConf(final Configuration conf) {
configuration = conf;
configure();
}
/**
* This method will read the configuration options that were set in
* {@link CSVFileSource}'s private getBundle() method
*/
public void configure() {
inputFileEncoding = this.configuration.get(CSVFileSource.CSV_INPUT_FILE_ENCODING, CSVLineReader.DEFAULT_INPUT_FILE_ENCODING);
maximumRecordSize = this.configuration.getInt(CSVFileSource.MAXIMUM_RECORD_SIZE, this.configuration.getInt(CSVFileSource.INPUT_SPLIT_SIZE, CSVLineReader.DEFAULT_MAXIMUM_RECORD_SIZE));
closeQuoteChar = this.configuration.get(CSVFileSource.CSV_CLOSE_QUOTE_CHAR, String.valueOf(CSVLineReader.DEFAULT_QUOTE_CHARACTER)).charAt(0);
openQuoteChar = this.configuration.get(CSVFileSource.CSV_OPEN_QUOTE_CHAR, String.valueOf(CSVLineReader.DEFAULT_QUOTE_CHARACTER)).charAt(0);
escapeChar = this.configuration.get(CSVFileSource.CSV_ESCAPE_CHAR, String.valueOf(CSVLineReader.DEFAULT_ESCAPE_CHARACTER)).charAt(0);
bufferSize = this.configuration.getInt(CSVFileSource.CSV_BUFFER_SIZE, CSVLineReader.DEFAULT_BUFFER_SIZE);
}
/**
* In summary, this method starts at the beginning of the file, seeks to the
* position corresponding to the desired split size, reads to the end of the
* line that contains that position, then keeps reading lines until the
* CSVLineReader indicates that the current position is no longer within a CSV
* record. It then marks that position as a split boundary and repeats this
* logic for the remainder of the file.
*/
@VisibleForTesting
protected List<FileSplit> getSplitsForFile(final long splitSize, final long fileSize, final Path fileName,
final FSDataInputStream inputStream) throws IOException {
final List<FileSplit> splitsList = new ArrayList<FileSplit>();
long splitStart;
long currentPosition = 0;
boolean endOfFile = false;
while (!endOfFile) {
// Set the start of this split to the furthest read point in the file
splitStart = currentPosition;
// Skip a number of bytes equal to the desired split size to avoid parsing
// every csv line, which greatly increases the run time
currentPosition = splitStart + splitSize;
// The input stream will freak out if we try to seek past the EOF
if (currentPosition >= fileSize) {
currentPosition = fileSize;
endOfFile = true;
final FileSplit fileSplit = new FileSplit(fileName, splitStart, currentPosition - splitStart, new String[]{});
splitsList.add(fileSplit);
break;
}
// Every time we seek to the new approximate split point,
// we need to create a new CSVLineReader around the stream.
inputStream.seek(currentPosition);
final CSVLineReader csvLineReader = new CSVLineReader(inputStream, this.bufferSize, this.inputFileEncoding,
this.openQuoteChar, this.closeQuoteChar, this.escapeChar, this.maximumRecordSize);
// This line is potentially garbage because we most likely just sought to
// the middle of a line. Read the rest of the line and leave it for the
// previous split. Then reset the multi-line CSV record boolean, because
// the partial line will have a very high chance of falsely triggering the
// class-wide multi-line logic.
currentPosition += csvLineReader.readFileLine(new Text());
csvLineReader.resetMultiLine();
// Now, we may still be in the middle of a multi-line CSV record.
currentPosition += csvLineReader.readFileLine(new Text());
// If we are, read until we are not.
while (csvLineReader.isInMultiLine()) {
final int bytesRead = csvLineReader.readFileLine(new Text());
// End of file
if (bytesRead <= 0) {
break;
}
currentPosition += bytesRead;
}
// We're out of the multi-line CSV record, so it's safe to end the
// previous split.
splitsList.add(new FileSplit(fileName, splitStart, currentPosition - splitStart, new String[]{}));
}
return splitsList;
}
@Override
protected boolean isSplitable(JobContext context, Path file) {
CompressionCodec codec = new CompressionCodecFactory(context.getConfiguration()).getCodec(file);
return codec == null;
}
}
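/*
 * A standalone, illustrative sketch (not part of the file above) of wiring this input
 * format into a plain MapReduce job; the input path and the 32MB split size are
 * assumptions. In a Crunch pipeline, CSVFileSource performs this configuration for you.
 */
import java.io.IOException;
import org.apache.crunch.io.text.csv.CSVInputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class CSVJobSketch {
  public static Job newCsvJob(Path input) throws IOException {
    Configuration conf = new Configuration();
    // "csv.inputsplitsize" is the option read by getSplits(); the default is 64MB (67108864 bytes).
    conf.setLong("csv.inputsplitsize", 32L * 1024 * 1024);
    Job job = Job.getInstance(conf, "csv-ingest");
    job.setInputFormatClass(CSVInputFormat.class);
    FileInputFormat.addInputPath(job, input);
    // Mapper, reducer, and output configuration would follow here.
    return job;
  }
}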
| 2,857 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/csv/CSVFileReaderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text.csv;
import java.io.IOException;
import java.io.InputStream;
import java.util.Iterator;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.collect.Iterators;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The {@code FileReaderFactory} instance that is responsible for building a
* {@code CSVRecordIterator}
*/
public class CSVFileReaderFactory implements FileReaderFactory<String> {
private static final Logger LOG = LoggerFactory.getLogger(CSVFileReaderFactory.class);
private final int bufferSize;
private final String inputFileEncoding;
private final char openQuoteChar;
private final char closeQuoteChar;
private final char escapeChar;
private final int maximumRecordSize;
private CompressionCodecFactory compressionCodecFactory;
/**
* Creates a new {@code CSVFileReaderFactory} instance with default
* configuration
*/
CSVFileReaderFactory() {
this(CSVLineReader.DEFAULT_BUFFER_SIZE, CSVLineReader.DEFAULT_INPUT_FILE_ENCODING,
CSVLineReader.DEFAULT_QUOTE_CHARACTER, CSVLineReader.DEFAULT_QUOTE_CHARACTER,
CSVLineReader.DEFAULT_ESCAPE_CHARACTER, CSVLineReader.DEFAULT_MAXIMUM_RECORD_SIZE);
}
/**
* Creates a new {@code CSVFileReaderFactory} instance with custom
* configuration
*
* @param bufferSize
* The size of the buffer to be used in the underlying
* {@code CSVLineReader}
* @param inputFileEncoding
*          The encoding of the input file to be read by the underlying
* {@code CSVLineReader}
* @param openQuoteChar
*          The character used to open quoted blocks in the underlying
*          {@code CSVLineReader}
* @param closeQuoteChar
*          The character used to close quoted blocks in the underlying
*          {@code CSVLineReader}
* @param escapeChar
* The character representing the escape character to be used in the
* underlying {@code CSVLineReader}
* @param maximumRecordSize
* The maximum acceptable size of one CSV record. Beyond this limit,
* {@code CSVLineReader} will stop parsing and an exception will be
* thrown.
*/
CSVFileReaderFactory(final int bufferSize, final String inputFileEncoding, final char openQuoteChar,
final char closeQuoteChar, final char escapeChar, final int maximumRecordSize) {
this.bufferSize = bufferSize;
this.inputFileEncoding = inputFileEncoding;
this.openQuoteChar = openQuoteChar;
this.closeQuoteChar = closeQuoteChar;
this.escapeChar = escapeChar;
this.maximumRecordSize = maximumRecordSize;
}
@Override
public Iterator<String> read(final FileSystem fs, final Path path) {
InputStream is;
try {
CompressionCodec codec = getCompressionCodec(path, fs.getConf());
is = codec == null ? fs.open(path) : codec.createInputStream(fs.open(path));
return new CSVRecordIterator(is, bufferSize, inputFileEncoding, openQuoteChar, closeQuoteChar, escapeChar,
maximumRecordSize);
} catch (final IOException e) {
LOG.info("Could not read path: {}", path, e);
return Iterators.emptyIterator();
}
}
private CompressionCodec getCompressionCodec(Path path, Configuration configuration){
if(compressionCodecFactory == null){
compressionCodecFactory = new CompressionCodecFactory(configuration);
}
return compressionCodecFactory.getCodec(path);
}
}
| 2,858 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/text/csv/CSVReadableData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.text.csv;
import java.util.List;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.impl.ReadableDataImpl;
import org.apache.hadoop.fs.Path;
public class CSVReadableData extends ReadableDataImpl<String> {
private final int bufferSize;
private final String inputFileEncoding;
private final char openQuoteChar;
private final char closeQuoteChar;
private final char escapeChar;
private final int maximumRecordSize;
/**
* Creates an instance of {@code CSVReadableData} with default configuration
*
* @param paths
* The paths of the files to be read
*/
protected CSVReadableData(final List<Path> paths) {
this(paths, CSVLineReader.DEFAULT_BUFFER_SIZE, CSVLineReader.DEFAULT_INPUT_FILE_ENCODING,
CSVLineReader.DEFAULT_QUOTE_CHARACTER, CSVLineReader.DEFAULT_QUOTE_CHARACTER,
CSVLineReader.DEFAULT_ESCAPE_CHARACTER, CSVLineReader.DEFAULT_MAXIMUM_RECORD_SIZE);
}
/**
* Creates an instance of {@code CSVReadableData} with specified configuration
*
* @param paths
* a list of input file paths
* @param bufferSize
* the size of the buffer to use while parsing through the input file
* @param inputFileEncoding
* the encoding for the input file
* @param openQuoteChar
* the character to use to open quote blocks
* @param closeQuoteChar
* the character to use to close quote blocks
* @param escapeChar
* the character to use for escaping control characters and quotes
* @param maximumRecordSize
* The maximum acceptable size of one CSV record. Beyond this limit,
* {@code CSVLineReader} will stop parsing and an exception will be
* thrown.
*/
protected CSVReadableData(final List<Path> paths, final int bufferSize, final String inputFileEncoding,
final char openQuoteChar, final char closeQuoteChar, final char escapeChar, final int maximumRecordSize) {
super(paths);
this.bufferSize = bufferSize;
this.inputFileEncoding = inputFileEncoding;
this.openQuoteChar = openQuoteChar;
this.closeQuoteChar = closeQuoteChar;
this.escapeChar = escapeChar;
this.maximumRecordSize = maximumRecordSize;
}
@Override
protected FileReaderFactory<String> getFileReaderFactory() {
return new CSVFileReaderFactory(bufferSize, inputFileEncoding, openQuoteChar, closeQuoteChar, escapeChar,
maximumRecordSize);
}
}
| 2,859 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/parquet/AvroParquetConverter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.parquet;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.avro.AvroType;
class AvroParquetConverter<T> implements Converter<Void, T, T, Iterable<T>> {
private AvroType<T> ptype;
public AvroParquetConverter(AvroType<T> ptype) {
this.ptype = ptype;
}
@Override
public T convertInput(Void key, T value) {
return value;
}
@Override
public Iterable<T> convertIterableInput(Void key, Iterable<T> value) {
return value;
}
@Override
public Void outputKey(T value) {
return null;
}
@Override
public T outputValue(T value) {
return value;
}
@Override
public Class<Void> getKeyClass() {
return Void.class;
}
@Override
public Class<T> getValueClass() {
return ptype.getTypeClass();
}
@Override
public boolean applyPTypeTransforms() {
return true;
}
}
| 2,860 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/parquet/AvroParquetReadableData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.parquet;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.impl.ReadableDataImpl;
import org.apache.crunch.types.avro.AvroType;
import org.apache.hadoop.fs.Path;
import java.util.List;
public class AvroParquetReadableData<T> extends ReadableDataImpl<T> {
private final AvroType<T> avroType;
public AvroParquetReadableData(List<Path> paths, AvroType<T> avroType) {
super(paths);
this.avroType = avroType;
}
@Override
protected FileReaderFactory<T> getFileReaderFactory() {
return new AvroParquetFileReaderFactory<T>(avroType);
}
}
| 2,861 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/parquet/AvroParquetFileReaderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.parquet;
import com.google.common.collect.UnmodifiableIterator;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import org.apache.avro.generic.IndexedRecord;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.io.FileReaderFactory;
import org.apache.crunch.io.impl.AutoClosingIterator;
import org.apache.crunch.types.avro.AvroType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.avro.AvroReadSupport;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.schema.MessageType;
class AvroParquetFileReaderFactory<T> implements FileReaderFactory<T> {
private AvroType<T> avroType;
public AvroParquetFileReaderFactory(AvroType<T> avroType) {
this.avroType = avroType;
}
@Override
@SuppressWarnings("unchecked")
public Iterator<T> read(FileSystem fs, Path path) {
Path p = fs.makeQualified(path);
final ParquetReader reader;
try {
reader = new ParquetReader(p, new CrunchAvroReadSupport(avroType));
} catch (IOException e) {
throw new CrunchRuntimeException(e);
}
return new AutoClosingIterator<T>(reader, new UnmodifiableIterator<T>() {
private T next;
@Override
public boolean hasNext() {
if (next != null) {
return true;
}
try {
next = (T) reader.read();
} catch (IOException e) {
throw new CrunchRuntimeException(e);
}
return next != null;
}
@Override
public T next() {
if (hasNext()) {
T ret = next;
next = null;
return ret;
}
throw new NoSuchElementException();
}
});
}
static class CrunchAvroReadSupport<T extends IndexedRecord> extends AvroReadSupport<T> {
private AvroType<T> avroType;
public CrunchAvroReadSupport(AvroType<T> avroType) {
this.avroType = avroType;
}
@Override
public ReadContext init(Configuration configuration, Map<String, String> keyValueMetaData, MessageType fileSchema) {
if (avroType != null) {
setRequestedProjection(configuration, avroType.getSchema());
}
return super.init(configuration, keyValueMetaData, fileSchema);
}
}
}
| 2,862 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/parquet/AvroParquetFileSourceTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.parquet;
import org.apache.avro.generic.IndexedRecord;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.ReadableSourcePathTargetImpl;
import org.apache.crunch.types.avro.AvroType;
import org.apache.hadoop.fs.Path;
public class AvroParquetFileSourceTarget<T extends IndexedRecord> extends ReadableSourcePathTargetImpl<T> {
public AvroParquetFileSourceTarget(Path path, AvroType<T> atype) {
this(path, atype, SequentialFileNamingScheme.getInstance());
}
public AvroParquetFileSourceTarget(Path path, AvroType<T> atype, FileNamingScheme fileNamingScheme) {
super(new AvroParquetFileSource<T>(path, atype), new AvroParquetFileTarget(path),
fileNamingScheme);
}
@Override
public String toString() {
return target.toString();
}
}
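/*
 * A standalone, illustrative sketch (not part of the file above) of reading Parquet data
 * through this source target in a Crunch pipeline; MyRecord is a hypothetical generated
 * Avro SpecificRecord class and the input path is an assumption.
 */
import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.io.parquet.AvroParquetFileSourceTarget;
import org.apache.crunch.types.avro.Avros;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class ParquetReadSketch {
  public static PCollection<MyRecord> readRecords() {
    Pipeline pipeline = new MRPipeline(ParquetReadSketch.class, new Configuration());
    return pipeline.read(new AvroParquetFileSourceTarget<MyRecord>(
        new Path("/data/parquet/in"), Avros.specifics(MyRecord.class)));
  }
}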
| 2,863 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/parquet/AvroParquetFileSource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.parquet;
import java.io.IOException;
import java.util.List;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificRecord;
import org.apache.crunch.ReadableData;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.io.impl.FileSourceImpl;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.avro.Avros;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.parquet.avro.AvroParquetInputFormat;
import org.apache.parquet.avro.AvroReadSupport;
import org.apache.parquet.filter.UnboundRecordFilter;
import org.apache.parquet.hadoop.ParquetInputSplit;
public class AvroParquetFileSource<T extends IndexedRecord> extends FileSourceImpl<T> implements ReadableSource<T> {
private static final String AVRO_READ_SCHEMA = "parquet.avro.read.schema";
private final String projSchema;
private static <S> FormatBundle<AvroParquetInputFormat> getBundle(
AvroType<S> ptype,
Schema projSchema,
Class<? extends UnboundRecordFilter> filterClass) {
FormatBundle<AvroParquetInputFormat> fb = FormatBundle.forInput(AvroParquetInputFormat.class)
.set(AVRO_READ_SCHEMA, ptype.getSchema().toString());
if (projSchema != null) {
fb.set(AvroReadSupport.AVRO_REQUESTED_PROJECTION, projSchema.toString());
}
if (filterClass != null) {
fb.set("parquet.read.filter", filterClass.getName());
}
if (!FileSplit.class.isAssignableFrom(ParquetInputSplit.class)) {
// Older ParquetRecordReader expects ParquetInputSplits, not FileSplits, so it
// doesn't work with CombineFileInputFormat
fb.set(RuntimeParameters.DISABLE_COMBINE_FILE, "true");
}
return fb;
}
public AvroParquetFileSource(Path path, AvroType<T> ptype) {
this(ImmutableList.of(path), ptype);
}
/**
* Read the Parquet data at the given path using the schema of the {@code AvroType}, and projecting
* a subset of the columns from this schema via the separately given {@code Schema}.
*
* @param path the path of the file to read
* @param ptype the AvroType to use in reading the file
* @param projSchema the subset of columns from the input schema to read
*/
public AvroParquetFileSource(Path path, AvroType<T> ptype, Schema projSchema) {
this(ImmutableList.of(path), ptype, projSchema);
}
public AvroParquetFileSource(List<Path> paths, AvroType<T> ptype) {
this(paths, ptype, null, null);
}
/**
* Read the Parquet data at the given paths using the schema of the {@code AvroType}, and projecting
* a subset of the columns from this schema via the separately given {@code Schema}.
*
* @param paths the list of paths to read
* @param ptype the AvroType to use in reading the file
* @param projSchema the subset of columns from the input schema to read
*/
public AvroParquetFileSource(List<Path> paths, AvroType<T> ptype, Schema projSchema) {
this(paths, ptype, projSchema, null);
}
public AvroParquetFileSource(List<Path> paths, AvroType<T> ptype,
Class<? extends UnboundRecordFilter> filterClass) {
this(paths, ptype, null, filterClass);
}
/**
* Read the Parquet data at the given paths using the schema of the {@code AvroType}, projecting
* a subset of the columns from this schema via the separately given {@code Schema}, and using
* the filter class to select the input records.
*
* @param paths the list of paths to read
* @param ptype the AvroType to use in reading the file
* @param projSchema the subset of columns from the input schema to read
* @param filterClass the {@code UnboundRecordFilter} class used to filter the input records
*/
public AvroParquetFileSource(List<Path> paths, AvroType<T> ptype, Schema projSchema,
Class<? extends UnboundRecordFilter> filterClass) {
super(paths, ptype, getBundle(ptype, projSchema, filterClass));
this.projSchema = projSchema == null ? null : projSchema.toString();
}
public Schema getProjectedSchema() {
return (new Schema.Parser()).parse(projSchema);
}
@Override
public Iterable<T> read(Configuration conf) throws IOException {
return read(conf, getFileReaderFactory((AvroType<T>) ptype));
}
@Override
public ReadableData<T> asReadable() {
return new AvroParquetReadableData<T>(paths, (AvroType<T>) ptype);
}
protected AvroParquetFileReaderFactory<T> getFileReaderFactory(AvroType<T> ptype){
return new AvroParquetFileReaderFactory<T>(ptype);
}
@Override
public Converter<?, ?, ?, ?> getConverter() {
return new AvroParquetConverter<T>((AvroType<T>) ptype);
}
@Override
public String toString() {
return "Parquet(" + pathsAsString() + ((projSchema == null) ? ")" : ") -> " + projSchema);
}
public static <T extends SpecificRecord> Builder<T> builder(Class<T> clazz) {
return new Builder<T>(Preconditions.checkNotNull(clazz));
}
public static Builder<GenericRecord> builder(Schema schema) {
Preconditions.checkNotNull(schema);
Preconditions.checkArgument(Schema.Type.RECORD.equals(schema.getType()));
return new Builder(schema);
}
/**
* Helper class for constructing an {@code AvroParquetFileSource} that only reads a subset of the
* fields defined in an Avro schema.
*/
public static class Builder<T extends IndexedRecord> {
private Class<T> clazz;
private Schema baseSchema;
private List<Schema.Field> fields = Lists.newArrayList();
private Class<? extends UnboundRecordFilter> filterClass;
private Builder(Class<T> clazz) {
this.clazz = clazz;
this.baseSchema = ReflectionUtils.newInstance(clazz, null).getSchema();
}
private Builder(Schema baseSchema) {
this.baseSchema = baseSchema;
}
public Builder includeField(String fieldName) {
Schema.Field field = baseSchema.getField(fieldName);
if (field == null) {
throw new IllegalArgumentException("No field " + fieldName + " in schema: " + baseSchema.getName());
}
fields.add(new Schema.Field(field.name(), field.schema(), field.doc(), field.defaultVal(), field.order()));
return this;
}
public Builder filterClass(Class<? extends UnboundRecordFilter> filterClass) {
this.filterClass = filterClass;
return this;
}
public AvroParquetFileSource<T> build(Path path) {
return build(ImmutableList.of(path));
}
public AvroParquetFileSource<T> build(List<Path> paths) {
AvroType at = clazz == null ? Avros.generics(baseSchema) : Avros.specifics((Class) clazz);
if (fields.isEmpty()) {
return new AvroParquetFileSource<T>(paths, at, filterClass);
} else {
Schema projected = Schema.createRecord(
baseSchema.getName(),
baseSchema.getDoc(),
baseSchema.getNamespace(),
baseSchema.isError());
projected.setFields(fields);
return new AvroParquetFileSource<T>(paths, at, projected, filterClass);
}
}
}
}
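/*
 * A standalone, illustrative sketch (not part of the file above) of the Builder defined
 * here, reading only a projection of two columns; MyRecord, the field names, and the
 * path are hypothetical.
 */
import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.io.parquet.AvroParquetFileSource;
import org.apache.hadoop.fs.Path;

public class ProjectedParquetSketch {
  public static PCollection<MyRecord> readProjected(Pipeline pipeline) {
    // Only the "id" and "name" columns are materialized; the remaining columns of
    // MyRecord's schema are skipped by Parquet at the storage layer.
    AvroParquetFileSource<MyRecord> source = AvroParquetFileSource
        .builder(MyRecord.class)
        .includeField("id")
        .includeField("name")
        .build(new Path("/data/parquet/in"));
    return pipeline.read(source);
  }
}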
| 2,864 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/parquet/AvroParquetFileTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.parquet;
import com.google.common.collect.Maps;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.Target;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.OutputHandler;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.impl.FileTargetImpl;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.avro.AvroType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.parquet.avro.AvroWriteSupport;
import org.apache.parquet.hadoop.ParquetOutputFormat;
import java.util.Map;
public class AvroParquetFileTarget extends FileTargetImpl {
static final String PARQUET_AVRO_SCHEMA_PARAMETER = "parquet.avro.schema";
private Map<String, String> extraConf = Maps.newHashMap();
public AvroParquetFileTarget(String path) {
this(new Path(path));
}
public AvroParquetFileTarget(Path path) {
this(path, SequentialFileNamingScheme.getInstance());
}
public AvroParquetFileTarget(Path path, FileNamingScheme fileNamingScheme) {
super(path, CrunchAvroParquetOutputFormat.class, fileNamingScheme);
}
@Override
public Target outputConf(String key, String value) {
extraConf.put(key, value);
return this;
}
@Override
public String toString() {
return "Parquet(" + path.toString() + ")";
}
@Override
public boolean accept(OutputHandler handler, PType<?> ptype) {
if (!(ptype instanceof AvroType)) {
return false;
}
handler.configure(this, ptype);
return true;
}
@Override
@SuppressWarnings("unchecked")
public Converter<?, ?, ?, ?> getConverter(PType<?> ptype) {
return new AvroParquetConverter<Object>((AvroType<Object>) ptype);
}
@Override
public void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name) {
AvroType<?> atype = (AvroType<?>) ptype;
String schemaParam;
if (name == null) {
schemaParam = PARQUET_AVRO_SCHEMA_PARAMETER;
} else {
schemaParam = PARQUET_AVRO_SCHEMA_PARAMETER + "." + name;
}
FormatBundle fb = FormatBundle.forOutput(CrunchAvroParquetOutputFormat.class);
for (Map.Entry<String, String> e : extraConf.entrySet()) {
fb.set(e.getKey(), e.getValue());
}
fb.set(schemaParam, atype.getSchema().toString());
configureForMapReduce(job, Void.class, atype.getTypeClass(), fb, outputPath, name);
}
@Override
public <T> SourceTarget<T> asSourceTarget(PType<T> ptype) {
if (ptype instanceof AvroType && IndexedRecord.class.isAssignableFrom(((AvroType) ptype).getTypeClass())) {
return new AvroParquetFileSourceTarget(path, (AvroType<T>) ptype).fileSystem(getFileSystem());
}
return null;
}
public static class CrunchAvroWriteSupport extends AvroWriteSupport {
@Override
public WriteContext init(Configuration conf) {
String outputName = conf.get("crunch.namedoutput");
if (outputName != null && !outputName.isEmpty()) {
String schema = conf.get(PARQUET_AVRO_SCHEMA_PARAMETER + "." + outputName);
setSchema(conf, new Schema.Parser().parse(schema));
}
return super.init(conf);
}
}
public static class CrunchAvroParquetOutputFormat extends ParquetOutputFormat<IndexedRecord> {
public CrunchAvroParquetOutputFormat() {
super(new CrunchAvroWriteSupport());
}
}
}
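/*
 * A standalone, illustrative sketch (not part of the file above) of writing a
 * PCollection to Parquet with this target; MyRecord and the output path are hypothetical,
 * and the collection is assumed to use an Avro PType, since accept() rejects anything else.
 */
import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.io.parquet.AvroParquetFileTarget;
import org.apache.hadoop.fs.Path;

public class ParquetWriteSketch {
  public static void writeRecords(Pipeline pipeline, PCollection<MyRecord> records) {
    pipeline.write(records, new AvroParquetFileTarget(new Path("/data/parquet/out")));
    pipeline.done(); // run the pipeline and block until the write completes
  }
}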
| 2,865 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/parquet/AvroParquetPathPerKeyTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.parquet;
import org.apache.crunch.io.FileNamingScheme;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.io.SequentialFileNamingScheme;
import org.apache.crunch.io.avro.AvroPathPerKeyTarget;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.avro.AvroType;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
/**
* A {@link org.apache.crunch.Target} that wraps {@link AvroParquetPathPerKeyOutputFormat} to allow one file
* per key to be written as the output of a {@code PTable<String, T>}.
*
* <p>Note the restrictions that apply to the {@code AvroParquetPathPerKeyOutputFormat}; in particular, it's a good
* idea to write out all of the records for the same key together within each partition of the data.
*/
public class AvroParquetPathPerKeyTarget extends AvroPathPerKeyTarget {
public AvroParquetPathPerKeyTarget(String path) {
this(new Path(path));
}
public AvroParquetPathPerKeyTarget(Path path) {
this(path, SequentialFileNamingScheme.getInstance());
}
public AvroParquetPathPerKeyTarget(Path path, FileNamingScheme fileNamingScheme) {
super(path, AvroParquetPathPerKeyOutputFormat.class, fileNamingScheme);
}
@Override
public void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name) {
AvroType<?> atype = (AvroType) ((PTableType) ptype).getValueType();
String schemaParam;
if (name == null) {
schemaParam = AvroParquetFileTarget.PARQUET_AVRO_SCHEMA_PARAMETER;
} else {
schemaParam = AvroParquetFileTarget.PARQUET_AVRO_SCHEMA_PARAMETER + "." + name;
}
FormatBundle fb = FormatBundle.forOutput(AvroParquetPathPerKeyOutputFormat.class);
fb.set(schemaParam, atype.getSchema().toString());
configureForMapReduce(job, Void.class, atype.getTypeClass(), fb, outputPath, name);
}
@Override
public String toString() {
return "AvroParquetPathPerKey(" + path + ")";
}
}
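/*
 * A standalone, illustrative sketch (not part of the file above) of writing one Parquet
 * directory per key; MyRecord, its getCategory() accessor, and the output path are
 * hypothetical. Grouping by key first keeps all records for a key together in each
 * partition, which matches the note above about not re-opening writers frequently.
 */
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pipeline;
import org.apache.crunch.io.parquet.AvroParquetPathPerKeyTarget;
import org.apache.crunch.types.avro.Avros;
import org.apache.hadoop.fs.Path;

public class PathPerKeySketch {
  public static void writeByCategory(Pipeline pipeline, PCollection<MyRecord> records) {
    PTable<String, MyRecord> keyed = records.by(new MapFn<MyRecord, String>() {
      @Override
      public String map(MyRecord rec) {
        return rec.getCategory().toString();
      }
    }, Avros.strings());
    // Group so that all values for a key arrive together, then write one sub-directory per key.
    pipeline.write(keyed.groupByKey().ungroup(),
        new AvroParquetPathPerKeyTarget(new Path("/data/parquet/by_category")));
  }
}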
| 2,866 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/io/parquet/AvroParquetPathPerKeyOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.io.parquet;
import java.io.IOException;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.avro.mapred.Pair;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
* A {@link FileOutputFormat} that takes in a {@link Utf8} and an Avro record and writes the Avro records to
* a sub-directory of the output path whose name is equal to the string-form of the {@code Utf8}.
*
* This {@code OutputFormat} only keeps one {@code RecordWriter} open at a time, so it's a very good idea to write
* out all of the records for the same key at the same time within each partition so as not to be frequently opening
* and closing files.
*/
public class AvroParquetPathPerKeyOutputFormat<T> extends FileOutputFormat<AvroWrapper<Pair<Utf8, T>>, NullWritable> {
@Override
public RecordWriter<AvroWrapper<Pair<Utf8, T>>, NullWritable> getRecordWriter(TaskAttemptContext taskAttemptContext)
throws IOException, InterruptedException {
Configuration conf = taskAttemptContext.getConfiguration();
Path basePath = new Path(getOutputPath(taskAttemptContext), conf.get("mapreduce.output.basename", "out0"));
return new AvroParquetFilePerKeyRecordWriter<T>(basePath,
getUniqueFile(taskAttemptContext, "part", ".parquet"), conf);
}
private class AvroParquetFilePerKeyRecordWriter<T> extends RecordWriter<AvroWrapper<Pair<Utf8, T>>, NullWritable> {
private final Path basePath;
private final String uniqueFileName;
private final Configuration conf;
private String currentKey;
private RecordWriter<Void, T> currentWriter;
public AvroParquetFilePerKeyRecordWriter(Path basePath, String uniqueFileName, Configuration conf) {
this.basePath = basePath;
this.uniqueFileName = uniqueFileName;
this.conf = conf;
}
@Override
@SuppressWarnings("unchecked")
public void write(AvroWrapper<Pair<Utf8, T>> record, NullWritable n) throws IOException, InterruptedException {
String key = record.datum().key().toString();
if (!key.equals(currentKey)) {
if (currentWriter != null) {
currentWriter.close(null); // TaskAttemptContext not used for close
}
currentKey = key;
Path dir = new Path(basePath, key);
FileSystem fs = dir.getFileSystem(conf);
if (!fs.exists(dir)) {
fs.mkdirs(dir);
}
currentWriter = (RecordWriter<Void, T>)
new AvroParquetFileTarget.CrunchAvroParquetOutputFormat().getRecordWriter(conf,
new Path(dir, uniqueFileName),
AvroParquetFileTarget.CrunchAvroParquetOutputFormat.getCompression(conf));
}
currentWriter.write(null, record.datum().value());
}
@Override
public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
if (currentWriter != null) {
currentWriter.close(taskAttemptContext);
currentKey = null;
currentWriter = null;
}
}
}
}
| 2,867 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/hadoop/mapreduce/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/hadoop/mapreduce/lib/jobcontrol/CrunchControlledJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.hadoop.mapreduce.lib.jobcontrol;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import org.apache.crunch.Target;
import org.apache.crunch.impl.mr.MRJob;
import org.apache.crunch.impl.mr.plan.JobNameBuilder;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Function;
import com.google.common.base.Objects;
import com.google.common.collect.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class encapsulates a MapReduce job and its dependencies. It monitors the
* states of the jobs it depends on and updates its own state accordingly. A job
* starts in the WAITING state. If it has no depending jobs, or all of its
* depending jobs are in the SUCCESS state, then the job state becomes READY. If
* any depending job fails, this job fails too. When in the READY state, the job
* can be submitted to Hadoop for execution, moving it into the RUNNING state.
* From RUNNING, the job transitions to SUCCESS or FAILED, depending on the
* outcome of the job execution.
*/
public class CrunchControlledJob implements MRJob {
public static interface Hook {
public void run(MRJob job) throws IOException;
}
private static final Logger LOG = LoggerFactory.getLogger(CrunchControlledJob.class);
private final int jobID;
private final Job job; // mapreduce job to be executed.
private final JobNameBuilder jobNameBuilder;
private final Set<Target> allTargets;
// the jobs the current job depends on
private final List<CrunchControlledJob> dependingJobs;
private final Hook prepareHook;
private final Hook completionHook;
private State state;
// some info for human consumption, e.g. the reason why the job failed
private String message;
private String lastKnownProgress;
private Counters counters;
private long preHookStartTimeMsec;
private long jobStartTimeMsec;
private long jobEndTimeMsec;
private long postHookEndTimeMsec;
/**
* Construct a job.
*
* @param jobID
* an ID used to match with its {@link org.apache.crunch.impl.mr.plan.JobPrototype}.
* @param job
* a mapreduce job to be executed.
* @param jobNameBuilder
* code for generating a name for the executed MapReduce job.
* @param allTargets
* the set of Targets that will exist after this job completes successfully.
* @param prepareHook
* a piece of code that will run before this job is submitted.
* @param completionHook
* a piece of code that will run after this job gets completed.
*/
public CrunchControlledJob(int jobID, Job job, JobNameBuilder jobNameBuilder, Set<Target> allTargets,
Hook prepareHook, Hook completionHook) {
this.jobID = jobID;
this.job = job;
this.jobNameBuilder = jobNameBuilder;
this.allTargets = allTargets;
this.dependingJobs = Lists.newArrayList();
this.prepareHook = prepareHook;
this.completionHook = completionHook;
this.state = State.WAITING;
this.message = "just initialized";
}
@Override
public String toString() {
StringBuffer sb = new StringBuffer();
sb.append("job name:\t").append(this.job.getJobName()).append("\n");
sb.append("job id:\t").append(this.jobID).append("\n");
sb.append("job state:\t").append(this.state).append("\n");
sb.append("job mapred id:\t").append(this.job.getJobID()).append("\n");
sb.append("job message:\t").append(this.message).append("\n");
if (this.dependingJobs == null || this.dependingJobs.size() == 0) {
sb.append("job has no depending job:\t").append("\n");
} else {
sb.append("job has ").append(this.dependingJobs.size())
.append(" depending jobs:\n");
for (int i = 0; i < this.dependingJobs.size(); i++) {
sb.append("\t depending job ").append(i).append(":\t");
sb.append((this.dependingJobs.get(i)).getJobName()).append("\n");
}
}
return sb.toString();
}
/**
* @return the job name of this job
*/
public String getJobName() {
return job.getJobName();
}
public void setJobSequence(int jobSequence) {
this.job.setJobName(jobNameBuilder.jobSequence(jobSequence).build());
}
/**
* @return the job ID of this job
*/
public int getJobID() {
return this.jobID;
}
/**
* @return the mapred ID of this job as assigned by the mapred framework.
*/
public JobID getMapredJobID() {
return this.job.getJobID();
}
public long getStartTimeMsec() {
return preHookStartTimeMsec;
}
public long getJobStartTimeMsec() {
return jobStartTimeMsec;
}
public long getJobEndTimeMsec() {
return jobEndTimeMsec;
}
public long getEndTimeMsec() {
return postHookEndTimeMsec;
}
public Counters getCounters() {
return counters;
}
public Set<Target> getAllTargets() { return allTargets; }
@Override
public synchronized Job getJob() {
return this.job;
}
@Override
public List<MRJob> getDependentJobs() {
return Lists.transform(dependingJobs, new Function<CrunchControlledJob, MRJob>() {
@Override
public MRJob apply(CrunchControlledJob job) {
return job;
}
});
}
@Override
public synchronized State getJobState() {
return this.state;
}
/**
* Set the state for this job.
*
* @param state
* the new state for this job.
*/
protected synchronized void setJobState(State state) {
this.state = state;
}
/**
* @return the message of this job
*/
public synchronized String getMessage() {
return this.message;
}
/**
* Set the message for this job.
*
* @param message
* the message for this job.
*/
public synchronized void setMessage(String message) {
this.message = message;
}
/**
* Add a job to this jobs' dependency list. Dependent jobs can only be added
* while a Job is waiting to run, not during or afterwards.
*
* @param dependingJob
* Job that this Job depends on.
* @return <tt>true</tt> if the Job was added.
*/
public synchronized boolean addDependingJob(CrunchControlledJob dependingJob) {
if (this.state == State.WAITING) { // only allowed to add jobs when waiting
return this.dependingJobs.add(dependingJob);
} else {
return false;
}
}
/**
* @return true if this job is in a complete state
*/
public synchronized boolean isCompleted() {
return this.state == State.FAILED || this.state == State.DEPENDENT_FAILED
|| this.state == State.SUCCESS;
}
/**
* @return true if this job is in READY state
*/
public synchronized boolean isReady() {
return this.state == State.READY;
}
public void killJob() throws IOException, InterruptedException {
job.killJob();
}
/**
* Check the state of this running job. The state may remain the same, become
* SUCCEEDED or FAILED.
*/
private void checkRunningState() throws IOException, InterruptedException {
try {
if (job.isComplete()) {
this.jobEndTimeMsec = System.currentTimeMillis();
this.counters = job.getCounters();
if (job.isSuccessful()) {
this.state = State.SUCCESS;
} else {
this.state = State.FAILED;
this.message = "Job failed!";
}
} else {
// still running
if (job.getConfiguration().getBoolean(RuntimeParameters.LOG_JOB_PROGRESS, false)) {
logJobProgress();
}
}
} catch (IOException ioe) {
this.state = State.FAILED;
this.message = StringUtils.stringifyException(ioe);
try {
if (job != null) {
job.killJob();
}
} catch (IOException e) {
}
}
if (isCompleted()) {
completionHook.run(this);
this.postHookEndTimeMsec = System.currentTimeMillis();
}
}
/**
* Check and update the state of this job. The state changes depending on its
* current state and the states of the depending jobs.
*/
synchronized State checkState() throws IOException, InterruptedException {
if (this.state == State.RUNNING) {
checkRunningState();
}
if (this.state != State.WAITING) {
return this.state;
}
if (this.dependingJobs == null || this.dependingJobs.size() == 0) {
this.state = State.READY;
return this.state;
}
CrunchControlledJob pred = null;
int n = this.dependingJobs.size();
for (int i = 0; i < n; i++) {
pred = this.dependingJobs.get(i);
State s = pred.checkState();
if (s == State.WAITING || s == State.READY || s == State.RUNNING) {
break; // a pred is still not completed, continue in WAITING
// state
}
if (s == State.FAILED || s == State.DEPENDENT_FAILED) {
this.state = State.DEPENDENT_FAILED;
this.message = "Depending job with jobID " + pred.getJobID() + " failed.";
break;
}
// pred must be in success state
if (i == n - 1) {
this.state = State.READY;
}
}
return this.state;
}
/**
* Submit this job to mapred. The state becomes RUNNING if submission is
* successful, FAILED otherwise.
*/
protected synchronized void submit() {
try {
this.preHookStartTimeMsec = System.currentTimeMillis();
prepareHook.run(this);
this.jobStartTimeMsec = System.currentTimeMillis();
job.submit();
this.state = State.RUNNING;
LOG.info("Running job \"{}\"", getJobName());
LOG.info("Job status available at: {}", job.getTrackingURL());
} catch (Exception ioe) {
this.state = State.FAILED;
this.message = StringUtils.stringifyException(ioe);
LOG.info("Error occurred starting job \"{}\":", getJobName());
LOG.info(getMessage());
}
}
private void logJobProgress() throws IOException, InterruptedException {
String progress = String.format("map %.0f%% reduce %.0f%%",
100.0 * job.mapProgress(), 100.0 * job.reduceProgress());
if (!Objects.equal(lastKnownProgress, progress)) {
LOG.info("{} progress: {}", job.getJobName(), progress);
lastKnownProgress = progress;
}
}
}
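/*
 * A standalone, illustrative sketch (not part of the file above) of wiring a dependency
 * between two controlled jobs; both jobs are assumed to have been constructed elsewhere
 * with the constructor documented above.
 */
import org.apache.crunch.hadoop.mapreduce.lib.jobcontrol.CrunchControlledJob;

public class JobWiringSketch {
  public static void wireDependency(CrunchControlledJob upstream, CrunchControlledJob downstream) {
    // downstream stays in WAITING until upstream reaches SUCCESS; if upstream fails,
    // downstream moves to DEPENDENT_FAILED instead of READY.
    if (!downstream.addDependingJob(upstream)) {
      throw new IllegalStateException("Dependencies can only be added while the job is in the WAITING state");
    }
  }
}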
| 2,868 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/hadoop/mapreduce/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/hadoop/mapreduce/lib/jobcontrol/TaskInputOutputContextFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.hadoop.mapreduce.lib.jobcontrol;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.StatusReporter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Constructor;
public class TaskInputOutputContextFactory {
private static final Logger LOG = LoggerFactory.getLogger(TaskInputOutputContextFactory.class);
private static final TaskInputOutputContextFactory INSTANCE = new TaskInputOutputContextFactory();
public static TaskInputOutputContext create(
Configuration conf,
TaskAttemptID taskAttemptId,
StatusReporter reporter) {
return INSTANCE.createInternal(conf, taskAttemptId, reporter);
}
private Constructor<? extends TaskInputOutputContext> taskIOConstructor;
private int arity;
private TaskInputOutputContextFactory() {
String ic = TaskInputOutputContext.class.isInterface() ?
"org.apache.hadoop.mapreduce.task.MapContextImpl" :
"org.apache.hadoop.mapreduce.MapContext";
try {
Class<? extends TaskInputOutputContext> implClass = (Class<? extends TaskInputOutputContext>) Class.forName(ic);
this.taskIOConstructor = (Constructor<? extends TaskInputOutputContext>) implClass.getConstructor(
Configuration.class, TaskAttemptID.class, RecordReader.class, RecordWriter.class,
OutputCommitter.class, StatusReporter.class, InputSplit.class);
this.arity = 7;
} catch (Exception e) {
LOG.error("Could not access TaskInputOutputContext constructor, exiting", e);
}
}
private TaskInputOutputContext createInternal(Configuration conf, TaskAttemptID taskAttemptId,
StatusReporter reporter) {
Object[] args = new Object[arity];
args[0] = conf;
args[1] = taskAttemptId;
args[5] = reporter;
try {
return taskIOConstructor.newInstance(args);
} catch (Exception e) {
LOG.error("Could not construct a TaskInputOutputContext instance", e);
throw new RuntimeException(e);
}
}
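  // Hedged usage sketch, not part of the original factory: obtaining a context
  // outside of a real MapReduce task, e.g. for running DoFns in-process. The
  // empty TaskAttemptID and the null StatusReporter are placeholder assumptions.
  static TaskInputOutputContext createForLocalUseExample(Configuration conf) {
    return create(conf, new TaskAttemptID(), null);
  }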
}
| 2,869 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/hadoop/mapreduce/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/hadoop/mapreduce/lib/jobcontrol/CrunchJobControl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.hadoop.mapreduce.lib.jobcontrol;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.crunch.PipelineCallable;
import org.apache.crunch.Target;
import org.apache.crunch.impl.mr.MRJob.State;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This class encapsulates a set of MapReduce jobs and their dependencies.
*
* It tracks the states of the jobs by placing them into different tables
* according to their states.
*
* This class provides APIs for the client app to add a job to the group and to
* get the jobs in the group in different states. When a job is added, an ID
* unique to the group is assigned to the job.
*/
public class CrunchJobControl {
private Map<Integer, CrunchControlledJob> waitingJobs;
private Map<Integer, CrunchControlledJob> readyJobs;
private Map<Integer, CrunchControlledJob> runningJobs;
private Map<Integer, CrunchControlledJob> successfulJobs;
private Map<Integer, CrunchControlledJob> failedJobs;
private Map<PipelineCallable<?>, Set<Target>> allPipelineCallables;
private Set<PipelineCallable<?>> activePipelineCallables;
private List<PipelineCallable<?>> failedCallables;
private Logger log = LoggerFactory.getLogger(CrunchJobControl.class);
private final String groupName;
private final int maxRunningJobs;
private int jobSequence = 1;
/**
* Construct a job control for a group of jobs.
*
* @param groupName
* a name identifying this group
*/
public CrunchJobControl(Configuration conf, String groupName,
Map<PipelineCallable<?>, Set<Target>> pipelineCallables) {
this.waitingJobs = new Hashtable<Integer, CrunchControlledJob>();
this.readyJobs = new Hashtable<Integer, CrunchControlledJob>();
this.runningJobs = new Hashtable<Integer, CrunchControlledJob>();
this.successfulJobs = new Hashtable<Integer, CrunchControlledJob>();
this.failedJobs = new Hashtable<Integer, CrunchControlledJob>();
this.groupName = groupName;
this.maxRunningJobs = conf.getInt(RuntimeParameters.MAX_RUNNING_JOBS, 5);
this.allPipelineCallables = pipelineCallables;
this.activePipelineCallables = allPipelineCallables.keySet();
this.failedCallables = Lists.newArrayList();
}
private static List<CrunchControlledJob> toList(Map<Integer, CrunchControlledJob> jobs) {
ArrayList<CrunchControlledJob> retv = new ArrayList<CrunchControlledJob>();
synchronized (jobs) {
for (CrunchControlledJob job : jobs.values()) {
retv.add(job);
}
}
return retv;
}
/**
* @return the jobs in the waiting state
*/
public List<CrunchControlledJob> getWaitingJobList() {
return toList(this.waitingJobs);
}
/**
* @return the jobs in the running state
*/
public List<CrunchControlledJob> getRunningJobList() {
return toList(this.runningJobs);
}
/**
* @return the jobs in the ready state
*/
public List<CrunchControlledJob> getReadyJobsList() {
return toList(this.readyJobs);
}
/**
* @return the jobs in the success state
*/
public List<CrunchControlledJob> getSuccessfulJobList() {
return toList(this.successfulJobs);
}
public List<CrunchControlledJob> getFailedJobList() {
return toList(this.failedJobs);
}
/**
* @return the jobs in all states
*/
public synchronized List<CrunchControlledJob> getAllJobs() {
return ImmutableList.<CrunchControlledJob>builder()
.addAll(waitingJobs.values())
.addAll(readyJobs.values())
.addAll(runningJobs.values())
.addAll(successfulJobs.values())
.addAll(failedJobs.values())
.build();
}
private static void addToQueue(CrunchControlledJob aJob,
Map<Integer, CrunchControlledJob> queue) {
synchronized (queue) {
queue.put(aJob.getJobID(), aJob);
}
}
private void addToQueue(CrunchControlledJob aJob) {
Map<Integer, CrunchControlledJob> queue = getQueue(aJob.getJobState());
addToQueue(aJob, queue);
}
private Map<Integer, CrunchControlledJob> getQueue(State state) {
Map<Integer, CrunchControlledJob> retv;
switch (state) {
case WAITING:
retv = this.waitingJobs;
break;
case READY:
retv = this.readyJobs;
break;
case RUNNING:
retv = this.runningJobs;
break;
case SUCCESS:
retv = this.successfulJobs;
break;
case FAILED:
case DEPENDENT_FAILED:
retv = this.failedJobs;
break;
default:
throw new IllegalArgumentException("Unknown state " + state);
}
return retv;
}
/**
* Add a new job.
*
* @param aJob
* the new job
*/
synchronized public void addJob(CrunchControlledJob aJob) {
aJob.setJobState(State.WAITING);
this.addToQueue(aJob);
}
synchronized private void checkRunningJobs() throws IOException,
InterruptedException {
Map<Integer, CrunchControlledJob> oldJobs = null;
oldJobs = this.runningJobs;
this.runningJobs = new Hashtable<Integer, CrunchControlledJob>();
for (CrunchControlledJob nextJob : oldJobs.values()) {
nextJob.checkState();
this.addToQueue(nextJob);
}
}
synchronized private void checkWaitingJobs() throws IOException,
InterruptedException {
Map<Integer, CrunchControlledJob> oldJobs = null;
oldJobs = this.waitingJobs;
this.waitingJobs = new Hashtable<Integer, CrunchControlledJob>();
for (CrunchControlledJob nextJob : oldJobs.values()) {
nextJob.checkState();
this.addToQueue(nextJob);
}
}
private Set<Target> getUnfinishedTargets() {
Set<Target> unfinished = Sets.newHashSet();
for (CrunchControlledJob job : runningJobs.values()) {
unfinished.addAll(job.getAllTargets());
}
for (CrunchControlledJob job : readyJobs.values()) {
unfinished.addAll(job.getAllTargets());
}
for (CrunchControlledJob job : waitingJobs.values()) {
unfinished.addAll(job.getAllTargets());
}
return unfinished;
}
synchronized private void executeReadySeqDoFns() {
Set<Target> unfinished = getUnfinishedTargets();
Set<PipelineCallable<?>> oldPipelineCallables = activePipelineCallables;
this.activePipelineCallables = Sets.newHashSet();
List<Callable<PipelineCallable.Status>> callablesToRun = Lists.newArrayList();
for (final PipelineCallable<?> pipelineCallable : oldPipelineCallables) {
if (Sets.intersection(allPipelineCallables.get(pipelineCallable), unfinished).isEmpty()) {
if (pipelineCallable.runSingleThreaded()) {
try {
if (pipelineCallable.call() != PipelineCallable.Status.SUCCESS) {
failedCallables.add(pipelineCallable);
}
} catch (Throwable t) {
pipelineCallable.setMessage(t.getLocalizedMessage());
failedCallables.add(pipelineCallable);
}
} else {
callablesToRun.add(pipelineCallable);
}
} else {
// Still need to run this one
activePipelineCallables.add(pipelineCallable);
}
}
ListeningExecutorService es = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
try {
List<Future<PipelineCallable.Status>> res = es.invokeAll(callablesToRun);
for (int i = 0; i < res.size(); i++) {
if (res.get(i).get() != PipelineCallable.Status.SUCCESS) {
failedCallables.add((PipelineCallable) callablesToRun.get(i));
}
}
} catch (Throwable t) {
t.printStackTrace();
failedCallables.addAll((List) callablesToRun);
} finally {
es.shutdownNow();
}
}
synchronized private void startReadyJobs() {
Map<Integer, CrunchControlledJob> oldJobs = null;
oldJobs = this.readyJobs;
this.readyJobs = new Hashtable<Integer, CrunchControlledJob>();
for (CrunchControlledJob nextJob : oldJobs.values()) {
// Limit the number of concurrent running jobs. If we have reached such limit,
// stop submitting new jobs and wait until some running job completes.
if (runningJobs.size() < maxRunningJobs) {
// Submitting Job to Hadoop
nextJob.setJobSequence(jobSequence);
jobSequence++;
nextJob.submit();
}
this.addToQueue(nextJob);
}
}
synchronized public void killAllRunningJobs() {
for (CrunchControlledJob job : runningJobs.values()) {
if (!job.isCompleted()) {
try {
job.killJob();
} catch (Exception e) {
log.error("Exception killing job: " + job.getJobName(), e);
}
}
}
}
synchronized public boolean allFinished() {
return (this.waitingJobs.size() == 0 && this.readyJobs.size() == 0
&& this.runningJobs.size() == 0);
}
synchronized public boolean anyFailures() {
return this.failedJobs.size() > 0 || failedCallables.size() > 0;
}
public List<PipelineCallable<?>> getFailedCallables() {
return failedCallables;
}
/**
   * Checks the states of the running jobs, updates the states of the waiting jobs, and submits the
   * jobs in the ready state (i.e. those whose dependencies have all completed successfully).
*/
public void pollJobStatusAndStartNewOnes() throws IOException, InterruptedException {
checkRunningJobs();
checkWaitingJobs();
executeReadySeqDoFns();
startReadyJobs();
}
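  // Hedged usage sketch, not part of the original class: a minimal driver loop,
  // assuming all jobs have already been registered via addJob(). The one-second
  // poll interval is an illustrative choice.
  void runToCompletionExample() throws IOException, InterruptedException {
    while (!allFinished()) {
      pollJobStatusAndStartNewOnes();
      Thread.sleep(1000);
    }
    if (anyFailures()) {
      log.error("Group {} had {} failed job(s) and {} failed callable(s)",
          groupName, getFailedJobList().size(), getFailedCallables().size());
    }
  }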
}
| 2,870 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Join.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.lib.join.DefaultJoinStrategy;
import org.apache.crunch.lib.join.JoinType;
/**
 * Utilities for joining multiple {@code PTable} instances based on a common
 * key.
*/
public class Join {
/**
* Performs an inner join on the specified {@link PTable}s.
*
* @see <a href="http://en.wikipedia.org/wiki/Join_(SQL)#Inner_join">Inner
* Join</a>
* @param left
* A PTable to perform an inner join on.
* @param right
* A PTable to perform an inner join on.
* @param <K>
* Type of the keys.
* @param <U>
* Type of the first {@link PTable}'s values
* @param <V>
* Type of the second {@link PTable}'s values
* @return The joined result.
*/
public static <K, U, V> PTable<K, Pair<U, V>> join(PTable<K, U> left, PTable<K, V> right) {
return innerJoin(left, right);
}
/**
* Performs an inner join on the specified {@link PTable}s.
*
* @see <a href="http://en.wikipedia.org/wiki/Join_(SQL)#Inner_join">Inner
* Join</a>
* @param left
* A PTable to perform an inner join on.
* @param right
* A PTable to perform an inner join on.
* @param <K>
* Type of the keys.
* @param <U>
* Type of the first {@link PTable}'s values
* @param <V>
* Type of the second {@link PTable}'s values
* @return The joined result.
*/
public static <K, U, V> PTable<K, Pair<U, V>> innerJoin(PTable<K, U> left, PTable<K, V> right) {
return new DefaultJoinStrategy<K, U, V>().join(left, right, JoinType.INNER_JOIN);
}
/**
* Performs a left outer join on the specified {@link PTable}s.
*
* @see <a href="http://en.wikipedia.org/wiki/Join_(SQL)#Left_outer_join">Left
* Join</a>
   * @param left
   *          A PTable to perform a left join on. All of this PTable's entries
   *          will appear in the resulting PTable.
   * @param right
   *          A PTable to perform a left join on.
* @param <K>
* Type of the keys.
* @param <U>
* Type of the first {@link PTable}'s values
* @param <V>
* Type of the second {@link PTable}'s values
* @return The joined result.
*/
public static <K, U, V> PTable<K, Pair<U, V>> leftJoin(PTable<K, U> left, PTable<K, V> right) {
return new DefaultJoinStrategy<K, U, V>().join(left, right, JoinType.LEFT_OUTER_JOIN);
}
/**
* Performs a right outer join on the specified {@link PTable}s.
*
* @see <a
* href="http://en.wikipedia.org/wiki/Join_(SQL)#Right_outer_join">Right
* Join</a>
   * @param left
   *          A PTable to perform a right join on.
   * @param right
   *          A PTable to perform a right join on. All of this PTable's entries
* will appear in the resulting PTable.
* @param <K>
* Type of the keys.
* @param <U>
* Type of the first {@link PTable}'s values
* @param <V>
* Type of the second {@link PTable}'s values
* @return The joined result.
*/
public static <K, U, V> PTable<K, Pair<U, V>> rightJoin(PTable<K, U> left, PTable<K, V> right) {
return new DefaultJoinStrategy<K, U, V>().join(left, right, JoinType.RIGHT_OUTER_JOIN);
}
/**
* Performs a full outer join on the specified {@link PTable}s.
*
* @see <a href="http://en.wikipedia.org/wiki/Join_(SQL)#Full_outer_join">Full
* Join</a>
   * @param left
   *          A PTable to perform a full join on.
   * @param right
   *          A PTable to perform a full join on.
* @param <K>
* Type of the keys.
* @param <U>
* Type of the first {@link PTable}'s values
* @param <V>
* Type of the second {@link PTable}'s values
* @return The joined result.
*/
public static <K, U, V> PTable<K, Pair<U, V>> fullJoin(PTable<K, U> left, PTable<K, V> right) {
return new DefaultJoinStrategy<K, U, V>().join(left, right, JoinType.FULL_OUTER_JOIN);
}
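  // Hedged usage sketch, not part of the original API: joining two tables keyed
  // by a common id. The input tables are assumed to come from an existing pipeline.
  // An inner join keeps only keys present in both tables; leftJoin(left, right)
  // would instead keep every key of 'left', pairing unmatched keys with a null value.
  static <K> PTable<K, Pair<Long, String>> joinExample(PTable<K, Long> left, PTable<K, String> right) {
    return innerJoin(left, right);
  }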
}
| 2,871 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Sort.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import org.apache.avro.Schema;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.Tuple;
import org.apache.crunch.Tuple3;
import org.apache.crunch.Tuple4;
import org.apache.crunch.TupleN;
import org.apache.crunch.lib.sort.SortFns;
import org.apache.crunch.lib.sort.TotalOrderPartitioner;
import org.apache.crunch.lib.sort.ReverseAvroComparator;
import org.apache.crunch.lib.sort.ReverseWritableComparator;
import org.apache.crunch.lib.sort.TupleWritableComparator;
import org.apache.crunch.materialize.MaterializableIterable;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.avro.AvroTypeFamily;
import org.apache.crunch.types.writable.WritableType;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.apache.crunch.util.PartitionUtils;
import org.apache.hadoop.conf.Configuration;
/**
* Utilities for sorting {@code PCollection} instances.
*/
public class Sort {
/**
* For signaling the order in which a sort should be done.
*/
public enum Order {
ASCENDING,
DESCENDING,
IGNORE
}
/**
* To sort by column 2 ascending then column 1 descending, you would use:
* {@code
* sortPairs(coll, by(2, ASCENDING), by(1, DESCENDING))
* } Column numbering is 1-based.
*/
public static class ColumnOrder {
private int column;
private Order order;
public ColumnOrder(int column, Order order) {
this.column = column;
this.order = order;
}
public static ColumnOrder by(int column, Order order) {
return new ColumnOrder(column, order);
}
public int column() {
return column;
}
public Order order() {
return order;
}
@Override
public String toString() {
return "ColumnOrder: column:" + column + ", Order: " + order;
}
}
/**
* Sorts the {@code PCollection} using the natural ordering of its elements in ascending order.
*
* @return a {@code PCollection} representing the sorted collection.
*/
public static <T> PCollection<T> sort(PCollection<T> collection) {
return sort(collection, Order.ASCENDING);
}
/**
* Sorts the {@code PCollection} using the natural order of its elements with the given {@code Order}.
*
* @return a {@code PCollection} representing the sorted collection.
*/
public static <T> PCollection<T> sort(PCollection<T> collection, Order order) {
return sort(collection, -1, order);
}
/**
* Sorts the {@code PCollection} using the natural ordering of its elements in
   * the specified order, using the given number of reducers.
*
* @return a {@code PCollection} representing the sorted collection.
*/
public static <T> PCollection<T> sort(PCollection<T> collection, int numReducers, Order order) {
PTypeFamily tf = collection.getTypeFamily();
PTableType<T, Void> type = tf.tableOf(collection.getPType(), tf.nulls());
Configuration conf = collection.getPipeline().getConfiguration();
PTable<T, Void> pt = collection.parallelDo("sort-pre", new DoFn<T, Pair<T, Void>>() {
@Override
public void process(T input, Emitter<Pair<T, Void>> emitter) {
emitter.emit(Pair.of(input, (Void) null));
}
}, type);
GroupingOptions options = buildGroupingOptions(pt, conf, numReducers, order);
return pt.groupByKey(options).ungroup().keys();
}
/**
* Sorts the {@code PTable} using the natural ordering of its keys in ascending order.
*
* @return a {@code PTable} representing the sorted table.
*/
public static <K, V> PTable<K, V> sort(PTable<K, V> table) {
return sort(table, Order.ASCENDING);
}
/**
* Sorts the {@code PTable} using the natural ordering of its keys with the given {@code Order}.
*
* @return a {@code PTable} representing the sorted table.
*/
public static <K, V> PTable<K, V> sort(PTable<K, V> table, Order key) {
return sort(table, -1, key);
}
/**
* Sorts the {@code PTable} using the natural ordering of its keys in the
   * specified order, using a client-specified number of reducers.
*
   * @return a {@code PTable} representing the sorted table.
*/
public static <K, V> PTable<K, V> sort(PTable<K, V> table, int numReducers, Order key) {
Configuration conf = table.getPipeline().getConfiguration();
GroupingOptions options = buildGroupingOptions(table, conf, numReducers, key);
return table.groupByKey(options).ungroup();
}
/**
* Sorts the {@code PCollection} of {@code Pair}s using the specified column
* ordering.
*
* @return a {@code PCollection} representing the sorted collection.
*/
public static <U, V> PCollection<Pair<U, V>> sortPairs(PCollection<Pair<U, V>> collection,
ColumnOrder... columnOrders) {
return sortTuples(collection, columnOrders);
}
/**
* Sorts the {@code PCollection} of {@code Tuple3}s using the specified column
* ordering.
*
* @return a {@code PCollection} representing the sorted collection.
*/
public static <V1, V2, V3> PCollection<Tuple3<V1, V2, V3>> sortTriples(PCollection<Tuple3<V1, V2, V3>> collection,
ColumnOrder... columnOrders) {
return sortTuples(collection, columnOrders);
}
/**
* Sorts the {@code PCollection} of {@code Tuple4}s using the specified column
* ordering.
*
* @return a {@code PCollection} representing the sorted collection.
*/
public static <V1, V2, V3, V4> PCollection<Tuple4<V1, V2, V3, V4>> sortQuads(
PCollection<Tuple4<V1, V2, V3, V4>> collection, ColumnOrder... columnOrders) {
return sortTuples(collection, columnOrders);
}
/**
* Sorts the {@code PCollection} of tuples using the specified column ordering.
*
* @return a {@code PCollection} representing the sorted collection.
*/
public static <T extends Tuple> PCollection<T> sortTuples(PCollection<T> collection,
ColumnOrder... columnOrders) {
return sortTuples(collection, -1, columnOrders);
}
/**
* Sorts the {@code PCollection} of {@link TupleN}s using the specified column
* ordering and a client-specified number of reducers.
*
* @return a {@code PCollection} representing the sorted collection.
*/
public static <T extends Tuple> PCollection<T> sortTuples(PCollection<T> collection, int numReducers,
ColumnOrder... columnOrders) {
PType<T> pType = collection.getPType();
SortFns.KeyExtraction<T> ke = new SortFns.KeyExtraction<T>(pType, columnOrders);
PTable<Object, T> pt = collection.by(ke.getByFn(), ke.getKeyType());
Configuration conf = collection.getPipeline().getConfiguration();
GroupingOptions options = buildGroupingOptions(pt, conf, numReducers, columnOrders);
return pt.groupByKey(options).ungroup().values();
}
// TODO: move to type family?
private static <K, V> GroupingOptions buildGroupingOptions(PTable<K, V> ptable, Configuration conf,
int numReducers, Order order) {
PType<K> ptype = ptable.getKeyType();
PTypeFamily tf = ptable.getTypeFamily();
GroupingOptions.Builder builder = GroupingOptions.builder();
if (order == Order.DESCENDING) {
if (tf == WritableTypeFamily.getInstance()) {
builder.sortComparatorClass(ReverseWritableComparator.class);
} else if (tf == AvroTypeFamily.getInstance()) {
AvroType<K> avroType = (AvroType<K>) ptype;
Schema schema = avroType.getSchema();
builder.conf("crunch.schema", schema.toString());
builder.sortComparatorClass(ReverseAvroComparator.class);
} else {
throw new RuntimeException("Unrecognized type family: " + tf);
}
} else if (tf == AvroTypeFamily.getInstance()) {
builder.conf("crunch.schema", ((AvroType<K>) ptype).getSchema().toString());
}
builder.requireSortedKeys();
configureReducers(builder, ptable, conf, numReducers);
return builder.build();
}
private static <K, V> GroupingOptions buildGroupingOptions(PTable<K, V> ptable, Configuration conf,
int numReducers, ColumnOrder[] columnOrders) {
PTypeFamily tf = ptable.getTypeFamily();
PType<K> keyType = ptable.getKeyType();
GroupingOptions.Builder builder = GroupingOptions.builder();
if (tf == WritableTypeFamily.getInstance()) {
if (columnOrders.length == 1 && columnOrders[0].order == Order.DESCENDING) {
builder.sortComparatorClass(ReverseWritableComparator.class);
} else {
WritableType[] wt = new WritableType[columnOrders.length];
for (int i = 0; i < wt.length; i++) {
wt[i] = (WritableType) keyType.getSubTypes().get(i);
}
TupleWritableComparator.configureOrdering(conf, wt, columnOrders);
builder.sortComparatorClass(TupleWritableComparator.class);
}
} else if (tf == AvroTypeFamily.getInstance()) {
AvroType<K> avroType = (AvroType<K>) keyType;
Schema schema = avroType.getSchema();
builder.conf("crunch.schema", schema.toString());
if (columnOrders.length == 1 && columnOrders[0].order == Order.DESCENDING) {
builder.sortComparatorClass(ReverseAvroComparator.class);
}
} else {
throw new RuntimeException("Unrecognized type family: " + tf);
}
builder.requireSortedKeys();
configureReducers(builder, ptable, conf, numReducers);
return builder.build();
}
private static <K, V> void configureReducers(GroupingOptions.Builder builder,
PTable<K, V> ptable, Configuration conf, int numReducers) {
if (numReducers <= 0) {
numReducers = PartitionUtils.getRecommendedPartitions(ptable, conf);
if (numReducers < 5) {
// Not worth the overhead, force it to 1
numReducers = 1;
}
}
builder.numReducers(numReducers);
if (numReducers > 1) {
Iterable<K> iter = Sample.reservoirSample(ptable.keys(), numReducers - 1).materialize();
MaterializableIterable<K> mi = (MaterializableIterable<K>) iter;
if (mi.isSourceTarget()) {
builder.sourceTargets((SourceTarget) mi.getSource());
}
builder.partitionerClass(TotalOrderPartitioner.class);
builder.conf(TotalOrderPartitioner.PARTITIONER_PATH, mi.getPath().toString());
//TODO: distcache handling
}
}
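  // Hedged usage sketch, not part of the original API: sorting a collection of
  // pairs by the second column descending, then by the first column ascending.
  // The input collection is assumed to come from an existing pipeline.
  static <U, V> PCollection<Pair<U, V>> sortPairsExample(PCollection<Pair<U, V>> pairs) {
    return sortPairs(pairs,
        ColumnOrder.by(2, Order.DESCENDING),
        ColumnOrder.by(1, Order.ASCENDING));
  }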
}
| 2,872 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Cogroup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import com.google.common.collect.Lists;
import org.apache.crunch.MapFn;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.Tuple;
import org.apache.crunch.Tuple3;
import org.apache.crunch.Tuple4;
import org.apache.crunch.TupleN;
import org.apache.crunch.Union;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.TupleFactory;
import java.util.Collection;
public class Cogroup {
/**
* Co-groups the two {@link PTable} arguments.
*
* @param left The left (smaller) PTable
* @param right The right (larger) PTable
* @return a {@code PTable} representing the co-grouped tables
*/
public static <K, U, V> PTable<K, Pair<Collection<U>, Collection<V>>> cogroup(PTable<K, U> left, PTable<K, V> right) {
return cogroup(0, left, right);
}
/**
   * Co-groups the two {@link PTable} arguments with a user-specified degree of parallelism (a.k.a. number of
   * reducers).
*
* @param numReducers The number of reducers to use
* @param left The left (smaller) PTable
* @param right The right (larger) PTable
* @return A new {@code PTable} representing the co-grouped tables
*/
public static <K, U, V> PTable<K, Pair<Collection<U>, Collection<V>>> cogroup(
int numReducers,
PTable<K, U> left,
PTable<K, V> right) {
PTypeFamily tf = left.getTypeFamily();
return cogroup(
tf.pairs(tf.collections(left.getValueType()),
tf.collections(right.getValueType())),
TupleFactory.PAIR,
numReducers,
left, right);
}
/**
* Co-groups the three {@link PTable} arguments.
*
* @param first The smallest PTable
* @param second The second-smallest PTable
* @param third The largest PTable
* @return a {@code PTable} representing the co-grouped tables
*/
public static <K, V1, V2, V3> PTable<K, Tuple3.Collect<V1, V2, V3>> cogroup(
PTable<K, V1> first,
PTable<K, V2> second,
PTable<K, V3> third) {
return cogroup(0, first, second, third);
}
/**
   * Co-groups the three {@link PTable} arguments with a user-specified degree of parallelism (a.k.a. number of
   * reducers).
*
* @param numReducers The number of reducers to use
* @param first The smallest PTable
* @param second The second-smallest PTable
* @param third The largest PTable
* @return A new {@code PTable} representing the co-grouped tables
*/
public static <K, V1, V2, V3> PTable<K, Tuple3.Collect<V1, V2, V3>> cogroup(
int numReducers,
PTable<K, V1> first,
PTable<K, V2> second,
PTable<K, V3> third) {
return cogroup(
Tuple3.Collect.derived(first.getValueType(), second.getValueType(), third.getValueType()),
new TupleFactory<Tuple3.Collect<V1, V2, V3>>() {
@Override
public Tuple3.Collect<V1, V2, V3> makeTuple(Object... values) {
return new Tuple3.Collect<V1, V2, V3>(
(Collection<V1>) values[0],
(Collection<V2>) values[1],
(Collection<V3>) values[2]);
}
},
numReducers,
first, second, third);
}
/**
   * Co-groups the four {@link PTable} arguments.
   *
   * @param first The smallest PTable
   * @param second The second-smallest PTable
   * @param third The third-smallest PTable
   * @param fourth The largest PTable
   * @return a {@code PTable} representing the co-grouped tables
*/
public static <K, V1, V2, V3, V4> PTable<K, Tuple4.Collect<V1, V2, V3, V4>> cogroup(
PTable<K, V1> first,
PTable<K, V2> second,
PTable<K, V3> third,
PTable<K, V4> fourth) {
return cogroup(0, first, second, third, fourth);
}
/**
   * Co-groups the four {@link PTable} arguments with a user-specified degree of parallelism (a.k.a. number of
   * reducers).
   *
   * @param numReducers The number of reducers to use
   * @param first The smallest PTable
   * @param second The second-smallest PTable
   * @param third The third-smallest PTable
   * @param fourth The largest PTable
   * @return A new {@code PTable} representing the co-grouped tables
*/
public static <K, V1, V2, V3, V4> PTable<K, Tuple4.Collect<V1, V2, V3, V4>> cogroup(
int numReducers,
PTable<K, V1> first,
PTable<K, V2> second,
PTable<K, V3> third,
PTable<K, V4> fourth) {
return cogroup(
Tuple4.Collect.derived(first.getValueType(), second.getValueType(), third.getValueType(),
fourth.getValueType()),
new TupleFactory<Tuple4.Collect<V1, V2, V3, V4>>() {
@Override
public Tuple4.Collect<V1, V2, V3, V4> makeTuple(Object... values) {
return new Tuple4.Collect<V1, V2, V3, V4>(
(Collection<V1>) values[0],
(Collection<V2>) values[1],
(Collection<V3>) values[2],
(Collection<V4>) values[3]);
}
},
numReducers,
first, second, third, fourth);
}
/**
* Co-groups an arbitrary number of {@link PTable} arguments. The largest table should
* come last in the ordering.
*
* @param first The first (smallest) PTable to co-group
* @param rest The other (larger) PTables to co-group
* @return a {@code PTable} representing the co-grouped tables
*/
public static <K> PTable<K, TupleN> cogroup(PTable<K, ?> first, PTable<K, ?>... rest) {
return cogroup(0, first, rest);
}
/**
* Co-groups an arbitrary number of {@link PTable} arguments with a user-specified degree of parallelism
   * (a.k.a. number of reducers). The largest table should come last in the ordering.
*
* @param numReducers The number of reducers to use
* @param first The first (smallest) PTable to co-group
* @param rest The other (larger) PTables to co-group
* @return A new {@code PTable} representing the co-grouped tables
*/
public static <K, U, V> PTable<K, TupleN> cogroup(
int numReducers,
PTable<K, ?> first,
PTable<K, ?>... rest) {
PTypeFamily tf = first.getTypeFamily();
PType[] components = new PType[1 + rest.length];
components[0] = tf.collections(first.getValueType());
for (int i = 0; i < rest.length; i++) {
components[i + 1] = tf.collections(rest[i].getValueType());
}
return cogroup(
tf.tuples(components),
TupleFactory.TUPLEN,
numReducers,
first, rest);
}
private static <K, T extends Tuple> PTable<K, T> cogroup(
PType<T> outputType,
TupleFactory tupleFactory,
int numReducers,
PTable<K, ?> first, PTable<K, ?>... rest) {
PTypeFamily ptf = first.getTypeFamily();
PType[] ptypes = new PType[1 + rest.length];
ptypes[0] = first.getValueType();
for (int i = 0; i < rest.length; i++) {
ptypes[i + 1] = rest[i].getValueType();
}
PType<Union> itype = ptf.unionOf(ptypes);
PTable<K, Union> firstInter = first.mapValues("coGroupTag1",
new CogroupFn(0), itype);
PTable<K, Union>[] inter = new PTable[rest.length];
for (int i = 0; i < rest.length; i++) {
inter[i] = rest[i].mapValues("coGroupTag" + (i + 2),
new CogroupFn(i + 1), itype);
}
PTable<K, Union> union = firstInter.union(inter);
PGroupedTable<K, Union> grouped;
if (numReducers > 0) {
grouped = union.groupByKey(numReducers);
} else {
grouped = union.groupByKey();
}
return grouped.mapValues("cogroup",
new PostGroupFn<T>(tupleFactory, ptypes),
outputType);
}
private static class CogroupFn<T> extends MapFn<T, Union> {
private final int index;
CogroupFn(int index) {
this.index = index;
}
@Override
public Union map(T input) {
return new Union(index, input);
}
}
private static class PostGroupFn<T extends Tuple> extends
MapFn<Iterable<Union>, T> {
private final TupleFactory factory;
private final PType[] ptypes;
PostGroupFn(TupleFactory tf, PType... ptypes) {
this.factory = tf;
this.ptypes = ptypes;
}
@Override
public void initialize() {
super.initialize();
for (PType pt : ptypes) {
pt.initialize(getConfiguration());
}
}
@Override
public T map(Iterable<Union> input) {
Collection[] collections = new Collection[ptypes.length];
for (int i = 0; i < ptypes.length; i++) {
collections[i] = Lists.newArrayList();
}
for (Union t : input) {
int index = t.getIndex();
collections[index].add(ptypes[index].getDetachedValue(t.getValue()));
}
return (T) factory.makeTuple(collections);
}
}
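  // Hedged usage sketch, not part of the original API: co-grouping two tables on
  // a shared key. Each output value pairs the collection of left values with the
  // collection of right values for that key. Inputs are assumed to come from an
  // existing pipeline.
  static <K> PTable<K, Pair<Collection<Long>, Collection<String>>> cogroupExample(
      PTable<K, Long> left, PTable<K, String> right) {
    return cogroup(left, right);
  }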
}
| 2,873 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Average.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import org.apache.crunch.MapFn;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PTypeFamily;
import static org.apache.crunch.fn.Aggregators.SUM_DOUBLES;
import static org.apache.crunch.fn.Aggregators.SUM_LONGS;
import static org.apache.crunch.fn.Aggregators.pairAggregator;
public class Average {
/**
* Calculate the mean average value by key for a table with numeric values.
* @param table PTable of (key, value) pairs to operate on
* @param <K> Key type, can be any type
   * @param <V> Value type, must be numeric (i.e. extend java.lang.Number)
* @return PTable<K, Double> of (key, mean(values)) pairs
*/
public static <K, V extends Number> PTable<K, Double> meanValue(PTable<K, V> table) {
PTypeFamily ptf = table.getTypeFamily();
PTable<K, Pair<Double, Long>> withCounts = table.mapValues(new MapFn<V, Pair<Double, Long>>() {
@Override
public Pair<Double, Long> map(V input) {
return Pair.of(input.doubleValue(), 1L);
}
}, ptf.pairs(ptf.doubles(), ptf.longs()));
PGroupedTable<K, Pair<Double, Long>> grouped = withCounts.groupByKey();
return grouped.combineValues(pairAggregator(SUM_DOUBLES(), SUM_LONGS()))
.mapValues(new MapFn<Pair<Double, Long>, Double>() {
@Override
public Double map(Pair<Double, Long> input) {
return input.first() / input.second();
}
}, ptf.doubles());
}
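  // Hedged usage sketch, not part of the original API: the mean measurement per
  // key for a table of integer observations, e.g. (k, 1) and (k, 3) yield (k, 2.0).
  // The input table is assumed to come from an existing pipeline.
  static <K> PTable<K, Double> meanValueExample(PTable<K, Integer> measurements) {
    return meanValue(measurements);
  }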
}
| 2,874 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Mapreduce.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.lang.reflect.Proxy;
import javassist.util.proxy.MethodFilter;
import javassist.util.proxy.MethodHandler;
import javassist.util.proxy.ProxyFactory;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Preconditions;
/**
* Static functions for working with legacy Mappers and Reducers that live under the org.apache.hadoop.mapreduce.*
* package as part of Crunch pipelines.
*/
public class Mapreduce {
public static <K1, V1, K2 extends Writable, V2 extends Writable> PTable<K2, V2> map(
PTable<K1, V1> input,
Class<? extends Mapper<K1, V1, K2, V2>> mapperClass,
Class<K2> keyClass, Class<V2> valueClass) {
return input.parallelDo(new MapperFn<K1, V1, K2, V2>(mapperClass), tableOf(keyClass, valueClass));
}
public static <K1, V1, K2 extends Writable, V2 extends Writable> PTable<K2, V2> reduce(
PGroupedTable<K1, V1> input,
Class<? extends Reducer<K1, V1, K2, V2>> reducerClass,
Class<K2> keyClass, Class<V2> valueClass) {
return input.parallelDo(new ReducerFn<K1, V1, K2, V2>(reducerClass), tableOf(keyClass, valueClass));
}
private static <K extends Writable, V extends Writable> PTableType<K, V> tableOf(
Class<K> keyClass, Class<V> valueClass) {
return Writables.tableOf(Writables.writables(keyClass), Writables.writables(valueClass));
}
private static class MapperFn<K1, V1, K2 extends Writable, V2 extends Writable> extends
DoFn<Pair<K1, V1>, Pair<K2, V2>> {
private final Class<? extends Mapper<K1, V1, K2, V2>> mapperClass;
private transient Mapper<K1, V1, K2, V2> instance;
private transient Mapper.Context context;
private transient CtxtMethodHandler handler;
private transient Method setupMethod;
private transient Method mapMethod;
private transient Method cleanupMethod;
public MapperFn(Class<? extends Mapper<K1, V1, K2, V2>> mapperClass) {
this.mapperClass = Preconditions.checkNotNull(mapperClass);
}
@Override
public void initialize() {
if (instance == null) {
this.instance = ReflectionUtils.newInstance(mapperClass, getConfiguration());
try {
for (Method m : mapperClass.getDeclaredMethods()) {
if ("setup".equals(m.getName())) {
this.setupMethod = m;
this.setupMethod.setAccessible(true);
} else if ("cleanup".equals(m.getName())) {
this.cleanupMethod = m;
this.cleanupMethod.setAccessible(true);
} else if ("map".equals(m.getName())) {
this.mapMethod = m;
this.mapMethod.setAccessible(true);
}
}
if (mapMethod == null) {
throw new CrunchRuntimeException("No map method for class: " + mapperClass);
}
ProxyFactory proxyFactory = new ProxyFactory();
proxyFactory.setSuperclass(Mapper.Context.class);
proxyFactory.setFilter(CtxtMethodHandler.FILTER);
Class[] paramTypes = new Class[] { Mapper.class };
Object[] args = new Object[] { instance };
if (!Modifier.isAbstract(Mapper.Context.class.getModifiers())) {
paramTypes = new Class[] { Mapper.class,
Configuration.class, TaskAttemptID.class,
RecordReader.class, RecordWriter.class,
OutputCommitter.class,
Class.forName("org.apache.hadoop.mapreduce.StatusReporter"),
InputSplit.class
};
args = new Object[] { instance, getConfiguration(), getTaskAttemptID(),
null, null, NO_OP_OUTPUT_COMMITTER, null, null
};
}
this.handler = new CtxtMethodHandler(this.getContext());
this.context = (Mapper.Context) proxyFactory.create(paramTypes, args, handler);
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
if (setupMethod != null) {
try {
setupMethod.invoke(instance, context);
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
}
@Override
public void process(Pair<K1, V1> input, Emitter<Pair<K2, V2>> emitter) {
handler.set(emitter);
try {
mapMethod.invoke(instance, input.first(), input.second(), context);
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
@Override
public void cleanup(Emitter<Pair<K2, V2>> emitter) {
if (cleanupMethod != null) {
handler.set(emitter);
try {
cleanupMethod.invoke(instance, context);
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
}
}
private static class ReducerFn<K1, V1, K2 extends Writable, V2 extends Writable> extends
DoFn<Pair<K1, Iterable<V1>>, Pair<K2, V2>> {
private final Class<? extends Reducer<K1, V1, K2, V2>> reducerClass;
private transient Reducer<K1, V1, K2, V2> instance;
private transient CtxtMethodHandler handler;
private transient Reducer.Context context;
private transient Method setupMethod;
private transient Method reduceMethod;
private transient Method cleanupMethod;
public ReducerFn(Class<? extends Reducer<K1, V1, K2, V2>> reducerClass) {
this.reducerClass = Preconditions.checkNotNull(reducerClass);
}
@Override
public void initialize() {
if (instance == null) {
this.instance = ReflectionUtils.newInstance(reducerClass, getConfiguration());
try {
for (Method m : reducerClass.getDeclaredMethods()) {
if ("setup".equals(m.getName())) {
this.setupMethod = m;
this.setupMethod.setAccessible(true);
} else if ("cleanup".equals(m.getName())) {
this.cleanupMethod = m;
this.cleanupMethod.setAccessible(true);
} else if ("reduce".equals(m.getName())) {
this.reduceMethod = m;
this.reduceMethod.setAccessible(true);
}
}
if (reduceMethod == null) {
throw new CrunchRuntimeException("No reduce method for class: " + reducerClass);
}
ProxyFactory proxyFactory = new ProxyFactory();
proxyFactory.setSuperclass(Reducer.Context.class);
proxyFactory.setFilter(CtxtMethodHandler.FILTER);
Class[] paramTypes = new Class[] { Reducer.class };
Object[] args = new Object[] { instance };
if (!Modifier.isAbstract(Reducer.Context.class.getModifiers())) {
Class rkvi = Class.forName("org.apache.hadoop.mapred.RawKeyValueIterator");
Object rawKeyValueIterator = Proxy.newProxyInstance(rkvi.getClassLoader(),
new Class[] { rkvi }, new InvocationHandler() {
@Override
public Object invoke(Object obj, Method m, Object[] args) throws Throwable {
if ("next".equals(m.getName())) {
return true;
}
return null;
}
});
paramTypes = new Class[] { Reducer.class,
Configuration.class, TaskAttemptID.class,
rkvi,
Counter.class, Counter.class,
RecordWriter.class,
OutputCommitter.class,
Class.forName("org.apache.hadoop.mapreduce.StatusReporter"),
RawComparator.class,
Class.class, Class.class
};
args = new Object[] { instance, getConfiguration(), getTaskAttemptID(),
rawKeyValueIterator, null, null, null,
NO_OP_OUTPUT_COMMITTER, null, null,
NullWritable.class, NullWritable.class
};
}
this.handler = new CtxtMethodHandler(this.getContext());
this.context = (Reducer.Context) proxyFactory.create(paramTypes, args, handler);
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
if (setupMethod != null) {
try {
setupMethod.invoke(instance, context);
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
}
@Override
public void process(Pair<K1, Iterable<V1>> input, Emitter<Pair<K2, V2>> emitter) {
handler.set(emitter);
try {
reduceMethod.invoke(instance, input.first(), input.second(), context);
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
@Override
public void cleanup(Emitter<Pair<K2, V2>> emitter) {
if (cleanupMethod != null) {
handler.set(emitter);
try {
cleanupMethod.invoke(instance, context);
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
}
}
private static class CtxtMethodHandler implements MethodHandler {
public static final MethodFilter FILTER = new MethodFilter() {
@Override
public boolean isHandled(Method m) {
return true;
}
};
private final TaskInputOutputContext ctxt;
private Emitter emitter;
public CtxtMethodHandler(TaskInputOutputContext ctxt) {
this.ctxt = ctxt;
}
public void set(Emitter emitter) {
this.emitter = emitter;
}
@Override
public Object invoke(Object instance, Method m, Method arg2, Object[] args) throws Throwable {
String name = m.getName();
if ("write".equals(name)) {
emitter.emit(Pair.of(args[0], args[1]));
return null;
} else {
return m.invoke(ctxt, args);
}
}
}
private static final OutputCommitter NO_OP_OUTPUT_COMMITTER = new OutputCommitter() {
@Override
public void abortTask(TaskAttemptContext arg0) throws IOException {
}
@Override
public void commitTask(TaskAttemptContext arg0) throws IOException {
}
@Override
public boolean needsTaskCommit(TaskAttemptContext arg0) throws IOException {
return false;
}
@Override
public void setupJob(JobContext arg0) throws IOException {
}
@Override
public void setupTask(TaskAttemptContext arg0) throws IOException {
}
};
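  // Hedged usage sketch, not part of the original API: running Hadoop's stock
  // TokenCounterMapper inside a Crunch pipeline. The input table of (offset, line)
  // pairs is assumed to come from an existing source elsewhere in the pipeline.
  static PTable<org.apache.hadoop.io.Text, org.apache.hadoop.io.IntWritable> legacyMapExample(
      PTable<Object, org.apache.hadoop.io.Text> lines) {
    return map(lines, org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper.class,
        org.apache.hadoop.io.Text.class, org.apache.hadoop.io.IntWritable.class);
  }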
}
| 2,875 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Sample.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.lib.SampleUtils.ReservoirSampleFn;
import org.apache.crunch.lib.SampleUtils.SampleFn;
import org.apache.crunch.lib.SampleUtils.WRSCombineFn;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
/**
* Methods for performing random sampling in a distributed fashion, either by accepting each
* record in a {@code PCollection} with an independent probability in order to sample some
* fraction of the overall data set, or by using reservoir sampling in order to pull a uniform
* or weighted sample of fixed size from a {@code PCollection} of an unknown size. For more details
* on the reservoir sampling algorithms used by this library, see the A-ES algorithm described in
* <a href="http://arxiv.org/pdf/1012.0256.pdf">Efraimidis (2012)</a>.
*/
public class Sample {
/**
* Output records from the given {@code PCollection} with the given probability.
*
* @param input The {@code PCollection} to sample from
   * @param probability The probability (0.0 < p < 1.0)
* @return The output {@code PCollection} created from sampling
*/
public static <S> PCollection<S> sample(PCollection<S> input, double probability) {
return sample(input, null, probability);
}
/**
   * Output records from the given {@code PCollection} with the given probability, using the given
   * seed for the random number generator. Useful for unit testing.
*
* @param input The {@code PCollection} to sample from
* @param seed The seed for the random number generator
* @param probability The probability (0.0 < p < 1.0)
* @return The output {@code PCollection} created from sampling
*/
public static <S> PCollection<S> sample(PCollection<S> input, Long seed, double probability) {
String stageName = String.format("sample(%.2f)", probability);
return input.parallelDo(stageName, new SampleFn<S>(probability, seed), input.getPType());
}
/**
* A {@code PTable<K, V>} analogue of the {@code sample} function.
*
* @param input The {@code PTable} to sample from
* @param probability The probability (0.0 < p < 1.0)
* @return The output {@code PTable} created from sampling
*/
public static <K, V> PTable<K, V> sample(PTable<K, V> input, double probability) {
return PTables.asPTable(sample((PCollection<Pair<K, V>>) input, probability));
}
/**
* A {@code PTable<K, V>} analogue of the {@code sample} function, with the seed argument
* exposed for testing purposes.
*
* @param input The {@code PTable} to sample from
* @param seed The seed for the random number generator
* @param probability The probability (0.0 < p < 1.0)
* @return The output {@code PTable} created from sampling
*/
public static <K, V> PTable<K, V> sample(PTable<K, V> input, Long seed, double probability) {
return PTables.asPTable(sample((PCollection<Pair<K, V>>) input, seed, probability));
}
/**
* Select a fixed number of elements from the given {@code PCollection} with each element
* equally likely to be included in the sample.
*
* @param input The input data
* @param sampleSize The number of elements to select
* @return A {@code PCollection} made up of the sampled elements
*/
public static <T> PCollection<T> reservoirSample(
PCollection<T> input,
int sampleSize) {
return reservoirSample(input, sampleSize, null);
}
/**
* A version of the reservoir sampling algorithm that uses a given seed, primarily for
* testing purposes.
*
* @param input The input data
* @param sampleSize The number of elements to select
* @param seed The test seed
* @return A {@code PCollection} made up of the sampled elements
*/
public static <T> PCollection<T> reservoirSample(
PCollection<T> input,
int sampleSize,
Long seed) {
PTypeFamily ptf = input.getTypeFamily();
PType<Pair<T, Integer>> ptype = ptf.pairs(input.getPType(), ptf.ints());
return weightedReservoirSample(
input.parallelDo("Map to pairs for reservoir sampling", new MapFn<T, Pair<T, Integer>>() {
@Override
public Pair<T, Integer> map(T t) { return Pair.of(t, 1); }
}, ptype),
sampleSize,
seed);
}
/**
* Selects a weighted sample of the elements of the given {@code PCollection}, where the second term in
* the input {@code Pair} is a numerical weight.
*
* @param input the weighted observations
* @param sampleSize The number of elements to select
* @return A random sample of the given size that respects the weighting values
*/
public static <T, N extends Number> PCollection<T> weightedReservoirSample(
PCollection<Pair<T, N>> input,
int sampleSize) {
return weightedReservoirSample(input, sampleSize, null);
}
/**
* The weighted reservoir sampling function with the seed term exposed for testing purposes.
*
* @param input the weighted observations
* @param sampleSize The number of elements to select
* @param seed The test seed
* @return A random sample of the given size that respects the weighting values
*/
public static <T, N extends Number> PCollection<T> weightedReservoirSample(
PCollection<Pair<T, N>> input,
int sampleSize,
Long seed) {
PTypeFamily ptf = input.getTypeFamily();
PTable<Integer, Pair<T, N>> groupedIn = input.parallelDo(
new MapFn<Pair<T, N>, Pair<Integer, Pair<T, N>>>() {
@Override
public Pair<Integer, Pair<T, N>> map(Pair<T, N> p) {
return Pair.of(0, p);
}
}, ptf.tableOf(ptf.ints(), input.getPType()));
int[] ss = { sampleSize };
return groupedWeightedReservoirSample(groupedIn, ss, seed)
.parallelDo("Extract sampled value from pair", new MapFn<Pair<Integer, T>, T>() {
@Override
public T map(Pair<Integer, T> p) {
return p.second();
}
}, (PType<T>) input.getPType().getSubTypes().get(0));
}
/**
   * The most general of the weighted reservoir sampling patterns, allowing a random sample of
   * elements to be chosen for each of N input groups.
*
* @param input A {@code PTable} with the key a group ID and the value a weighted observation in that group
   * @param sampleSizes An array of length N, where each entry is the number of elements to include in that group
* @return A {@code PCollection} of the sampled elements for each of the groups
*/
public static <T, N extends Number> PCollection<Pair<Integer, T>> groupedWeightedReservoirSample(
PTable<Integer, Pair<T, N>> input,
int[] sampleSizes) {
return groupedWeightedReservoirSample(input, sampleSizes, null);
}
/**
   * Same as the other groupedWeightedReservoirSample method, but includes a seed for testing
* purposes.
*
* @param input A {@code PTable} with the key a group ID and the value a weighted observation in that group
   * @param sampleSizes An array of length N, where each entry is the number of elements to include in that group
* @param seed The test seed
* @return A {@code PCollection} of the sampled elements for each of the groups
*/
public static <T, N extends Number> PCollection<Pair<Integer, T>> groupedWeightedReservoirSample(
PTable<Integer, Pair<T, N>> input,
int[] sampleSizes,
Long seed) {
PTypeFamily ptf = input.getTypeFamily();
PType<T> ttype = (PType<T>) input.getPTableType().getValueType().getSubTypes().get(0);
PTableType<Integer, Pair<Double, T>> ptt = ptf.tableOf(ptf.ints(),
ptf.pairs(ptf.doubles(), ttype));
return input.parallelDo("Initial reservoir sampling", new ReservoirSampleFn<T, N>(sampleSizes, seed, ttype), ptt)
.groupByKey(1)
.combineValues(new WRSCombineFn<T>(sampleSizes, ttype))
.parallelDo("Extract sampled values", new MapFn<Pair<Integer, Pair<Double, T>>, Pair<Integer, T>>() {
@Override
public Pair<Integer, T> map(Pair<Integer, Pair<Double, T>> p) {
return Pair.of(p.first(), p.second().second());
}
}, ptf.pairs(ptf.ints(), ttype));
}
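  // Hedged usage sketch, not part of the original API: keep roughly 1% of the
  // records of a collection, and separately pull a fixed-size uniform sample of
  // 100 elements. The input collection is assumed to come from an existing pipeline.
  static PCollection<String> sampleExample(PCollection<String> records) {
    PCollection<String> onePercent = sample(records, 0.01);
    PCollection<String> hundredRecords = reservoirSample(records, 100);
    return onePercent.union(hundredRecords);
  }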
}
| 2,876 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Aggregate.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;
import org.apache.crunch.Aggregator;
import org.apache.crunch.CombineFn;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PObject;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.fn.Aggregators;
import org.apache.crunch.materialize.pobject.FirstElementPObject;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.util.PartitionUtils;
import com.google.common.collect.Lists;
/**
* Methods for performing various types of aggregations over {@link PCollection} instances.
*
*/
public class Aggregate {
/**
   * Returns a {@code PTable} that contains the unique elements of the given collection mapped to a count
* of their occurrences.
*/
public static <S> PTable<S, Long> count(PCollection<S> collect) {
return count(collect, PartitionUtils.getRecommendedPartitions(collect));
}
/**
   * Returns a {@code PTable} that contains the unique elements of the given collection mapped to a count
   * of their occurrences, using the given number of partitions for the underlying group-by.
*/
public static <S> PTable<S, Long> count(PCollection<S> collect, int numPartitions) {
PTypeFamily tf = collect.getTypeFamily();
return collect.parallelDo("Aggregate.count", new MapFn<S, Pair<S, Long>>() {
public Pair<S, Long> map(S input) {
return Pair.of(input, 1L);
}
}, tf.tableOf(collect.getPType(), tf.longs()))
.groupByKey(numPartitions)
.combineValues(Aggregators.SUM_LONGS());
}
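  /*
   * A minimal usage sketch (not part of the original source; the "words" collection is a
   * hypothetical placeholder). count() maps every element to (element, 1L), groups by the
   * element, and sums the counts with a combiner.
   *
   *   PCollection<String> words = ...;
   *   PTable<String, Long> wordCounts = Aggregate.count(words);
   *   PTable<String, Long> wordCounts10 = Aggregate.count(words, 10); // explicit partition count
   */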
/**
* Returns the number of elements in the provided PCollection.
*
* @param collect The PCollection whose elements should be counted.
* @param <S> The type of the PCollection.
* @return A {@code PObject} containing the number of elements in the {@code PCollection}.
*/
public static <S> PObject<Long> length(PCollection<S> collect) {
PTypeFamily tf = collect.getTypeFamily();
PTable<Integer, Long> countTable = collect
.parallelDo("Aggregate.count", new MapFn<S, Pair<Integer, Long>>() {
public Pair<Integer, Long> map(S input) {
return Pair.of(1, 1L);
}
public void cleanup(Emitter<Pair<Integer, Long>> e) {
e.emit(Pair.of(1, 0L));
}
}, tf.tableOf(tf.ints(), tf.longs()))
.groupByKey(GroupingOptions.builder().numReducers(1).build())
.combineValues(Aggregators.SUM_LONGS());
PCollection<Long> count = countTable.values();
return new FirstElementPObject<Long>(count, 0L);
}
public static class PairValueComparator<K, V> implements Comparator<Pair<K, V>> {
private final boolean ascending;
public PairValueComparator(boolean ascending) {
this.ascending = ascending;
}
@Override
public int compare(Pair<K, V> left, Pair<K, V> right) {
int cmp = ((Comparable<V>) left.second()).compareTo(right.second());
if (ascending) {
return cmp;
} else {
return cmp == Integer.MIN_VALUE ? Integer.MAX_VALUE : -cmp;
}
}
}
public static class TopKFn<K, V> extends DoFn<Pair<K, V>, Pair<Integer, Pair<K, V>>> {
private final int limit;
private final boolean maximize;
private final PType<Pair<K, V>> pairType;
private transient PriorityQueue<Pair<K, V>> values;
    public TopKFn(int limit, boolean maximize, PType<Pair<K, V>> pairType) {
      this.limit = limit;
      this.maximize = maximize;
this.pairType = pairType;
}
public void initialize() {
this.values = new PriorityQueue<Pair<K, V>>(limit, new PairValueComparator<K, V>(maximize));
pairType.initialize(getConfiguration());
}
public void process(Pair<K, V> input, Emitter<Pair<Integer, Pair<K, V>>> emitter) {
values.add(pairType.getDetachedValue(input));
if (values.size() > limit) {
values.poll();
}
}
public void cleanup(Emitter<Pair<Integer, Pair<K, V>>> emitter) {
for (Pair<K, V> p : values) {
emitter.emit(Pair.of(0, p));
}
}
}
public static class TopKCombineFn<K, V> extends CombineFn<Integer, Pair<K, V>> {
private final int limit;
private final boolean maximize;
private PType<Pair<K, V>> pairType;
public TopKCombineFn(int limit, boolean maximize, PType<Pair<K, V>> pairType) {
this.limit = limit;
this.maximize = maximize;
this.pairType = pairType;
}
@Override
public void initialize() {
pairType.initialize(getConfiguration());
}
@Override
public void process(Pair<Integer, Iterable<Pair<K, V>>> input,
Emitter<Pair<Integer, Pair<K, V>>> emitter) {
Comparator<Pair<K, V>> cmp = new PairValueComparator<K, V>(maximize);
PriorityQueue<Pair<K, V>> queue = new PriorityQueue<Pair<K, V>>(limit, cmp);
for (Pair<K, V> pair : input.second()) {
queue.add(pairType.getDetachedValue(pair));
if (queue.size() > limit) {
queue.poll();
}
}
List<Pair<K, V>> values = Lists.newArrayList(queue);
Collections.sort(values, cmp);
for (int i = values.size() - 1; i >= 0; i--) {
emitter.emit(Pair.of(0, values.get(i)));
}
}
}
/**
* Selects the top N pairs from the given table, with sorting being performed on the values (i.e. the second
* value in the pair) of the table.
*
* @param ptable table containing the pairs from which the top N is to be selected
* @param limit number of top elements to select
* @param maximize if true, the maximum N values from the table will be selected, otherwise the minimal
* N values will be selected
* @return table containing the top N values from the incoming table
*/
public static <K, V> PTable<K, V> top(PTable<K, V> ptable, int limit, boolean maximize) {
PTypeFamily ptf = ptable.getTypeFamily();
PTableType<K, V> base = ptable.getPTableType();
PType<Pair<K, V>> pairType = ptf.pairs(base.getKeyType(), base.getValueType());
PTableType<Integer, Pair<K, V>> inter = ptf.tableOf(ptf.ints(), pairType);
return ptable.parallelDo("top" + limit + "map", new TopKFn<K, V>(limit, maximize, pairType), inter)
.groupByKey(1).combineValues(new TopKCombineFn<K, V>(limit, maximize, pairType))
.parallelDo("top" + limit + "reduce", new DoFn<Pair<Integer, Pair<K, V>>, Pair<K, V>>() {
public void process(Pair<Integer, Pair<K, V>> input, Emitter<Pair<K, V>> emitter) {
emitter.emit(input.second());
}
}, base);
}
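  /*
   * A minimal usage sketch (not part of the original source; the "wordCounts" table is a
   * hypothetical placeholder). top() keeps a bounded priority queue of size N per map task and
   * merges the per-task candidates in a single reducer.
   *
   *   PTable<String, Long> wordCounts = ...;
   *   PTable<String, Long> mostCommon = Aggregate.top(wordCounts, 20, true);   // 20 largest values
   *   PTable<String, Long> leastCommon = Aggregate.top(wordCounts, 20, false); // 20 smallest values
   */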
/**
   * Returns the largest element (by natural ordering) from the input collection.
*/
public static <S> PObject<S> max(PCollection<S> collect) {
Class<S> clazz = collect.getPType().getTypeClass();
if (!clazz.isPrimitive() && !Comparable.class.isAssignableFrom(clazz)) {
throw new IllegalArgumentException("Can only get max for Comparable elements, not for: "
+ collect.getPType().getTypeClass());
}
PTypeFamily tf = collect.getTypeFamily();
PCollection<S> maxCollect = PTables.values(collect
.parallelDo("max", new DoFn<S, Pair<Boolean, S>>() {
private transient S max = null;
public void process(S input, Emitter<Pair<Boolean, S>> emitter) {
if (max == null || ((Comparable<S>) max).compareTo(input) < 0) {
max = input;
}
}
public void cleanup(Emitter<Pair<Boolean, S>> emitter) {
if (max != null) {
emitter.emit(Pair.of(true, max));
}
}
}, tf.tableOf(tf.booleans(), collect.getPType())).groupByKey(1)
.combineValues(new CombineFn<Boolean, S>() {
public void process(Pair<Boolean, Iterable<S>> input, Emitter<Pair<Boolean, S>> emitter) {
S max = null;
for (S v : input.second()) {
if (max == null || ((Comparable<S>) max).compareTo(v) < 0) {
max = v;
}
}
emitter.emit(Pair.of(input.first(), max));
}
}));
return new FirstElementPObject<S>(maxCollect);
}
/**
   * Returns the smallest element (by natural ordering) from the input collection.
*/
public static <S> PObject<S> min(PCollection<S> collect) {
Class<S> clazz = collect.getPType().getTypeClass();
if (!clazz.isPrimitive() && !Comparable.class.isAssignableFrom(clazz)) {
throw new IllegalArgumentException("Can only get min for Comparable elements, not for: "
+ collect.getPType().getTypeClass());
}
PTypeFamily tf = collect.getTypeFamily();
PCollection<S> minCollect = PTables.values(collect
.parallelDo("min", new DoFn<S, Pair<Boolean, S>>() {
private transient S min = null;
public void process(S input, Emitter<Pair<Boolean, S>> emitter) {
if (min == null || ((Comparable<S>) min).compareTo(input) > 0) {
min = input;
}
}
public void cleanup(Emitter<Pair<Boolean, S>> emitter) {
if (min != null) {
emitter.emit(Pair.of(false, min));
}
}
}, tf.tableOf(tf.booleans(), collect.getPType())).groupByKey(1)
.combineValues(new CombineFn<Boolean, S>() {
public void process(Pair<Boolean, Iterable<S>> input, Emitter<Pair<Boolean, S>> emitter) {
S min = null;
for (S v : input.second()) {
if (min == null || ((Comparable<S>) min).compareTo(v) > 0) {
min = v;
}
}
emitter.emit(Pair.of(input.first(), min));
}
}));
return new FirstElementPObject<S>(minCollect);
}
public static <K, V> PTable<K, Collection<V>> collectValues(PTable<K, V> collect) {
PTypeFamily tf = collect.getTypeFamily();
final PType<V> valueType = collect.getValueType();
return collect.groupByKey().mapValues("collect",
new MapFn<Iterable<V>, Collection<V>>() {
@Override
public void initialize() {
valueType.initialize(getConfiguration());
}
public Collection<V> map(Iterable<V> values) {
List<V> collected = Lists.newArrayList();
for (V value : values) {
collected.add(valueType.getDetachedValue(value));
}
return collected;
}
}, tf.collections(collect.getValueType()));
}
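  /*
   * A minimal usage sketch (not part of the original source; the "userEvents" table is a
   * hypothetical placeholder). collectValues() groups by key and materializes each key's values
   * into an in-memory Collection, detaching each value so reused objects are safe to retain.
   *
   *   PTable<String, String> userEvents = ...;
   *   PTable<String, Collection<String>> eventsPerUser = Aggregate.collectValues(userEvents);
   */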
public static <S> PCollection<S> aggregate(PCollection<S> collect, Aggregator<S> aggregator) {
PTypeFamily tf = collect.getTypeFamily();
return collect.parallelDo("Aggregate.aggregator", new MapFn<S, Pair<Boolean, S>>() {
public Pair<Boolean, S> map(S input) {
return Pair.of(false, input);
}
}, tf.tableOf(tf.booleans(), collect.getPType()))
.groupByKey(1)
.combineValues(aggregator)
.values();
}
}
| 2,877 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/SampleUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.SortedMap;
import org.apache.crunch.CombineFn;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.FilterFn;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PType;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
final class SampleUtils {
static class SampleFn<S> extends FilterFn<S> {
private final Long seed;
private final double acceptanceProbability;
private transient Random r;
SampleFn(double acceptanceProbability, Long seed) {
Preconditions.checkArgument(0.0 < acceptanceProbability && acceptanceProbability < 1.0);
if (seed == null) {
this.seed = System.currentTimeMillis();
} else {
this.seed = seed;
}
this.acceptanceProbability = acceptanceProbability;
}
@Override
public void initialize() {
if (r == null) {
r = new Random(seed);
}
}
@Override
public boolean accept(S input) {
return r.nextDouble() < acceptanceProbability;
}
}
static class ReservoirSampleFn<T, N extends Number>
extends DoFn<Pair<Integer, Pair<T, N>>, Pair<Integer, Pair<Double, T>>> {
private final int[] sampleSizes;
private final Long seed;
private final PType<T> valueType;
private transient List<SortedMap<Double, T>> reservoirs;
private transient Random random;
ReservoirSampleFn(int[] sampleSizes, Long seed, PType<T> valueType) {
this.sampleSizes = sampleSizes;
this.seed = seed;
this.valueType = valueType;
}
@Override
public void initialize() {
this.reservoirs = Lists.newArrayList();
this.valueType.initialize(getConfiguration());
for (int sampleSize : sampleSizes) {
reservoirs.add(Maps.<Double, T>newTreeMap());
}
if (random == null) {
if (seed == null) {
this.random = new Random();
} else {
this.random = new Random(seed);
}
}
}
@Override
public void process(Pair<Integer, Pair<T, N>> input,
Emitter<Pair<Integer, Pair<Double, T>>> emitter) {
int id = input.first();
Pair<T, N> p = input.second();
double weight = p.second().doubleValue();
if (weight > 0.0) {
double score = Math.log(random.nextDouble()) / weight;
SortedMap<Double, T> reservoir = reservoirs.get(id);
if (reservoir.size() < sampleSizes[id]) {
reservoir.put(score, valueType.getDetachedValue(p.first()));
} else if (score > reservoir.firstKey()) {
reservoir.remove(reservoir.firstKey());
reservoir.put(score, valueType.getDetachedValue(p.first()));
}
}
}
@Override
public void cleanup(Emitter<Pair<Integer, Pair<Double, T>>> emitter) {
for (int id = 0; id < reservoirs.size(); id++) {
Map<Double, T> reservoir = reservoirs.get(id);
for (Map.Entry<Double, T> e : reservoir.entrySet()) {
emitter.emit(Pair.of(id, Pair.of(e.getKey(), e.getValue())));
}
}
}
}
static class WRSCombineFn<T> extends CombineFn<Integer, Pair<Double, T>> {
private final int[] sampleSizes;
private final PType<T> valueType;
private List<SortedMap<Double, T>> reservoirs;
WRSCombineFn(int[] sampleSizes, PType<T> valueType) {
this.sampleSizes = sampleSizes;
this.valueType = valueType;
}
@Override
public void initialize() {
this.reservoirs = Lists.newArrayList();
for (int sampleSize : sampleSizes) {
reservoirs.add(Maps.<Double, T>newTreeMap());
}
this.valueType.initialize(getConfiguration());
}
@Override
public void process(Pair<Integer, Iterable<Pair<Double, T>>> input,
Emitter<Pair<Integer, Pair<Double, T>>> emitter) {
SortedMap<Double, T> reservoir = reservoirs.get(input.first());
for (Pair<Double, T> p : input.second()) {
if (reservoir.size() < sampleSizes[input.first()]) {
reservoir.put(p.first(), valueType.getDetachedValue(p.second()));
} else if (p.first() > reservoir.firstKey()) {
reservoir.remove(reservoir.firstKey());
reservoir.put(p.first(), valueType.getDetachedValue(p.second()));
}
}
}
@Override
public void cleanup(Emitter<Pair<Integer, Pair<Double, T>>> emitter) {
for (int i = 0; i < reservoirs.size(); i++) {
Map<Double, T> reservoir = reservoirs.get(i);
for (Map.Entry<Double, T> e : reservoir.entrySet()) {
emitter.emit(Pair.of(i, Pair.of(e.getKey(), e.getValue())));
}
}
}
}
}
| 2,878 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Channels.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.PCollection;
import org.apache.crunch.Pair;
import org.apache.crunch.lib.Channels.FirstEmittingDoFn.SecondEmittingDoFn;
import org.apache.crunch.types.PType;
/**
* Utilities for splitting {@link Pair} instances emitted by {@link DoFn} into
* separate {@link PCollection} instances. A typical motivation for this might
* be to separate standard output from error output of a DoFn.
*
* @author Brandon Inman
*
*/
public class Channels {
/**
   * Splits a {@link PCollection} of any {@link Pair} of objects into a {@link Pair} of
   * {@link PCollection}s, to allow for the output of a DoFn to be handled using
   * separate channels.
   *
   * @param pCollection The {@code PCollection} to split
   * @return {@link Pair} of {@link PCollection}
*/
public static <T, U> Pair<PCollection<T>, PCollection<U>> split(PCollection<Pair<T, U>> pCollection) {
PType<Pair<T, U>> pt = pCollection.getPType();
return split(pCollection, pt.getSubTypes().get(0), pt.getSubTypes().get(1));
}
/**
   * Splits a {@link PCollection} of any {@link Pair} of objects into a {@link Pair} of
   * {@link PCollection}s, to allow for the output of a DoFn to be handled using
* separate channels.
*
* @param pCollection The {@code PCollection} to split
* @param firstPType The {@code PType} for the first collection
* @param secondPType The {@code PType} for the second collection
* @return {@link Pair} of {@link PCollection}
*/
public static <T, U> Pair<PCollection<T>, PCollection<U>> split(PCollection<Pair<T, U>> pCollection,
PType<T> firstPType, PType<U> secondPType) {
PCollection<T> first = pCollection.parallelDo("Extract first value", new FirstEmittingDoFn<T, U>(), firstPType);
PCollection<U> second = pCollection.parallelDo("Extract second value", new SecondEmittingDoFn<T, U>(), secondPType);
return Pair.of(first, second);
}
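  /*
   * A minimal usage sketch (not part of the original source; the "parsed" collection and the
   * Record type are hypothetical placeholders). A DoFn that emits Pair.of(record, null) on
   * success and Pair.of(null, errorMessage) on failure can have its two channels separated:
   *
   *   PCollection<Pair<Record, String>> parsed = ...;
   *   Pair<PCollection<Record>, PCollection<String>> channels = Channels.split(parsed);
   *   PCollection<Record> good = channels.first();
   *   PCollection<String> errors = channels.second();
   */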
/**
* DoFn that emits non-null first values in a {@link Pair}.
*
* @author Brandon Inman
* @param <T>
* @param <U>
*/
static class FirstEmittingDoFn<T, U> extends DoFn<Pair<T, U>, T> {
@Override
public void process(Pair<T, U> input, Emitter<T> emitter) {
T first = input.first();
if (first != null) {
emitter.emit(first);
}
}
/**
* DoFn that emits non-null second values in a {@link Pair}.
*
* @author Brandon Inman
* @param <T>
* @param <U>
*/
static class SecondEmittingDoFn<T, U> extends DoFn<Pair<T, U>, U> {
@Override
public void process(Pair<T, U> input, Emitter<U> emitter) {
U second = input.second();
if (second != null) {
emitter.emit(second);
}
}
}
}
}
| 2,879 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/DoFns.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import java.io.Serializable;
public class DoFns {
/**
* "Reduce" DoFn wrapper which detaches the values in the iterable, preventing the unexpected behaviour related to
* object reuse often observed when using Avro. Wrap your DoFn in a detach(...) and pass in a PType for the Iterable
* value, and then you'll be handed an Iterable of real distinct objects, instead of the same object being handed to
* you multiple times with different data.
*
* You should use this when you have a parallelDo after a groupBy, and you'd like to capture the objects arriving in
* the Iterable part of the incoming Pair and pass it through to the output (for example if you want to create an
* array of outputs from the values to be output as one record).
*
   * This will incur a performance hit, as it means that every object read from the Iterable will allocate a new Java
* object for the record and objects for all its non-primitive fields too. If you are rolling up records into a
* collection then this will be necessary anyway, but if you are only outputting derived data this may impact the
* speed and memory usage of your job unnecessarily.
*
* @param reduceFn Underlying DoFn to wrap
* @param valueType PType of the object contained within the Iterable
* @param <K> Reduce key
* @param <V> Iterable value
* @param <T> Output type of DoFn
* @return DoFn which will detach values for you
*/
public static <K, V, T> DoFn<Pair<K, Iterable<V>>, T> detach(final DoFn<Pair<K, Iterable<V>>, T> reduceFn, final PType<V> valueType) {
return new DetachingDoFn<K, V, T>(reduceFn, valueType);
}
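  /*
   * A minimal usage sketch (not part of the original source; the "grouped" table, the MyRecord
   * type and the collecting DoFn are hypothetical placeholders). Wrapping the reduce-side DoFn
   * with detach() means every value handed to it is a fresh, detached copy rather than a reused
   * object, so it can safely be buffered across iterations.
   *
   *   PGroupedTable<String, MyRecord> grouped = ...;
   *   DoFn<Pair<String, Iterable<MyRecord>>, String> collectFn = ...; // buffers values per key
   *   PCollection<String> out = grouped.parallelDo(
   *       DoFns.detach(collectFn, Avros.records(MyRecord.class)), Avros.strings());
   */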
private static class DetachFunction<T> implements Function<T, T>, Serializable {
private final PType<T> pType;
public DetachFunction(PType<T> initializedPType) {
this.pType = initializedPType;
}
@Override
public T apply(T t) {
return pType.getDetachedValue(t);
}
}
private static class DetachingDoFn<K, V, T> extends DoFn<Pair<K, Iterable<V>>, T> {
private final DoFn<Pair<K, Iterable<V>>, T> reduceFn;
private final PType<V> valueType;
public DetachingDoFn(DoFn<Pair<K, Iterable<V>>, T> reduceFn, PType<V> valueType) {
this.reduceFn = reduceFn;
this.valueType = valueType;
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
super.setContext(context);
reduceFn.setContext(context);
}
@Override
public void configure(Configuration configuration) {
super.configure(configuration);
reduceFn.configure(configuration);
}
@Override
public void initialize() {
reduceFn.initialize();
valueType.initialize(getConfiguration() == null ? new Configuration() : getConfiguration());
}
@Override
public void process(Pair<K, Iterable<V>> input, Emitter<T> emitter) {
reduceFn.process(Pair.of(input.first(), detachIterable(input.second(), valueType)), emitter);
}
public Iterable<V> detachIterable(Iterable<V> iterable, final PType<V> pType) {
return Iterables.transform(iterable, new DetachFunction<V>(pType));
}
}
}
| 2,880 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/PTables.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import java.util.List;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.fn.PairMapFn;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import com.google.common.collect.Lists;
/**
* Methods for performing common operations on PTables.
*
*/
public class PTables {
/**
* Convert the given {@code PCollection<Pair<K, V>>} to a {@code PTable<K, V>}.
* @param pcollect The {@code PCollection} to convert
* @return A {@code PTable} that contains the same data as the input {@code PCollection}
*/
public static <K, V> PTable<K, V> asPTable(PCollection<Pair<K, V>> pcollect) {
PType<Pair<K, V>> pt = pcollect.getPType();
PTypeFamily ptf = pt.getFamily();
PTableType<K, V> ptt = ptf.tableOf(pt.getSubTypes().get(0), pt.getSubTypes().get(1));
DoFn<Pair<K, V>, Pair<K, V>> id = IdentityFn.getInstance();
return pcollect.parallelDo("asPTable", id, ptt);
}
/**
* Maps a {@code PTable<K1, V>} to a {@code PTable<K2, V>} using the given {@code MapFn<K1, K2>} on
* the keys of the {@code PTable}.
*
* @param ptable The {@code PTable} to be mapped
* @param mapFn The mapping function
* @param ptype The PType for the returned keys
* @return A new {@code PTable<K2, V>} instance
*/
public static <K1, K2, V> PTable<K2, V> mapKeys(PTable<K1, V> ptable, MapFn<K1, K2> mapFn,
PType<K2> ptype) {
return mapKeys("PTables.mapKeys", ptable, mapFn, ptype);
}
/**
* Maps a {@code PTable<K1, V>} to a {@code PTable<K2, V>} using the given {@code MapFn<K1, K2>} on
* the keys of the {@code PTable}.
*
* @param name The name of the transform
* @param ptable The {@code PTable} to be mapped
* @param mapFn The mapping function
* @param ptype The PType for the returned keys
* @return A new {@code PTable<K2, V>} instance
*/
public static <K1, K2, V> PTable<K2, V> mapKeys(String name, PTable<K1, V> ptable, MapFn<K1, K2> mapFn,
PType<K2> ptype) {
PTypeFamily ptf = ptable.getTypeFamily();
return ptable.parallelDo(name,
new PairMapFn<K1, V, K2, V>(mapFn, IdentityFn.<V>getInstance()),
ptf.tableOf(ptype, ptable.getValueType()));
}
/**
* Maps a {@code PTable<K, U>} to a {@code PTable<K, V>} using the given {@code MapFn<U, V>} on
* the values of the {@code PTable}.
*
* @param ptable The {@code PTable} to be mapped
* @param mapFn The mapping function
* @param ptype The PType for the returned values
* @return A new {@code PTable<K, V>} instance
*/
public static <K, U, V> PTable<K, V> mapValues(PTable<K, U> ptable, MapFn<U, V> mapFn,
PType<V> ptype) {
return mapValues("PTables.mapValues", ptable, mapFn, ptype);
}
/**
* Maps a {@code PTable<K, U>} to a {@code PTable<K, V>} using the given {@code MapFn<U, V>} on
* the values of the {@code PTable}.
*
* @param name The name of the transform
* @param ptable The {@code PTable} to be mapped
* @param mapFn The mapping function
* @param ptype The PType for the returned values
* @return A new {@code PTable<K, V>} instance
*/
public static <K, U, V> PTable<K, V> mapValues(String name, PTable<K, U> ptable, MapFn<U, V> mapFn,
PType<V> ptype) {
PTypeFamily ptf = ptable.getTypeFamily();
return ptable.parallelDo(name,
new PairMapFn<K, U, K, V>(IdentityFn.<K>getInstance(), mapFn),
ptf.tableOf(ptable.getKeyType(), ptype));
}
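  /*
   * A minimal usage sketch (not part of the original source; the "scores" table is a hypothetical
   * placeholder, and the Writable type family is assumed). mapValues() transforms only the value
   * side of the table while an IdentityFn passes the keys through untouched.
   *
   *   PTable<String, Double> scores = ...;
   *   PTable<String, Long> rounded = PTables.mapValues(scores,
   *       new MapFn<Double, Long>() {
   *         @Override
   *         public Long map(Double v) { return Math.round(v); }
   *       }, Writables.longs());
   */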
/**
* An analogue of the {@code mapValues} function for {@code PGroupedTable<K, U>} collections.
*
* @param ptable The {@code PGroupedTable} to be mapped
* @param mapFn The mapping function
* @param ptype The PType for the returned values
* @return A new {@code PTable<K, V>} instance
*/
public static <K, U, V> PTable<K, V> mapValues(PGroupedTable<K, U> ptable,
MapFn<Iterable<U>, V> mapFn,
PType<V> ptype) {
return mapValues("PTables.mapValues", ptable, mapFn, ptype);
}
/**
* An analogue of the {@code mapValues} function for {@code PGroupedTable<K, U>} collections.
*
* @param name The name of the operation
* @param ptable The {@code PGroupedTable} to be mapped
* @param mapFn The mapping function
* @param ptype The PType for the returned values
* @return A new {@code PTable<K, V>} instance
*/
public static <K, U, V> PTable<K, V> mapValues(String name,
PGroupedTable<K, U> ptable,
MapFn<Iterable<U>, V> mapFn,
PType<V> ptype) {
PTypeFamily ptf = ptable.getTypeFamily();
return ptable.parallelDo(name,
new PairMapFn<K, Iterable<U>, K, V>(IdentityFn.<K>getInstance(), mapFn),
ptf.tableOf((PType<K>) ptable.getPType().getSubTypes().get(0), ptype));
}
/**
* Extract the keys from the given {@code PTable<K, V>} as a {@code PCollection<K>}.
* @param ptable The {@code PTable}
* @return A {@code PCollection<K>}
*/
public static <K, V> PCollection<K> keys(PTable<K, V> ptable) {
return ptable.parallelDo("PTables.keys", new DoFn<Pair<K, V>, K>() {
@Override
public void process(Pair<K, V> input, Emitter<K> emitter) {
emitter.emit(input.first());
}
}, ptable.getKeyType());
}
/**
* Extract the values from the given {@code PTable<K, V>} as a {@code PCollection<V>}.
* @param ptable The {@code PTable}
* @return A {@code PCollection<V>}
*/
public static <K, V> PCollection<V> values(PTable<K, V> ptable) {
return ptable.parallelDo("PTables.values", new DoFn<Pair<K, V>, V>() {
@Override
public void process(Pair<K, V> input, Emitter<V> emitter) {
emitter.emit(input.second());
}
}, ptable.getValueType());
}
/**
* Create a detached value for a table {@link Pair}.
*
* @param tableType The table type
* @param value The value from which a detached value is to be created
* @return The detached value
* @see PType#getDetachedValue(Object)
*/
public static <K, V> Pair<K, V> getDetachedValue(PTableType<K, V> tableType, Pair<K, V> value) {
return Pair.of(tableType.getKeyType().getDetachedValue(value.first()), tableType.getValueType()
.getDetachedValue(value.second()));
}
/**
   * Create a detached value for a {@link PGroupedTable} value.
   *
* @param groupedTableType The grouped table type
* @param value The value from which a detached value is to be created
* @return The detached value
* @see PType#getDetachedValue(Object)
*/
public static <K, V> Pair<K, Iterable<V>> getGroupedDetachedValue(
PGroupedTableType<K, V> groupedTableType, Pair<K, Iterable<V>> value) {
PTableType<K, V> tableType = groupedTableType.getTableType();
List<V> detachedIterable = Lists.newArrayList();
PType<V> valueType = tableType.getValueType();
for (V v : value.second()) {
detachedIterable.add(valueType.getDetachedValue(v));
}
return Pair.of(tableType.getKeyType().getDetachedValue(value.first()),
(Iterable<V>) detachedIterable);
}
/**
   * Swap the key and value part of a table. The original PTypes are used in the opposite order.
   * @param table PTable to process
   * @param <K> Key type (will become value type)
   * @param <V> Value type (will become key type)
   * @return PTable<V, K> containing the same data as the original
*/
public static <K, V> PTable<V, K> swapKeyValue(PTable<K, V> table) {
PTypeFamily ptf = table.getTypeFamily();
return table.parallelDo(new MapFn<Pair<K, V>, Pair<V, K>>() {
@Override
public Pair<V, K> map(Pair<K, V> input) {
return Pair.of(input.second(), input.first());
}
}, ptf.tableOf(table.getValueType(), table.getKeyType()));
}
}
| 2,881 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/TopList.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import com.google.common.collect.Lists;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import java.util.Collection;
import java.util.Iterator;
/**
* Tools for creating top lists of items in PTables and PCollections
*/
public class TopList {
/**
* Create a top-list of elements in the provided PTable, categorised by the key of the input table and using the count
* of the value part of the input table. Example: if input = Table(Country, Track), then this will give you the most
* common n tracks for each country.
* @param input table of X Y pairs
* @param n How many Y values to include in the toplist per X (this will be in memory, so don't make this ridiculous)
* @param <X> group type
* @param <Y> value type
* @return table of each unique X value mapped to a collection of (count, Y) pairs
*/
public static <X, Y> PTable<X, Collection<Pair<Long, Y>>> topNYbyX(PTable<X, Y> input, final int n) {
final PType<X> xType = input.getKeyType();
final PType<Y> yType = input.getValueType();
PTypeFamily f = xType.getFamily();
PTable<X, Pair<Long, Y>> counted = input.count().parallelDo(new MapFn<Pair<Pair<X, Y>, Long>, Pair<X, Pair<Long, Y>>>() {
@Override
public Pair<X, Pair<Long, Y>> map(Pair<Pair<X, Y>, Long> input) {
return Pair.of(input.first().first(), Pair.of(-input.second(), input.first().second()));
}
}, f.tableOf(xType, f.pairs(f.longs(), yType)));
return SecondarySort.sortAndApply(counted, new MapFn<Pair<X, Iterable<Pair<Long, Y>>>, Pair<X, Collection<Pair<Long, Y>>>>() {
private PTableType<Long, Y> tableType;
@Override
public void initialize() {
PTypeFamily ptf = yType.getFamily();
tableType = ptf.tableOf(ptf.longs(), yType);
tableType.initialize(getConfiguration());
}
@Override
public Pair<X, Collection<Pair<Long, Y>>> map(Pair<X, Iterable<Pair<Long, Y>>> input) {
Collection<Pair<Long, Y>> values = Lists.newArrayList();
Iterator<Pair<Long, Y>> iter = input.second().iterator();
for (int i = 0; i < n; i++) {
if (!iter.hasNext()) {
break;
}
Pair<Long, Y> pair = PTables.getDetachedValue(tableType, iter.next());
values.add(Pair.of(-pair.first(), pair.second()));
}
return Pair.of(input.first(), values);
}
}, f.tableOf(xType, f.collections(f.pairs(f.longs(), yType))));
}
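  /*
   * A minimal usage sketch (not part of the original source; the "plays" table is a hypothetical
   * placeholder mapping country to track). For each country this returns the five most frequently
   * occurring tracks together with their counts.
   *
   *   PTable<String, String> plays = ...; // country -> track
   *   PTable<String, Collection<Pair<Long, String>>> topTracks = TopList.topNYbyX(plays, 5);
   */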
/**
* Create a list of unique items in the input collection with their count, sorted descending by their frequency.
* @param input input collection
* @param <X> record type
* @return global toplist
*/
public static <X> PTable<X, Long> globalToplist(PCollection<X> input) {
return negateCounts(negateCounts(input.count()).groupByKey(1).ungroup());
}
/**
   * When creating toplists, it is often required to sort by count descending. As some sort operations don't support
   * a sort order (such as SecondarySort), this method negates counts so that a naturally ordered (ascending) sort
   * produces a descending order by count.
* @param table PTable to process
* @param <K> key type
* @return PTable of the same format with the value negated
*/
public static <K> PTable<K, Long> negateCounts(PTable<K, Long> table) {
return table.parallelDo(new MapFn<Pair<K, Long>, Pair<K, Long>>() {
@Override
public Pair<K, Long> map(Pair<K, Long> input) {
return Pair.of(input.first(), -input.second());
}
}, table.getPTableType());
}
}
| 2,882 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Quantiles.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import com.google.common.base.Function;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import com.google.common.collect.PeekingIterator;
import org.apache.crunch.MapFn;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
public class Quantiles {
/**
* Calculate a set of quantiles for each key in a numerically-valued table.
*
* Quantiles are calculated on a per-key basis by counting, joining and sorting. This is highly scalable, but takes
* 2 more map-reduce cycles than if you can guarantee that the value set will fit into memory. Use inMemory
* if you have less than the order of 10M values per key.
*
* The quantile definition that we use here is the "nearest rank" defined here:
* http://en.wikipedia.org/wiki/Percentile#Definition
*
* @param table numerically-valued PTable
* @param p1 First quantile (in the range 0.0 - 1.0)
* @param pn More quantiles (in the range 0.0 - 1.0)
* @param <K> Key type of the table
   * @param <V> Value type of the table (must extend java.lang.Number)
   * @return PTable of each key with a collection of pairs of the quantile provided and its result.
*/
public static <K, V extends Number> PTable<K, Result<V>> distributed(PTable<K, V> table,
double p1, double... pn) {
final List<Double> quantileList = createListFromVarargs(p1, pn);
PTypeFamily ptf = table.getTypeFamily();
PTable<K, Long> totalCounts = table.keys().count();
PTable<K, Pair<Long, V>> countValuePairs = totalCounts.join(table);
PTable<K, Pair<V, Long>> valueCountPairs =
countValuePairs.mapValues(new SwapPairComponents<Long, V>(), ptf.pairs(table.getValueType(), ptf.longs()));
return SecondarySort.sortAndApply(
valueCountPairs,
new DistributedQuantiles<K, V>(quantileList),
ptf.tableOf(table.getKeyType(), Result.pType(table.getValueType())));
}
/**
* Calculate a set of quantiles for each key in a numerically-valued table.
*
   * Quantiles are calculated on a per-key basis by grouping, reading the data into memory, then sorting and
   * calculating. This is much faster than the distributed option, but if you get into the order of 10M+ values per key, then
* performance might start to degrade or even cause OOMs.
*
* The quantile definition that we use here is the "nearest rank" defined here:
* http://en.wikipedia.org/wiki/Percentile#Definition
*
* @param table numerically-valued PTable
* @param p1 First quantile (in the range 0.0 - 1.0)
* @param pn More quantiles (in the range 0.0 - 1.0)
* @param <K> Key type of the table
   * @param <V> Value type of the table (must implement java.lang.Comparable)
   * @return PTable of each key with a collection of pairs of the quantile provided and its result.
*/
public static <K, V extends Comparable> PTable<K, Result<V>> inMemory(PTable<K, V> table,
double p1, double... pn) {
final List<Double> quantileList = createListFromVarargs(p1, pn);
PTypeFamily ptf = table.getTypeFamily();
return table
.groupByKey()
.parallelDo(new InMemoryQuantiles<K, V>(quantileList),
ptf.tableOf(table.getKeyType(), Result.pType(table.getValueType())));
}
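  /*
   * A minimal usage sketch (not part of the original source; the "latenciesByEndpoint" table is a
   * hypothetical placeholder). Both variants compute nearest-rank quantiles per key; inMemory
   * sorts each key's values in a single reducer call, while distributed sorts via extra MapReduce
   * passes.
   *
   *   PTable<String, Long> latenciesByEndpoint = ...;
   *   PTable<String, Quantiles.Result<Long>> medianAndP99 =
   *       Quantiles.inMemory(latenciesByEndpoint, 0.5, 0.99);
   *   // each Result.quantiles maps 0.5 and 0.99 to that key's nearest-rank values
   */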
private static List<Double> createListFromVarargs(double p1, double[] pn) {
final List<Double> quantileList = Lists.newArrayList(p1);
for (double p: pn) {
quantileList.add(p);
}
return quantileList;
}
private static class SwapPairComponents<T1, T2> extends MapFn<Pair<T1, T2>, Pair<T2, T1>> {
@Override
public Pair<T2, T1> map(Pair<T1, T2> input) {
return Pair.of(input.second(), input.first());
}
}
private static <V> Collection<Pair<Double, V>> findQuantiles(Iterator<V> sortedCollectionIterator,
long collectionSize, List<Double> quantiles) {
Collection<Pair<Double, V>> output = Lists.newArrayList();
Multimap<Long, Double> quantileIndices = ArrayListMultimap.create();
for (double quantile: quantiles) {
      // use a long to avoid overflow when the collection size exceeds Integer.MAX_VALUE
      long idx = Math.max((long) Math.ceil(quantile * collectionSize) - 1, 0L);
quantileIndices.put(idx, quantile);
}
long index = 0;
while (sortedCollectionIterator.hasNext()) {
V value = sortedCollectionIterator.next();
if (quantileIndices.containsKey(index)) {
for (double quantile: quantileIndices.get(index)) {
output.add(Pair.of(quantile, value));
}
}
index++;
}
return output;
}
private static class InMemoryQuantiles<K, V extends Comparable> extends
MapFn<Pair<K, Iterable<V>>, Pair<K, Result<V>>> {
private final List<Double> quantileList;
public InMemoryQuantiles(List<Double> quantiles) {
this.quantileList = quantiles;
}
@Override
public Pair<K, Result<V>> map(Pair<K, Iterable<V>> input) {
List<V> values = Lists.newArrayList(input.second().iterator());
Collections.sort(values);
return Pair.of(input.first(), new Result<V>(values.size(), findQuantiles(values.iterator(), values.size(), quantileList)));
}
}
private static class DistributedQuantiles<K, V> extends
MapFn<Pair<K, Iterable<Pair<V, Long>>>, Pair<K, Result<V>>> {
private final List<Double> quantileList;
public DistributedQuantiles(List<Double> quantileList) {
this.quantileList = quantileList;
}
@Override
public Pair<K, Result<V>> map(Pair<K, Iterable<Pair<V, Long>>> input) {
PeekingIterator<Pair<V, Long>> iterator = Iterators.peekingIterator(input.second().iterator());
long count = iterator.peek().second();
Iterator<V> valueIterator = Iterators.transform(iterator, new Function<Pair<V, Long>, V>() {
@Override
public V apply(Pair<V, Long> input) {
return input.first();
}
});
Collection<Pair<Double, V>> output = findQuantiles(valueIterator, count, quantileList);
return Pair.of(input.first(), new Result<V>(count, output));
}
}
/**
* Output type for storing the results of a Quantiles computation
* @param <V> Quantile value type
*/
public static class Result<V> {
public final long count;
public final Map<Double, V> quantiles = Maps.newTreeMap();
public Result(long count, Iterable<Pair<Double, V>> quantiles) {
this.count = count;
for (Pair<Double,V> quantile: quantiles) {
this.quantiles.put(quantile.first(), quantile.second());
}
}
/**
* Create a PType for the result type, to be stored as a derived type from Crunch primitives
* @param valuePType PType for the V type, whose family will also be used to create the derived type
* @param <V> Value type
* @return PType for serializing Result<V>
*/
public static <V> PType<Result<V>> pType(PType<V> valuePType) {
PTypeFamily ptf = valuePType.getFamily();
@SuppressWarnings("unchecked")
Class<Result<V>> prClass = (Class<Result<V>>)(Class)Result.class;
return ptf.derivedImmutable(prClass, new MapFn<Pair<Collection<Pair<Double, V>>, Long>, Result<V>>() {
@Override
public Result<V> map(Pair<Collection<Pair<Double, V>>, Long> input) {
return new Result<V>(input.second(), input.first());
}
}, new MapFn<Result<V>, Pair<Collection<Pair<Double, V>>, Long>>() {
@Override
public Pair<Collection<Pair<Double, V>>, Long> map(Result<V> input) {
return Pair.of(asCollection(input.quantiles), input.count);
}
}, ptf.pairs(ptf.collections(ptf.pairs(ptf.doubles(), valuePType)), ptf.longs()));
}
private static <K, V> Collection<Pair<K, V>> asCollection(Map<K, V> map) {
Collection<Pair<K, V>> collection = Lists.newArrayList();
for (Map.Entry<K, V> entry: map.entrySet()) {
collection.add(Pair.of(entry.getKey(), entry.getValue()));
}
return collection;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Result result = (Result) o;
if (count != result.count) return false;
if (!quantiles.equals(result.quantiles)) return false;
return true;
}
@Override
public int hashCode() {
int result = (int) (count ^ (count >>> 32));
result = 31 * result + quantiles.hashCode();
return result;
}
}
}
| 2,883 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Shard.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
/**
* Utilities for controlling how the data in a {@code PCollection} is balanced across reducers
* and output files.
*/
public class Shard {
/**
* Creates a {@code PCollection<T>} that has the same contents as its input argument but will
* be written to a fixed number of output files. This is useful for map-only jobs that process
   * lots of input files but only write out a small amount of output per task.
*
* @param pc The {@code PCollection<T>} to rebalance
* @param numPartitions The number of output partitions to create
* @return A rebalanced {@code PCollection<T>} with the same contents as the input
*/
public static <T> PCollection<T> shard(PCollection<T> pc, int numPartitions) {
return pc.by(new ShardFn<T>(), pc.getTypeFamily().ints())
.groupByKey(numPartitions)
.ungroup()
.values();
}
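  /*
   * A minimal usage sketch (not part of the original source; the "smallOutput" collection is a
   * hypothetical placeholder). shard() forces the data through a group-by so it is rewritten by a
   * fixed number of reduce tasks, e.g. to avoid producing thousands of tiny output files.
   *
   *   PCollection<String> smallOutput = ...;
   *   PCollection<String> rebalanced = Shard.shard(smallOutput, 10); // roughly 10 output files
   */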
private static class ShardFn<T> extends MapFn<T, Integer> {
private int count;
@Override
public void initialize() {
count = 0;
}
@Override
public Integer map(T input) {
return count++;
}
}
}
| 2,884 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Mapred.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.Set;
import javassist.util.proxy.MethodFilter;
import javassist.util.proxy.MethodHandler;
import javassist.util.proxy.ProxyFactory;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
/**
* Static functions for working with legacy Mappers and Reducers that live under the org.apache.hadoop.mapred.*
* package as part of Crunch pipelines.
*/
public class Mapred {
public static <K1, V1, K2 extends Writable, V2 extends Writable> PTable<K2, V2> map(
PTable<K1, V1> input,
Class<? extends Mapper<K1, V1, K2, V2>> mapperClass,
Class<K2> keyClass, Class<V2> valueClass) {
return input.parallelDo(new MapperFn<K1, V1, K2, V2>(mapperClass), tableOf(keyClass, valueClass));
}
public static <K1, V1, K2 extends Writable, V2 extends Writable> PTable<K2, V2> reduce(
PGroupedTable<K1, V1> input,
Class<? extends Reducer<K1, V1, K2, V2>> reducerClass,
Class<K2> keyClass, Class<V2> valueClass) {
return input.parallelDo(new ReducerFn<K1, V1, K2, V2>(reducerClass), tableOf(keyClass, valueClass));
}
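  /*
   * A minimal usage sketch (not part of the original source; LegacyMapper, LegacyReducer and the
   * "lines" table are hypothetical placeholders). Existing org.apache.hadoop.mapred Mapper and
   * Reducer implementations can be reused inside a Crunch pipeline via these wrappers.
   *
   *   PTable<LongWritable, Text> lines = ...;
   *   PTable<Text, LongWritable> mapped =
   *       Mapred.map(lines, LegacyMapper.class, Text.class, LongWritable.class);
   *   PTable<Text, LongWritable> reduced =
   *       Mapred.reduce(mapped.groupByKey(), LegacyReducer.class, Text.class, LongWritable.class);
   */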
private static <K extends Writable, V extends Writable> PTableType<K, V> tableOf(
Class<K> keyClass, Class<V> valueClass) {
return Writables.tableOf(Writables.writables(keyClass), Writables.writables(valueClass));
}
private static class MapperFn<K1, V1, K2 extends Writable, V2 extends Writable> extends
DoFn<Pair<K1, V1>, Pair<K2, V2>> implements Reporter {
private final Class<? extends Mapper<K1, V1, K2, V2>> mapperClass;
private transient Mapper<K1, V1, K2, V2> instance;
private transient OutputCollectorImpl<K2, V2> outputCollector;
public MapperFn(Class<? extends Mapper<K1, V1, K2, V2>> mapperClass) {
this.mapperClass = Preconditions.checkNotNull(mapperClass);
}
@Override
public void initialize() {
if (instance == null) {
this.instance = ReflectionUtils.newInstance(mapperClass, getConfiguration());
}
instance.configure(new JobConf(getConfiguration()));
outputCollector = new OutputCollectorImpl<K2, V2>();
}
@Override
public void process(Pair<K1, V1> input, Emitter<Pair<K2, V2>> emitter) {
outputCollector.set(emitter);
try {
instance.map(input.first(), input.second(), outputCollector, this);
} catch (IOException e) {
throw new CrunchRuntimeException(e);
}
}
@Override
public void cleanup(Emitter<Pair<K2, V2>> emitter) {
try {
instance.close();
} catch (IOException e) {
throw new CrunchRuntimeException("Error closing mapper = " + mapperClass, e);
}
}
@Override
public void progress() {
super.progress();
}
@Override
public void setStatus(String status) {
super.setStatus(status);
}
public Counters.Counter getCounter(Enum<?> counter) {
return proxyCounter(super.getCounter(counter));
}
public Counters.Counter getCounter(String group, String name) {
return proxyCounter(super.getCounter(group, name));
}
@Override
public InputSplit getInputSplit() throws UnsupportedOperationException {
return null;
}
@Override
public void incrCounter(Enum<?> counter, long by) {
super.increment(counter, by);
}
@Override
public void incrCounter(String group, String name, long by) {
super.increment(group, name, by);
}
public float getProgress() {
return 0.5f;
}
}
private static class ReducerFn<K1, V1, K2 extends Writable, V2 extends Writable> extends
DoFn<Pair<K1, Iterable<V1>>, Pair<K2, V2>> implements Reporter {
private final Class<? extends Reducer<K1, V1, K2, V2>> reducerClass;
private transient Reducer<K1, V1, K2, V2> instance;
private transient OutputCollectorImpl<K2, V2> outputCollector;
public ReducerFn(Class<? extends Reducer<K1, V1, K2, V2>> reducerClass) {
this.reducerClass = Preconditions.checkNotNull(reducerClass);
}
@Override
public void initialize() {
if (instance == null) {
this.instance = ReflectionUtils.newInstance(reducerClass, getConfiguration());
}
instance.configure(new JobConf(getConfiguration()));
outputCollector = new OutputCollectorImpl<K2, V2>();
}
@Override
public void process(Pair<K1, Iterable<V1>> input, Emitter<Pair<K2, V2>> emitter) {
outputCollector.set(emitter);
try {
instance.reduce(input.first(), input.second().iterator(), outputCollector, this);
} catch (IOException e) {
throw new CrunchRuntimeException(e);
}
}
@Override
public void cleanup(Emitter<Pair<K2, V2>> emitter) {
try {
instance.close();
} catch (IOException e) {
throw new CrunchRuntimeException("Error closing mapper = " + reducerClass, e);
}
}
@Override
public void progress() {
super.progress();
}
@Override
public void setStatus(String status) {
super.setStatus(status);
}
public Counters.Counter getCounter(Enum<?> counter) {
return proxyCounter(super.getCounter(counter));
}
public Counters.Counter getCounter(String group, String name) {
return proxyCounter(super.getCounter(group, name));
}
@Override
public InputSplit getInputSplit() throws UnsupportedOperationException {
return null;
}
@Override
public void incrCounter(Enum<?> counter, long by) {
super.increment(counter, by);
}
@Override
public void incrCounter(String group, String name, long by) {
super.increment(group, name, by);
}
public float getProgress() {
return 0.5f;
}
}
private static class OutputCollectorImpl<K, V> implements OutputCollector<K, V> {
private Emitter<Pair<K, V>> emitter;
public OutputCollectorImpl() { }
public void set(Emitter<Pair<K, V>> emitter) {
this.emitter = emitter;
}
@Override
public void collect(K k, V v) throws IOException {
emitter.emit(Pair.of(k, v));
}
}
private static Counters.Counter proxyCounter(Counter c) {
ProxyFactory proxyFactory = new ProxyFactory();
proxyFactory.setSuperclass(Counters.Counter.class);
proxyFactory.setFilter(CCMethodHandler.FILTER);
CCMethodHandler handler = new CCMethodHandler(c);
try {
return (Counters.Counter) proxyFactory.create(new Class[0], new Object[0], handler);
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
private static class CCMethodHandler implements MethodHandler {
private static final Set<String> HANDLED = ImmutableSet.of("increment",
"getCounter", "getValue", "getName", "getDisplayName", "setValue",
"getUnderlyingCounter", "readFields", "write");
public static final MethodFilter FILTER = new MethodFilter() {
@Override
public boolean isHandled(Method m) {
return HANDLED.contains(m.getName());
}
};
private final Counter c;
public CCMethodHandler(Counter c) {
this.c = c;
}
@Override
public Object invoke(Object obj, Method m, Method m2, Object[] args) throws Throwable {
String name = m.getName();
if ("increment".equals(name)) {
c.increment((Long) args[0]);
return null;
} else if ("getCounter".equals(name) || "getValue".equals(name)) {
return c.getValue();
} else if ("setValue".equals(name)) {
c.setValue((Long) args[0]);
return null;
} else if ("getDisplayName".equals(name)) {
return c.getDisplayName();
} else if ("getName".equals(name)) {
return c.getName();
} else if ("getUnderlyingCounter".equals(name)) {
return c;
} else if ("readFields".equals(name)) {
c.readFields((DataInput) args[0]);
return null;
} else if ("write".equals(name)) {
c.write((DataOutput) args[0]);
return null;
}
throw new IllegalStateException("Unhandled Counters.Counter method = " + name);
}
}
}
| 2,885 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/SecondarySort.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import java.util.Collection;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.lib.join.JoinUtils;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.hadoop.conf.Configuration;
/**
* Utilities for performing a secondary sort on a {@code PTable<K, Pair<V1, V2>>} collection.
* <p>
* Secondary sorts are usually performed during sessionization: given a collection
* of events, we want to group them by a key (such as a user ID), then sort the grouped
 * records by an auxiliary key (such as a timestamp), and then perform some additional
* processing on the sorted records.
*/
public class SecondarySort {
/**
* Perform a secondary sort on the given {@code PTable} instance and then apply a
* {@code DoFn} to the resulting sorted data to yield an output {@code PCollection<T>}.
*/
public static <K, V1, V2, T> PCollection<T> sortAndApply(
PTable<K, Pair<V1, V2>> input,
DoFn<Pair<K, Iterable<Pair<V1, V2>>>, T> doFn,
PType<T> ptype) {
return sortAndApply(input, doFn, ptype, -1);
}
/**
* Perform a secondary sort on the given {@code PTable} instance and then apply a
* {@code DoFn} to the resulting sorted data to yield an output {@code PCollection<T>}, using
* the given number of reducers.
*/
public static <K, V1, V2, T> PCollection<T> sortAndApply(
PTable<K, Pair<V1, V2>> input,
DoFn<Pair<K, Iterable<Pair<V1, V2>>>, T> doFn,
PType<T> ptype,
int numReducers) {
return prepare(input, numReducers)
.parallelDo("SecondarySort.apply", new SSWrapFn<K, V1, V2, T>(doFn), ptype);
}
/**
* Perform a secondary sort on the given {@code PTable} instance and then apply a
* {@code DoFn} to the resulting sorted data to yield an output {@code PTable<U, V>}.
*/
public static <K, V1, V2, U, V> PTable<U, V> sortAndApply(
PTable<K, Pair<V1, V2>> input,
DoFn<Pair<K, Iterable<Pair<V1, V2>>>, Pair<U, V>> doFn,
PTableType<U, V> ptype) {
return sortAndApply(input, doFn, ptype, -1);
}
/**
* Perform a secondary sort on the given {@code PTable} instance and then apply a
* {@code DoFn} to the resulting sorted data to yield an output {@code PTable<U, V>}, using
* the given number of reducers.
*/
public static <K, V1, V2, U, V> PTable<U, V> sortAndApply(
PTable<K, Pair<V1, V2>> input,
DoFn<Pair<K, Iterable<Pair<V1, V2>>>, Pair<U, V>> doFn,
PTableType<U, V> ptype,
int numReducers) {
return prepare(input, numReducers)
.parallelDo("SecondarySort.apply", new SSWrapFn<K, V1, V2, Pair<U, V>>(doFn), ptype);
}
private static <K, V1, V2> PGroupedTable<Pair<K, V1>, Pair<V1, V2>> prepare(
PTable<K, Pair<V1, V2>> input, int numReducers) {
PTypeFamily ptf = input.getTypeFamily();
PType<Pair<V1, V2>> valueType = input.getValueType();
PTableType<Pair<K, V1>, Pair<V1, V2>> inter = ptf.tableOf(
ptf.pairs(input.getKeyType(), valueType.getSubTypes().get(0)),
valueType);
GroupingOptions.Builder gob = GroupingOptions.builder()
.requireSortedKeys()
.groupingComparatorClass(JoinUtils.getGroupingComparator(ptf))
.partitionerClass(JoinUtils.getPartitionerClass(ptf));
if (numReducers > 0) {
gob.numReducers(numReducers);
}
return input.parallelDo("SecondarySort.format", new SSFormatFn<K, V1, V2>(), inter)
.groupByKey(gob.build());
}
private static class SSFormatFn<K, V1, V2> extends MapFn<Pair<K, Pair<V1, V2>>, Pair<Pair<K, V1>, Pair<V1, V2>>> {
@Override
public Pair<Pair<K, V1>, Pair<V1, V2>> map(Pair<K, Pair<V1, V2>> input) {
return Pair.of(Pair.of(input.first(), input.second().first()), input.second());
}
}
private static class SSWrapFn<K, V1, V2, T> extends DoFn<Pair<Pair<K, V1>, Iterable<Pair<V1, V2>>>, T> {
private final DoFn<Pair<K, Iterable<Pair<V1, V2>>>, T> intern;
public SSWrapFn(DoFn<Pair<K, Iterable<Pair<V1, V2>>>, T> intern) {
this.intern = intern;
}
@Override
public void configure(Configuration conf) {
intern.configure(conf);
}
@Override
public void initialize() {
intern.setContext(getContext());
intern.initialize();
}
@Override
public void process(Pair<Pair<K, V1>, Iterable<Pair<V1, V2>>> input, Emitter<T> emitter) {
intern.process(Pair.of(input.first().first(), input.second()), emitter);
}
@Override
public void cleanup(Emitter<T> emitter) {
intern.cleanup(emitter);
}
}
}
| 2,886 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Set.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import java.util.Collection;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.Tuple3;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
/**
* Utilities for performing set operations (difference, intersection, etc) on
* {@code PCollection} instances.
*/
public class Set {
/**
* Compute the set difference between two sets of elements.
*
* @return a collection containing elements that are in <code>coll1</code> but
* not in <code>coll2</code>
*/
public static <T> PCollection<T> difference(PCollection<T> coll1, PCollection<T> coll2) {
return Cogroup.cogroup(toTable(coll1), toTable(coll2)).parallelDo(
"Calculate differences of sets",
new DoFn<Pair<T, Pair<Collection<Boolean>, Collection<Boolean>>>, T>() {
@Override
public void process(Pair<T, Pair<Collection<Boolean>, Collection<Boolean>>> input, Emitter<T> emitter) {
Pair<Collection<Boolean>, Collection<Boolean>> groups = input.second();
if (!groups.first().isEmpty() && groups.second().isEmpty()) {
emitter.emit(input.first());
}
}
}, coll1.getPType());
}
/**
* Compute the intersection of two sets of elements.
*
   * @return a collection containing elements that are common to both sets
* <code>coll1</code> and <code>coll2</code>
*/
public static <T> PCollection<T> intersection(PCollection<T> coll1, PCollection<T> coll2) {
return Cogroup.cogroup(toTable(coll1), toTable(coll2)).parallelDo(
"Calculate intersection of sets",
new DoFn<Pair<T, Pair<Collection<Boolean>, Collection<Boolean>>>, T>() {
@Override
public void process(Pair<T, Pair<Collection<Boolean>, Collection<Boolean>>> input, Emitter<T> emitter) {
Pair<Collection<Boolean>, Collection<Boolean>> groups = input.second();
if (!groups.first().isEmpty() && !groups.second().isEmpty()) {
emitter.emit(input.first());
}
}
}, coll1.getPType());
}
/**
* Find the elements that are common to two sets, like the Unix
* <code>comm</code> utility. This method returns a {@link PCollection} of
* {@link Tuple3} objects, and the position in the tuple that an element
* appears is determined by the collections that it is a member of, as
* follows:
* <ol>
* <li>elements only in <code>coll1</code>,</li>
* <li>elements only in <code>coll2</code>, or</li>
* <li>elements in both collections</li>
* </ol>
* Tuples are otherwise filled with <code>null</code>.
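   * <p>
   * For example (an illustrative sketch with string elements): if <code>coll1</code>
   * contains {"a", "b"} and <code>coll2</code> contains {"b", "c"}, the result contains
   * ("a", null, null), (null, "c", null), and (null, null, "b").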
*
* @return a collection of {@link Tuple3} objects
*/
public static <T> PCollection<Tuple3<T, T, T>> comm(PCollection<T> coll1, PCollection<T> coll2) {
PTypeFamily typeFamily = coll1.getTypeFamily();
PType<T> type = coll1.getPType();
return Cogroup.cogroup(toTable(coll1), toTable(coll2)).parallelDo(
"Calculate common values of sets",
new DoFn<Pair<T, Pair<Collection<Boolean>, Collection<Boolean>>>, Tuple3<T, T, T>>() {
@Override
public void process(Pair<T, Pair<Collection<Boolean>, Collection<Boolean>>> input,
Emitter<Tuple3<T, T, T>> emitter) {
Pair<Collection<Boolean>, Collection<Boolean>> groups = input.second();
boolean inFirst = !groups.first().isEmpty();
boolean inSecond = !groups.second().isEmpty();
T t = input.first();
emitter.emit(Tuple3.of(inFirst && !inSecond ? t : null, !inFirst && inSecond ? t : null, inFirst
&& inSecond ? t : null));
}
}, typeFamily.triples(type, type, type));
}
private static <T> PTable<T, Boolean> toTable(PCollection<T> coll) {
PTypeFamily typeFamily = coll.getTypeFamily();
return coll.parallelDo(new DoFn<T, Pair<T, Boolean>>() {
@Override
public void process(T input, Emitter<Pair<T, Boolean>> emitter) {
emitter.emit(Pair.of(input, Boolean.TRUE));
}
}, typeFamily.tableOf(coll.getPType(), typeFamily.booleans()));
}
}
| 2,887 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Cartesian.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import java.util.Random;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PTypeFamily;
/**
* Utilities for Cartesian products of two {@code PTable} or {@code PCollection}
* instances.
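 * <p>
 * A minimal usage sketch (the {@code letters} and {@code digits} collections are
 * hypothetical):
 * <pre>{@code
 * PCollection<String> letters = ...;   // e.g. "a", "b"
 * PCollection<Integer> digits = ...;   // e.g. 1, 2
 * // yields ("a",1), ("a",2), ("b",1), ("b",2)
 * PCollection<Pair<String, Integer>> crossed = Cartesian.cross(letters, digits);
 * }</pre>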
*/
@SuppressWarnings("serial")
public class Cartesian {
/**
* Helper for building the artificial cross keys. This technique was taken
* from Pig's CROSS.
*/
private static class GFCross<V> extends DoFn<V, Pair<Pair<Integer, Integer>, V>> {
private final int constantField;
private final int parallelism;
private final Random r;
public GFCross(int constantField, int parallelism) {
this.constantField = constantField;
this.parallelism = parallelism;
this.r = new Random();
}
    @Override
    public void process(V input, Emitter<Pair<Pair<Integer, Integer>, V>> emitter) {
int c = r.nextInt(parallelism);
if (constantField == 0) {
for (int i = 0; i < parallelism; i++) {
emitter.emit(Pair.of(Pair.of(c, i), input));
}
} else {
for (int i = 0; i < parallelism; i++) {
emitter.emit(Pair.of(Pair.of(i, c), input));
}
}
}
}
static final int DEFAULT_PARALLELISM = 6;
/**
* Performs a full cross join on the specified {@link PTable}s (using the same
* strategy as Pig's CROSS operator).
*
* @see <a href="http://en.wikipedia.org/wiki/Join_(SQL)#Cross_join">Cross
* Join</a>
* @param left
* A PTable to perform a cross join on.
* @param right
* A PTable to perform a cross join on.
* @param <K1>
* Type of left PTable's keys.
* @param <K2>
* Type of right PTable's keys.
* @param <U>
* Type of the first {@link PTable}'s values
* @param <V>
* Type of the second {@link PTable}'s values
* @return The joined result as tuples of ((K1,K2), (U,V)).
*/
public static <K1, K2, U, V> PTable<Pair<K1, K2>, Pair<U, V>> cross(PTable<K1, U> left, PTable<K2, V> right) {
return cross(left, right, DEFAULT_PARALLELISM);
}
/**
* Performs a full cross join on the specified {@link PTable}s (using the same
* strategy as Pig's CROSS operator).
*
* @see <a href="http://en.wikipedia.org/wiki/Join_(SQL)#Cross_join">Cross
* Join</a>
* @param left
* A PTable to perform a cross join on.
* @param right
* A PTable to perform a cross join on.
* @param parallelism
* The square root of the number of reducers to use. Increasing
* parallelism also increases copied data.
* @param <K1>
* Type of left PTable's keys.
* @param <K2>
* Type of right PTable's keys.
* @param <U>
* Type of the first {@link PTable}'s values
* @param <V>
* Type of the second {@link PTable}'s values
* @return The joined result as tuples of ((K1,K2), (U,V)).
*/
public static <K1, K2, U, V> PTable<Pair<K1, K2>, Pair<U, V>> cross(PTable<K1, U> left, PTable<K2, V> right,
int parallelism) {
/*
* The strategy here is to simply emulate the following PigLatin: A =
* foreach table1 generate flatten(GFCross(0, 2)), flatten(*); B = foreach
* table2 generate flatten(GFCross(1, 2)), flatten(*); C = cogroup A by ($0,
* $1), B by ($0, $1); result = foreach C generate flatten(A), flatten(B);
*/
PTypeFamily ltf = left.getTypeFamily();
PTypeFamily rtf = right.getTypeFamily();
PTable<Pair<Integer, Integer>, Pair<K1, U>> leftCross = left.parallelDo(new GFCross<Pair<K1, U>>(0, parallelism),
ltf.tableOf(ltf.pairs(ltf.ints(), ltf.ints()), ltf.pairs(left.getKeyType(), left.getValueType())));
PTable<Pair<Integer, Integer>, Pair<K2, V>> rightCross = right.parallelDo(new GFCross<Pair<K2, V>>(1, parallelism),
rtf.tableOf(rtf.pairs(rtf.ints(), rtf.ints()), rtf.pairs(right.getKeyType(), right.getValueType())));
PTable<Pair<Integer, Integer>, Pair<Pair<K1, U>, Pair<K2, V>>> cg = leftCross.join(rightCross);
PTypeFamily ctf = cg.getTypeFamily();
return cg.parallelDo(
new MapFn<Pair<Pair<Integer, Integer>, Pair<Pair<K1, U>, Pair<K2, V>>>, Pair<Pair<K1, K2>, Pair<U, V>>>() {
@Override
public Pair<Pair<K1, K2>, Pair<U, V>> map(Pair<Pair<Integer, Integer>, Pair<Pair<K1, U>, Pair<K2, V>>> input) {
Pair<Pair<K1, U>, Pair<K2, V>> valuePair = input.second();
return Pair.of(Pair.of(valuePair.first().first(), valuePair.second().first()),
Pair.of(valuePair.first().second(), valuePair.second().second()));
}
},
ctf.tableOf(ctf.pairs(left.getKeyType(), right.getKeyType()),
ctf.pairs(left.getValueType(), right.getValueType())));
}
/**
* Performs a full cross join on the specified {@link PCollection}s (using the
* same strategy as Pig's CROSS operator).
*
* @see <a href="http://en.wikipedia.org/wiki/Join_(SQL)#Cross_join">Cross
* Join</a>
* @param left
* A PCollection to perform a cross join on.
* @param right
* A PCollection to perform a cross join on.
* @param <U>
* Type of the first {@link PCollection}'s values
* @param <V>
* Type of the second {@link PCollection}'s values
* @return The joined result as tuples of (U,V).
*/
public static <U, V> PCollection<Pair<U, V>> cross(PCollection<U> left, PCollection<V> right) {
return cross(left, right, DEFAULT_PARALLELISM);
}
/**
* Performs a full cross join on the specified {@link PCollection}s (using the
* same strategy as Pig's CROSS operator).
*
* @see <a href="http://en.wikipedia.org/wiki/Join_(SQL)#Cross_join">Cross
* Join</a>
* @param left
* A PCollection to perform a cross join on.
* @param right
* A PCollection to perform a cross join on.
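   * @param parallelism
   *          The square root of the number of reducers to use. Increasing
   *          parallelism also increases copied data.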
* @param <U>
* Type of the first {@link PCollection}'s values
* @param <V>
* Type of the second {@link PCollection}'s values
* @return The joined result as tuples of (U,V).
*/
public static <U, V> PCollection<Pair<U, V>> cross(PCollection<U> left, PCollection<V> right, int parallelism) {
PTypeFamily ltf = left.getTypeFamily();
PTypeFamily rtf = right.getTypeFamily();
PTableType<Pair<Integer, Integer>, U> ptt = ltf.tableOf(ltf.pairs(ltf.ints(), ltf.ints()), left.getPType());
if (ptt == null)
throw new Error();
PTable<Pair<Integer, Integer>, U> leftCross = left.parallelDo(new GFCross<U>(0, parallelism),
ltf.tableOf(ltf.pairs(ltf.ints(), ltf.ints()), left.getPType()));
PTable<Pair<Integer, Integer>, V> rightCross = right.parallelDo(new GFCross<V>(1, parallelism),
rtf.tableOf(rtf.pairs(rtf.ints(), rtf.ints()), right.getPType()));
PTable<Pair<Integer, Integer>, Pair<U, V>> cg = leftCross.join(rightCross);
PTypeFamily ctf = cg.getTypeFamily();
return cg.parallelDo("Extract second element", new MapFn<Pair<Pair<Integer, Integer>, Pair<U, V>>, Pair<U, V>>() {
@Override
public Pair<U, V> map(Pair<Pair<Integer, Integer>, Pair<U, V>> input) {
return input.second();
}
}, ctf.pairs(left.getPType(), right.getPType()));
}
}
| 2,888 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/package-info.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Joining, sorting, aggregating, and other commonly used functionality.
*/
package org.apache.crunch.lib;
| 2,889 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/Distinct.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import java.util.Set;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
/**
* Functions for computing the distinct elements of a {@code PCollection}.
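 * <p>
 * A minimal usage sketch (the {@code words} collection is hypothetical):
 * <pre>{@code
 * PCollection<String> words = ...;
 * PCollection<String> unique = Distinct.distinct(words);
 * // or flush the in-memory set every 20,000 unique values:
 * PCollection<String> unique2 = Distinct.distinct(words, 20000);
 * }</pre>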
*/
public final class Distinct {
private static final int DEFAULT_FLUSH_EVERY = 50000;
/**
* Construct a new {@code PCollection} that contains the unique elements of a
* given input {@code PCollection}.
*
* @param input The input {@code PCollection}
* @return A new {@code PCollection} that contains the unique elements of the input
*/
public static <S> PCollection<S> distinct(PCollection<S> input) {
return distinct(input, DEFAULT_FLUSH_EVERY, GroupingOptions.builder().build());
}
/**
* A {@code PTable<K, V>} analogue of the {@code distinct} function.
*/
public static <K, V> PTable<K, V> distinct(PTable<K, V> input) {
return PTables.asPTable(distinct((PCollection<Pair<K, V>>) input));
}
/**
   * A {@code distinct} operation that lets the client control how frequently elements
   * are flushed to disk, in order to tune performance or memory consumption.
*
* @param input The input {@code PCollection}
* @param flushEvery Flush the elements to disk whenever we encounter this many unique values
* @return A new {@code PCollection} that contains the unique elements of the input
*/
public static <S> PCollection<S> distinct(PCollection<S> input, int flushEvery) {
return distinct(input, flushEvery, GroupingOptions.builder().build());
}
/**
* A {@code PTable<K, V>} analogue of the {@code distinct} function.
*/
public static <K, V> PTable<K, V> distinct(PTable<K, V> input, int flushEvery) {
return PTables.asPTable(distinct((PCollection<Pair<K, V>>) input, flushEvery));
}
/**
   * A {@code distinct} operation that lets the client control how frequently elements
   * are flushed to disk, in order to tune performance or memory consumption.
*
* @param input The input {@code PCollection}
* @param flushEvery Flush the elements to disk whenever we encounter this many unique values
* @param numReducers The number of reducers to use
* @return A new {@code PCollection} that contains the unique elements of the input
*/
public static <S> PCollection<S> distinct(PCollection<S> input, int flushEvery, int numReducers) {
Preconditions.checkArgument(flushEvery > 0);
PType<S> pt = input.getPType();
PTypeFamily ptf = pt.getFamily();
return input
.parallelDo("pre-distinct", new PreDistinctFn<S>(flushEvery, pt), ptf.tableOf(pt, ptf.nulls()))
.groupByKey(numReducers)
.parallelDo("post-distinct", new PostDistinctFn<S>(), pt);
}
/**
* A {@code PTable<K, V>} analogue of the {@code distinct} function.
*/
public static <K, V> PTable<K, V> distinct(PTable<K, V> input, int flushEvery, int numReducers) {
return PTables.asPTable(distinct((PCollection<Pair<K, V>>) input, flushEvery, numReducers));
}
/**
   * A {@code distinct} operation that lets the client control how frequently elements
   * are flushed to disk, in order to tune performance or memory consumption.
*
* @param input The input {@code PCollection}
* @param flushEvery Flush the elements to disk whenever we encounter this many unique values
* @param options Options to provide finer control on how grouping is performed.
* @return A new {@code PCollection} that contains the unique elements of the input
*/
public static <S> PCollection<S> distinct(PCollection<S> input, int flushEvery, GroupingOptions options) {
Preconditions.checkArgument(flushEvery > 0);
PType<S> pt = input.getPType();
PTypeFamily ptf = pt.getFamily();
return input
.parallelDo("pre-distinct", new PreDistinctFn<S>(flushEvery, pt), ptf.tableOf(pt, ptf.nulls()))
.groupByKey(options)
.parallelDo("post-distinct", new PostDistinctFn<S>(), pt);
}
/**
* A {@code PTable<K, V>} analogue of the {@code distinct} function.
*/
public static <K, V> PTable<K, V> distinct(PTable<K, V> input, int flushEvery, GroupingOptions options) {
return PTables.asPTable(distinct((PCollection<Pair<K, V>>) input, flushEvery, options));
}
private static class PreDistinctFn<S> extends DoFn<S, Pair<S, Void>> {
private final Set<S> values = Sets.newHashSet();
private final int flushEvery;
private final PType<S> ptype;
PreDistinctFn(int flushEvery, PType<S> ptype) {
this.flushEvery = flushEvery;
this.ptype = ptype;
}
@Override
public void initialize() {
super.initialize();
ptype.initialize(getConfiguration());
}
@Override
public void process(S input, Emitter<Pair<S, Void>> emitter) {
values.add(ptype.getDetachedValue(input));
if (values.size() > flushEvery) {
cleanup(emitter);
}
}
@Override
public void cleanup(Emitter<Pair<S, Void>> emitter) {
for (S in : values) {
emitter.emit(Pair.<S, Void>of(in, null));
}
values.clear();
}
}
private static class PostDistinctFn<S> extends DoFn<Pair<S, Iterable<Void>>, S> {
@Override
public void process(Pair<S, Iterable<Void>> input, Emitter<S> emitter) {
emitter.emit(input.first());
}
}
// No instantiation
private Distinct() {}
}
| 2,890 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/BloomFilterJoinStrategy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.EncoderFactory;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.FilterFn;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.ReadableData;
import org.apache.crunch.fn.ExtractKeyFn;
import org.apache.crunch.fn.FilterFns;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.avro.AvroMode;
import org.apache.crunch.types.avro.AvroType;
import org.apache.crunch.types.avro.AvroTypeFamily;
import org.apache.crunch.types.avro.Avros;
import org.apache.crunch.types.writable.WritableType;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.bloom.BloomFilter;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.hash.Hash;
/**
* Join strategy that uses a <a href="http://en.wikipedia.org/wiki/Bloom_filter">Bloom filter</a>
* that is trained on the keys of the left-side table to filter the key/value pairs of the right-side
 * table before sending them through the shuffle and reduce phase.
* <p>
* This strategy is useful in cases where the right-side table contains many keys that are not
* present in the left-side table. In this case, the use of the Bloom filter avoids a
* potentially costly shuffle phase for data that would never be joined to the left side.
* <p>
 * Implementation note: right and full outer join types are handled by splitting the right-side
 * table (the bigger one) into two disjoint streams: negatively filtered (the right-outer part)
 * and positively filtered (passed to the delegate strategy).
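 * <p>
 * A minimal usage sketch (the {@code customers} and {@code orders} tables and the key
 * count are hypothetical):
 * <pre>{@code
 * PTable<Long, String> customers = ...;   // smaller left side, roughly 1M distinct keys
 * PTable<Long, String> orders = ...;      // much larger right side
 * JoinStrategy<Long, String, String> strategy =
 *     new BloomFilterJoinStrategy<Long, String, String>(1000000);
 * PTable<Long, Pair<String, String>> joined = strategy.join(customers, orders, JoinType.INNER_JOIN);
 * }</pre>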
*/
public class BloomFilterJoinStrategy<K, U, V> implements JoinStrategy<K, U, V> {
private int vectorSize;
private int nbHash;
private JoinStrategy<K, U, V> delegateJoinStrategy;
/**
* Instantiate with the expected number of unique keys in the left table.
* <p>
* The {@link DefaultJoinStrategy} will be used to perform the actual join after filtering.
*
* @param numElements expected number of unique keys
*/
public BloomFilterJoinStrategy(int numElements) {
this(numElements, 0.05f);
}
/**
* Instantiate with the expected number of unique keys in the left table, and the acceptable
* false positive rate for the Bloom filter.
* <p>
* The {@link DefaultJoinStrategy} will be used to perform the actual join after filtering.
*
* @param numElements expected number of unique keys
* @param falsePositiveRate acceptable false positive rate for Bloom Filter
*/
public BloomFilterJoinStrategy(int numElements, float falsePositiveRate) {
this(numElements, falsePositiveRate, new DefaultJoinStrategy<K, U, V>());
}
/**
   * Instantiate with the expected number of unique keys in the left table, the acceptable
   * false positive rate for the Bloom filter, and an underlying join strategy to delegate to.
*
* @param numElements expected number of unique keys
* @param falsePositiveRate acceptable false positive rate for Bloom Filter
* @param delegateJoinStrategy join strategy to delegate to after filtering
*/
public BloomFilterJoinStrategy(int numElements, float falsePositiveRate, JoinStrategy<K,U,V> delegateJoinStrategy) {
this.vectorSize = getOptimalVectorSize(numElements, falsePositiveRate);
this.nbHash = getOptimalNumHash(numElements, vectorSize);
this.delegateJoinStrategy = delegateJoinStrategy;
}
/**
* Calculates the optimal vector size for a given number of elements and acceptable false
* positive rate.
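   * <p>This is the standard Bloom filter sizing formula m = -n * ln(p) / (ln 2)^2.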
*/
private static int getOptimalVectorSize(int numElements, float falsePositiveRate) {
return (int) (-numElements * (float)Math.log(falsePositiveRate) / Math.pow(Math.log(2), 2));
}
/**
* Calculates the optimal number of hash functions to be used.
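   * <p>This is the standard formula k = (m / n) * ln 2, where m is the vector size.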
*/
private static int getOptimalNumHash(int numElements, float vectorSize) {
return (int)Math.round(vectorSize * Math.log(2) / numElements);
}
@Override
public PTable<K, Pair<U, V>> join(PTable<K, U> left, PTable<K, V> right, JoinType joinType) {
PType<BloomFilter> bloomFilterType = getBloomFilterType(left.getTypeFamily());
PCollection<BloomFilter> bloomFilters = left.keys().parallelDo(
"Create bloom filters",
new CreateBloomFilterFn<>(vectorSize, nbHash, left.getKeyType()),
bloomFilterType);
ReadableData<BloomFilter> bloomData = bloomFilters.asReadable(true);
FilterKeysWithBloomFilterFn<K, V> filterKeysFn = new FilterKeysWithBloomFilterFn<>(
bloomData, vectorSize, nbHash, left.getKeyType());
if (joinType != JoinType.INNER_JOIN && joinType != JoinType.LEFT_OUTER_JOIN) {
right = right.parallelDo(
"disable deep copy", new DeepCopyDisablerFn<Pair<K, V>>(), right.getPTableType());
}
ParallelDoOptions options = ParallelDoOptions.builder()
.sourceTargets(bloomData.getSourceTargets()).build();
PTable<K, V> filteredRightSide = right.parallelDo(
"Filter right-side with BloomFilters",
filterKeysFn, right.getPTableType(), options);
PTable<K, Pair<U, V>> leftJoinedWithFilteredRight = delegateJoinStrategy
.join(left, filteredRightSide, joinType);
if (joinType == JoinType.INNER_JOIN || joinType == JoinType.LEFT_OUTER_JOIN) {
return leftJoinedWithFilteredRight;
}
return leftJoinedWithFilteredRight.union(
right
.parallelDo(
"Negatively filter right-side with BloomFilters",
FilterFns.not(filterKeysFn), right.getPTableType(), options)
.mapValues(
"Right outer join: attach null as left-value",
new NullKeyFn<U, V>(), leftJoinedWithFilteredRight.getValueType()));
}
/**
* Creates Bloom filter(s) for filtering of right-side keys.
*/
private static class CreateBloomFilterFn<K> extends DoFn<K, BloomFilter> {
private int vectorSize;
private int nbHash;
private transient BloomFilter bloomFilter;
private transient MapFn<K,byte[]> keyToBytesFn;
private PType<K> ptype;
CreateBloomFilterFn(int vectorSize, int nbHash, PType<K> ptype) {
this.vectorSize = vectorSize;
this.nbHash = nbHash;
this.ptype = ptype;
}
@Override
public void initialize() {
super.initialize();
bloomFilter = new BloomFilter(vectorSize, nbHash, Hash.MURMUR_HASH);
ptype.initialize(getConfiguration());
keyToBytesFn = getKeyToBytesMapFn(ptype, getConfiguration());
}
@Override
public void process(K input, Emitter<BloomFilter> emitter) {
bloomFilter.add(new Key(keyToBytesFn.map(input)));
}
@Override
public void cleanup(Emitter<BloomFilter> emitter) {
emitter.emit(bloomFilter);
}
}
/**
* Filters right-side keys with a Bloom filter before passing them off to the delegate join strategy.
*/
private static class FilterKeysWithBloomFilterFn<K,V> extends FilterFn<Pair<K, V>> {
private int vectorSize;
private int nbHash;
private PType<K> keyType;
private PType<BloomFilter> bloomFilterPType;
private transient BloomFilter bloomFilter;
private transient MapFn<K,byte[]> keyToBytesFn;
private ReadableData<BloomFilter> bloomData;
FilterKeysWithBloomFilterFn(ReadableData<BloomFilter> bloomData, int vectorSize, int nbHash, PType<K> keyType) {
this.bloomData = bloomData;
this.vectorSize = vectorSize;
this.nbHash = nbHash;
this.keyType = keyType;
}
@Override
public void configure(Configuration conf) {
bloomData.configure(conf);
}
@Override
public void initialize() {
super.initialize();
keyType.initialize(getConfiguration());
keyToBytesFn = getKeyToBytesMapFn(keyType, getConfiguration());
Iterable<BloomFilter> iterable;
try {
iterable = bloomData.read(getContext());
} catch (IOException e) {
throw new CrunchRuntimeException("Error reading right-side of map side join: ", e);
}
bloomFilter = new BloomFilter(vectorSize, nbHash, Hash.MURMUR_HASH);
for (BloomFilter subFilter : iterable) {
bloomFilter.or(subFilter);
}
}
@Override
public boolean accept(Pair<K, V> input) {
Key key = new Key(keyToBytesFn.map(input.first()));
return bloomFilter.membershipTest(key);
}
}
/**
* Returns the appropriate MapFn for converting the key type into byte arrays.
*/
private static <K> MapFn<K,byte[]> getKeyToBytesMapFn(PType<K> ptype, Configuration conf) {
if (ptype instanceof AvroType) {
return new AvroToBytesFn<K>((AvroType)ptype, conf);
}
if (ptype instanceof WritableType) {
return new WritableToBytesFn<K>((WritableType)ptype, conf);
}
throw new IllegalStateException("Unrecognized PType: " + ptype);
}
/**
* Returns the appropriate PType for serializing BloomFilters using the same
* type family as is used for the input collections.
*/
private static PType<BloomFilter> getBloomFilterType(PTypeFamily typeFamily) {
if (typeFamily.equals(AvroTypeFamily.getInstance())) {
return Avros.writables(BloomFilter.class);
} else if (typeFamily.equals(WritableTypeFamily.getInstance())) {
return Writables.writables(BloomFilter.class);
} else {
throw new IllegalStateException("Unrecognized PTypeFamily: " + typeFamily);
}
}
/**
* Converts a Writable into a byte array so that it can be added to a BloomFilter.
*/
private static class WritableToBytesFn<T> extends MapFn<T,byte[]>{
private WritableType<T,?> ptype;
private DataOutputBuffer dataOutputBuffer;
WritableToBytesFn(WritableType<T,?> ptype, Configuration conf) {
this.ptype = ptype;
dataOutputBuffer = new DataOutputBuffer();
}
@Override
public byte[] map(T input) {
dataOutputBuffer.reset();
Writable writable = (Writable) ptype.getOutputMapFn().map(input);
try {
writable.write(dataOutputBuffer);
} catch (IOException e) {
throw new CrunchRuntimeException(e);
}
byte[] output = new byte[dataOutputBuffer.getLength()];
System.arraycopy(dataOutputBuffer.getData(), 0, output, 0, dataOutputBuffer.getLength());
return output;
}
}
/**
* Converts an Avro value into a byte array so that it can be added to a Bloom filter.
*/
private static class AvroToBytesFn<T> extends MapFn<T,byte[]> {
private AvroType<T> ptype;
private BinaryEncoder encoder;
private DatumWriter datumWriter;
AvroToBytesFn(AvroType<T> ptype, Configuration conf) {
this.ptype = ptype;
datumWriter = AvroMode.fromType(ptype).withFactoryFromConfiguration(conf)
.getWriter(ptype.getSchema());
}
@Override
public byte[] map(T input) {
Object datum = ptype.getOutputMapFn().map(input);
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
encoder = EncoderFactory.get().binaryEncoder(byteArrayOutputStream, encoder);
try {
datumWriter.write(datum, encoder);
encoder.flush();
} catch (IOException e) {
throw new CrunchRuntimeException(e);
}
return byteArrayOutputStream.toByteArray();
}
}
/**
   * Pairs each value with a null key. It is used to convert the negatively filtered
   * right-side values into the right-outer part of the join output.
*/
private static class NullKeyFn<K, V> extends ExtractKeyFn<K, V> {
public NullKeyFn() {
super(new MapFn<V, K>() {
@Override public K map(V input) {
return null;
}
@Override public float scaleFactor() {
return 0.0001f;
}
});
}
}
/**
   * Right and full outer join types are handled by splitting the right-side table (the bigger one)
   * into two disjoint streams: negatively filtered (the right-outer part) and positively filtered.
   * To prevent concurrent modification, Crunch performs a deep copy of such a split stream by
   * default (see {@link DoFn#disableDeepCopy()}), which introduces extra overhead. Since the Bloom
   * filter directs every record to exactly one of these streams, making concurrent modification
   * impossible, we can safely disable this feature. To achieve this we put the {@code right} PTable
   * through a {@code parallelDo} call with this {@code DoFn}.
*/
private static class DeepCopyDisablerFn<T> extends MapFn<T, T> {
@Override
public T map(T input) {
return input;
}
@Override
public boolean disableDeepCopy() {
return true;
}
}
}
| 2,891 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/JoinUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.io.BinaryData;
import org.apache.avro.mapred.AvroJob;
import org.apache.avro.mapred.AvroWrapper;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.avro.AvroMode;
import org.apache.crunch.types.writable.TupleWritable;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.apache.crunch.util.HashUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Partitioner;
/**
* Utilities that are useful in joining multiple data sets via a MapReduce.
*
*/
public class JoinUtils {
public static Class<? extends Partitioner> getPartitionerClass(PTypeFamily typeFamily) {
if (typeFamily == WritableTypeFamily.getInstance()) {
return TupleWritablePartitioner.class;
} else {
return AvroIndexedRecordPartitioner.class;
}
}
public static Class<? extends RawComparator> getGroupingComparator(PTypeFamily typeFamily) {
if (typeFamily == WritableTypeFamily.getInstance()) {
return TupleWritableComparator.class;
} else {
return AvroPairGroupingComparator.class;
}
}
public static class TupleWritablePartitioner extends Partitioner<TupleWritable, Writable> {
@Override
public int getPartition(TupleWritable key, Writable value, int numPartitions) {
return (HashUtil.smearHash(key.get(0).hashCode()) & Integer.MAX_VALUE) % numPartitions;
}
}
public static class TupleWritableComparator implements RawComparator<TupleWritable> {
private DataInputBuffer buffer = new DataInputBuffer();
private TupleWritable key1 = new TupleWritable();
private TupleWritable key2 = new TupleWritable();
@Override
public int compare(TupleWritable o1, TupleWritable o2) {
return ((Comparable) o1.get(0)).compareTo(o2.get(0));
}
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
try {
buffer.reset(b1, s1, l1);
key1.readFields(buffer);
buffer.reset(b2, s2, l2);
key2.readFields(buffer);
} catch (Exception e) {
throw new RuntimeException(e);
}
return compare(key1, key2);
}
}
public static class AvroIndexedRecordPartitioner extends Partitioner<Object, Object> {
@Override
public int getPartition(Object key, Object value, int numPartitions) {
IndexedRecord record;
if (key instanceof AvroWrapper) {
record = (IndexedRecord) ((AvroWrapper) key).datum();
} else if (key instanceof IndexedRecord) {
record = (IndexedRecord) key;
} else {
throw new UnsupportedOperationException("Unknown avro key type: " + key);
}
return (HashUtil.smearHash(record.get(0).hashCode()) & Integer.MAX_VALUE) % numPartitions;
}
}
public static class AvroPairGroupingComparator<T> extends Configured implements RawComparator<AvroWrapper<T>> {
private Schema schema;
private AvroMode mode;
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
if (conf != null) {
Schema mapOutputSchema = AvroJob.getMapOutputSchema(conf);
Schema keySchema = org.apache.avro.mapred.Pair.getKeySchema(mapOutputSchema);
schema = keySchema.getFields().get(0).schema();
mode = AvroMode.fromShuffleConfiguration(conf);
}
}
@Override
public int compare(AvroWrapper<T> x, AvroWrapper<T> y) {
return mode.getData().compare(x.datum(), y.datum(), schema);
}
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
return BinaryData.compare(b1, s1, l1, b2, s2, l2, schema);
}
}
}
| 2,892 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/FullOuterJoinFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import java.util.List;
import org.apache.crunch.Emitter;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PType;
import com.google.common.collect.Lists;
/**
 * Used to perform the last step of a full outer join.
*
* @param <K> Type of the keys.
* @param <U> Type of the first {@link org.apache.crunch.PTable}'s values
* @param <V> Type of the second {@link org.apache.crunch.PTable}'s values
*/
public class FullOuterJoinFn<K, U, V> extends JoinFn<K, U, V> {
private transient int lastId;
private transient K lastKey;
private transient List<U> leftValues;
public FullOuterJoinFn(PType<K> keyType, PType<U> leftValueType) {
super(keyType, leftValueType);
}
/** {@inheritDoc} */
@Override
public void initialize() {
super.initialize();
lastId = 1;
lastKey = null;
this.leftValues = Lists.newArrayList();
}
/** {@inheritDoc} */
@Override
public void join(K key, int id, Iterable<Pair<U, V>> pairs, Emitter<Pair<K, Pair<U, V>>> emitter) {
if (!key.equals(lastKey)) {
// Make sure that left side gets emitted.
if (0 == lastId) {
for (U u : leftValues) {
emitter.emit(Pair.of(lastKey, Pair.of(u, (V) null)));
}
}
lastKey = keyType.getDetachedValue(key);
leftValues.clear();
}
if (id == 0) {
for (Pair<U, V> pair : pairs) {
if (pair.first() != null)
leftValues.add(leftValueType.getDetachedValue(pair.first()));
}
} else {
for (Pair<U, V> pair : pairs) {
// Make sure that right side gets emitted.
if (leftValues.isEmpty()) {
leftValues.add(null);
}
for (U u : leftValues) {
emitter.emit(Pair.of(lastKey, Pair.of(u, pair.second())));
}
}
}
lastId = id;
}
/** {@inheritDoc} */
@Override
public void cleanup(Emitter<Pair<K, Pair<U, V>>> emitter) {
if (0 == lastId) {
for (U u : leftValues) {
emitter.emit(Pair.of(lastKey, Pair.of(u, (V) null)));
}
}
}
/** {@inheritDoc} */
@Override
public String getJoinType() {
return "fullOuterJoin";
}
}
| 2,893 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/JoinStrategy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import java.io.Serializable;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
/**
* Defines a strategy for joining two PTables together on a common key.
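 * <p>
 * A minimal usage sketch (the {@code left} and {@code right} tables are hypothetical):
 * <pre>{@code
 * PTable<String, Long> left = ...;
 * PTable<String, String> right = ...;
 * JoinStrategy<String, Long, String> strategy = new DefaultJoinStrategy<String, Long, String>();
 * PTable<String, Pair<Long, String>> joined = strategy.join(left, right, JoinType.LEFT_OUTER_JOIN);
 * }</pre>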
*/
public interface JoinStrategy<K, U, V> extends Serializable {
/**
* Join two tables with the given join type.
*
* @param left left table to be joined
* @param right right table to be joined
* @param joinType type of join to perform
* @return joined tables
*/
PTable<K, Pair<U,V>> join(PTable<K, U> left, PTable<K, V> right, JoinType joinType);
}
| 2,894 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/JoinType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
/**
* Specifies the specific behavior of how a join should be performed in terms of requiring matching keys
* on both sides of the join.
*/
public enum JoinType {
/**
* Join two tables on a common key. Every value in the left-side table under a given key will be
* present with every value from the right-side table with the same key.
*
* @see <a href="http://en.wikipedia.org/wiki/Join_(SQL)#Inner_join">Inner Join</a>
*/
INNER_JOIN,
/**
* Join two tables on a common key, including entries from the left-side table that have
* no matching key in the right-side table.
* <p>
   * Support for this join type is optional for implementations.
*
* @see <a href="http://en.wikipedia.org/wiki/Join_(SQL)#Left_outer_join">Left Join</a>
*/
LEFT_OUTER_JOIN,
/**
* Join two tables on a common key, including entries from the right-side table that have
* no matching key in the left-side table.
* <p>
   * Support for this join type is optional for implementations.
*
* @see <a href="http://en.wikipedia.org/wiki/Join_(SQL)#Right_outer_join">Right Join</a>
*/
RIGHT_OUTER_JOIN,
/**
* Join two tables on a common key, also including entries from both tables that have no
* matching key in the other table.
* <p>
   * Support for this join type is optional for implementations.
*
* @see <a href="http://en.wikipedia.org/wiki/Join_(SQL)#Full_outer_join">Full Join</a>
*/
FULL_OUTER_JOIN
}
| 2,895 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/OneToManyJoin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import java.io.Serializable;
import javax.annotation.Nullable;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.PCollection;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.lib.Join;
import org.apache.crunch.types.PType;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import com.google.common.base.Function;
import com.google.common.collect.Iterables;
/**
 * Optimized join for situations where exactly one value on the left side is joined
 * with any number of values on the right side, based on a common key.
*/
public class OneToManyJoin {
/**
* Performs a join on two tables, where the left table only contains a single
* value per key.
* <p>
* This method accepts a DoFn, which is responsible for converting the single
* left-side value and the iterable of right-side values into output values.
* <p>
* This method of joining is useful when there is a single context value that
* contains a large number of related values, and all related values must be
* brought together, with the quantity of the right-side values being too big
* to fit in memory.
* <p>
* If there are multiple values for the same key in the left-side table, only
* a single one will be used.
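   * <p>
   * A minimal usage sketch (the {@code users} and {@code clicks} tables and the summarizing
   * {@code DoFn} are hypothetical):
   * <pre>{@code
   * PTable<Long, String> users = ...;    // one profile per user ID
   * PTable<Long, String> clicks = ...;   // many click records per user ID
   * PCollection<String> summaries = OneToManyJoin.oneToManyJoin(users, clicks,
   *     new DoFn<Pair<String, Iterable<String>>, String>() {
   *       @Override
   *       public void process(Pair<String, Iterable<String>> input, Emitter<String> emitter) {
   *         emitter.emit(input.first());   // the single profile; input.second() are its clicks
   *       }
   *     }, Writables.strings());
   * }</pre>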
*
* @param left left-side table to join
* @param right right-side table to join
* @param postProcessFn DoFn to process the results of the join
* @param ptype type of the output of the postProcessFn
* @return the post-processed output of the join
*/
public static <K, U, V, T> PCollection<T> oneToManyJoin(PTable<K, U> left, PTable<K, V> right,
DoFn<Pair<U, Iterable<V>>, T> postProcessFn, PType<T> ptype) {
return oneToManyJoin(left, right, postProcessFn, ptype, -1);
}
/**
* Supports a user-specified number of reducers for the one-to-many join.
*
* @param left left-side table to join
* @param right right-side table to join
* @param postProcessFn DoFn to process the results of the join
* @param ptype type of the output of the postProcessFn
* @param numReducers The number of reducers to use
* @return the post-processed output of the join
*/
public static <K, U, V, T> PCollection<T> oneToManyJoin(PTable<K, U> left, PTable<K, V> right,
DoFn<Pair<U, Iterable<V>>, T> postProcessFn, PType<T> ptype, int numReducers) {
PGroupedTable<Pair<K, Integer>, Pair<U, V>> grouped = DefaultJoinStrategy.preJoin(left, right, numReducers);
return grouped.parallelDo("One to many join " + grouped.getName(),
new OneToManyJoinFn<K, U, V, T>(left.getValueType(), postProcessFn), ptype);
}
/**
* Handles post-processing the output of {@link Join#oneToManyJoin}.
*/
static class OneToManyJoinFn<K, U, V, T> extends DoFn<Pair<Pair<K, Integer>, Iterable<Pair<U, V>>>, T> {
private PType<U> leftValueType;
private DoFn<Pair<U, Iterable<V>>, T> postProcessFn;
private SecondElementFunction<U, V> secondElementFunction;
private K currentKey;
private U leftValue;
public OneToManyJoinFn(PType<U> leftValueType, DoFn<Pair<U, Iterable<V>>, T> postProcessFn) {
this.leftValueType = leftValueType;
this.postProcessFn = postProcessFn;
this.secondElementFunction = new SecondElementFunction<U, V>();
}
@Override
public void initialize() {
super.initialize();
postProcessFn.initialize();
leftValueType.initialize(getConfiguration());
}
@Override
public void setContext(TaskInputOutputContext<?, ?, ?, ?> context) {
super.setContext(context);
postProcessFn.setContext(context);
}
@Override
public void process(Pair<Pair<K, Integer>, Iterable<Pair<U, V>>> input, Emitter<T> emitter) {
Pair<K, Integer> keyPair = input.first();
if (keyPair.second() == 0) {
leftValue = leftValueType.getDetachedValue(input.second().iterator().next().first());
currentKey = input.first().first();
} else if (keyPair.second() == 1 && input.first().first().equals(currentKey)) {
postProcessFn.process(Pair.of(leftValue, wrapIterable(input.second())), emitter);
leftValue = null;
}
}
private Iterable<V> wrapIterable(Iterable<Pair<U, V>> input) {
return Iterables.transform(input, secondElementFunction);
}
private static class SecondElementFunction<U, V> implements Function<Pair<U, V>, V>, Serializable {
@Override
public V apply(@Nullable Pair<U, V> input) {
return input.second();
}
}
}
}
| 2,896 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/RightOuterJoinFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import java.util.List;
import org.apache.crunch.Emitter;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PType;
import com.google.common.collect.Lists;
/**
 * Used to perform the last step of a right outer join.
*
* @param <K> Type of the keys.
* @param <U> Type of the first {@link org.apache.crunch.PTable}'s values
* @param <V> Type of the second {@link org.apache.crunch.PTable}'s values
*/
public class RightOuterJoinFn<K, U, V> extends JoinFn<K, U, V> {
private transient K lastKey;
private transient List<U> leftValues;
public RightOuterJoinFn(PType<K> keyType, PType<U> leftValueType) {
super(keyType, leftValueType);
}
/** {@inheritDoc} */
@Override
public void initialize() {
super.initialize();
lastKey = null;
this.leftValues = Lists.newArrayList();
}
/** {@inheritDoc} */
@Override
public void join(K key, int id, Iterable<Pair<U, V>> pairs, Emitter<Pair<K, Pair<U, V>>> emitter) {
if (!key.equals(lastKey)) {
lastKey = keyType.getDetachedValue(key);
leftValues.clear();
}
if (id == 0) {
for (Pair<U, V> pair : pairs) {
if (pair.first() != null)
leftValues.add(leftValueType.getDetachedValue(pair.first()));
}
} else {
for (Pair<U, V> pair : pairs) {
// Make sure that right side gets emitted.
if (leftValues.isEmpty()) {
leftValues.add(null);
}
for (U u : leftValues) {
emitter.emit(Pair.of(lastKey, Pair.of(u, pair.second())));
}
}
}
}
/** {@inheritDoc} */
@Override
public String getJoinType() {
return "rightOuterJoin";
}
}
| 2,897 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/DefaultJoinStrategy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.MapFn;
import org.apache.crunch.PGroupedTable;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PTypeFamily;
/**
* Default join strategy that simply sends all data through the map, shuffle, and reduce phase.
* <p>
* This join strategy is full-featured (i.e. all methods are available), but is not highly
 * efficient because it passes all data through the shuffle phase.
*/
public class DefaultJoinStrategy<K, U, V> implements JoinStrategy<K, U, V> {
private final int numReducers;
public DefaultJoinStrategy() {
this(-1);
}
public DefaultJoinStrategy(int numReducers) {
this.numReducers = numReducers;
}
@Override
public PTable<K, Pair<U, V>> join(PTable<K, U> left, PTable<K, V> right, JoinType joinType) {
switch (joinType) {
case INNER_JOIN:
return join(left, right, new InnerJoinFn<K, U, V>(left.getKeyType(), left.getValueType()));
case LEFT_OUTER_JOIN:
return join(left, right, new LeftOuterJoinFn<K, U, V>(left.getKeyType(), left.getValueType()));
case RIGHT_OUTER_JOIN:
return join(left, right,
new RightOuterJoinFn<K, U, V>(left.getKeyType(), left.getValueType()));
case FULL_OUTER_JOIN:
return join(left, right, new FullOuterJoinFn<K, U, V>(left.getKeyType(), left.getValueType()));
default:
throw new UnsupportedOperationException("Join type " + joinType + " is not supported");
}
}
/**
* Perform a default join on the given {@code PTable} instances using a user-specified {@code JoinFn}.
*
* @param left left table to be joined
* @param right right table to be joined
* @param joinFn The user-specified implementation of the {@code JoinFn} class
* @return joined tables
*/
public PTable<K, Pair<U, V>> join(PTable<K, U> left, PTable<K, V> right, JoinFn<K, U, V> joinFn) {
PTypeFamily ptf = left.getTypeFamily();
PGroupedTable<Pair<K, Integer>, Pair<U, V>> grouped = preJoin(left, right, numReducers);
PTableType<K, Pair<U, V>> ret = ptf
.tableOf(left.getKeyType(), ptf.pairs(left.getValueType(), right.getValueType()));
return grouped.parallelDo(joinFn.getJoinType() + grouped.getName(), joinFn, ret);
}
static <K, U, V> PGroupedTable<Pair<K, Integer>, Pair<U, V>> preJoin(PTable<K, U> left, PTable<K, V> right,
int numReducers) {
PTypeFamily ptf = left.getTypeFamily();
PTableType<Pair<K, Integer>, Pair<U, V>> ptt = ptf.tableOf(ptf.pairs(left.getKeyType(), ptf.ints()),
ptf.pairs(left.getValueType(), right.getValueType()));
PTable<Pair<K, Integer>, Pair<U, V>> tag1 = left.parallelDo("joinTagLeft",
new MapFn<Pair<K, U>, Pair<Pair<K, Integer>, Pair<U, V>>>() {
@Override
public Pair<Pair<K, Integer>, Pair<U, V>> map(Pair<K, U> input) {
return Pair.of(Pair.of(input.first(), 0), Pair.of(input.second(), (V) null));
}
}, ptt);
PTable<Pair<K, Integer>, Pair<U, V>> tag2 = right.parallelDo("joinTagRight",
new MapFn<Pair<K, V>, Pair<Pair<K, Integer>, Pair<U, V>>>() {
@Override
public Pair<Pair<K, Integer>, Pair<U, V>> map(Pair<K, V> input) {
return Pair.of(Pair.of(input.first(), 1), Pair.of((U) null, input.second()));
}
}, ptt);
GroupingOptions.Builder optionsBuilder = GroupingOptions.builder();
optionsBuilder.requireSortedKeys();
optionsBuilder.partitionerClass(JoinUtils.getPartitionerClass(ptf));
if (numReducers > 0) {
optionsBuilder.numReducers(numReducers);
}
return (tag1.union(tag2)).groupByKey(optionsBuilder.build());
}
}
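A minimal, hypothetical sketch of the JoinFn overload documented above: it mirrors what join(left, right, JoinType.INNER_JOIN) does internally, but supplies the InnerJoinFn explicitly and caps the shuffle at 10 reduce tasks (an arbitrary illustrative value).
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.lib.join.DefaultJoinStrategy;
import org.apache.crunch.lib.join.InnerJoinFn;
public class DefaultJoinExample {
  // Reduce-side inner join with a user-specified JoinFn and a fixed reducer count.
  public static <K, U, V> PTable<K, Pair<U, V>> innerJoin(PTable<K, U> left, PTable<K, V> right) {
    DefaultJoinStrategy<K, U, V> strategy = new DefaultJoinStrategy<K, U, V>(10);
    return strategy.join(left, right, new InnerJoinFn<K, U, V>(left.getKeyType(), left.getValueType()));
  }
}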
| 2,898 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/lib/join/MapsideJoinStrategy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.MapFn;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.ReadableData;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.util.Collection;
/**
* Utility for doing map side joins on a common key between two {@link PTable}s.
* <p>
* A map side join is an optimized join which doesn't use a reducer; instead,
* one side of the join is loaded into memory and the join is performed in
 * a mapper. This style of join has the important implication that the output of
 * the join is not sorted, unlike the output of a conventional (reducer-based)
 * join.
* <p/>
* Instances of this class should be instantiated via the {@link #create()} or {@link #create(boolean)} factory
* methods, or optionally via the deprecated public constructor for backwards compatibility with
* older versions of Crunch where the right-side table was loaded into memory. The public constructor will be removed
* in a future release.
*/
public class MapsideJoinStrategy<K, U, V> implements JoinStrategy<K, U, V> {
private boolean materialize;
/**
   * Constructs a new instance of the {@code MapsideJoinStrategy}, materializing the right-side
* join table to disk before the join is performed.
*
* @deprecated Use the {@link #create()} factory method instead
*/
@Deprecated
public MapsideJoinStrategy() {
this(true);
}
/**
* Constructs a new instance of the {@code MapsideJoinStrategy}. If the {@code materialize}
* argument is true, then the right-side join {@code PTable} will be materialized to disk
* before the in-memory join is performed. If it is false, then Crunch can optionally read
* and process the data from the right-side table without having to run a job to materialize
* the data to disk first.
*
* @param materialize Whether or not to materialize the right-side table before the join
*
* @deprecated Use the {@link #create(boolean)} factory method instead
*/
@Deprecated
public MapsideJoinStrategy(boolean materialize) {
this.materialize = materialize;
}
/**
* Create a new {@code MapsideJoinStrategy} instance that will load its left-side table into memory,
* and will materialize the contents of the left-side table to disk before running the in-memory join.
* <p/>
* The smaller of the two tables to be joined should be provided as the left-side table of the created join
* strategy instance.
*/
public static <K, U, V> MapsideJoinStrategy<K, U, V> create() {
return create(true);
}
/**
* Create a new {@code MapsideJoinStrategy} instance that will load its left-side table into memory.
* <p/>
* If the {@code materialize} parameter is true, then the left-side {@code PTable} will be materialized to disk
* before the in-memory join is performed. If it is false, then Crunch can optionally read and process the data
* from the left-side table without having to run a job to materialize the data to disk first.
*
* @param materialize Whether or not to materialize the left-side table before the join
*/
public static <K, U, V> MapsideJoinStrategy<K, U, V> create(boolean materialize) {
return new LoadLeftSideMapsideJoinStrategy(materialize);
}
@Override
public PTable<K, Pair<U, V>> join(PTable<K, U> left, PTable<K, V> right, JoinType joinType) {
switch (joinType) {
case INNER_JOIN:
return joinInternal(left, right, false);
case LEFT_OUTER_JOIN:
return joinInternal(left, right, true);
default:
throw new UnsupportedOperationException("Join type " + joinType
+ " not supported by MapsideJoinStrategy");
}
}
private PTable<K, Pair<U,V>> joinInternal(PTable<K, U> left, PTable<K, V> right, boolean includeUnmatchedLeftValues) {
PTypeFamily tf = left.getTypeFamily();
ReadableData<Pair<K, V>> rightReadable = right.asReadable(materialize);
MapsideJoinDoFn<K, U, V> mapJoinDoFn = new MapsideJoinDoFn<K, U, V>(
rightReadable, right.getPTableType(), includeUnmatchedLeftValues);
ParallelDoOptions options = ParallelDoOptions.builder()
.sourceTargets(rightReadable.getSourceTargets())
.build();
return left.parallelDo("mapjoin", mapJoinDoFn,
tf.tableOf(left.getKeyType(), tf.pairs(left.getValueType(), right.getValueType())),
options);
}
static class MapsideJoinDoFn<K, U, V> extends DoFn<Pair<K, U>, Pair<K, Pair<U, V>>> {
private final ReadableData<Pair<K, V>> readable;
private final PTableType<K, V> tableType;
private final boolean includeUnmatched;
private Multimap<K, V> joinMap;
public MapsideJoinDoFn(ReadableData<Pair<K, V>> rs, PTableType<K, V> tableType, boolean includeUnmatched) {
this.readable = rs;
this.tableType = tableType;
this.includeUnmatched = includeUnmatched;
}
@Override
public void configure(Configuration conf) {
readable.configure(conf);
}
@Override
public void initialize() {
super.initialize();
tableType.initialize(getConfiguration());
joinMap = ArrayListMultimap.create();
try {
for (Pair<K, V> joinPair : readable.read(getContext())) {
Pair<K, V> detachedPair = tableType.getDetachedValue(joinPair);
joinMap.put(detachedPair.first(), detachedPair.second());
}
} catch (IOException e) {
throw new CrunchRuntimeException("Error reading map-side join data", e);
}
}
@Override
public void process(Pair<K, U> input, Emitter<Pair<K, Pair<U, V>>> emitter) {
K key = input.first();
U value = input.second();
Collection<V> joinValues = joinMap.get(key);
if (includeUnmatched && joinValues.isEmpty()) {
emitter.emit(Pair.of(key, Pair.<U,V>of(value, null)));
} else {
for (V joinValue : joinValues) {
Pair<U, V> valuePair = Pair.of(value, joinValue);
emitter.emit(Pair.of(key, valuePair));
}
}
}
}
/**
* Loads the left-most table (instead of the right-most) in memory while performing the join.
*/
private static class LoadLeftSideMapsideJoinStrategy<K, U, V> extends MapsideJoinStrategy<K, U, V> {
private MapsideJoinStrategy<K, V, U> mapsideJoinStrategy;
public LoadLeftSideMapsideJoinStrategy(boolean materialize) {
mapsideJoinStrategy = new MapsideJoinStrategy<K, V, U>(materialize);
}
@Override
public PTable<K, Pair<U, V>> join(PTable<K, U> left, PTable<K, V> right, JoinType joinType) {
JoinType reversedJoinType;
switch (joinType) {
case INNER_JOIN:
reversedJoinType = JoinType.INNER_JOIN;
break;
case RIGHT_OUTER_JOIN:
reversedJoinType = JoinType.LEFT_OUTER_JOIN;
break;
default:
throw new UnsupportedOperationException("Join type " + joinType + " is not supported");
}
return mapsideJoinStrategy.join(right, left, reversedJoinType)
.mapValues("Reverse order out output table values",
new ReversePairOrderFn<V, U>(),
left.getTypeFamily().pairs(left.getValueType(), right.getValueType()));
}
}
private static class ReversePairOrderFn<V, U> extends MapFn<Pair<V, U>, Pair<U, V>> {
@Override
public Pair<U, V> map(Pair<V, U> input) {
return Pair.of(input.second(), input.first());
}
}
}
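A short, hypothetical usage sketch: with the create() factory the left-side table is the one loaded into memory, so the smaller of the two inputs should be passed as the left argument.
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.lib.join.JoinType;
import org.apache.crunch.lib.join.MapsideJoinStrategy;
public class MapsideJoinExample {
  // The smaller table is loaded into memory in each mapper, so the join runs
  // entirely map-side with no shuffle or reduce phase.
  public static <K, U, V> PTable<K, Pair<U, V>> join(PTable<K, U> smaller, PTable<K, V> larger) {
    MapsideJoinStrategy<K, U, V> strategy = MapsideJoinStrategy.<K, U, V>create();
    return strategy.join(smaller, larger, JoinType.INNER_JOIN);
  }
}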
| 2,899 |