index (int64) | repo_id (string) | file_path (string) | content (string) | __index_level_0__ (int64)
---|---|---|---|---|
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/PersonProtos.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: resources/person.proto
package org.apache.crunch.lib;
public final class PersonProtos {
private PersonProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
public interface PersonOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional string first = 1;
/**
* <code>optional string first = 1;</code>
*/
boolean hasFirst();
/**
* <code>optional string first = 1;</code>
*/
java.lang.String getFirst();
/**
* <code>optional string first = 1;</code>
*/
com.google.protobuf.ByteString
getFirstBytes();
// optional string last = 2;
/**
* <code>optional string last = 2;</code>
*/
boolean hasLast();
/**
* <code>optional string last = 2;</code>
*/
java.lang.String getLast();
/**
* <code>optional string last = 2;</code>
*/
com.google.protobuf.ByteString
getLastBytes();
}
/**
* Protobuf type {@code crunch.Person}
*/
public static final class Person extends
com.google.protobuf.GeneratedMessage
implements PersonOrBuilder {
// Use Person.newBuilder() to construct.
private Person(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private Person(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final Person defaultInstance;
public static Person getDefaultInstance() {
return defaultInstance;
}
public Person getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private Person(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
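// Parses a Person from the wire format: tag 10 is field 1 ("first") and tag 18
// is field 2 ("last"); both are length-delimited (wire type 2) strings.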
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
first_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
last_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.crunch.lib.PersonProtos.internal_static_crunch_Person_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.crunch.lib.PersonProtos.internal_static_crunch_Person_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.crunch.lib.PersonProtos.Person.class, org.apache.crunch.lib.PersonProtos.Person.Builder.class);
}
public static com.google.protobuf.Parser<Person> PARSER =
new com.google.protobuf.AbstractParser<Person>() {
public Person parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Person(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<Person> getParserForType() {
return PARSER;
}
private int bitField0_;
// optional string first = 1;
public static final int FIRST_FIELD_NUMBER = 1;
private java.lang.Object first_;
/**
* <code>optional string first = 1;</code>
*/
public boolean hasFirst() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional string first = 1;</code>
*/
public java.lang.String getFirst() {
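// The field is stored as either a String or a ByteString; decode lazily and
// cache the decoded String only when the bytes are valid UTF-8.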
java.lang.Object ref = first_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
first_ = s;
}
return s;
}
}
/**
* <code>optional string first = 1;</code>
*/
public com.google.protobuf.ByteString
getFirstBytes() {
java.lang.Object ref = first_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
first_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional string last = 2;
public static final int LAST_FIELD_NUMBER = 2;
private java.lang.Object last_;
/**
* <code>optional string last = 2;</code>
*/
public boolean hasLast() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional string last = 2;</code>
*/
public java.lang.String getLast() {
java.lang.Object ref = last_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
last_ = s;
}
return s;
}
}
/**
* <code>optional string last = 2;</code>
*/
public com.google.protobuf.ByteString
getLastBytes() {
java.lang.Object ref = last_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
last_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
first_ = "";
last_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getFirstBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getLastBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
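// The size is computed once and memoized; -1 marks "not yet computed".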
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getFirstBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getLastBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
public static org.apache.crunch.lib.PersonProtos.Person parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.crunch.lib.PersonProtos.Person parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.crunch.lib.PersonProtos.Person parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.crunch.lib.PersonProtos.Person parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.crunch.lib.PersonProtos.Person parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.crunch.lib.PersonProtos.Person parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.crunch.lib.PersonProtos.Person parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.crunch.lib.PersonProtos.Person parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.crunch.lib.PersonProtos.Person parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.crunch.lib.PersonProtos.Person parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.crunch.lib.PersonProtos.Person prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code crunch.Person}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.crunch.lib.PersonProtos.PersonOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.crunch.lib.PersonProtos.internal_static_crunch_Person_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.crunch.lib.PersonProtos.internal_static_crunch_Person_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.crunch.lib.PersonProtos.Person.class, org.apache.crunch.lib.PersonProtos.Person.Builder.class);
}
// Construct using org.apache.crunch.lib.PersonProtos.Person.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
first_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
last_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.crunch.lib.PersonProtos.internal_static_crunch_Person_descriptor;
}
public org.apache.crunch.lib.PersonProtos.Person getDefaultInstanceForType() {
return org.apache.crunch.lib.PersonProtos.Person.getDefaultInstance();
}
public org.apache.crunch.lib.PersonProtos.Person build() {
org.apache.crunch.lib.PersonProtos.Person result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.crunch.lib.PersonProtos.Person buildPartial() {
org.apache.crunch.lib.PersonProtos.Person result = new org.apache.crunch.lib.PersonProtos.Person(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.first_ = first_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.last_ = last_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.crunch.lib.PersonProtos.Person) {
return mergeFrom((org.apache.crunch.lib.PersonProtos.Person)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.crunch.lib.PersonProtos.Person other) {
if (other == org.apache.crunch.lib.PersonProtos.Person.getDefaultInstance()) return this;
if (other.hasFirst()) {
bitField0_ |= 0x00000001;
first_ = other.first_;
onChanged();
}
if (other.hasLast()) {
bitField0_ |= 0x00000002;
last_ = other.last_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
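// If parsing fails partway through, merge whatever was successfully read
// before rethrowing, so the builder reflects the partial message.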
org.apache.crunch.lib.PersonProtos.Person parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.crunch.lib.PersonProtos.Person) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional string first = 1;
private java.lang.Object first_ = "";
/**
* <code>optional string first = 1;</code>
*/
public boolean hasFirst() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional string first = 1;</code>
*/
public java.lang.String getFirst() {
java.lang.Object ref = first_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
first_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* <code>optional string first = 1;</code>
*/
public com.google.protobuf.ByteString
getFirstBytes() {
java.lang.Object ref = first_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
first_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* <code>optional string first = 1;</code>
*/
public Builder setFirst(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
first_ = value;
onChanged();
return this;
}
/**
* <code>optional string first = 1;</code>
*/
public Builder clearFirst() {
bitField0_ = (bitField0_ & ~0x00000001);
first_ = getDefaultInstance().getFirst();
onChanged();
return this;
}
/**
* <code>optional string first = 1;</code>
*/
public Builder setFirstBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
first_ = value;
onChanged();
return this;
}
// optional string last = 2;
private java.lang.Object last_ = "";
/**
* <code>optional string last = 2;</code>
*/
public boolean hasLast() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional string last = 2;</code>
*/
public java.lang.String getLast() {
java.lang.Object ref = last_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
last_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* <code>optional string last = 2;</code>
*/
public com.google.protobuf.ByteString
getLastBytes() {
java.lang.Object ref = last_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
last_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* <code>optional string last = 2;</code>
*/
public Builder setLast(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
last_ = value;
onChanged();
return this;
}
/**
* <code>optional string last = 2;</code>
*/
public Builder clearLast() {
bitField0_ = (bitField0_ & ~0x00000002);
last_ = getDefaultInstance().getLast();
onChanged();
return this;
}
/**
* <code>optional string last = 2;</code>
*/
public Builder setLastBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
last_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:crunch.Person)
}
static {
defaultInstance = new Person(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:crunch.Person)
}
private static com.google.protobuf.Descriptors.Descriptor
internal_static_crunch_Person_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_crunch_Person_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
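// The file's serialized FileDescriptorProto, embedded as string literals and
// parsed when this class is loaded.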
java.lang.String[] descriptorData = {
"\n\ngist.proto\022\006crunch\"%\n\006Person\022\r\n\005first\030" +
"\001 \001(\t\022\014\n\004last\030\002 \001(\tB\'\n\025org.apache.crunch" +
".libB\014PersonProtosH\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_crunch_Person_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_crunch_Person_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_crunch_Person_descriptor,
new java.lang.String[] { "First", "Last", });
return null;
}
};
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
}, assigner);
}
// @@protoc_insertion_point(outer_class_scope)
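// Illustrative usage sketch (not emitted by protoc): build a Person and
// round-trip it through the binary format.
//
//   Person p = Person.newBuilder().setFirst("Ada").setLast("Lovelace").build();
//   byte[] bytes = p.toByteArray();
//   Person copy = Person.parseFrom(bytes);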
}
| 2,600 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/DistinctIT.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import java.io.IOException;
import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.io.From;
import org.apache.crunch.test.CrunchTestSupport;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import com.google.common.collect.Lists;
import static org.junit.Assert.assertEquals;
public class DistinctIT extends CrunchTestSupport {
@Test
public void testDistinct() throws IOException {
Pipeline p = new MRPipeline(DistinctIT.class, tempDir.getDefaultConfiguration());
Path inputPath = tempDir.copyResourcePath("list.txt");
PCollection<String> in = p.read(From.textFile(inputPath));
PCollection<String> distinct = Distinct.distinct(in);
assertEquals(Lists.newArrayList("a", "b", "c", "d"), Lists.newArrayList(distinct.materialize()));
}
@Test
public void testDistinctWithExplicitNumReducers() throws IOException {
Pipeline p = new MRPipeline(DistinctIT.class, tempDir.getDefaultConfiguration());
Path inputPath = tempDir.copyResourcePath("list.txt");
PCollection<String> in = p.read(From.textFile(inputPath));
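// Second and third args: flushEvery (how many elements accumulate in memory
// before a flush) and the number of reducers.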
PCollection<String> distinct = Distinct.distinct(in, 50, 1);
assertEquals(Lists.newArrayList("a", "b", "c", "d"), Lists.newArrayList(distinct.materialize()));
}
}
| 2,601 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/MapreduceIT.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import static org.junit.Assert.assertEquals;
import java.io.Serializable;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.Pipeline;
import org.apache.crunch.PipelineResult;
import org.apache.crunch.PipelineResult.StageResult;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.io.From;
import org.apache.crunch.io.To;
import org.apache.crunch.test.CrunchTestSupport;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.junit.Test;
public class MapreduceIT extends CrunchTestSupport implements Serializable {
private static class TestMapper extends Mapper<IntWritable, Text, IntWritable, Text> {
@Override
protected void map(IntWritable k, Text v, Mapper<IntWritable, Text, IntWritable, Text>.Context ctxt) {
try {
ctxt.getCounter("written", "out").increment(1L);
ctxt.write(new IntWritable(v.getLength()), v);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
private static class TestReducer extends Reducer<IntWritable, Text, Text, LongWritable> {
protected void reduce(IntWritable key, Iterable<Text> values,
org.apache.hadoop.mapreduce.Reducer<IntWritable, Text, Text, LongWritable>.Context ctxt) {
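// For each key, count values containing "where" and emit the last value that
// does not, paired with 1 if any value matched and 0 otherwise.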
boolean hasWhere = false;
String notWhere = "";
for (Text t : values) {
String next = t.toString();
if (next.contains("where")) {
hasWhere = true;
ctxt.getCounter("words", "where").increment(1);
} else {
notWhere = next;
}
}
try {
ctxt.write(new Text(notWhere), hasWhere ? new LongWritable(1L) : new LongWritable(0L));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
@Test
public void testMapper() throws Exception {
Pipeline p = new MRPipeline(MapreduceIT.class, tempDir.getDefaultConfiguration());
Path shakesPath = tempDir.copyResourcePath("shakes.txt");
PCollection<String> in = p.read(From.textFile(shakesPath));
PTable<IntWritable, Text> two = in.parallelDo(new MapFn<String, Pair<IntWritable, Text>>() {
@Override
public Pair<IntWritable, Text> map(String input) {
return Pair.of(new IntWritable(input.length()), new Text(input));
}
}, Writables.tableOf(Writables.writables(IntWritable.class), Writables.writables(Text.class)));
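// Mapreduce.map runs a standard org.apache.hadoop.mapreduce.Mapper over the
// PTable's key/value pairs.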
PTable<IntWritable, Text> out = Mapreduce.map(two, TestMapper.class, IntWritable.class, Text.class);
out.write(To.sequenceFile(tempDir.getPath("temp")));
PipelineResult res = p.done();
assertEquals(1, res.getStageResults().size());
StageResult sr = res.getStageResults().get(0);
assertEquals(3285, sr.getCounters().findCounter("written", "out").getValue());
}
@Test
public void testReducer() throws Exception {
Pipeline p = new MRPipeline(MapreduceIT.class, tempDir.getDefaultConfiguration());
Path shakesPath = tempDir.copyResourcePath("shakes.txt");
PCollection<String> in = p.read(From.textFile(shakesPath));
PTable<IntWritable, Text> two = in.parallelDo(new MapFn<String, Pair<IntWritable, Text>>() {
@Override
public Pair<IntWritable, Text> map(String input) {
return Pair.of(new IntWritable(input.length()), new Text(input));
}
}, Writables.tableOf(Writables.writables(IntWritable.class), Writables.writables(Text.class)));
PTable<Text, LongWritable> out = Mapreduce.reduce(two.groupByKey(), TestReducer.class, Text.class, LongWritable.class);
out.write(To.sequenceFile(tempDir.getPath("temp")));
PipelineResult res = p.done();
assertEquals(1, res.getStageResults().size());
StageResult sr = res.getStageResults().get(0);
assertEquals(19, sr.getCounters().findCounter("words", "where").getValue());
}
}
| 2,602 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/ShardIT.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMultiset;
import org.apache.commons.io.FileUtils;
import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.io.From;
import org.apache.crunch.test.TemporaryPath;
import org.junit.Rule;
import org.junit.Test;
import java.io.File;
import static org.junit.Assert.assertEquals;
public class ShardIT {
@Rule
public TemporaryPath tempDir = new TemporaryPath("crunch.tmp.dir", "hadoop.tmp.dir");
@Test
public void testShard() throws Exception {
File inDir = tempDir.getFile("in");
FileUtils.writeLines(new File(inDir, "part1"), ImmutableList.of("part1", "part1"));
FileUtils.writeLines(new File(inDir, "part2"), ImmutableList.of("part2"));
Pipeline pipeline = new MRPipeline(ShardIT.class);
PCollection<String> in = pipeline.read(From.textFile(inDir.getPath()));
// We can only test on 1 shard here, as local MR does not support multiple reducers.
PCollection<String> out = Shard.shard(in, 1);
assertEquals(
ImmutableMultiset.copyOf(out.materialize()),
ImmutableMultiset.of("part1", "part1", "part2"));
}
}
| 2,603 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/CogroupIT.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import org.apache.crunch.*;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.lib.PersonProtos.Person;
import org.apache.crunch.lib.PersonProtos.Person.Builder;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.test.TemporaryPaths;
import org.apache.crunch.test.Tests;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.PTypes;
import org.apache.crunch.types.avro.AvroTypeFamily;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;
public class CogroupIT {
@Rule
public TemporaryPath tmpDir = TemporaryPaths.create();
private MRPipeline pipeline;
private PCollection<String> lines1;
private PCollection<String> lines2;
private PCollection<String> lines3;
private PCollection<String> lines4;
@Before
public void setUp() throws IOException {
pipeline = new MRPipeline(CogroupIT.class, tmpDir.getDefaultConfiguration());
lines1 = pipeline.readTextFile(tmpDir.copyResourceFileName(Tests.resource(this, "src1.txt")));
lines2 = pipeline.readTextFile(tmpDir.copyResourceFileName(Tests.resource(this, "src2.txt")));
lines3 = pipeline.readTextFile(tmpDir.copyResourceFileName(Tests.resource(this, "src1.txt")));
lines4 = pipeline.readTextFile(tmpDir.copyResourceFileName(Tests.resource(this, "src2.txt")));
}
@After
public void tearDown() {
pipeline.done();
}
@Test
public void testCogroupWritables() {
runCogroup(WritableTypeFamily.getInstance());
}
@Test
public void testCogroupAvro() {
runCogroup(AvroTypeFamily.getInstance());
}
@Test
public void testCogroup3Writables() {
runCogroup3(WritableTypeFamily.getInstance());
}
@Test
public void testCogroup3Avro() {
runCogroup3(AvroTypeFamily.getInstance());
}
@Test
public void testCogroup4Writables() {
runCogroup4(WritableTypeFamily.getInstance());
}
@Test
public void testCogroup4Avro() {
runCogroup4(AvroTypeFamily.getInstance());
}
@Test
public void testCogroupNWritables() {
runCogroupN(WritableTypeFamily.getInstance());
}
@Test
public void testCogroupNAvro() {
runCogroupN(AvroTypeFamily.getInstance());
}
@Test
public void testCogroupProtosWritables() {
runCogroupProtos(WritableTypeFamily.getInstance());
}
@Test
public void testCogroupProtosAvro() {
runCogroupProtos(AvroTypeFamily.getInstance());
}
@Test
public void testCogroupProtosPairsWritables() {
runCogroupProtosPairs(WritableTypeFamily.getInstance());
}
@Test
public void testCogroupProtosPairsAvro() {
runCogroupProtosPairs(AvroTypeFamily.getInstance());
}
public void runCogroup(PTypeFamily ptf) {
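// Input lines are "key,value" pairs; cogrouping yields, per key, a pair of
// value collections, one from each input table.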
PTableType<String, String> tt = ptf.tableOf(ptf.strings(), ptf.strings());
PTable<String, String> kv1 = lines1.parallelDo("kv1", new KeyValueSplit(), tt);
PTable<String, String> kv2 = lines2.parallelDo("kv2", new KeyValueSplit(), tt);
PTable<String, Pair<Collection<String>, Collection<String>>> cg = Cogroup.cogroup(kv1, kv2);
Map<String, Pair<Collection<String>, Collection<String>>> result = cg.materializeToMap();
Map<String, Pair<Collection<String>, Collection<String>>> actual = Maps.newHashMap();
for (Map.Entry<String, Pair<Collection<String>, Collection<String>>> e : result.entrySet()) {
Collection<String> one = ImmutableSet.copyOf(e.getValue().first());
Collection<String> two = ImmutableSet.copyOf(e.getValue().second());
actual.put(e.getKey(), Pair.of(one, two));
}
Map<String, Pair<Collection<String>, Collection<String>>> expected = ImmutableMap.of(
"a", Pair.of(coll("1-1", "1-4"), coll()),
"b", Pair.of(coll("1-2"), coll("2-1")),
"c", Pair.of(coll("1-3"), coll("2-2", "2-3")),
"d", Pair.of(coll(), coll("2-4"))
);
assertThat(actual, is(expected));
}
public void runCogroupProtos(PTypeFamily ptf) {
PTableType<String, Person> tt = ptf.tableOf(ptf.strings(), PTypes.protos(Person.class, ptf));
PTable<String, Person> kv1 = lines1.parallelDo("kv1", new GenerateProto(), tt);
PTable<String, Person> kv2 = lines2.parallelDo("kv2", new GenerateProto(), tt);
PTable<String, Pair<Collection<Person>, Collection<Person>>> cg = Cogroup.cogroup(kv1, kv2);
Map<String, Pair<Collection<Person>, Collection<Person>>> result = cg.materializeToMap();
assertThat(result.size(), is(4));
}
public void runCogroupProtosPairs(PTypeFamily ptf) {
PTableType<String, Pair<String, Person>> tt = ptf.tableOf(ptf.strings(), ptf.pairs(ptf.strings(), PTypes.protos(Person.class, ptf)));
PTable<String, Pair<String, Person>> kv1 = lines1.parallelDo("kv1", new GenerateProtoPairs(), tt);
PTable<String, Pair<String, Person>> kv2 = lines2.parallelDo("kv2", new GenerateProtoPairs(), tt);
PTable<String, Pair<Collection<Pair<String, Person>>, Collection<Pair<String, Person>>>> cg = Cogroup.cogroup(kv1, kv2);
Map<String, Pair<Collection<Pair<String, Person>>, Collection<Pair<String, Person>>>> result = cg.materializeToMap();
assertThat(result.size(), is(4));
}
public void runCogroup3(PTypeFamily ptf) {
PTableType<String, String> tt = ptf.tableOf(ptf.strings(), ptf.strings());
PTable<String, String> kv1 = lines1.parallelDo("kv1", new KeyValueSplit(), tt);
PTable<String, String> kv2 = lines2.parallelDo("kv2", new KeyValueSplit(), tt);
PTable<String, String> kv3 = lines3.parallelDo("kv3", new KeyValueSplit(), tt);
PTable<String, Tuple3.Collect<String, String, String>> cg = Cogroup.cogroup(kv1, kv2, kv3);
Map<String, Tuple3.Collect<String, String, String>> result = cg.materializeToMap();
Map<String, Tuple3.Collect<String, String, String>> actual = Maps.newHashMap();
for (Map.Entry<String, Tuple3.Collect<String, String, String>> e : result.entrySet()) {
Collection<String> one = ImmutableSet.copyOf(e.getValue().first());
Collection<String> two = ImmutableSet.copyOf(e.getValue().second());
Collection<String> three = ImmutableSet.copyOf(e.getValue().third());
actual.put(e.getKey(), new Tuple3.Collect<String, String, String>(one, two, three));
}
Map<String, Tuple3.Collect<String, String, String>> expected = ImmutableMap.of(
"a", new Tuple3.Collect<String, String, String>(coll("1-1", "1-4"), coll(), coll("1-1", "1-4")),
"b", new Tuple3.Collect<String, String, String>(coll("1-2"), coll("2-1"), coll("1-2")),
"c", new Tuple3.Collect<String, String, String>(coll("1-3"), coll("2-2", "2-3"), coll("1-3")),
"d", new Tuple3.Collect<String, String, String>(coll(), coll("2-4"), coll())
);
assertThat(actual, is(expected));
}
public void runCogroup4(PTypeFamily ptf) {
PTableType<String, String> tt = ptf.tableOf(ptf.strings(), ptf.strings());
PTable<String, String> kv1 = lines1.parallelDo("kv1", new KeyValueSplit(), tt);
PTable<String, String> kv2 = lines2.parallelDo("kv2", new KeyValueSplit(), tt);
PTable<String, String> kv3 = lines3.parallelDo("kv3", new KeyValueSplit(), tt);
PTable<String, String> kv4 = lines4.parallelDo("kv4", new KeyValueSplit(), tt);
PTable<String, Tuple4.Collect<String, String, String, String>> cg = Cogroup.cogroup(kv1, kv2, kv3, kv4);
Map<String, Tuple4.Collect<String, String, String, String>> result = cg.materializeToMap();
Map<String, Tuple4.Collect<String, String, String, String>> actual = Maps.newHashMap();
for (Map.Entry<String, Tuple4.Collect<String, String, String, String>> e : result.entrySet()) {
Collection<String> one = ImmutableSet.copyOf(e.getValue().first());
Collection<String> two = ImmutableSet.copyOf(e.getValue().second());
Collection<String> three = ImmutableSet.copyOf(e.getValue().third());
Collection<String> four = ImmutableSet.copyOf(e.getValue().fourth());
actual.put(e.getKey(), new Tuple4.Collect<String, String, String, String>(one, two, three, four));
}
Map<String, Tuple4.Collect<String, String, String, String>> expected = ImmutableMap.of(
"a", new Tuple4.Collect<String, String, String, String>(coll("1-1", "1-4"), coll(), coll("1-1", "1-4"), coll()),
"b", new Tuple4.Collect<String, String, String, String>(coll("1-2"), coll("2-1"), coll("1-2"), coll("2-1")),
"c", new Tuple4.Collect<String, String, String, String>(coll("1-3"), coll("2-2", "2-3"), coll("1-3"), coll("2-2", "2-3")),
"d", new Tuple4.Collect<String, String, String, String>(coll(), coll("2-4"), coll(), coll("2-4"))
);
assertThat(actual, is(expected));
}
public void runCogroupN(PTypeFamily ptf) {
PTableType<String, String> tt = ptf.tableOf(ptf.strings(), ptf.strings());
PTable<String, String> kv1 = lines1.parallelDo("kv1", new KeyValueSplit(), tt);
PTable<String, String> kv2 = lines2.parallelDo("kv2", new KeyValueSplit(), tt);
PTable<String, TupleN> cg = Cogroup.cogroup(kv1, new PTable[]{kv2});
Map<String, TupleN> result = cg.materializeToMap();
Map<String, TupleN> actual = Maps.newHashMap();
for (Map.Entry<String, TupleN> e : result.entrySet()) {
Collection<String> one = ImmutableSet.copyOf((Collection<? extends String>) e.getValue().get(0));
Collection<String> two = ImmutableSet.copyOf((Collection<? extends String>)e.getValue().get(1));
actual.put(e.getKey(), TupleN.of(one, two));
}
Map<String, TupleN> expected = ImmutableMap.of(
"a", TupleN.of(coll("1-1", "1-4"), coll()),
"b", TupleN.of(coll("1-2"), coll("2-1")),
"c", TupleN.of(coll("1-3"), coll("2-2", "2-3")),
"d", TupleN.of(coll(), coll("2-4"))
);
assertThat(actual, is(expected));
PType<TupleN> tupleValueType = cg.getValueType();
List<PType> expectedSubtypes = ImmutableList.<PType>of(
ptf.collections(ptf.strings()),
ptf.collections(ptf.strings()));
assertThat(tupleValueType.getSubTypes(), is(expectedSubtypes));
}
private static class KeyValueSplit extends DoFn<String, Pair<String, String>> {
@Override
public void process(String input, Emitter<Pair<String, String>> emitter) {
String[] fields = input.split(",");
emitter.emit(Pair.of(fields[0], fields[1]));
}
}
private static class GenerateProto extends DoFn<String, Pair<String, Person>> {
@Override
public void process(String input, Emitter<Pair<String, Person>> emitter) {
String[] fields = input.split(",");
String key = fields[0];
Builder b = Person.newBuilder().setFirst("first"+key).setLast("last"+key);
emitter.emit(Pair.of(fields[0], b.build()));
}
}
private static class GenerateProtoPairs extends DoFn<String, Pair<String, Pair<String, Person>>> {
@Override
public void process(String input, Emitter<Pair<String, Pair<String, Person>>> emitter) {
String[] fields = input.split(",");
String key = fields[0];
Builder b = Person.newBuilder().setFirst("first"+key).setLast("last"+key);
emitter.emit(Pair.of(fields[0], Pair.of(fields[1], b.build())));
}
}
private static Collection<String> coll(String... values) {
return ImmutableSet.copyOf(values);
}
}
| 2,604 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/FullOuterJoinFnTest.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import static org.apache.crunch.test.StringWrapper.wrap;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import org.apache.crunch.Emitter;
import org.apache.crunch.Pair;
import org.apache.crunch.test.StringWrapper;
import org.apache.crunch.types.avro.Avros;
public class FullOuterJoinFnTest extends JoinFnTestBase {
@Override
protected void checkOutput(Emitter<Pair<StringWrapper, Pair<StringWrapper, String>>> emitter) {
verify(emitter)
.emit(Pair.of(wrap("left-only"), Pair.of(wrap("left-only-left"), (String) null)));
verify(emitter).emit(Pair.of(wrap("both"), Pair.of(wrap("both-left"), "both-right")));
verify(emitter).emit(
Pair.of(wrap("right-only"), Pair.of((StringWrapper) null, "right-only-right")));
verifyNoMoreInteractions(emitter);
}
@Override
protected JoinFn<StringWrapper, StringWrapper, String> getJoinFn() {
return new FullOuterJoinFn<StringWrapper, StringWrapper, String>(
Avros.reflects(StringWrapper.class),
Avros.reflects(StringWrapper.class));
}
}
| 2,605 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/AbstractRightOuterJoinIT.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import static org.junit.Assert.assertTrue;
import org.apache.crunch.Pair;
public abstract class AbstractRightOuterJoinIT extends JoinTester {
@Override
public void assertPassed(Iterable<Pair<String, Long>> lines) {
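// "wretched" and "moon" must appear with the expected counts; the left-only
// key "againe" must not appear in a right outer join.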
boolean passed1 = false;
boolean passed2 = true;
boolean passed3 = false;
for (Pair<String, Long> line : lines) {
if ("wretched".equals(line.first()) && 19 == line.second()) {
passed1 = true;
}
if ("againe".equals(line.first())) {
passed2 = false;
}
if ("moon".equals(line.first()) && 9 == line.second()) {
passed3 = true;
}
}
assertTrue(passed1);
assertTrue(passed2);
assertTrue(passed3);
}
@Override
protected JoinType getJoinType() {
return JoinType.RIGHT_OUTER_JOIN;
}
}
| 2,606 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/BloomFilterRightOuterJoinIT.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public class BloomFilterRightOuterJoinIT extends AbstractRightOuterJoinIT {
private static String saveTempDir;
@BeforeClass
public static void setUpClass(){
// Ensure a consistent temporary directory for use of the DistributedCache.
// The DistributedCache technically isn't supported when running in local mode, and the default
// temporary directory "/tmp" is used as its location. This typically only causes an issue when
// running integration tests on Mac OS X, as OS X doesn't use "/tmp" as its default temporary
// directory. The following call ensures that "/tmp" is used as the temporary directory on all platforms.
saveTempDir = System.setProperty("java.io.tmpdir", "/tmp");
}
@AfterClass
public static void tearDownClass(){
System.setProperty("java.io.tmpdir", saveTempDir);
}
@Override
protected <K, U, V> JoinStrategy<K, U, V> getJoinStrategy() {
return new BloomFilterJoinStrategy<K, U, V>(20000);
}
}
| 2,607 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/DefaultFullOuterJoinIT.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
public class DefaultFullOuterJoinIT extends AbstractFullOuterJoinIT {
@Override
protected <K, U, V> JoinStrategy<K, U, V> getJoinStrategy() {
return new DefaultJoinStrategy<K, U, V>();
}
}
| 2,608 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/DefaultInnerJoinIT.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
public class DefaultInnerJoinIT extends AbstractInnerJoinIT {
@Override
protected <K, U, V> JoinStrategy<K, U, V> getJoinStrategy() {
return new DefaultJoinStrategy<K, U, V>();
}
}
| 2,609 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/AbstractLeftOuterJoinIT.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import static org.junit.Assert.assertTrue;
import org.apache.crunch.Pair;
public abstract class AbstractLeftOuterJoinIT extends JoinTester {
@Override
public void assertPassed(Iterable<Pair<String, Long>> lines) {
boolean passed1 = false;
boolean passed2 = false;
boolean passed3 = true;
for (Pair<String, Long> line : lines) {
if ("wretched".equals(line.first()) && 19 == line.second()) {
passed1 = true;
}
if ("againe".equals(line.first()) && 10 == line.second()) {
passed2 = true;
}
if ("moon".equals(line.first())) {
passed3 = false;
}
}
assertTrue(passed1);
assertTrue(passed2);
assertTrue(passed3);
}
@Override
protected JoinType getJoinType() {
return JoinType.LEFT_OUTER_JOIN;
}
}
| 2,610 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/AbstractInnerJoinIT.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import static org.junit.Assert.assertTrue;
import org.apache.crunch.Pair;
public abstract class AbstractInnerJoinIT extends JoinTester {
@Override
public void assertPassed(Iterable<Pair<String, Long>> lines) {
boolean passed1 = false;
boolean passed2 = true;
boolean passed3 = true;
for (Pair<String, Long> line : lines) {
if ("wretched".equals(line.first()) && 19 == line.second()) {
passed1 = true;
}
if ("againe".equals(line.first())) {
passed2 = false;
}
if ("moon".equals(line.first())) {
passed3 = false;
}
}
assertTrue(passed1);
assertTrue(passed2);
assertTrue(passed3);
}
@Override
protected JoinType getJoinType() {
return JoinType.INNER_JOIN;
}
}
| 2,611 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/BloomFilterFullOuterJoinIT.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public class BloomFilterFullOuterJoinIT extends AbstractFullOuterJoinIT {
private static String saveTempDir;
@BeforeClass
public static void setUpClass(){
// Ensure a consistent temporary directory for use of the DistributedCache.
// The DistributedCache technically isn't supported when running in local mode, and the default
// temporary directory "/tmp" is used as its location. This typically only causes an issue when
// running integration tests on Mac OS X, as OS X doesn't use "/tmp" as its default temporary
// directory. The following call ensures that "/tmp" is used as the temporary directory on all platforms.
saveTempDir = System.setProperty("java.io.tmpdir", "/tmp");
}
@AfterClass
public static void tearDownClass(){
System.setProperty("java.io.tmpdir", saveTempDir);
}
@Override
protected <K, U, V> JoinStrategy<K, U, V> getJoinStrategy() {
return new BloomFilterJoinStrategy<K, U, V>(20000);
}
}
| 2,612 |
0 | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib | Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/MultiAvroSchemaJoinIT.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import static org.apache.crunch.types.avro.Avros.records;
import static org.apache.crunch.types.avro.Avros.strings;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.specific.SpecificDatumWriter;
import org.apache.avro.specific.SpecificRecord;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.Pair;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.io.From;
import org.apache.crunch.test.Employee;
import org.apache.crunch.test.Person;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.test.TemporaryPaths;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
public class MultiAvroSchemaJoinIT {
private File personFile;
private File employeeFile;
@Rule
public TemporaryPath tmpDir = TemporaryPaths.create();
@Before
public void setUp() throws Exception {
this.personFile = File.createTempFile("person", ".avro");
this.employeeFile = File.createTempFile("employee", ".avro");
DatumWriter<Person> pdw = new SpecificDatumWriter<Person>();
DataFileWriter<Person> pfw = new DataFileWriter<Person>(pdw);
pfw.create(Person.SCHEMA$, personFile);
Person p1 = new Person();
p1.name = "Josh";
p1.age = 19;
p1.siblingnames = ImmutableList.<CharSequence> of("Kate", "Mike");
pfw.append(p1);
Person p2 = new Person();
p2.name = "Kate";
p2.age = 17;
p2.siblingnames = ImmutableList.<CharSequence> of("Josh", "Mike");
pfw.append(p2);
Person p3 = new Person();
p3.name = "Mike";
p3.age = 12;
p3.siblingnames = ImmutableList.<CharSequence> of("Josh", "Kate");
pfw.append(p3);
pfw.close();
DatumWriter<Employee> edw = new SpecificDatumWriter<Employee>();
DataFileWriter<Employee> efw = new DataFileWriter<Employee>(edw);
efw.create(Employee.SCHEMA$, employeeFile);
Employee e1 = new Employee();
e1.name = "Kate";
e1.salary = 100000;
e1.department = "Marketing";
efw.append(e1);
efw.close();
}
@After
public void tearDown() throws Exception {
personFile.delete();
employeeFile.delete();
}
public static class NameFn<K extends SpecificRecord> extends MapFn<K, String> {
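// Reads the "name" field via the record's schema, so one fn can key both
// Person and Employee records despite their different schemas.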
@Override
public String map(K input) {
Schema s = input.getSchema();
Schema.Field f = s.getField("name");
return input.get(f.pos()).toString();
}
}
@Test
public void testJoin() throws Exception {
Pipeline p = new MRPipeline(MultiAvroSchemaJoinIT.class, tmpDir.getDefaultConfiguration());
PCollection<Person> people = p.read(From.avroFile(personFile.getAbsolutePath(), records(Person.class)));
PCollection<Employee> employees = p.read(From.avroFile(employeeFile.getAbsolutePath(), records(Employee.class)));
Iterable<Pair<Person, Employee>> result = people.by(new NameFn<Person>(), strings())
.join(employees.by(new NameFn<Employee>(), strings())).values().materialize();
List<Pair<Person, Employee>> v = Lists.newArrayList(result);
assertEquals(1, v.size());
assertEquals("Kate", v.get(0).first().name.toString());
assertEquals("Kate", v.get(0).second().name.toString());
}
}
| 2,613 |
0 |
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/ShardedRightOuterJoinIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
public class ShardedRightOuterJoinIT extends AbstractRightOuterJoinIT {
@Override
protected <K, U, V> JoinStrategy<K, U, V> getJoinStrategy() {
return new ShardedJoinStrategy<K, U, V>(3);
}
}
| 2,614 |
0 |
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/MapsideJoinStrategyIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.Charset;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import com.google.common.collect.Lists;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.Pipeline;
import org.apache.crunch.PipelineResult;
import org.apache.crunch.fn.FilterFns;
import org.apache.crunch.impl.mem.MemPipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.test.TemporaryPaths;
import org.apache.crunch.types.writable.Writables;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
public class MapsideJoinStrategyIT {
private static String saveTempDir;
@BeforeClass
public static void setUpClass(){
// Ensure a consistent temporary directory for use of the DistributedCache.
// The DistributedCache technically isn't supported when running in local mode, and the default
    // temporary directory "/tmp" is used as its location. This typically only causes an issue when
    // running integration tests on Mac OS X, as OS X doesn't use "/tmp" as its default temporary
// directory. The following call ensures that "/tmp" is used as the temporary directory on all platforms.
saveTempDir = System.setProperty("java.io.tmpdir", "/tmp");
}
@AfterClass
public static void tearDownClass(){
System.setProperty("java.io.tmpdir", saveTempDir);
}
private static class LineSplitter extends MapFn<String, Pair<Integer, String>> {
@Override
public Pair<Integer, String> map(String input) {
String[] fields = input.split("\\|");
return Pair.of(Integer.parseInt(fields[0]), fields[1]);
}
}
private static class CapOrdersFn extends MapFn<String, String> {
@Override
public String map(String v) {
return v.toUpperCase(Locale.ENGLISH);
}
}
private static class ConcatValuesFn extends MapFn<Pair<String, String>, String> {
@Override
public String map(Pair<String, String> v) {
return v.toString();
}
}
@Rule
public TemporaryPath tmpDir = TemporaryPaths.create();
@Test
public void testMapSideJoin_MemPipeline() {
runMapsideJoin(MemPipeline.getInstance(), true, false, MapsideJoinStrategy.<Integer,String,String>create(false));
}
@Test
public void testLegacyMapSideJoin_MemPipeline() {
runLegacyMapsideJoin(MemPipeline.getInstance(), true, false, new MapsideJoinStrategy<Integer, String, String>(false));
}
@Test
public void testMapSideJoin_MemPipeline_Materialized() {
runMapsideJoin(MemPipeline.getInstance(), true, true, MapsideJoinStrategy.<Integer,String,String>create(true));
}
@Test
public void testLegacyMapSideJoin_MemPipeline_Materialized() {
runLegacyMapsideJoin(MemPipeline.getInstance(), true, true, new MapsideJoinStrategy<Integer, String, String>(true));
}
@Test
public void testMapSideJoinRightOuterJoin_MemPipeline() {
runMapsideRightOuterJoin(MemPipeline.getInstance(), true, false,
MapsideJoinStrategy.<Integer, String, String>create(false));
}
@Test
public void testLegacyMapSideJoinLeftOuterJoin_MemPipeline() {
runLegacyMapsideLeftOuterJoin(MemPipeline.getInstance(), true, false, new MapsideJoinStrategy<Integer, String, String>(false));
}
@Test
public void testMapSideJoinRightOuterJoin_MemPipeline_Materialized() {
runMapsideRightOuterJoin(MemPipeline.getInstance(), true, true,
MapsideJoinStrategy.<Integer, String, String>create(true));
}
@Test
public void testLegacyMapSideJoinLeftOuterJoin_MemPipeline_Materialized() {
runLegacyMapsideLeftOuterJoin(MemPipeline.getInstance(), true, true, new MapsideJoinStrategy<Integer, String, String>(true));
}
@Test
public void testMapsideJoin_RightSideIsEmpty() throws IOException {
MRPipeline pipeline = new MRPipeline(MapsideJoinStrategyIT.class, tmpDir.getDefaultConfiguration());
PTable<Integer, String> customerTable = readTable(pipeline, "customers.txt");
PTable<Integer, String> orderTable = readTable(pipeline, "orders.txt");
PTable<Integer, String> filteredOrderTable = orderTable
.parallelDo(FilterFns.<Pair<Integer, String>>REJECT_ALL(), orderTable.getPTableType());
JoinStrategy<Integer, String, String> mapsideJoin = new MapsideJoinStrategy<Integer, String, String>();
PTable<Integer, Pair<String, String>> joined = mapsideJoin.join(customerTable, filteredOrderTable, JoinType.INNER_JOIN);
List<Pair<Integer, Pair<String, String>>> materializedJoin = Lists.newArrayList(joined.materialize());
assertTrue(materializedJoin.isEmpty());
}
@Test
public void testLegacyMapsideJoin_LeftSideIsEmpty() throws IOException {
MRPipeline pipeline = new MRPipeline(MapsideJoinStrategyIT.class, tmpDir.getDefaultConfiguration());
PTable<Integer, String> customerTable = readTable(pipeline, "customers.txt");
readTable(pipeline, "orders.txt");
PTable<Integer, String> filteredCustomerTable = customerTable
.parallelDo(FilterFns.<Pair<Integer, String>>REJECT_ALL(), customerTable.getPTableType());
JoinStrategy<Integer, String, String> mapsideJoin = new MapsideJoinStrategy<Integer, String, String>();
PTable<Integer, Pair<String, String>> joined = mapsideJoin.join(customerTable, filteredCustomerTable,
JoinType.INNER_JOIN);
List<Pair<Integer, Pair<String, String>>> materializedJoin = Lists.newArrayList(joined.materialize());
assertTrue(materializedJoin.isEmpty());
}
@Test
public void testMapsideJoin() throws IOException {
runMapsideJoin(new MRPipeline(MapsideJoinStrategyIT.class, tmpDir.getDefaultConfiguration()),
false, false, MapsideJoinStrategy.<Integer, String, String>create(false));
}
@Test
public void testLegacyMapsideJoin() throws IOException {
runLegacyMapsideJoin(new MRPipeline(MapsideJoinStrategyIT.class, tmpDir.getDefaultConfiguration()),
false, false, new MapsideJoinStrategy<Integer, String, String>(false));
}
@Test
public void testMapsideJoin_Materialized() throws IOException {
runMapsideJoin(new MRPipeline(MapsideJoinStrategyIT.class, tmpDir.getDefaultConfiguration()),
false, true, MapsideJoinStrategy.<Integer, String, String>create(true));
}
@Test
public void testLegacyMapsideJoin_Materialized() throws IOException {
runLegacyMapsideJoin(new MRPipeline(MapsideJoinStrategyIT.class, tmpDir.getDefaultConfiguration()),
false, true, new MapsideJoinStrategy<Integer, String, String>(true));
}
@Test
public void testMapsideJoin_RightOuterJoin() throws IOException {
runMapsideRightOuterJoin(new MRPipeline(MapsideJoinStrategyIT.class, tmpDir.getDefaultConfiguration()),
false, false, MapsideJoinStrategy.<Integer, String, String>create(false));
}
@Test
public void testLegacyMapsideJoin_LeftOuterJoin() throws IOException {
runLegacyMapsideLeftOuterJoin(new MRPipeline(MapsideJoinStrategyIT.class, tmpDir.getDefaultConfiguration()),
false, false,
new MapsideJoinStrategy<Integer, String, String>(false));
}
@Test
public void testMapsideJoin_RightOuterJoin_Materialized() throws IOException {
runMapsideRightOuterJoin(new MRPipeline(MapsideJoinStrategyIT.class, tmpDir.getDefaultConfiguration()),
false, true, MapsideJoinStrategy.<Integer, String, String>create(true));
}
@Test
public void testLegacyMapsideJoin_LeftOuterJoin_Materialized() throws IOException {
runLegacyMapsideLeftOuterJoin(new MRPipeline(MapsideJoinStrategyIT.class, tmpDir.getDefaultConfiguration()),
false, true,
new MapsideJoinStrategy<Integer, String, String>(true));
}
@Test
public void testMapSideJoinWithImmutableBytesWritable() throws IOException, InterruptedException {
//Write out input files
FileSystem fs = FileSystem.get(tmpDir.getDefaultConfiguration());
Path path1 = tmpDir.getPath("input1.txt");
Path path2 = tmpDir.getPath("input2.txt");
OutputStream out1 = fs.create(path1, true);
OutputStream out2 = fs.create(path2, true);
for(int i = 0; i < 4; i++){
byte[] value = ("value" + i + "\n").getBytes(Charset.forName("UTF-8"));
out1.write(value);
out2.write(value);
}
out1.flush();
out1.close();
out2.flush();
out2.close();
final MRPipeline pipeline = new MRPipeline(MapsideJoinStrategyIT.class, tmpDir.getDefaultConfiguration());
final PCollection<String> values1 = pipeline.readTextFile(path1.toString());
final PCollection<String> values2 = pipeline.readTextFile(path2.toString());
final PTable<Text, Text> convertedValues1 = convertStringToText(values1);
final PTable<Text, Text> convertedValues2 = convertStringToText(values2);
// for map side join
final MapsideJoinStrategy<Text, Text, Text> mapSideJoinStrategy = MapsideJoinStrategy.<Text, Text, Text>create();
final PTable<Text, Pair<Text, Text>> updatedJoinedRows = mapSideJoinStrategy.join(convertedValues1, convertedValues2, JoinType.INNER_JOIN);
pipeline.run();
    // Each of the four keys (value0 through value3) appears in both inputs,
    // so the materialized join map should contain 4 entries
assertEquals(4, updatedJoinedRows.materializeToMap().size());
}
/**
   * Converts each input string into a {@code Pair<Text, Text>} whose key and
   * value are both the input string.
*/
public static PTable<Text, Text> convertStringToText(final PCollection<String> entityKeysStringPCollection) {
return entityKeysStringPCollection.parallelDo(new DoFn<String, Pair<Text, Text>>() {
@Override
public void process(final String input, final Emitter<Pair<Text, Text>> emitter) {
emitter.emit(new Pair<Text, Text>(new Text(input), new Text(input)));
}
}, Writables.tableOf(Writables.writables(Text.class), Writables.writables(Text.class)));
}
private void runMapsideJoin(Pipeline pipeline, boolean inMemory, boolean materialize,
MapsideJoinStrategy<Integer,String, String> joinStrategy) {
PTable<Integer, String> customerTable = readTable(pipeline, "customers.txt");
PTable<Integer, String> orderTable = readTable(pipeline, "orders.txt");
PTable<Integer, String> custOrders = joinStrategy.join(orderTable, customerTable, JoinType.INNER_JOIN)
.mapValues("concat", new ConcatValuesFn(), Writables.strings());
PTable<Integer, String> ORDER_TABLE = orderTable.mapValues(new CapOrdersFn(), orderTable.getValueType());
PTable<Integer, Pair<String, String>> joined = joinStrategy.join(ORDER_TABLE, custOrders, JoinType.INNER_JOIN);
List<Pair<Integer, Pair<String, String>>> expectedJoinResult = Lists.newArrayList();
expectedJoinResult.add(Pair.of(111, Pair.of("CORN FLAKES", "[Corn flakes,John Doe]")));
expectedJoinResult.add(Pair.of(222, Pair.of("TOILET PAPER", "[Toilet paper,Jane Doe]")));
expectedJoinResult.add(Pair.of(222, Pair.of("TOILET PAPER", "[Toilet plunger,Jane Doe]")));
expectedJoinResult.add(Pair.of(222, Pair.of("TOILET PLUNGER", "[Toilet paper,Jane Doe]")));
expectedJoinResult.add(Pair.of(222, Pair.of("TOILET PLUNGER", "[Toilet plunger,Jane Doe]")));
expectedJoinResult.add(Pair.of(333, Pair.of("TOILET BRUSH", "[Toilet brush,Someone Else]")));
Iterable<Pair<Integer, Pair<String, String>>> iter = joined.materialize();
PipelineResult res = pipeline.run();
if (!inMemory) {
assertEquals(materialize ? 2 : 1, res.getStageResults().size());
}
List<Pair<Integer, Pair<String, String>>> joinedResultList = Lists.newArrayList(iter);
Collections.sort(joinedResultList);
assertEquals(expectedJoinResult, joinedResultList);
}
private void runLegacyMapsideJoin(Pipeline pipeline, boolean inMemory, boolean materialize,
MapsideJoinStrategy<Integer, String, String> mapsideJoinStrategy) {
PTable<Integer, String> customerTable = readTable(pipeline, "customers.txt");
PTable<Integer, String> orderTable = readTable(pipeline, "orders.txt");
PTable<Integer, String> custOrders = mapsideJoinStrategy.join(customerTable, orderTable, JoinType.INNER_JOIN)
.mapValues("concat", new ConcatValuesFn(), Writables.strings());
PTable<Integer, String> ORDER_TABLE = orderTable.mapValues(new CapOrdersFn(), orderTable.getValueType());
PTable<Integer, Pair<String, String>> joined = mapsideJoinStrategy.join(custOrders, ORDER_TABLE, JoinType.INNER_JOIN);
List<Pair<Integer, Pair<String, String>>> expectedJoinResult = Lists.newArrayList();
expectedJoinResult.add(Pair.of(111, Pair.of("[John Doe,Corn flakes]", "CORN FLAKES")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet paper]", "TOILET PAPER")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet paper]", "TOILET PLUNGER")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet plunger]", "TOILET PAPER")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet plunger]", "TOILET PLUNGER")));
expectedJoinResult.add(Pair.of(333, Pair.of("[Someone Else,Toilet brush]", "TOILET BRUSH")));
Iterable<Pair<Integer, Pair<String, String>>> iter = joined.materialize();
PipelineResult res = pipeline.run();
if (!inMemory) {
assertEquals(materialize ? 2 : 1, res.getStageResults().size());
}
List<Pair<Integer, Pair<String, String>>> joinedResultList = Lists.newArrayList(iter);
Collections.sort(joinedResultList);
assertEquals(expectedJoinResult, joinedResultList);
}
private void runMapsideRightOuterJoin(Pipeline pipeline, boolean inMemory, boolean materialize,
MapsideJoinStrategy<Integer, String, String> mapsideJoinStrategy) {
PTable<Integer, String> customerTable = readTable(pipeline, "customers.txt");
PTable<Integer, String> orderTable = readTable(pipeline, "orders.txt");
PTable<Integer, String> custOrders = mapsideJoinStrategy.join(orderTable, customerTable, JoinType.RIGHT_OUTER_JOIN)
.mapValues("concat", new ConcatValuesFn(), Writables.strings());
PTable<Integer, String> ORDER_TABLE = orderTable.mapValues(new CapOrdersFn(), orderTable.getValueType());
PTable<Integer, Pair<String, String>> joined = mapsideJoinStrategy.join(ORDER_TABLE, custOrders,
JoinType.RIGHT_OUTER_JOIN);
List<Pair<Integer, Pair<String, String>>> expectedJoinResult = Lists.newArrayList();
expectedJoinResult.add(Pair.of(111, Pair.of("CORN FLAKES", "[Corn flakes,John Doe]")));
expectedJoinResult.add(Pair.of(222, Pair.of("TOILET PAPER", "[Toilet paper,Jane Doe]")));
expectedJoinResult.add(Pair.of(222, Pair.of("TOILET PAPER", "[Toilet plunger,Jane Doe]")));
expectedJoinResult.add(Pair.of(222, Pair.of("TOILET PLUNGER", "[Toilet paper,Jane Doe]")));
expectedJoinResult.add(Pair.of(222, Pair.of("TOILET PLUNGER", "[Toilet plunger,Jane Doe]")));
expectedJoinResult.add(Pair.of(333, Pair.of("TOILET BRUSH", "[Toilet brush,Someone Else]")));
expectedJoinResult.add(Pair.of(444, Pair.<String,String>of(null, "[null,Has No Orders]")));
Iterable<Pair<Integer, Pair<String, String>>> iter = joined.materialize();
PipelineResult res = pipeline.run();
if (!inMemory) {
assertEquals(materialize ? 2 : 1, res.getStageResults().size());
}
List<Pair<Integer, Pair<String, String>>> joinedResultList = Lists.newArrayList(iter);
Collections.sort(joinedResultList);
assertEquals(expectedJoinResult, joinedResultList);
}
private void runLegacyMapsideLeftOuterJoin(Pipeline pipeline, boolean inMemory, boolean materialize,
MapsideJoinStrategy<Integer, String, String> legacyMapsideJoinStrategy) {
PTable<Integer, String> customerTable = readTable(pipeline, "customers.txt");
PTable<Integer, String> orderTable = readTable(pipeline, "orders.txt");
PTable<Integer, String> custOrders = legacyMapsideJoinStrategy.join(customerTable, orderTable,
JoinType.LEFT_OUTER_JOIN)
.mapValues("concat", new ConcatValuesFn(), Writables.strings());
PTable<Integer, String> ORDER_TABLE = orderTable.mapValues(new CapOrdersFn(), orderTable.getValueType());
PTable<Integer, Pair<String, String>> joined =
legacyMapsideJoinStrategy.join(custOrders, ORDER_TABLE, JoinType.LEFT_OUTER_JOIN);
List<Pair<Integer, Pair<String, String>>> expectedJoinResult = Lists.newArrayList();
expectedJoinResult.add(Pair.of(111, Pair.of("[John Doe,Corn flakes]", "CORN FLAKES")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet paper]", "TOILET PAPER")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet paper]", "TOILET PLUNGER")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet plunger]", "TOILET PAPER")));
expectedJoinResult.add(Pair.of(222, Pair.of("[Jane Doe,Toilet plunger]", "TOILET PLUNGER")));
expectedJoinResult.add(Pair.of(333, Pair.of("[Someone Else,Toilet brush]", "TOILET BRUSH")));
expectedJoinResult.add(Pair.of(444, Pair.<String,String>of("[Has No Orders,null]", null)));
Iterable<Pair<Integer, Pair<String, String>>> iter = joined.materialize();
PipelineResult res = pipeline.run();
if (!inMemory) {
assertEquals(materialize ? 2 : 1, res.getStageResults().size());
}
List<Pair<Integer, Pair<String, String>>> joinedResultList = Lists.newArrayList(iter);
Collections.sort(joinedResultList);
assertEquals(expectedJoinResult, joinedResultList);
}
private PTable<Integer, String> readTable(Pipeline pipeline, String filename) {
try {
return pipeline.readTextFile(tmpDir.copyResourceFileName(filename)).parallelDo("asTable",
new LineSplitter(),
Writables.tableOf(Writables.ints(), Writables.strings()));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
| 2,615 |
0 |
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/DefaultRightOuterJoinIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
public class DefaultRightOuterJoinIT extends AbstractRightOuterJoinIT {
@Override
protected <K, U, V> JoinStrategy<K, U, V> getJoinStrategy() {
return new DefaultJoinStrategy<K, U, V>();
}
}
| 2,616 |
0 |
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/JoinTester.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import java.io.IOException;
import java.io.Serializable;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mem.MemPipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.lib.Aggregate;
import org.apache.crunch.test.TemporaryPath;
import org.apache.crunch.test.TemporaryPaths;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PTypeFamily;
import org.apache.crunch.types.avro.AvroTypeFamily;
import org.apache.crunch.types.writable.WritableTypeFamily;
import org.junit.Rule;
import org.junit.Test;
public abstract class JoinTester implements Serializable {
private static class WordSplit extends DoFn<String, String> {
@Override
public void process(String input, Emitter<String> emitter) {
for (String word : input.split("\\s+")) {
emitter.emit(word);
}
}
}
protected PTable<String, Long> join(PCollection<String> w1, PCollection<String> w2, PTypeFamily ptf) {
PTableType<String, Long> ntt = ptf.tableOf(ptf.strings(), ptf.longs());
PTable<String, Long> ws1 = Aggregate.count(w1.parallelDo("ws1", new WordSplit(), ptf.strings()));
PTable<String, Long> ws2 = Aggregate.count(w2.parallelDo("ws2", new WordSplit(), ptf.strings()));
JoinStrategy<String,Long,Long> joinStrategy = getJoinStrategy();
PTable<String, Pair<Long, Long>> join = joinStrategy.join(ws1, ws2, getJoinType());
PTable<String, Long> sums = join.parallelDo("cnt", new DoFn<Pair<String, Pair<Long, Long>>, Pair<String, Long>>() {
@Override
public void process(Pair<String, Pair<Long, Long>> input, Emitter<Pair<String, Long>> emitter) {
Pair<Long, Long> pair = input.second();
long sum = (pair.first() != null ? pair.first() : 0) + (pair.second() != null ? pair.second() : 0);
emitter.emit(Pair.of(input.first(), sum));
}
}, ntt);
return sums;
}
protected void run(Pipeline pipeline, PTypeFamily typeFamily) throws IOException {
String shakesInputPath = tmpDir.copyResourceFileName("shakes.txt");
String dickensInputPath = tmpDir.copyResourceFileName("dickens.txt");
PCollection<String> shakespeare = pipeline.readTextFile(shakesInputPath);
PCollection<String> dickens = pipeline.readTextFile(dickensInputPath);
PTable<String, Long> joined = join(shakespeare, dickens, typeFamily);
Iterable<Pair<String, Long>> lines = joined.materialize();
assertPassed(lines);
pipeline.done();
}
@Rule
public transient TemporaryPath tmpDir = TemporaryPaths.create();
@Test
public void testWritableJoin() throws Exception {
run(new MRPipeline(AbstractInnerJoinIT.class, tmpDir.getDefaultConfiguration()), WritableTypeFamily.getInstance());
}
@Test
public void testAvroJoin() throws Exception {
run(new MRPipeline(AbstractInnerJoinIT.class, tmpDir.getDefaultConfiguration()), AvroTypeFamily.getInstance());
}
@Test
public void testAvroJoin_MemPipeline() throws Exception {
run(MemPipeline.getInstance(), AvroTypeFamily.getInstance());
}
@Test
public void testWritableJoin_MemPipeline() throws Exception {
run(MemPipeline.getInstance(), WritableTypeFamily.getInstance());
}
/**
* Return the JoinStrategy to be tested.
*/
protected abstract <K, U, V> JoinStrategy<K, U, V> getJoinStrategy();
/**
* Used to check that the result of the join makes sense.
*
* @param lines
* The result of the join.
*/
public abstract void assertPassed(Iterable<Pair<String, Long>> lines);
/**
* @return The JoinType to be used in the test.
*/
protected abstract JoinType getJoinType();
}
| 2,617 |
0 |
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/BloomFilterInnerJoinIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public class BloomFilterInnerJoinIT extends AbstractInnerJoinIT {
private static String saveTempDir;
@BeforeClass
public static void setUpClass(){
// Ensure a consistent temporary directory for use of the DistributedCache.
// The DistributedCache technically isn't supported when running in local mode, and the default
    // temporary directory "/tmp" is used as its location. This typically only causes an issue when
    // running integration tests on Mac OS X, as OS X doesn't use "/tmp" as its default temporary
// directory. The following call ensures that "/tmp" is used as the temporary directory on all platforms.
saveTempDir = System.setProperty("java.io.tmpdir", "/tmp");
}
@AfterClass
public static void tearDownClass(){
System.setProperty("java.io.tmpdir", saveTempDir);
}
@Override
protected <K, U, V> JoinStrategy<K, U, V> getJoinStrategy() {
return new BloomFilterJoinStrategy<K, U, V>(20000);
}
}
| 2,618 |
0 |
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/BloomFilterLeftOuterJoinIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public class BloomFilterLeftOuterJoinIT extends AbstractLeftOuterJoinIT {
private static String saveTempDir;
@BeforeClass
public static void setUpClass(){
// Ensure a consistent temporary directory for use of the DistributedCache.
// The DistributedCache technically isn't supported when running in local mode, and the default
    // temporary directory "/tmp" is used as its location. This typically only causes an issue when
    // running integration tests on Mac OS X, as OS X doesn't use "/tmp" as its default temporary
// directory. The following call ensures that "/tmp" is used as the temporary directory on all platforms.
saveTempDir = System.setProperty("java.io.tmpdir", "/tmp");
}
@AfterClass
public static void tearDownClass(){
System.setProperty("java.io.tmpdir", saveTempDir);
}
@Override
protected <K, U, V> JoinStrategy<K, U, V> getJoinStrategy() {
return new BloomFilterJoinStrategy<K, U, V>(20000);
}
}
| 2,619 |
0 |
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/ShardedInnerJoinIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
public class ShardedInnerJoinIT extends AbstractInnerJoinIT {
@Override
protected <K, U, V> JoinStrategy<K, U, V> getJoinStrategy() {
return new ShardedJoinStrategy<K, U, V>(3);
}
}
| 2,620 |
0 |
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/DefaultLeftOuterJoinIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
public class DefaultLeftOuterJoinIT extends AbstractLeftOuterJoinIT {
@Override
protected <K, U, V> JoinStrategy<K, U, V> getJoinStrategy() {
return new DefaultJoinStrategy<K, U, V>();
}
}
| 2,621 |
0 |
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib
|
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/lib/join/AbstractFullOuterJoinIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.lib.join;
import static org.junit.Assert.assertTrue;
import org.apache.crunch.Pair;
public abstract class AbstractFullOuterJoinIT extends JoinTester {
@Override
public void assertPassed(Iterable<Pair<String, Long>> lines) {
boolean passed1 = false;
boolean passed2 = false;
boolean passed3 = false;
for (Pair<String, Long> line : lines) {
if ("wretched".equals(line.first()) && 19 == line.second()) {
passed1 = true;
}
if ("againe".equals(line.first()) && 10 == line.second()) {
passed2 = true;
}
if ("moon".equals(line.first()) && 9 == line.second()) {
passed3 = true;
}
}
assertTrue(passed1);
assertTrue(passed2);
assertTrue(passed3);
}
@Override
protected JoinType getJoinType() {
return JoinType.FULL_OUTER_JOIN;
}
}
| 2,622 |
0 |
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/it/java/org/apache/crunch/fn/AggregatorsIT.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.fn;
import static org.apache.crunch.fn.Aggregators.SUM_INTS;
import static org.apache.crunch.fn.Aggregators.pairAggregator;
import static org.apache.crunch.types.writable.Writables.ints;
import static org.apache.crunch.types.writable.Writables.pairs;
import static org.apache.crunch.types.writable.Writables.strings;
import static org.apache.crunch.types.writable.Writables.tableOf;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;
import java.util.Collection;
import java.util.Map;
import org.apache.crunch.MapFn;
import org.apache.crunch.PCollection;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.Pipeline;
import org.apache.crunch.test.Tests;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
@RunWith(Parameterized.class)
public class AggregatorsIT {
private Pipeline pipeline;
@Parameters
public static Collection<Object[]> params() {
return Tests.pipelinesParams(AggregatorsIT.class);
}
public AggregatorsIT(Pipeline pipeline) {
this.pipeline = pipeline;
}
@Test
public void testPairAggregator() {
PCollection<String> lines = pipeline.readTextFile(Tests.pathTo(this, "ints.txt"));
PTable<String, Pair<Integer, Integer>> table = lines.parallelDo(new SplitLine(),
tableOf(strings(), pairs(ints(), ints())));
PTable<String, Pair<Integer, Integer>> combinedTable = table.groupByKey().combineValues(
pairAggregator(SUM_INTS(), SUM_INTS()));
Map<String, Pair<Integer, Integer>> result = combinedTable.asMap().getValue();
assertThat(result.size(), is(2));
assertThat(result.get("a"), is(Pair.of(9, 12)));
assertThat(result.get("b"), is(Pair.of(11, 13)));
}
private static final class SplitLine extends MapFn<String, Pair<String, Pair<Integer, Integer>>> {
@Override
public Pair<String, Pair<Integer, Integer>> map(String input) {
String[] split = input.split("\t");
return Pair.of(split[0],
Pair.of(Integer.parseInt(split[1]), Integer.parseInt(split[2])));
}
}
}
| 2,623 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/PTable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.util.Collection;
import java.util.Map;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
/**
* A sub-interface of {@code PCollection} that represents an immutable,
* distributed multi-map of keys and values.
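 * <p>
 * A minimal usage sketch (names are illustrative; assumes an existing
 * {@code PTable<String, Integer> counts}):
 * <pre>{@code
 * PGroupedTable<String, Integer> grouped = counts.groupByKey();
 * PTable<String, Integer> totals = grouped.combineValues(Aggregators.SUM_INTS());
 * }</pre>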
*
*/
public interface PTable<K, V> extends PCollection<Pair<K, V>> {
/**
   * Returns a {@code PTable} instance that acts as the union of this
   * {@code PTable} and the other {@code PTable}.
*/
PTable<K, V> union(PTable<K, V> other);
/**
* Returns a {@code PTable} instance that acts as the union of this
* {@code PTable} and the input {@code PTable}s.
*/
PTable<K, V> union(PTable<K, V>... others);
/**
* Performs a grouping operation on the keys of this table.
*
* @return a {@code PGroupedTable} instance that represents the grouping
*/
PGroupedTable<K, V> groupByKey();
/**
* Performs a grouping operation on the keys of this table, using the given
* number of partitions.
*
* @param numPartitions
* The number of partitions for the data.
* @return a {@code PGroupedTable} instance that represents this grouping
*/
PGroupedTable<K, V> groupByKey(int numPartitions);
/**
* Performs a grouping operation on the keys of this table, using the
* additional {@code GroupingOptions} to control how the grouping is executed.
*
* @param options
* The grouping options to use
* @return a {@code PGroupedTable} instance that represents the grouping
*/
PGroupedTable<K, V> groupByKey(GroupingOptions options);
/**
* Writes this {@code PTable} to the given {@code Target}.
*/
PTable<K, V> write(Target target);
/**
* Writes this {@code PTable} to the given {@code Target}, using the
* given {@code Target.WriteMode} to handle existing targets.
*/
PTable<K, V> write(Target target, Target.WriteMode writeMode);
PTable<K, V> cache();
PTable<K, V> cache(CachingOptions options);
/**
* Returns the {@code PTableType} of this {@code PTable}.
*/
PTableType<K, V> getPTableType();
/**
* Returns the {@code PType} of the key.
*/
PType<K> getKeyType();
/**
* Returns the {@code PType} of the value.
*/
PType<V> getValueType();
/**
* Returns a {@code PTable} that has the same keys as this instance, but
* uses the given function to map the values.
*/
<U> PTable<K, U> mapValues(MapFn<V, U> mapFn, PType<U> ptype);
/**
* Returns a {@code PTable} that has the same keys as this instance, but
* uses the given function to map the values.
*/
<U> PTable<K, U> mapValues(String name, MapFn<V, U> mapFn, PType<U> ptype);
/**
* Returns a {@code PTable} that has the same values as this instance, but
* uses the given function to map the keys.
*/
<K2> PTable<K2, V> mapKeys(MapFn<K, K2> mapFn, PType<K2> ptype);
/**
* Returns a {@code PTable} that has the same values as this instance, but
* uses the given function to map the keys.
*/
<K2> PTable<K2, V> mapKeys(String name, MapFn<K, K2> mapFn, PType<K2> ptype);
/**
* Aggregate all of the values with the same key into a single key-value pair
* in the returned PTable.
*/
PTable<K, Collection<V>> collectValues();
/**
* Apply the given filter function to this instance and return the resulting
* {@code PTable}.
*/
PTable<K, V> filter(FilterFn<Pair<K, V>> filterFn);
/**
* Apply the given filter function to this instance and return the resulting
* {@code PTable}.
*
* @param name
* An identifier for this processing step
* @param filterFn
* The {@code FilterFn} to apply
*/
PTable<K, V> filter(String name, FilterFn<Pair<K, V>> filterFn);
/**
* Returns a PTable made up of the pairs in this PTable with the largest value
* field.
*
* @param count
* The number of pairs to return
*/
PTable<K, V> top(int count);
/**
* Returns a PTable made up of the pairs in this PTable with the smallest
* value field.
*
* @param count
* The number of pairs to return
*/
PTable<K, V> bottom(int count);
/**
* Perform an inner join on this table and the one passed in as an argument on
* their common keys.
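   * <p>
   * A minimal sketch (table names are illustrative):
   * <pre>{@code
   * PTable<Integer, String> customers = ...;
   * PTable<Integer, String> orders = ...;
   * PTable<Integer, Pair<String, String>> joined = customers.join(orders);
   * }</pre>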
*/
<U> PTable<K, Pair<V, U>> join(PTable<K, U> other);
/**
* Co-group operation with the given table.
* <p>
   * <b>Note:</b> If the given table contains keys that are not present in this PTable, an empty
   * {@code Collection} is set for that side of the relationship.
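   * <p>
   * A minimal sketch (types are illustrative):
   * <pre>{@code
   * PTable<String, Long> left = ...;
   * PTable<String, String> right = ...;
   * PTable<String, Pair<Collection<Long>, Collection<String>>> cogrouped = left.cogroup(right);
   * }</pre>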
*/
<U> PTable<K, Pair<Collection<V>, Collection<U>>> cogroup(PTable<K, U> other);
/**
* Returns a {@link PCollection} made up of the keys in this PTable.
*/
PCollection<K> keys();
/**
* Returns a {@link PCollection} made up of the values in this PTable.
*/
PCollection<V> values();
/**
   * Returns a {@code Map<K, V>} made up of the keys and values in this PTable.
* <p>
* <b>Note:</b> The contents of the returned map may not be exactly the same
* as this PTable, as a PTable is a multi-map (i.e. can contain multiple
* values for a single key).
*/
Map<K, V> materializeToMap();
/**
* Returns a {@link PObject} encapsulating a {@link Map} made up of the keys and values in this
* {@code PTable}.
* <p><b>Note:</b>The contents of the returned map may not be exactly the same as this PTable,
* as a PTable is a multi-map (i.e. can contain multiple values for a single key).
* </p>
*
* @return The {@code PObject} encapsulating a {@code Map} made up of the keys and values in
* this {@code PTable}.
*/
PObject<Map<K, V>> asMap();
}
| 2,624 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/GroupingOptions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.io.Serializable;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import org.apache.crunch.impl.mr.run.UniformHashPartitioner;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Partitioner;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
/**
* Options that can be passed to a {@code groupByKey} operation in order to
* exercise finer control over how the partitioning, grouping, and sorting of
* keys is performed.
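 * <p>
 * A minimal sketch of building and applying options (the values shown are
 * illustrative, not recommendations):
 * <pre>{@code
 * GroupingOptions opts = GroupingOptions.builder()
 *     .numReducers(10)
 *     .conf("mapreduce.task.timeout", "600000")
 *     .build();
 * PGroupedTable<String, Long> grouped = table.groupByKey(opts);
 * }</pre>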
*
*/
public class GroupingOptions implements Serializable {
private final Class<? extends Partitioner> partitionerClass;
private final Class<? extends RawComparator> groupingComparatorClass;
private final Class<? extends RawComparator> sortComparatorClass;
private final boolean requireSortedKeys;
private final int numReducers;
private final Map<String, String> extraConf;
private transient Set<SourceTarget<?>> sourceTargets;
private GroupingOptions(Class<? extends Partitioner> partitionerClass,
Class<? extends RawComparator> groupingComparatorClass, Class<? extends RawComparator> sortComparatorClass,
boolean requireSortedKeys, int numReducers,
Map<String, String> extraConf,
Set<SourceTarget<?>> sourceTargets) {
this.partitionerClass = partitionerClass;
this.groupingComparatorClass = groupingComparatorClass;
this.sortComparatorClass = sortComparatorClass;
this.requireSortedKeys = requireSortedKeys;
this.numReducers = numReducers;
this.extraConf = extraConf;
this.sourceTargets = sourceTargets;
}
public int getNumReducers() {
return numReducers;
}
public boolean requireSortedKeys() {
return requireSortedKeys;
}
public Class<? extends RawComparator> getSortComparatorClass() {
return sortComparatorClass;
}
public Class<? extends RawComparator> getGroupingComparatorClass() {
return groupingComparatorClass;
}
public Class<? extends Partitioner> getPartitionerClass() {
return partitionerClass;
}
public Set<SourceTarget<?>> getSourceTargets() {
return sourceTargets;
}
public void configure(Job job) {
if (partitionerClass != null) {
job.setPartitionerClass(partitionerClass);
}
if (groupingComparatorClass != null) {
job.setGroupingComparatorClass(groupingComparatorClass);
}
if (sortComparatorClass != null) {
job.setSortComparatorClass(sortComparatorClass);
}
if (numReducers > 0) {
job.setNumReduceTasks(numReducers);
}
for (Map.Entry<String, String> e : extraConf.entrySet()) {
job.getConfiguration().set(e.getKey(), e.getValue());
}
}
public boolean isCompatibleWith(GroupingOptions other) {
if (partitionerClass != other.partitionerClass) {
return false;
}
if (groupingComparatorClass != other.groupingComparatorClass) {
return false;
}
if (sortComparatorClass != other.sortComparatorClass) {
return false;
}
if (!extraConf.equals(other.extraConf)) {
return false;
}
return true;
}
public static Builder builder() {
return new Builder();
}
/**
* Builder class for creating {@code GroupingOptions} instances.
*
*/
public static class Builder {
private Class<? extends Partitioner> partitionerClass = UniformHashPartitioner.class;
private Class<? extends RawComparator> groupingComparatorClass;
private Class<? extends RawComparator> sortComparatorClass;
private boolean requireSortedKeys;
private int numReducers;
private Map<String, String> extraConf = Maps.newHashMap();
private Set<SourceTarget<?>> sourceTargets = Sets.newHashSet();
public Builder() {
}
public Builder partitionerClass(Class<? extends Partitioner> partitionerClass) {
this.partitionerClass = partitionerClass;
return this;
}
public Builder groupingComparatorClass(Class<? extends RawComparator> groupingComparatorClass) {
this.groupingComparatorClass = groupingComparatorClass;
return this;
}
public Builder sortComparatorClass(Class<? extends RawComparator> sortComparatorClass) {
this.sortComparatorClass = sortComparatorClass;
return this;
}
public Builder requireSortedKeys() {
requireSortedKeys = true;
return this;
}
public Builder numReducers(int numReducers) {
if (numReducers <= 0) {
throw new IllegalArgumentException("Invalid number of reducers: " + numReducers);
}
this.numReducers = numReducers;
return this;
}
public Builder conf(String confKey, String confValue) {
this.extraConf.put(confKey, confValue);
return this;
}
@Deprecated
public Builder sourceTarget(SourceTarget<?> st) {
this.sourceTargets.add(st);
return this;
}
public Builder sourceTargets(SourceTarget<?>... st) {
Collections.addAll(this.sourceTargets, st);
return this;
}
public Builder sourceTargets(Collection<SourceTarget<?>> st) {
this.sourceTargets.addAll(st);
return this;
}
public GroupingOptions build() {
return new GroupingOptions(partitionerClass, groupingComparatorClass, sortComparatorClass,
requireSortedKeys, numReducers, extraConf, sourceTargets);
}
}
}
| 2,625 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/Union.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
/**
* Allows us to represent the combination of multiple data sources that may contain different types of data
* as a single type with an index to indicate which of the original sources the current record was from.
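 * <p>
 * A minimal sketch (values are hypothetical; the index identifies which
 * original source the record came from):
 * <pre>{@code
 * Union u = new Union(0, "record-from-first-source");
 * if (u.getIndex() == 0) {
 *   String value = (String) u.getValue();
 * }
 * }</pre>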
*/
public class Union {
private final int index;
private final Object value;
public Union(int index, Object value) {
this.index = index;
this.value = value;
}
/**
* Returns the index of the original data source for this union type.
*/
public int getIndex() {
return index;
}
/**
* Returns the underlying object value of the record.
*/
public Object getValue() {
return value;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Union that = (Union) o;
if (index != that.index) return false;
if (value != null ? !value.equals(that.value) : that.value != null) return false;
return true;
}
@Override
public int hashCode() {
return 31 * index + (value != null ? value.hashCode() : 0);
}
}
| 2,626 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/Pipeline.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import java.util.List;
/**
* Manages the state of a pipeline execution.
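 * <p>
 * A minimal read-process-write sketch (the paths and the concrete
 * {@code Pipeline} implementation are illustrative):
 * <pre>{@code
 * Pipeline pipeline = ...; // e.g. an MRPipeline or MemPipeline
 * PCollection<String> lines = pipeline.readTextFile("/tmp/input.txt");
 * pipeline.writeTextFile(lines, "/tmp/output");
 * PipelineResult result = pipeline.done();
 * }</pre>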
*
*/
public interface Pipeline {
/**
* Set the {@code Configuration} to use with this pipeline.
*/
void setConfiguration(Configuration conf);
/**
* Returns the name of this pipeline.
*
* @return Name of the pipeline
*/
String getName();
/**
* Returns the {@code Configuration} instance associated with this pipeline.
*/
Configuration getConfiguration();
/**
* Converts the given {@code Source} into a {@code PCollection} that is
* available to jobs run using this {@code Pipeline} instance.
*
* @param source
* The source of data
* @return A PCollection that references the given source
*/
<T> PCollection<T> read(Source<T> source);
/**
* Converts the given {@code Source} into a {@code PCollection} that is
* available to jobs run using this {@code Pipeline} instance.
*
* @param source The source of data
* @param named A name for the returned PCollection
* @return A PCollection that references the given source
*/
<T> PCollection<T> read(Source<T> source, String named);
/**
* A version of the read method for {@code TableSource} instances that map to
* {@code PTable}s.
*
* @param tableSource
* The source of the data
* @return A PTable that references the given source
*/
<K, V> PTable<K, V> read(TableSource<K, V> tableSource);
/**
* A version of the read method for {@code TableSource} instances that map to
* {@code PTable}s.
*
* @param tableSource The source of the data
* @param named A name for the returned PTable
* @return A PTable that references the given source
*/
<K, V> PTable<K, V> read(TableSource<K, V> tableSource, String named);
/**
* Write the given collection to the given target on the next pipeline run. The
* system will check to see if the target's location already exists using the
* {@code WriteMode.DEFAULT} rule for the given {@code Target}.
*
* @param collection
* The collection
* @param target
* The output target
*/
void write(PCollection<?> collection, Target target);
/**
* Write the contents of the {@code PCollection} to the given {@code Target},
* using the storage format specified by the target and the given
* {@code WriteMode} for cases where the referenced {@code Target}
* already exists.
*
* @param collection
* The collection
* @param target
* The target to write to
* @param writeMode
* The strategy to use for handling existing outputs
*/
void write(PCollection<?> collection, Target target,
Target.WriteMode writeMode);
/**
* Create the given PCollection and read the data it contains into the
* returned Collection instance for client use.
*
* @param pcollection
* The PCollection to materialize
* @return the data from the PCollection as a read-only Collection
*/
<T> Iterable<T> materialize(PCollection<T> pcollection);
/**
* Caches the given PCollection so that it will be processed at most once
* during pipeline execution.
*
* @param pcollection The PCollection to cache
* @param options The options for how the cached data is stored
*/
<T> void cache(PCollection<T> pcollection, CachingOptions options);
/**
* Creates an empty {@code PCollection} of the given {@code PType}.
*
* @param ptype The PType of the empty PCollection
* @return A valid PCollection with no contents
*/
<T> PCollection<T> emptyPCollection(PType<T> ptype);
/**
* Creates an empty {@code PTable} of the given {@code PTable Type}.
*
* @param ptype The PTableType of the empty PTable
* @return A valid PTable with no contents
*/
<K, V> PTable<K, V> emptyPTable(PTableType<K, V> ptype);
/**
* Creates a {@code PCollection} containing the values found in the given {@code Iterable}
* using an implementation-specific distribution mechanism.
*
* @param contents The values the new PCollection will contain
* @param ptype The PType of the PCollection
* @return A PCollection that contains the given values
*/
<T> PCollection<T> create(Iterable<T> contents, PType<T> ptype);
/**
* Creates a {@code PCollection} containing the values found in the given {@code Iterable}
* using an implementation-specific distribution mechanism.
*
* @param contents The values the new PCollection will contain
* @param ptype The PType of the PCollection
* @param options Additional options, such as the name or desired parallelism of the PCollection
* @return A PCollection that contains the given values
*/
<T> PCollection<T> create(Iterable<T> contents, PType<T> ptype, CreateOptions options);
/**
* Creates a {@code PTable} containing the values found in the given {@code Iterable}
* using an implementation-specific distribution mechanism.
*
* @param contents The values the new PTable will contain
* @param ptype The PTableType of the PTable
* @return A PTable that contains the given values
*/
<K, V> PTable<K, V> create(Iterable<Pair<K, V>> contents, PTableType<K, V> ptype);
/**
* Creates a {@code PTable} containing the values found in the given {@code Iterable}
* using an implementation-specific distribution mechanism.
*
* @param contents The values the new PTable will contain
* @param ptype The PTableType of the PTable
* @param options Additional options, such as the name or desired parallelism of the PTable
* @return A PTable that contains the given values
*/
<K, V> PTable<K, V> create(Iterable<Pair<K, V>> contents, PTableType<K, V> ptype, CreateOptions options);
<S> PCollection<S> union(List<PCollection<S>> collections);
<K, V> PTable<K, V> unionTables(List<PTable<K, V>> tables);
/**
* Executes the given {@code PipelineCallable} on the client after the {@code Targets}
* that the PipelineCallable depends on (if any) have been created by other pipeline
* processing steps.
*
* @param pipelineCallable The sequential logic to execute
* @param <Output> The return type of the PipelineCallable
* @return The result of executing the PipelineCallable
*/
<Output> Output sequentialDo(PipelineCallable<Output> pipelineCallable);
/**
* Constructs and executes a series of MapReduce jobs in order to write data
* to the output targets.
*/
PipelineResult run();
/**
* Constructs and starts a series of MapReduce jobs in order to write data to
* the output targets, but returns a {@code ListenableFuture} to allow clients to control
* job execution.
* @return a {@code PipelineExecution} handle for monitoring and controlling job execution
*/
PipelineExecution runAsync();
/**
* Run any remaining jobs required to generate outputs and then clean up any
* intermediate data files that were created in this run or previous calls to
* {@code run}.
*/
PipelineResult done();
/**
* Cleans up any artifacts created as a result of {@link #run() running} the pipeline.
* @param force forces the cleanup even if all targets of the pipeline have not been completed.
*/
void cleanup(boolean force);
/**
* A convenience method for reading a text file.
*/
PCollection<String> readTextFile(String pathName);
/**
* A convenience method for writing a text file.
*/
<T> void writeTextFile(PCollection<T> collection, String pathName);
/**
* Turn on debug logging for jobs that are run from this pipeline.
*/
void enableDebug();
}
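// --- Editor's illustrative sketch (not part of the Crunch sources) ---
// A minimal driver showing the typical Pipeline lifecycle: read with the
// convenience text-file method, write, and finish with done(). MRPipeline is
// the standard MapReduce implementation of Pipeline; the paths here are
// hypothetical placeholders.
class PipelineUsageSketch {
public static void main(String[] args) {
Pipeline pipeline = new org.apache.crunch.impl.mr.MRPipeline(PipelineUsageSketch.class);
PCollection<String> lines = pipeline.readTextFile("/tmp/input.txt"); // hypothetical input path
pipeline.writeTextFile(lines, "/tmp/output"); // hypothetical output path
PipelineResult result = pipeline.done(); // runs remaining jobs, then cleans up temp data
System.out.println("Pipeline succeeded: " + result.succeeded());
}
}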
| 2,627 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/TableSource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import org.apache.crunch.types.PTableType;
/**
* The interface for {@code Source} implementations that return a {@link PTable}.
*/
public interface TableSource<K, V> extends Source<Pair<K, V>> {
PTableType<K, V> getTableType();
}
| 2,628 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/ReadableData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import java.io.IOException;
import java.io.Serializable;
import java.util.Set;
/**
* Represents the contents of a data source that can be read on the cluster from within one
* of the tasks running as part of a Crunch pipeline.
*/
public interface ReadableData<T> extends Serializable {
/**
* @return Any {@code SourceTarget} instances that must exist before the data in
* this instance can be read. Used by the planner in sequencing job processing.
*/
Set<SourceTarget<?>> getSourceTargets();
/**
* Allows this instance to specify any additional configuration settings that may
* be needed by the job that it is launched in.
*
* @param conf The {@code Configuration} object for the job
*/
void configure(Configuration conf);
/**
* Read the data referenced by this instance within the given context.
*
* @param context The context of the task that is reading the data
* @return An iterable reference to the data in this instance
* @throws IOException If the data cannot be read
*/
Iterable<T> read(TaskInputOutputContext<?, ?, ?, ?> context) throws IOException;
}
| 2,629 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/Pair.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.io.Serializable;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.Ordering;
import org.apache.commons.lang.builder.HashCodeBuilder;
/**
* A convenience class for two-element {@link Tuple}s.
*/
public class Pair<K, V> implements Tuple, Comparable<Pair<K, V>>, Serializable {
private final K first;
private final V second;
public static <T, U> Pair<T, U> of(T first, U second) {
return new Pair<T, U>(first, second);
}
public Pair(K first, V second) {
this.first = first;
this.second = second;
}
public K first() {
return first;
}
public V second() {
return second;
}
public Object get(int index) {
switch (index) {
case 0:
return first;
case 1:
return second;
default:
throw new ArrayIndexOutOfBoundsException();
}
}
public int size() {
return 2;
}
@Override
public int hashCode() {
HashCodeBuilder hcb = new HashCodeBuilder();
return hcb.append(first).append(second).toHashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Pair<?, ?> other = (Pair<?, ?>) obj;
return (first == other.first || (first != null && first.equals(other.first)))
&& (second == other.second || (second != null && second.equals(other.second)));
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("[");
sb.append(first).append(",").append(second).append("]");
return sb.toString();
}
private int cmp(Object lhs, Object rhs) {
if (lhs == rhs) {
return 0;
} else if (lhs != null && Comparable.class.isAssignableFrom(lhs.getClass())) {
return Ordering.natural().nullsLast().compare((Comparable) lhs, (Comparable) rhs);
}
if (lhs == null) {
return 1; // nulls last
}
if (rhs == null) {
return -1; // nulls last
}
if (lhs.equals(rhs)) {
return 0;
}
// Now we compare based on hash code. We already know that the two sides are not equal, so
// if the hash codes are equal, we just use arbitrary (but consistent) ordering
return ComparisonChain.start()
.compare(lhs.hashCode(), rhs.hashCode())
.compare(lhs, rhs, Ordering.arbitrary())
.result();
}
@Override
public int compareTo(Pair<K, V> o) {
int diff = cmp(first, o.first);
if (diff == 0) {
diff = cmp(second, o.second);
}
return diff;
}
}
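// --- Editor's illustrative sketch (not part of the Crunch sources) ---
// Demonstrates the comparison semantics implemented above: Pairs order by the
// first element, then the second, with null elements sorting last.
class PairOrderingSketch {
public static void main(String[] args) {
Pair<Integer, String> a = Pair.of(1, "x");
Pair<Integer, String> b = Pair.of(1, null);
System.out.println(a.compareTo(b) < 0); // prints true: the null second element sorts last
}
}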
| 2,630 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/Aggregator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.io.Serializable;
import org.apache.hadoop.conf.Configuration;
/**
* Aggregate a sequence of values into a possibly smaller sequence of the same type.
*
* <p>In most cases, an Aggregator will turn multiple values into a single value,
* like creating a sum, finding the minimum or maximum, etc. In some cases
* (e.g. finding the top K elements), an implementation may return more than
* one value. The {@link org.apache.crunch.fn.Aggregators} utility class contains
* factory methods for creating all kinds of pre-defined Aggregators that should
* cover the most common cases.</p>
*
* <p>Aggregator implementations should usually be <em>associative</em> and
* <em>commutative</em>, which makes their results deterministic. If your aggregation
* function isn't commutative, you can still use secondary sort to that effect.</p>
*
* <p>The lifecycle of an {@link Aggregator} always begins with you instantiating
* it and passing it to Crunch. When running your {@link Pipeline}, Crunch serializes
* the instance and deserializes it wherever it is needed on the cluster. This is how
* Crunch uses a deserialized instance:</p>
*
* <ol>
* <li>call {@link #initialize(Configuration)} once</li>
* <li>call {@link #reset()}</li>
* <li>call {@link #update(Object)} multiple times until all values of a sequence
* have been aggregated</li>
* <li>call {@link #results()} to retrieve the aggregated result</li>
* <li>go back to step 2 until all sequences have been aggregated</li>
* </ol>
*
* @param <T> The value types to aggregate
*/
public interface Aggregator<T> extends Serializable {
/**
* Perform any setup of this instance that is required prior to processing
* inputs.
*
* @param conf Hadoop configuration
*/
void initialize(Configuration conf);
/**
* Clears the internal state of this Aggregator and prepares it for the
* values associated with the next key.
*
* Depending on what you aggregate, this typically means setting a variable
* to zero or clearing a list. Failing to do this will yield wrong results!
*/
void reset();
/**
* Incorporate the given value into the aggregate state maintained by this
* instance.
*
* @param value The value to add to the aggregated state
*/
void update(T value);
/**
* Returns the current aggregated state of this instance.
*/
Iterable<T> results();
}
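// --- Editor's illustrative sketch (not part of the Crunch sources) ---
// A long-summing Aggregator that follows the initialize/reset/update/results
// lifecycle documented above. In real code the pre-defined
// Aggregators.SUM_LONGS() factory would normally be used instead.
class SumLongAggregatorSketch implements Aggregator<Long> {
private long sum;
@Override
public void initialize(Configuration conf) {
// No configuration is needed for a simple sum.
}
@Override
public void reset() {
sum = 0L; // clear state before the values of the next key arrive
}
@Override
public void update(Long value) {
sum += value;
}
@Override
public Iterable<Long> results() {
return java.util.Collections.singletonList(sum);
}
}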
| 2,631 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/Tuple.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
/**
* A fixed-size collection of Objects, used in Crunch for representing joins
* between {@code PCollection}s.
*
*/
public interface Tuple {
/**
* Returns the Object at the given index.
*/
Object get(int index);
/**
* Returns the number of elements in this Tuple.
*/
int size();
}
| 2,632 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/DoFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.io.Serializable;
import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import com.google.common.base.Preconditions;
/**
* Base class for all data processing functions in Crunch.
*
* <p>
* Note that all {@code DoFn} instances implement {@link Serializable}, and thus
* all of their non-transient member variables must implement
* {@code Serializable} as well. If your DoFn depends on non-serializable
* classes for data processing, they may be declared as {@code transient} and
* initialized in the DoFn's {@code initialize} method.
*
*/
public abstract class DoFn<S, T> implements Serializable {
/** This will be null prior to being set in {@link #setContext(TaskInputOutputContext)}. */
@CheckForNull
private transient TaskInputOutputContext<?, ?, ?, ?> context;
/** This will be null prior to being set in {@link #setConfiguration(Configuration)}. */
@CheckForNull
private transient Configuration conf;
/**
* Configure this DoFn. Subclasses may override this method to modify the
* configuration of the Job that this DoFn instance belongs to.
*
* <p>
* Called during the job planning phase by the crunch-client.
* </p>
*
* @param conf
* The Configuration instance for the Job.
*/
public void configure(Configuration conf) {
}
/**
* Initialize this DoFn. This initialization will happen before the actual
* {@link #process(Object, Emitter)} is triggered. Subclasses may override
* this method to do appropriate initialization.
*
* <p>
* Called during the setup of the job instance this {@code DoFn} is associated
* with.
* </p>
*
*/
public void initialize() {
}
/**
* Processes the records from a {@link PCollection}.
*
* <br/>
* <br/>
* <b>Note:</b> Crunch can reuse a single input record object whose content
* changes on each {@link #process(Object, Emitter)} method call. This
* functionality is imposed by Hadoop's <a href=
* "http://hadoop.apache.org/common/docs/current/api/org/apache/hadoop/mapred/Reducer.html"
* >Reducer</a> implementation: <i>The framework will reuse the key and value
* objects that are passed into the reduce, therefore the application should
* clone the objects they want to keep a copy of.</i>
*
* @param input
* The input record.
* @param emitter
* The emitter to send the output to
*/
public abstract void process(S input, Emitter<T> emitter);
/**
* Called during the cleanup of the MapReduce job this {@code DoFn} is
* associated with. Subclasses may override this method to do appropriate
* cleanup.
*
* @param emitter
* The emitter that was used for output
*/
public void cleanup(Emitter<T> emitter) {
}
/**
* Called during setup to pass the {@link TaskInputOutputContext} to this
* {@code DoFn} instance. The specified {@code TaskInputOutputContext} must not be null.
*/
public void setContext(@Nonnull TaskInputOutputContext<?, ?, ?, ?> context) {
Preconditions.checkNotNull(context);
this.context = context;
}
/**
* Called during the setup of an initialized {@link org.apache.crunch.types.PType} that
* relies on this instance.
*
* @param conf
* The non-null configuration for the {@code PType} being initialized
*/
public void setConfiguration(@Nonnull Configuration conf) {
Preconditions.checkNotNull(conf);
this.conf = conf;
}
/**
* Returns an estimate of how applying this function to a {@link PCollection}
* will cause it to change in size. The optimizer uses these estimates to
* decide where to break up dependent MR jobs into separate Map and Reduce
* phases in order to minimize I/O.
*
* <p>
* Subclasses of {@code DoFn} that will substantially alter the size of the
* resulting {@code PCollection} should override this method.
*/
public float scaleFactor() {
return 0.99f;
}
/**
* By default, Crunch will do a defensive deep copy of the outputs of a
* DoFn when there are multiple downstream consumers of that item, in order to
* prevent the downstream functions from making concurrent modifications to
* data objects. This introduces some extra overhead in cases where you know
* that the downstream code is only reading the objects and not modifying them,
* so you can disable this feature by overriding this function to
* return {@code true}.
*/
public boolean disableDeepCopy() {
return false;
}
protected TaskInputOutputContext<?, ?, ?, ?> getContext() {
return context;
}
protected Configuration getConfiguration() {
if (conf != null) {
return conf;
} else if (context != null) {
return context.getConfiguration();
} else {
return null;
}
}
/**
* @deprecated The {@link Counter} class changed incompatibly between Hadoop 1 and 2
* (from a class to an interface) so user programs should avoid this method and use
* one of the <code>increment</code> methods instead, such as {@link #increment(Enum)}.
*/
@Deprecated
protected Counter getCounter(Enum<?> counterName) {
if (context == null) {
return null;
}
return context.getCounter(counterName);
}
/**
* @deprecated The {@link Counter} class changed incompatibly between Hadoop 1 and 2
* (from a class to an interface) so user programs should avoid this method and use
* one of the <code>increment</code> methods instead, such as {@link #increment(Enum)}.
*/
@Deprecated
protected Counter getCounter(String groupName, String counterName) {
if (context == null) {
return null;
}
return context.getCounter(groupName, counterName);
}
protected void increment(String groupName, String counterName) {
increment(groupName, counterName, 1);
}
protected void increment(String groupName, String counterName, long value) {
if (context != null) {
context.getCounter(groupName, counterName).increment(value);
}
}
protected void increment(Enum<?> counterName) {
increment(counterName, 1);
}
protected void increment(Enum<?> counterName, long value) {
if (context != null) {
context.getCounter(counterName).increment(value);
}
}
protected void progress() {
if (context != null) {
context.progress();
}
}
protected TaskAttemptID getTaskAttemptID() {
if (context == null) {
return null;
}
return context.getTaskAttemptID();
}
protected void setStatus(String status) {
if (context != null) {
context.setStatus(status);
}
}
protected String getStatus() {
if (context == null) {
return null;
}
return context.getStatus();
}
}
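// --- Editor's illustrative sketch (not part of the Crunch sources) ---
// A DoFn that splits lines into words, showing process(), the Emitter, the
// counter helpers, and a scaleFactor() override. The counter group and name
// are hypothetical.
class TokenizeFnSketch extends DoFn<String, String> {
@Override
public void process(String line, Emitter<String> emitter) {
for (String word : line.split("\\s+")) {
if (!word.isEmpty()) {
emitter.emit(word);
increment("sketch", "words"); // hypothetical counter group/name
}
}
}
@Override
public float scaleFactor() {
return 5.0f; // tokenizing substantially expands the collection
}
}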
| 2,633 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/FilterFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.util.List;
import org.apache.crunch.fn.FilterFns;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import com.google.common.collect.ImmutableList;
/**
* A {@link DoFn} for the common case of filtering the members of a
* {@link PCollection} based on a boolean condition.
*/
public abstract class FilterFn<T> extends DoFn<T, T> {
/**
* If true, emit the given record.
*/
public abstract boolean accept(T input);
@Override
public void process(T input, Emitter<T> emitter) {
if (accept(input)) {
emitter.emit(input);
}
}
@Override
public final void cleanup(Emitter<T> emitter) {
cleanup();
}
/**
* Called during the cleanup of the MapReduce job this {@code FilterFn} is
* associated with. Subclasses may override this method to do appropriate
* cleanup.
*/
public void cleanup() {
}
@Override
public float scaleFactor() {
return 0.5f;
}
}
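// --- Editor's illustrative sketch (not part of the Crunch sources) ---
// A FilterFn that keeps non-empty strings. Only accept() needs to be
// implemented; process() and the 0.5f scale factor come from the base class.
class NonEmptyFilterSketch extends FilterFn<String> {
@Override
public boolean accept(String input) {
return input != null && !input.isEmpty();
}
}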
| 2,634 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/PObject.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
/**
* A {@code PObject} represents a singleton object value that results from a distributed
* computation. Computation producing the value is deferred until
* {@link org.apache.crunch.PObject#getValue()} is called.
*
* @param <T> The type of value encapsulated by this {@code PObject}.
*/
public interface PObject<T> {
/**
* Gets the value associated with this {@code PObject}. Calling this method will trigger
* whatever computation is necessary to obtain the value and block until that computation
* succeeds.
*
* @return The value associated with this {@code PObject}.
*/
T getValue();
}
| 2,635 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/PipelineExecution.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* A handle to allow clients to control a Crunch pipeline as it runs.
*
* This interface is thread-safe.
*/
public interface PipelineExecution extends ListenableFuture<PipelineResult> {
enum Status { READY, RUNNING, SUCCEEDED, FAILED, KILLED }
/** Returns the .dot file that allows a client to graph the Crunch execution plan for this
* pipeline.
*/
String getPlanDotFile();
/**
* Returns all .dot files that allow a client to graph the internals of the Crunch execution plan.
* The key is the name of the dot file and the value is its contents.
*/
Map<String, String> getNamedDotFiles();
/** Blocks until the pipeline completes or the specified waiting time elapses. */
void waitFor(long timeout, TimeUnit timeUnit) throws InterruptedException;
/** Blocks until pipeline completes, i.e. {@code SUCCEEDED}, {@code FAILED} or {@code KILLED}. */
void waitUntilDone() throws InterruptedException;
Status getStatus();
/** Retrieves the result of the pipeline if it has completed, otherwise returns {@code null}. */
PipelineResult getResult();
/**
* Kills the pipeline if it is running, no-op otherwise.
*
* This method only delivers a kill signal to the pipeline, and does not guarantee the pipeline exits on return.
* To wait for the pipeline to exit completely, use {@link #waitUntilDone()} after this call.
*/
void kill() throws InterruptedException;
}
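// --- Editor's illustrative sketch (not part of the Crunch sources) ---
// Monitoring an asynchronous run: wait with a timeout, and if the pipeline is
// still running, deliver a kill signal and then wait for it to actually exit,
// as the kill() contract above requires. The 30-minute timeout is arbitrary.
class AsyncMonitorSketch {
static PipelineResult runWithTimeout(Pipeline pipeline) throws InterruptedException {
PipelineExecution execution = pipeline.runAsync();
execution.waitFor(30, TimeUnit.MINUTES);
if (execution.getStatus() == PipelineExecution.Status.RUNNING) {
execution.kill(); // delivers the signal only
execution.waitUntilDone(); // blocks until the pipeline has exited
}
return execution.getResult(); // null if the pipeline never completed
}
}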
| 2,636 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/PipelineCallable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
import java.util.concurrent.Callable;
/**
* A specialization of {@code Callable} that executes some sequential logic on the client machine as
* part of an overall Crunch pipeline in order to generate zero or more outputs, some of
* which may be {@code PCollection} instances that are processed by other jobs in the
* pipeline.
*
* <p>{@code PipelineCallable} is intended to be used to inject auxiliary logic into the control
* flow of a Crunch pipeline. This can be used for a number of purposes, such as importing or
* exporting data into a cluster using Apache Sqoop, executing a legacy MapReduce job
* or Pig/Hive script within a Crunch pipeline, or sending emails or status notifications
* about the status of a long-running pipeline during its execution.</p>
*
* <p>The Crunch planner needs to know three things about a {@code PipelineCallable} instance in order
* to manage it:
* <ol>
* <li>The {@code Target} and {@code PCollection} instances that must have been materialized
* before this instance is allowed to run. This information should be specified via the {@code dependsOn}
* methods of the class.</li>
* <li>What Outputs will be created after this instance is executed, if any. These outputs may be
* new {@code PCollection} instances that are used as inputs in other Crunch jobs. These outputs should
* be specified by the {@code getOutput(Pipeline)} method of the class, which will be executed immediately
* after this instance is registered with the {@link Pipeline#sequentialDo} method.</li>
* <li>The actual logic to execute when the dependent Targets and PCollections have been created in
* order to materialize the output data. This is defined in the {@code call} method of the class.</li>
* </ol>
* </p>
*
* <p>If a given PipelineCallable does not have any dependencies, it will be executed before any jobs are run
* by the planner. After that, the planner will keep track of when the dependencies of a given instance
* have been materialized, and then execute the instance as soon as they all exist. The Crunch planner
* uses a thread pool executor to run multiple {@code PipelineCallable} instances simultaneously, but you can
* indicate that an instance should be run by itself by overriding the {@code boolean runSingleThreaded()} method
* below to return true.</p>
*
* <p>The {@code call} method returns a {@code Status} to indicate whether it succeeded or failed. A failed
* instance, or any exceptions/errors thrown by the call method, will cause the overall Crunch pipeline containing
* this instance to fail.</p>
*
* <p>A number of helper methods for accessing the dependent Target/PCollection instances that this instance
* needs to exist, as well as the {@code Configuration} instance for the overall Pipeline execution, are available
* as protected methods in this class so that they may be accessed from implementations of {@code PipelineCallable}
* within the {@code call} method.
* </p>
* @param <Output> the output value returned by this instance (Void, PCollection, Pair<PCollection, PCollection>,
* etc.)
*/
public abstract class PipelineCallable<Output> implements Callable<PipelineCallable.Status> {
private static final Logger LOG = LoggerFactory.getLogger(PipelineCallable.class);
public enum Status { SUCCESS, FAILURE };
private String name;
private String message;
private Map<String, Target> namedTargets = Maps.newHashMap();
private Map<String, PCollection<?>> namedPCollections = Maps.newHashMap();
private Configuration conf;
private boolean outputsGenerated = false;
/**
* Clients should override this method to define the outputs that will exist after this instance is
* executed. These may be PCollections, PObjects, or nothing (which can be indicated with Java's {@code Void}
* type and a null value).
*
* @param pipeline The pipeline that is managing the execution of this instance
*/
protected abstract Output getOutput(Pipeline pipeline);
/**
* Override this method to indicate to the planner that this instance should not be run at the
* same time as any other {@code PipelineCallable} instances.
*
* @return true if this instance should run by itself, false otherwise
*/
public boolean runSingleThreaded() {
return false;
}
/**
* Requires that the given {@code Target} exists before this instance may be
* executed.
*
* @param label A string that can be used to retrieve the given Target inside of the {@code call} method.
* @param t the {@code Target} itself
* @return this instance
*/
public PipelineCallable<Output> dependsOn(String label, Target t) {
Preconditions.checkNotNull(label, "label");
if (outputsGenerated) {
throw new IllegalStateException(
"Dependencies may not be added to a PipelineCallable after its outputs have been generated");
}
if (namedTargets.containsKey(label)) {
throw new IllegalStateException("Label " + label + " cannot be reused for multiple targets");
}
this.namedTargets.put(label, t);
return this;
}
/**
* Requires that the given {@code PCollection} be materialized to disk before this instance may be
* executed.
*
* @param label A string that can be used to retrieve the given PCollection inside of the {@code call} method.
* @param pcollect the {@code PCollection} itself
* @return this instance
*/
public PipelineCallable<Output> dependsOn(String label, PCollection<?> pcollect) {
Preconditions.checkNotNull(label, "label");
if (outputsGenerated) {
throw new IllegalStateException(
"Dependencies may not be added to a PipelineCallable after its outputs have been generated");
}
if (namedPCollections.containsKey(label)) {
throw new IllegalStateException("Label " + label + " cannot be reused for multiple PCollections");
}
this.namedPCollections.put(label, pcollect);
return this;
}
/**
* Called by the {@code Pipeline} when this instance is registered with {@code Pipeline#sequentialDo}. In general,
* clients should override the protected {@code getOutput(Pipeline)} method instead of this one.
*/
public Output generateOutput(Pipeline pipeline) {
if (outputsGenerated) {
throw new IllegalStateException("PipelineCallable.generateOutput should only be called once");
}
outputsGenerated = true;
this.conf = pipeline.getConfiguration();
return getOutput(pipeline);
}
/**
* Returns the name of this instance.
*/
public String getName() {
return name == null ? this.getClass().getName() : name;
}
/**
* Use the given name to identify this instance in the logs.
*/
public PipelineCallable<Output> named(String name) {
this.name = name;
return this;
}
/**
* Returns a message associated with this callable's execution, especially in case of errors.
*/
public String getMessage() {
if (message == null) {
LOG.warn("No message specified for PipelineCallable instance \"{}\". Consider overriding PipelineCallable.getMessage()", getName());
return toString();
}
return message;
}
/**
* Sets a message associated with this callable's execution, especially in case of errors.
*/
public void setMessage(String message) {
this.message = message;
}
/**
* The {@code Configuration} instance for the {@code Pipeline} this callable is registered with.
*/
protected Configuration getConfiguration() {
return conf;
}
/**
* Returns the {@code Target} associated with the given label in the dependencies list,
* or null if no such target exists.
*/
protected Target getTarget(String label) {
return namedTargets.get(label);
}
/**
* Returns the {@code PCollection} associated with the given label in the dependencies list,
* or null if no such instance exists.
*/
protected PCollection getPCollection(String label) {
return namedPCollections.get(label);
}
/**
* Returns the only PCollection this instance depends on. Only valid in the case that this callable
* has precisely one dependency.
*/
protected PCollection getOnlyPCollection() {
return Iterables.getOnlyElement(namedPCollections.values());
}
/**
* Returns the mapping of labels to PCollection dependencies for this instance.
*/
public Map<String, PCollection<?>> getAllPCollections() {
return ImmutableMap.copyOf(namedPCollections);
}
/**
* Returns the mapping of labels to Target dependencies for this instance.
*/
public Map<String, Target> getAllTargets() {
return ImmutableMap.copyOf(namedTargets);
}
}
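// --- Editor's illustrative sketch (not part of the Crunch sources) ---
// A PipelineCallable that produces no PCollection outputs and simply logs a
// notification once a dependent target has been materialized. The "output"
// label is hypothetical; it would be registered via
// pipeline.sequentialDo(new NotifyCallableSketch().dependsOn("output", target)).
class NotifyCallableSketch extends PipelineCallable<Void> {
@Override
protected Void getOutput(Pipeline pipeline) {
return null; // Void output: this callable exists only for its side effect
}
@Override
public Status call() {
Target done = getTarget("output"); // the dependency declared with dependsOn
System.out.println("Target materialized: " + done);
return Status.SUCCESS;
}
}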
| 2,637 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/CombineFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.io.Serializable;
import java.math.BigInteger;
import java.util.LinkedList;
import java.util.List;
import java.util.SortedSet;
import org.apache.crunch.fn.Aggregators;
import org.apache.crunch.util.Tuples;
import org.apache.hadoop.conf.Configuration;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
/**
* A special {@link DoFn} implementation that converts an {@link Iterable} of
* values into a single value. If a {@code CombineFn} instance is used on a
* {@link PGroupedTable}, the function will be applied to the output of the map
* stage before the data is passed to the reducer, which can improve the runtime
* of certain classes of jobs.
* <p>
* Note that the incoming {@code Iterable} can only be used to create an
* {@code Iterator} once. Calling the {@link Iterable#iterator()} method a second
* time will throw an {@link IllegalStateException}.
*/
public abstract class CombineFn<S, T> extends DoFn<Pair<S, Iterable<T>>, Pair<S, T>> {
}
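// --- Editor's illustrative sketch (not part of the Crunch sources) ---
// A CombineFn that sums the values for each key. Note the single traversal of
// the Iterable, per the one-iterator restriction documented above.
class SumCombineFnSketch extends CombineFn<String, Long> {
@Override
public void process(Pair<String, Iterable<Long>> input, Emitter<Pair<String, Long>> emitter) {
long sum = 0L;
for (Long value : input.second()) { // the Iterable may only be traversed once
sum += value;
}
emitter.emit(Pair.of(input.first(), sum));
}
}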
| 2,638 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/ParallelDoOptions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.hadoop.conf.Configuration;
/**
* Container class that includes optional information about a {@code parallelDo} operation
* applied to a {@code PCollection}. Primarily used within the Crunch framework
* itself for certain types of advanced processing operations, such as in-memory joins
* that require reading a file from the filesystem into a {@code DoFn}.
*/
public class ParallelDoOptions {
private final Set<Target> targets;
private final Map<String, String> extraConf;
private ParallelDoOptions(Set<Target> targets, Map<String, String> extraConf) {
this.targets = targets;
this.extraConf = extraConf;
}
@Deprecated
@SuppressWarnings({"unchecked", "rawtypes"})
public Set<SourceTarget<?>> getSourceTargets() {
return (Set<SourceTarget<?>>) (Set) targets;
}
public Set<Target> getTargets() { return targets; }
/**
* Applies the key-value pairs that were associated with this instance to the given {@code Configuration}
* object. This is called just before the {@code configure} method on the {@code DoFn} corresponding to this
* instance is called, so it is possible for the {@code DoFn} to see (and possibly override) these settings.
*/
public void configure(Configuration conf) {
for (Map.Entry<String, String> e : extraConf.entrySet()) {
conf.set(e.getKey(), e.getValue());
}
}
public static Builder builder() {
return new Builder();
}
public static class Builder {
private Set<Target> targets;
private Map<String, String> extraConf;
public Builder() {
this.targets = Sets.newHashSet();
this.extraConf = Maps.newHashMap();
}
public Builder sources(Source<?>... sources) {
return sources(Arrays.asList(sources));
}
public Builder sources(Collection<Source<?>> sources) {
for (Source<?> src : sources) {
// Only SourceTargets need to be checked for materialization
if (src instanceof SourceTarget) {
targets.add((SourceTarget) src);
}
}
return this;
}
public Builder sourceTargets(SourceTarget<?>... sourceTargets) {
Collections.addAll(this.targets, sourceTargets);
return this;
}
public Builder sourceTargets(Collection<SourceTarget<?>> sourceTargets) {
this.targets.addAll(sourceTargets);
return this;
}
public Builder targets(Target... targets) {
Collections.addAll(this.targets, targets);
return this;
}
public Builder targets(Collection<Target> targets) {
this.targets.addAll(targets);
return this;
}
/**
* Specifies key-value pairs that should be added to the {@code Configuration} object associated with the
* {@code Job} that includes these options.
* @param confKey The key
* @param confValue The value
* @return This {@code Builder} instance
*/
public Builder conf(String confKey, String confValue) {
this.extraConf.put(confKey, confValue);
return this;
}
public ParallelDoOptions build() {
return new ParallelDoOptions(targets, extraConf);
}
}
}
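// --- Editor's illustrative sketch (not part of the Crunch sources) ---
// Building options that declare a SourceTarget dependency (e.g. a side input
// read from within the DoFn) and inject an extra Configuration setting. The
// configuration key is hypothetical.
class ParallelDoOptionsSketch {
static ParallelDoOptions forSideInput(SourceTarget<String> sideInput) {
return ParallelDoOptions.builder()
.sourceTargets(sideInput) // must be materialized before the DoFn runs
.conf("sketch.side.input", "enabled") // hypothetical key, visible in DoFn.configure
.build();
}
}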
| 2,639 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/CrunchRuntimeException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
/**
* A {@code RuntimeException} implementation that includes some additional options
* for the Crunch execution engine to track reporting status. Clients may
* use instances of this class in their own {@code DoFn} implementations.
*/
public class CrunchRuntimeException extends RuntimeException {
private boolean logged = false;
public CrunchRuntimeException(String msg) {
super(msg);
}
public CrunchRuntimeException(Exception e) {
super(e);
}
public CrunchRuntimeException(String msg, Exception e) {
super(msg, e);
}
/**
* Returns true if this exception was written to the debug logs.
*/
public boolean wasLogged() {
return logged;
}
/**
* Indicate that this exception has been written to the debug logs.
*/
public void markLogged() {
this.logged = true;
}
}
| 2,640 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/Tuple4.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.util.Collection;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
/**
* A convenience class for four-element {@link Tuple}s.
*/
public class Tuple4<V1, V2, V3, V4> implements Tuple {
public static class Collect<V1, V2, V3, V4> extends Tuple4<
Collection<V1>,
Collection<V2>,
Collection<V3>,
Collection<V4>> {
public static <V1, V2, V3, V4> PType<Tuple4.Collect<V1, V2, V3, V4>> derived(PType<V1> first,
PType<V2> second, PType<V3> third, PType<V4> fourth) {
PTypeFamily tf = first.getFamily();
PType<Tuple4<Collection<V1>, Collection<V2>, Collection<V3>, Collection<V4>>> pt =
tf.quads(
tf.collections(first),
tf.collections(second),
tf.collections(third),
tf.collections(fourth));
@SuppressWarnings("unchecked")
Class<Tuple4.Collect<V1, V2, V3, V4>> clazz =
(Class<Tuple4.Collect<V1, V2, V3, V4>>) (Class<?>) Tuple4.Collect.class;
return tf.derived(clazz,
new MapFn<Tuple4<Collection<V1>, Collection<V2>, Collection<V3>, Collection<V4>>,
Collect<V1, V2, V3, V4>>() {
@Override
public Collect<V1, V2, V3, V4> map(
Tuple4<Collection<V1>, Collection<V2>, Collection<V3>, Collection<V4>> in) {
return new Collect<V1, V2, V3, V4>(in.first(), in.second(), in.third(), in.fourth());
}
},
new MapFn<Collect<V1, V2, V3, V4>, Tuple4<Collection<V1>, Collection<V2>, Collection<V3>, Collection<V4>>>() {
@Override
public Tuple4<Collection<V1>, Collection<V2>, Collection<V3>, Collection<V4>> map(
Collect<V1, V2, V3, V4> input) {
return input;
}
}, pt);
}
public Collect(Collection<V1> first, Collection<V2> second, Collection<V3> third, Collection<V4> fourth) {
super(first, second, third, fourth);
}
}
private final V1 first;
private final V2 second;
private final V3 third;
private final V4 fourth;
public static <A, B, C, D> Tuple4<A, B, C, D> of(A a, B b, C c, D d) {
return new Tuple4<A, B, C, D>(a, b, c, d);
}
public Tuple4(V1 first, V2 second, V3 third, V4 fourth) {
this.first = first;
this.second = second;
this.third = third;
this.fourth = fourth;
}
public V1 first() {
return first;
}
public V2 second() {
return second;
}
public V3 third() {
return third;
}
public V4 fourth() {
return fourth;
}
public Object get(int index) {
switch (index) {
case 0:
return first;
case 1:
return second;
case 2:
return third;
case 3:
return fourth;
default:
throw new ArrayIndexOutOfBoundsException();
}
}
public int size() {
return 4;
}
@Override
public int hashCode() {
HashCodeBuilder hcb = new HashCodeBuilder();
return hcb.append(first).append(second).append(third).append(fourth).toHashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Tuple4<?, ?, ?, ?> other = (Tuple4<?, ?, ?, ?>) obj;
return (first == other.first || (first != null && first.equals(other.first)))
&& (second == other.second || (second != null && second.equals(other.second)))
&& (third == other.third || (third != null && third.equals(other.third)))
&& (fourth == other.fourth || (fourth != null && fourth.equals(other.fourth)));
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("Tuple4[");
sb.append(first).append(",").append(second).append(",").append(third);
return sb.append(",").append(fourth).append("]").toString();
}
}
| 2,641 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/TableSourceTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
/**
* An interface for classes that implement both the {@code TableSource} and the
* {@code Target} interfaces.
*/
public interface TableSourceTarget<K, V> extends TableSource<K, V>, SourceTarget<Pair<K, V>> {
}
| 2,642 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/CreateOptions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.base.Preconditions;
/**
* Additional options that can be specified when creating a new PCollection using {@link Pipeline#create}.
*/
public class CreateOptions {
public static CreateOptions none() {
return new CreateOptions("CREATED", 1);
}
public static CreateOptions parallelism(int parallelism) {
return new CreateOptions("CREATED", parallelism);
}
public static CreateOptions name(String name) {
return new CreateOptions(name, 1);
}
public static CreateOptions nameAndParallelism(String name, int parallelism) {
return new CreateOptions(name, parallelism);
}
private final String name;
private final int parallelism;
private CreateOptions(String name, int parallelism) {
this.name = Preconditions.checkNotNull(name);
Preconditions.checkArgument(parallelism > 0, "Invalid parallelism value = %s", parallelism);
this.parallelism = parallelism;
}
public String getName() {
return name;
}
public int getParallelism() {
return parallelism;
}
}
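// --- Editor's illustrative sketch (not part of the Crunch sources) ---
// Passing CreateOptions to Pipeline.create to name the resulting PCollection
// and request a parallelism of four. The sample values are hypothetical;
// Writables.strings() supplies the PType.
class CreateOptionsSketch {
static PCollection<String> sample(Pipeline pipeline) {
return pipeline.create(
java.util.Arrays.asList("a", "b", "c"),
org.apache.crunch.types.writable.Writables.strings(),
CreateOptions.nameAndParallelism("sample-data", 4));
}
}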
| 2,643 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/PGroupedTable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import org.apache.crunch.Aggregator;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.crunch.types.PType;
/**
* The Crunch representation of a grouped {@link PTable}, which corresponds to the output of
* the shuffle phase of a MapReduce job.
*/
public interface PGroupedTable<K, V> extends PCollection<Pair<K, Iterable<V>>> {
/**
* Combines the values of this grouping using the given {@code CombineFn}.
*
* @param combineFn
* The combiner function
* @return A {@code PTable} where each key has a single value
*/
PTable<K, V> combineValues(CombineFn<K, V> combineFn);
/**
* Combines and reduces the values of this grouping using the given {@code CombineFn} instances.
*
* @param combineFn
* The combiner function during the combine phase
* @param reduceFn
* The combiner function during the reduce phase
* @return A {@code PTable} where each key has a single value
*/
PTable<K, V> combineValues(CombineFn<K, V> combineFn, CombineFn<K, V> reduceFn);
/**
* Combine the values in each group using the given {@link Aggregator}.
*
* @param aggregator The function to use
* @return A {@link PTable} where each group key maps to an aggregated
* value. Group keys may be repeated if an aggregator returns
* more than one value.
*/
PTable<K, V> combineValues(Aggregator<V> aggregator);
/**
* Combines and reduces the values in each group using the given {@link Aggregator} instances.
*
* @param combineAggregator The aggregator to use during the combine phase
* @param reduceAggregator The aggregator to use during the reduce phase
* @return A {@link PTable} where each group key maps to an aggregated
* value. Group keys may be repeated if an aggregator returns
* more than one value.
*/
PTable<K, V> combineValues(Aggregator<V> combineAggregator, Aggregator<V> reduceAggregator);
/**
* Maps the {@code Iterable<V>} elements of each record to a new type. Just like
* any {@code parallelDo} operation on a {@code PGroupedTable}, this may only be
* called once.
*
* @param mapFn The mapping function
* @param ptype The serialization information for the returned data
* @return A new {@code PTable} instance
*/
<U> PTable<K, U> mapValues(MapFn<Iterable<V>, U> mapFn, PType<U> ptype);
/**
* Maps the {@code Iterable<V>} elements of each record to a new type. Just like
* any {@code parallelDo} operation on a {@code PGroupedTable}, this may only be
* called once.
*
* @param name A name for this operation
* @param mapFn The mapping function
* @param ptype The serialization information for the returned data
* @return A new {@code PTable} instance
*/
<U> PTable<K, U> mapValues(String name, MapFn<Iterable<V>, U> mapFn, PType<U> ptype);
/**
* Convert this grouping back into a multimap.
*
* @return an ungrouped version of the data in this {@code PGroupedTable}.
*/
PTable<K, V> ungroup();
/**
* Return the {@code PGroupedTableType} containing serialization information for
* this {@code PGroupedTable}.
*/
PGroupedTableType<K, V> getGroupedTableType();
}
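// --- Editor's illustrative sketch (not part of the Crunch sources) ---
// Grouping a PTable and summing each key's values with a pre-defined
// Aggregator from the Aggregators utility class. The wordCounts parameter is
// a hypothetical PTable of word/count pairs.
class GroupedTableSketch {
static PTable<String, Long> sumByKey(PTable<String, Long> wordCounts) {
PGroupedTable<String, Long> grouped = wordCounts.groupByKey();
return grouped.combineValues(org.apache.crunch.fn.Aggregators.SUM_LONGS());
}
}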
| 2,644 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/TupleN.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.util.Arrays;
import org.apache.commons.lang.builder.HashCodeBuilder;
/**
* A {@link Tuple} instance for an arbitrary number of values.
*/
public class TupleN implements Tuple {
private final Object[] values;
public static TupleN of(Object... values) {
return new TupleN(values);
}
public TupleN(Object... values) {
this.values = new Object[values.length];
System.arraycopy(values, 0, this.values, 0, values.length);
}
public Object[] getValues() { return values; }
@Override
public Object get(int index) {
return values[index];
}
@Override
public int size() {
return values.length;
}
@Override
public int hashCode() {
HashCodeBuilder hcb = new HashCodeBuilder();
for (Object v : values) {
hcb.append(v);
}
return hcb.toHashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TupleN other = (TupleN) obj;
return Arrays.equals(this.values, other.values);
}
@Override
public String toString() {
return Arrays.toString(values);
}
}
| 2,645 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/PCollection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.util.Collection;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
/**
* A representation of an immutable, distributed collection of elements that is
* the fundamental target of computations in Crunch.
*
*/
public interface PCollection<S> {
/**
* Returns the {@code Pipeline} associated with this PCollection.
*/
Pipeline getPipeline();
/**
* Returns a {@code PCollection} instance that acts as the union of this
* {@code PCollection} and the given {@code PCollection}.
*/
PCollection<S> union(PCollection<S> other);
/**
* Returns a {@code PCollection} instance that acts as the union of this
* {@code PCollection} and the input {@code PCollection}s.
*/
PCollection<S> union(PCollection<S>... collections);
/**
* Applies the given doFn to the elements of this {@code PCollection} and
* returns a new {@code PCollection} that is the output of this processing.
*
* @param doFn
* The {@code DoFn} to apply
* @param type
* The {@link PType} of the resulting {@code PCollection}
* @return a new {@code PCollection}
*/
<T> PCollection<T> parallelDo(DoFn<S, T> doFn, PType<T> type);
/**
* Applies the given doFn to the elements of this {@code PCollection} and
* returns a new {@code PCollection} that is the output of this processing.
*
* @param name
* An identifier for this processing step, useful for debugging
* @param doFn
* The {@code DoFn} to apply
* @param type
* The {@link PType} of the resulting {@code PCollection}
* @return a new {@code PCollection}
*/
<T> PCollection<T> parallelDo(String name, DoFn<S, T> doFn, PType<T> type);
/**
* Applies the given doFn to the elements of this {@code PCollection} and
* returns a new {@code PCollection} that is the output of this processing.
*
* @param name
* An identifier for this processing step, useful for debugging
* @param doFn
* The {@code DoFn} to apply
* @param type
* The {@link PType} of the resulting {@code PCollection}
* @param options
* Optional information that is needed for certain pipeline operations
* @return a new {@code PCollection}
*/
<T> PCollection<T> parallelDo(String name, DoFn<S, T> doFn, PType<T> type,
ParallelDoOptions options);
/**
* Similar to the other {@code parallelDo} instance, but returns a
* {@code PTable} instance instead of a {@code PCollection}.
*
* @param doFn
* The {@code DoFn} to apply
* @param type
* The {@link PTableType} of the resulting {@code PTable}
* @return a new {@code PTable}
*/
<K, V> PTable<K, V> parallelDo(DoFn<S, Pair<K, V>> doFn, PTableType<K, V> type);
/**
* Similar to the other {@code parallelDo} instance, but returns a
* {@code PTable} instance instead of a {@code PCollection}.
*
* @param name
* An identifier for this processing step
* @param doFn
* The {@code DoFn} to apply
* @param type
* The {@link PTableType} of the resulting {@code PTable}
* @return a new {@code PTable}
*/
<K, V> PTable<K, V> parallelDo(String name, DoFn<S, Pair<K, V>> doFn, PTableType<K, V> type);
/**
* Similar to the other {@code parallelDo} instance, but returns a
* {@code PTable} instance instead of a {@code PCollection}.
*
* @param name
* An identifier for this processing step
* @param doFn
* The {@code DoFn} to apply
* @param type
* The {@link PTableType} of the resulting {@code PTable}
* @param options
* Optional information that is needed for certain pipeline operations
* @return a new {@code PTable}
*/
<K, V> PTable<K, V> parallelDo(String name, DoFn<S, Pair<K, V>> doFn, PTableType<K, V> type,
ParallelDoOptions options);
/**
* Write the contents of this {@code PCollection} to the given {@code Target},
* using the storage format specified by the target.
*
* @param target
* The target to write to
*/
PCollection<S> write(Target target);
/**
* Write the contents of this {@code PCollection} to the given {@code Target},
* using the given {@code Target.WriteMode} to handle existing
* targets.
*
* @param target
* The target
* @param writeMode
* The rule for handling existing outputs at the target location
*/
PCollection<S> write(Target target, Target.WriteMode writeMode);
/**
* Returns a reference to the data set represented by this PCollection that
* may be used by the client to read the data locally.
*/
Iterable<S> materialize();
/**
* Marks this data as cached using the default {@link CachingOptions}. Cached {@code PCollection}s will only
* be processed once, and then their contents will be saved so that downstream code can process them many times.
*
* @return this {@code PCollection} instance
*/
PCollection<S> cache();
/**
* Marks this data as cached using the given {@code CachingOptions}. Cached {@code PCollection}s will only
* be processed once and then their contents will be saved so that downstream code can process them many times.
*
* @param options the options that control the cache settings for the data
* @return this {@code PCollection} instance
*/
PCollection<S> cache(CachingOptions options);
/**
* @return A {@code PObject} encapsulating an in-memory {@link Collection} containing the values
* of this {@code PCollection}.
*/
PObject<Collection<S>> asCollection();
/**
* @return The first element of this {@code PCollection}.
*/
PObject<S> first();
/**
* Adds the materialized data in this {@code PCollection} as a dependency to the given
* {@code PipelineCallable} and registers it with the {@code Pipeline} associated with this
* instance.
*
* @param label the label to use inside of the PipelineCallable for referencing this PCollection
* @param pipelineCallable the function itself
*
* @return The value of the {@code getOutput} function on the given argument.
*/
<Output> Output sequentialDo(String label, PipelineCallable<Output> pipelineCallable);
/**
* @param materialize If true, materialize this data before returning a reference to it
*
* @return A reference to the data in this instance that can be read from a job running
* on a cluster.
*/
ReadableData<S> asReadable(boolean materialize);
/**
* Returns the {@code PType} of this {@code PCollection}.
*/
PType<S> getPType();
/**
* Returns the {@code PTypeFamily} of this {@code PCollection}.
*/
PTypeFamily getTypeFamily();
/**
* Returns the size of the data represented by this {@code PCollection} in
* bytes.
*/
long getSize();
/**
* Returns the number of elements represented by this {@code PCollection}.
*
* @return A {@code PObject} containing the number of elements in this {@code PCollection}.
*/
PObject<Long> length();
/**
* Returns a shorthand name for this PCollection.
*/
String getName();
/**
* Apply the given filter function to this instance and return the resulting
* {@code PCollection}.
*/
PCollection<S> filter(FilterFn<S> filterFn);
/**
* Apply the given filter function to this instance and return the resulting
* {@code PCollection}.
*
* @param name
* An identifier for this processing step
* @param filterFn
* The {@code FilterFn} to apply
*/
PCollection<S> filter(String name, FilterFn<S> filterFn);
/**
* Apply the given map function to each element of this instance in order to
* create a {@code PTable}.
*/
<K> PTable<K, S> by(MapFn<S, K> extractKeyFn, PType<K> keyType);
/**
* Apply the given map function to each element of this instance in order to
* create a {@code PTable}.
*
* @param name
* An identifier for this processing step
* @param extractKeyFn
* The {@code MapFn} to apply
*/
<K> PTable<K, S> by(String name, MapFn<S, K> extractKeyFn, PType<K> keyType);
/**
* Returns a {@code PTable} instance that contains the counts of each unique
* element of this PCollection.
*/
PTable<S, Long> count();
/**
* Returns a {@code PObject} of the maximum element of this instance.
*/
PObject<S> max();
/**
* Returns a {@code PObject} of the minimum element of this instance.
*/
PObject<S> min();
/**
* Returns a {@code PCollection} that contains the result of aggregating all values in this instance.
*/
PCollection<S> aggregate(Aggregator<S> aggregator);
}
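A minimal end-to-end sketch (not part of the original source) of the PCollection API; the Example driver class and the input/output paths are placeholders.
Pipeline pipeline = new MRPipeline(Example.class);
PCollection<String> lines = pipeline.readTextFile("/in/docs");
// Drop empty lines, then count occurrences of each distinct line.
PTable<String, Long> counts = lines
    .filter("non-empty", new FilterFn<String>() {
      @Override
      public boolean accept(String line) {
        return !line.isEmpty();
      }
    })
    .count();
pipeline.writeTextFile(counts, "/out/counts");
pipeline.done();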
| 2,646 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/Source.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.io.IOException;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapreduce.Job;
/**
* A {@code Source} represents an input data set that is read by one or more
* MapReduce jobs.
*
*/
public interface Source<T> {
/**
* Adds the given key-value pair to the {@code Configuration} instance that is used to read
* this {@code Source<T>}. Allows for multiple inputs to re-use the same config keys with
* different values when necessary.
*/
Source<T> inputConf(String key, String value);
/**
* Adds the {@code Configuration} of the given filesystem such that the source can read from it when the {@code
* Pipeline} itself does not have that configuration.
* <p>
* Changing the filesystem after it is set is not supported and will result in an
* {@link IllegalStateException}.
*
* @param fileSystem the filesystem
* @return this Source
* @throws IllegalStateException if the filesystem has already been set
* @throws IllegalArgumentException if the source is pointing to a fully qualified Path in a different FileSystem
*/
Source<T> fileSystem(FileSystem fileSystem);
/**
* Returns the {@code FileSystem} for this source, or null if no explicit filesystem
* has been set via {@link #fileSystem(FileSystem)}.
*/
FileSystem getFileSystem();
/**
* Returns the {@code PType} for this source.
*/
PType<T> getType();
/**
* Returns the {@code Converter} used for mapping the inputs from this instance
* into {@code PCollection} or {@code PTable} values.
*/
Converter<?, ?, ?, ?> getConverter();
/**
* Configure the given job to use this source as an input.
*
* @param job
* The job to configure
* @param inputId
* For a multi-input job, an identifier for this input to the job
* @throws IOException if the source cannot be configured as an input to the job
*/
void configureSource(Job job, int inputId) throws IOException;
/**
* Returns the number of bytes in this {@code Source}.
*/
long getSize(Configuration configuration);
/**
* Returns the time (in milliseconds) that this {@code Source} was most recently
* modified (e.g., because an input file was edited or new files were added to
* a directory).
*/
long getLastModifiedAt(Configuration configuration);
}
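A minimal sketch (not part of the original source) of reading from a Source with a per-source configuration override via inputConf; the path is a placeholder and the split-size key is a standard Hadoop setting.
Source<String> src = From.textFile(new Path("/in/logs"))
    .inputConf("mapreduce.input.fileinputformat.split.minsize", "134217728");  // 128 MB splits for this input only
PCollection<String> logs = pipeline.read(src);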
| 2,647 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/PipelineResult.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import java.util.List;
/**
* Container for the results of a call to {@code run} or {@code done} on the
* Pipeline interface that includes details and statistics about the component
* stages of the data pipeline.
*/
public class PipelineResult {
public static class StageResult {
private final String stageName;
private final String stageId;
private final Counters counters;
private final long startTimeMsec;
private final long jobStartTimeMsec;
private final long jobEndTimeMsec;
private final long endTimeMsec;
public StageResult(String stageName, Counters counters) {
this(stageName, counters, System.currentTimeMillis(), System.currentTimeMillis());
}
public StageResult(String stageName, Counters counters, long startTimeMsec, long endTimeMsec) {
this(stageName, stageName, counters, startTimeMsec, startTimeMsec, endTimeMsec, endTimeMsec);
}
public StageResult(String stageName, String stageId, Counters counters, long startTimeMsec,
long jobStartTimeMsec, long jobEndTimeMsec, long endTimeMsec) {
this.stageName = stageName;
this.stageId = stageId;
this.counters = counters;
this.startTimeMsec = startTimeMsec;
this.jobStartTimeMsec = jobStartTimeMsec;
this.jobEndTimeMsec = jobEndTimeMsec;
this.endTimeMsec = endTimeMsec;
}
public String getStageName() {
return stageName;
}
public String getStageId() {
return stageId;
}
/**
* @return the overall start time for this stage, that is, the time at which any pre-job hooks were
* started.
*/
public long getStartTimeMsec() {
return startTimeMsec;
}
/**
* @return the time that the work for this stage was submitted to the cluster for execution, if applicable.
*/
public long getJobStartTimeMsec() {
return jobStartTimeMsec;
}
/**
* @return the time that the work for this stage finished processing on the cluster, if applicable.
*/
public long getJobEndTimeMsec() {
return jobEndTimeMsec;
}
/**
* @return the overall end time for this stage, that is, the time at which any post-job hooks completed.
*/
public long getEndTimeMsec() {
return endTimeMsec;
}
/**
* @deprecated The {@link Counter} class changed incompatibly between Hadoop 1 and 2
* (from a class to an interface) so user programs should avoid this method and use
* {@link #getCounterNames()}.
*/
@Deprecated
public Counters getCounters() {
return counters;
}
/**
* @return a map of group names to counter names.
*/
public Map<String, Set<String>> getCounterNames() {
if (counters == null) {
return ImmutableMap.of();
}
Map<String, Set<String>> names = Maps.newHashMap();
for (CounterGroup counterGroup : counters) {
Set<String> counterNames = Sets.newHashSet();
for (Counter counter : counterGroup) {
counterNames.add(counter.getName());
}
names.put(counterGroup.getName(), counterNames);
}
return names;
}
/**
* @deprecated The {@link Counter} class changed incompatibly between Hadoop 1 and 2
* (from a class to an interface) so user programs should avoid this method and use
* {@link #getCounterValue(Enum)} and/or {@link #getCounterDisplayName(Enum)}.
*/
@Deprecated
public Counter findCounter(Enum<?> key) {
if (counters == null) {
return null;
}
return counters.findCounter(key);
}
public long getCounterValue(String groupName, String counterName) {
if (counters == null) {
return 0L;
}
return counters.findCounter(groupName, counterName).getValue();
}
public String getCounterDisplayName(String groupName, String counterName) {
if (counters == null) {
return null;
}
return counters.findCounter(groupName, counterName).getDisplayName();
}
public long getCounterValue(Enum<?> key) {
if (counters == null) {
return 0L;
}
return counters.findCounter(key).getValue();
}
public String getCounterDisplayName(Enum<?> key) {
if (counters == null) {
return null;
}
return counters.findCounter(key).getDisplayName();
}
}
public static final PipelineResult EMPTY = new PipelineResult(ImmutableList.<StageResult> of(), PipelineExecution.Status.READY);
public static final PipelineResult DONE = new PipelineResult(ImmutableList.<StageResult> of(), PipelineExecution.Status.SUCCEEDED);
private final List<StageResult> stageResults;
public PipelineExecution.Status status;
public PipelineResult(List<StageResult> stageResults, PipelineExecution.Status status) {
this.stageResults = ImmutableList.copyOf(stageResults);
this.status = status;
}
public boolean succeeded() {
return this.status == PipelineExecution.Status.SUCCEEDED;
}
public List<StageResult> getStageResults() {
return stageResults;
}
}
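A minimal sketch (not part of the original source) that walks the stage results using the Hadoop-version-safe counter accessors rather than the deprecated Counters methods:
PipelineResult result = pipeline.done();
if (result.succeeded()) {
  for (PipelineResult.StageResult stage : result.getStageResults()) {
    long wallMsec = stage.getEndTimeMsec() - stage.getStartTimeMsec();
    System.out.println(stage.getStageName() + " took " + wallMsec + " ms");
    // Enumerate every counter by group name, then look up its value.
    for (Map.Entry<String, Set<String>> group : stage.getCounterNames().entrySet()) {
      for (String name : group.getValue()) {
        System.out.println("  " + name + " = " + stage.getCounterValue(group.getKey(), name));
      }
    }
  }
}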
| 2,648 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/Target.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import org.apache.crunch.io.OutputHandler;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
/**
* A {@code Target} represents the output destination of a Crunch {@code PCollection}
* in the context of a Crunch job.
*/
public interface Target {
/**
* An enum to represent different options the client may specify
* for handling the case where the output path, table, etc. referenced
* by a {@code Target} already exists.
*/
enum WriteMode {
/**
* Check to see if the output target already exists before running
* the pipeline, and if it does, print an error and throw an exception.
*/
DEFAULT,
/**
* Check to see if the output target already exists, and if it does,
* delete it and overwrite it with the new output (if any).
*/
OVERWRITE,
/**
* If the output target does not exist, create it. If it does exist,
* add the output of this pipeline to the target. This was the
* behavior in Crunch up to version 0.4.0.
*/
APPEND,
/**
* If the output target exists and is newer than any of its source inputs, don't rewrite it,
* just start the pipeline from here. Only works with {@code SourceTarget} instances.
*/
CHECKPOINT
}
/**
* Adds the given key-value pair to the {@code Configuration} instance that is used to write
* this {@code Target}. Allows for multiple target outputs to re-use the same config keys with
* different values when necessary.
*/
Target outputConf(String key, String value);
/**
* Adds the {@code Configuration} of the given filesystem such that the target can write to it when the {@code
* Pipeline} itself does not have that configuration.
* <p>
* Changing the filesystem after it is set is not supported and will result in an
* {@link IllegalStateException}.
*
* @param fileSystem the filesystem
* @return this Target
* @throws IllegalStateException if the filesystem has already been set
* @throws IllegalArgumentException if the target is pointing to a fully qualified Path in a different FileSystem
*/
Target fileSystem(FileSystem fileSystem);
/**
* Returns the {@code FileSystem} for this target, or null if no explicit filesystem
* has been set via {@link #fileSystem(FileSystem)}.
*/
FileSystem getFileSystem();
/**
* Apply the given {@code WriteMode} to this {@code Target} instance.
*
* @param writeMode The strategy for handling existing outputs
* @param lastModifiedAt the time of the most recent modification to one of the source inputs for handling based
* on the provided {@code writeMode}, or -1 if not relevant for the provided {@code writeMode}
* @param conf The ever-useful {@code Configuration} instance
* @return true if the target already existed, false otherwise
*/
boolean handleExisting(WriteMode writeMode, long lastModifiedAt, Configuration conf);
/**
* Checks to see if this {@code Target} instance is compatible with the
* given {@code PType}.
*
* @param handler The {@link OutputHandler} that is managing the output for the job
* @param ptype The {@code PType} to check
* @return True if this Target can write data in the form of the given {@code PType},
* false otherwise
*/
boolean accept(OutputHandler handler, PType<?> ptype);
/**
* Returns the {@code Converter} to use for mapping from the output {@code PCollection}
* into the output values expected by this instance.
*
* @param ptype The {@code PType} of the data that is being written to this instance
* @return A valid {@code Converter} for the output represented by this instance
*/
Converter<?, ?, ?, ?> getConverter(PType<?> ptype);
/**
* Attempt to create the {@code SourceTarget} type that corresponds to this {@code Target}
* for the given {@code PType}, if possible. If it is not possible, return {@code null}.
*
* @param ptype The {@code PType} to use in constructing the {@code SourceTarget}
* @return A new {@code SourceTarget} or null if such a {@code SourceTarget} does not exist
*/
<T> SourceTarget<T> asSourceTarget(PType<T> ptype);
}
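A minimal sketch (not part of the original source) pairing a Target with an explicit WriteMode so an existing output directory is replaced instead of failing the plan; the To factory call and path are placeholders, and CHECKPOINT would additionally require a SourceTarget.
counts.write(To.textFile("/out/counts"), Target.WriteMode.OVERWRITE);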
| 2,649 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/Tuple3.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import java.util.Collection;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.crunch.types.PType;
import org.apache.crunch.types.PTypeFamily;
/**
* A convenience class for three-element {@link Tuple}s.
*/
public class Tuple3<V1, V2, V3> implements Tuple {
public static class Collect<V1, V2, V3> extends Tuple3<Collection<V1>, Collection<V2>, Collection<V3>> {
public static <V1, V2, V3> PType<Tuple3.Collect<V1, V2, V3>> derived(PType<V1> first,
PType<V2> second, PType<V3> third) {
PTypeFamily tf = first.getFamily();
PType<Tuple3<Collection<V1>, Collection<V2>, Collection<V3>>> pt =
tf.triples(
tf.collections(first),
tf.collections(second),
tf.collections(third));
Object clazz = Tuple3.Collect.class;
return tf.derived((Class<Tuple3.Collect<V1, V2, V3>>) clazz,
new MapFn<Tuple3<Collection<V1>, Collection<V2>, Collection<V3>>, Collect<V1, V2, V3>>() {
@Override
public Collect<V1, V2, V3> map(
Tuple3<Collection<V1>, Collection<V2>, Collection<V3>> in) {
return new Collect<V1, V2, V3>(in.first(), in.second(), in.third());
}
},
new MapFn<Collect<V1, V2, V3>, Tuple3<Collection<V1>, Collection<V2>, Collection<V3>>>() {
@Override
public Tuple3<Collection<V1>, Collection<V2>, Collection<V3>> map(
Collect<V1, V2, V3> in) {
return in;
}
}, pt);
}
public Collect(Collection<V1> first, Collection<V2> second, Collection<V3> third) {
super(first, second, third);
}
}
private final V1 first;
private final V2 second;
private final V3 third;
public static <A, B, C> Tuple3<A, B, C> of(A a, B b, C c) {
return new Tuple3<A, B, C>(a, b, c);
}
public Tuple3(V1 first, V2 second, V3 third) {
this.first = first;
this.second = second;
this.third = third;
}
public V1 first() {
return first;
}
public V2 second() {
return second;
}
public V3 third() {
return third;
}
public Object get(int index) {
switch (index) {
case 0:
return first;
case 1:
return second;
case 2:
return third;
default:
throw new ArrayIndexOutOfBoundsException();
}
}
public int size() {
return 3;
}
@Override
public int hashCode() {
HashCodeBuilder hcb = new HashCodeBuilder();
return hcb.append(first).append(second).append(third).toHashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Tuple3<?, ?, ?> other = (Tuple3<?, ?, ?>) obj;
return (first == other.first || (first != null && first.equals(other.first)))
&& (second == other.second || (second != null && second.equals(other.second)))
&& (third == other.third || (third != null && third.equals(other.third)));
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("Tuple3[");
sb.append(first).append(",").append(second).append(",").append(third);
return sb.append("]").toString();
}
}
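A minimal usage sketch (not part of the original source) of Tuple3's typed accessors alongside the untyped Tuple view:
Tuple3<String, Integer, Boolean> t = Tuple3.of("bob", 7, false);
String name = t.first();     // typed access
Integer score = t.second();
Object byIndex = t.get(2);   // untyped Tuple access: false
assert t.size() == 3;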
| 2,650 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/Emitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
/**
* Interface for writing outputs from a {@link DoFn}.
*
*/
public interface Emitter<T> {
/**
* Write the emitted value to the next stage of the pipeline.
*
* @param emitted
* The value to write
*/
void emit(T emitted);
/**
* Flushes any values cached by this emitter. Called during the cleanup stage.
*/
void flush();
}
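A minimal sketch (not part of the original source) of a DoFn that calls emit zero or more times per input, which is what distinguishes it from the one-to-one MapFn:
DoFn<String, String> tokenize = new DoFn<String, String>() {
  @Override
  public void process(String line, Emitter<String> emitter) {
    // One input line may produce many output words, or none at all.
    for (String word : line.split("\\s+")) {
      if (!word.isEmpty()) {
        emitter.emit(word);
      }
    }
  }
};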
| 2,651 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/package-info.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Client-facing API and core abstractions.
*
* @see <a href="http://crunch.apache.org/intro.html">Introduction to
* Apache Crunch</a>
*/
package org.apache.crunch;
| 2,652 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/MapFn.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
/**
* A {@link DoFn} for the common case of emitting exactly one value for each
* input record.
*
*/
public abstract class MapFn<S, T> extends DoFn<S, T> {
/**
* Maps the given input into an instance of the output type.
*/
public abstract T map(S input);
@Override
public void process(S input, Emitter<T> emitter) {
emitter.emit(map(input));
}
@Override
public float scaleFactor() {
return 1.0f;
}
}
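A minimal sketch (not part of the original source): only map needs to be implemented, since process and scaleFactor are provided by the base class. The lines collection and the Writables type factory are assumptions.
PCollection<Integer> lengths = lines.parallelDo(new MapFn<String, Integer>() {
  @Override
  public Integer map(String input) {
    return input.length();  // exactly one output per input record
  }
}, Writables.ints());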
| 2,653 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/SourceTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import org.apache.hadoop.fs.FileSystem;
/**
* An interface for classes that implement both the {@code Source} and the
* {@code Target} interfaces.
*
*/
public interface SourceTarget<T> extends Source<T>, Target {
/**
* Adds the given key-value pair to the {@code Configuration} instance(s) that are used to
* read and write this {@code SourceTarget<T>}. Allows for multiple inputs and outputs to
* re-use the same config keys with different values when necessary.
*/
SourceTarget<T> conf(String key, String value);
/**
* Adds the {@code Configuration} of the given filesystem such that the source-target can read/write from/to it when
* the {@code Pipeline} itself does not have that configuration.
* <p>
* Changing the filesystem after it is set is not supported and will result in an
* {@link IllegalStateException}.
*
* @param fileSystem the filesystem
* @return this SourceTarget
* @throws IllegalStateException if the filesystem has already been set
* @throws IllegalArgumentException if the source/target is pointing to a fully qualified Path in a different
* FileSystem
*/
SourceTarget<T> fileSystem(FileSystem fileSystem);
}
| 2,654 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/CachingOptions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch;
import com.google.common.base.Preconditions;
/**
* Options for controlling how a {@code PCollection<T>} is cached for subsequent processing. Different pipeline
* execution frameworks may use some or all of these options when deciding how to cache a given {@code PCollection}
* depending on the implementation details of the framework.
*/
public class CachingOptions {
private final boolean useDisk;
private final boolean useMemory;
private final boolean deserialized;
private final int replicas;
private CachingOptions(
boolean useDisk,
boolean useMemory,
boolean deserialized,
int replicas) {
this.useDisk = useDisk;
this.useMemory = useMemory;
this.deserialized = deserialized;
this.replicas = replicas;
}
/**
* Whether the framework may cache data on disk.
*/
public boolean useDisk() {
return useDisk;
}
/**
* Whether the framework may cache data in memory without writing it to disk.
*/
public boolean useMemory() {
return useMemory;
}
/**
* Whether the data should remain deserialized in the cache, which trades off CPU processing time
* for additional storage overhead.
*/
public boolean deserialized() {
return deserialized;
}
/**
* Returns the number of replicas of the data that should be maintained in the cache.
*/
public int replicas() {
return replicas;
}
/**
* Creates a new {@link Builder} instance to use for specifying the caching options for a particular
* {@code PCollection<T>}.
* @return a new {@code Builder} instance
*/
public static Builder builder() {
return new CachingOptions.Builder();
}
/**
* An instance of {@code CachingOptions} with the default caching settings.
*/
public static final CachingOptions DEFAULT = CachingOptions.builder().build();
/**
* A Builder class to use for setting the {@code CachingOptions} for a {@link PCollection}. The default
* settings are to keep a single replica of the data deserialized in memory, without writing to disk
* unless it is required due to resource limitations.
*/
public static class Builder {
private boolean useDisk = false;
private boolean useMemory = true;
private boolean deserialized = true;
private int replicas = 1;
public Builder() {}
public Builder useMemory(boolean useMemory) {
this.useMemory = useMemory;
return this;
}
public Builder useDisk(boolean useDisk) {
this.useDisk = useDisk;
return this;
}
public Builder deserialized(boolean deserialized) {
this.deserialized = deserialized;
return this;
}
public Builder replicas(int replicas) {
Preconditions.checkArgument(replicas > 0);
this.replicas = replicas;
return this;
}
public CachingOptions build() {
return new CachingOptions(useDisk, useMemory, deserialized, replicas);
}
}
}
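A minimal sketch (not part of the original source) caching a PCollection that feeds multiple downstream consumers, allowing the framework to spill to disk; the expensive collection is a placeholder.
CachingOptions opts = CachingOptions.builder()
    .useMemory(true)   // prefer in-memory storage
    .useDisk(true)     // but allow spilling to disk under memory pressure
    .replicas(1)
    .build();
PCollection<String> shared = expensive.cache(opts);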
| 2,655 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/SingleUseIterable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl;
import java.util.Iterator;
/**
* Wrapper around a Reducer's input Iterable. Ensures that the
* {@link #iterator()} method is not called more than once.
*/
public class SingleUseIterable<T> implements Iterable<T> {
private boolean used = false;
private final Iterable<T> wrappedIterable;
/**
* Instantiate around an Iterable that may only be used once.
*
* @param toWrap iterable to wrap
*/
public SingleUseIterable(Iterable<T> toWrap) {
this.wrappedIterable = toWrap;
}
@Override
public Iterator<T> iterator() {
if (used) {
throw new IllegalStateException("iterator() can only be called once on this Iterable");
}
used = true;
return wrappedIterable.iterator();
}
}
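A minimal sketch (not part of the original source) showing the single-pass contract, which mirrors the one-shot nature of a Reducer's value Iterable:
SingleUseIterable<Integer> once = new SingleUseIterable<Integer>(Arrays.asList(1, 2, 3));
for (int i : once) {
  System.out.println(i);  // first pass succeeds
}
once.iterator();          // second call throws IllegalStateException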
| 2,656 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/MRPipelineExecution.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr;
import org.apache.crunch.PipelineExecution;
import java.util.List;
public interface MRPipelineExecution extends PipelineExecution {
List<MRJob> getJobs();
}
| 2,657 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/MRPipeline.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLEncoder;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.crunch.CachingOptions;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.PCollection;
import org.apache.crunch.PipelineExecution;
import org.apache.crunch.PipelineResult;
import org.apache.crunch.hadoop.mapreduce.lib.jobcontrol.CrunchControlledJob;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.mr.collect.MRCollectionFactory;
import org.apache.crunch.impl.mr.exec.MRExecutor;
import org.apache.crunch.impl.mr.plan.MSCRPlanner;
import org.apache.crunch.impl.mr.plan.PlanningParameters;
import org.apache.crunch.io.ReadableSource;
import org.apache.crunch.materialize.MaterializableIterable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Pipeline implementation that is executed within Hadoop MapReduce.
*/
public class MRPipeline extends DistributedPipeline {
private static final Logger LOG = LoggerFactory.getLogger(MRPipeline.class);
private final Class<?> jarClass;
private final List<CrunchControlledJob.Hook> prepareHooks;
private final List<CrunchControlledJob.Hook> completionHooks;
/**
* Instantiate with a default Configuration and name.
*
* @param jarClass Class containing the main driver method for running the pipeline
*/
public MRPipeline(Class<?> jarClass) {
this(jarClass, new Configuration());
}
/**
* Instantiate with a custom pipeline name. The name will be displayed in the Hadoop JobTracker.
*
* @param jarClass Class containing the main driver method for running the pipeline
* @param name Display name of the pipeline
*/
public MRPipeline(Class<?> jarClass, String name) {
this(jarClass, name, new Configuration());
}
/**
* Instantiate with a custom configuration and default naming.
*
* @param jarClass Class containing the main driver method for running the pipeline
* @param conf Configuration to be used within all MapReduce jobs run in the pipeline
*/
public MRPipeline(Class<?> jarClass, Configuration conf) {
this(jarClass, jarClass.getName(), conf);
}
/**
* Instantiate with a custom name and configuration. The name will be displayed in the Hadoop
* JobTracker.
*
* @param jarClass Class containing the main driver method for running the pipeline
* @param name Display name of the pipeline
* @param conf Configuration to be used within all MapReduce jobs run in the pipeline
*/
public MRPipeline(Class<?> jarClass, String name, Configuration conf) {
super(name, conf, new MRCollectionFactory());
this.jarClass = jarClass;
this.prepareHooks = Lists.newArrayList();
this.completionHooks = Lists.newArrayList();
}
public MRPipeline addPrepareHook(CrunchControlledJob.Hook hook) {
this.prepareHooks.add(hook);
return this;
}
public List<CrunchControlledJob.Hook> getPrepareHooks() {
return prepareHooks;
}
public MRPipeline addCompletionHook(CrunchControlledJob.Hook hook) {
this.completionHooks.add(hook);
return this;
}
public List<CrunchControlledJob.Hook> getCompletionHooks() {
return completionHooks;
}
public MRExecutor plan() {
Map<PCollectionImpl<?>, MaterializableIterable> toMaterialize = Maps.newHashMap();
for (PCollectionImpl<?> c : outputTargets.keySet()) {
if (outputTargetsToMaterialize.containsKey(c)) {
toMaterialize.put(c, outputTargetsToMaterialize.get(c));
outputTargetsToMaterialize.remove(c);
}
}
MSCRPlanner planner = new MSCRPlanner(this, outputTargets, toMaterialize, appendedTargets, allPipelineCallables);
try {
return planner.plan(jarClass, getConfiguration());
} catch (IOException e) {
throw new CrunchRuntimeException(e);
}
}
@Override
public PipelineResult run() {
try {
PipelineExecution pipelineExecution = runAsync();
pipelineExecution.waitUntilDone();
return pipelineExecution.getResult();
} catch (InterruptedException e) {
// TODO: How to handle this without changing signature?
LOG.error("Exception running pipeline", e);
return PipelineResult.EMPTY;
}
}
@Override
public MRPipelineExecution runAsync() {
MRExecutor mrExecutor = plan();
for (Entry<String, String> dotEntry: mrExecutor.getNamedDotFiles().entrySet()){
writePlanDotFile(dotEntry.getKey(), dotEntry.getValue());
}
MRPipelineExecution res = mrExecutor.execute();
outputTargets.clear();
return res;
}
@Override
public <T> Iterable<T> materialize(PCollection<T> pcollection) {
((PCollectionImpl) pcollection).setBreakpoint();
ReadableSource<T> readableSrc = getMaterializeSourceTarget(pcollection);
MaterializableIterable<T> c = new MaterializableIterable<T>(this, readableSrc);
if (!outputTargetsToMaterialize.containsKey(pcollection)) {
outputTargetsToMaterialize.put((PCollectionImpl) pcollection, c);
}
return c;
}
@Override
public <T> void cache(PCollection<T> pcollection, CachingOptions options) {
// Identical to materialization in a MapReduce context
materialize(pcollection);
}
/**
* Writes the MR job plan dot file contents to a timestamped file if the PIPELINE_DOTFILE_OUTPUT_DIR
* config key is set to an output directory.
*
* @param fileName base name for the generated dot file
* @param dotFileContents contents to be written to the dot file
*/
private void writePlanDotFile(String fileName, String dotFileContents) {
String dotFileDir = getConfiguration().get(PlanningParameters.PIPELINE_DOTFILE_OUTPUT_DIR);
if (dotFileDir != null) {
FSDataOutputStream outputStream = null;
Exception thrownException = null;
try {
URI uri = new URI(dotFileDir);
FileSystem fs = FileSystem.get(uri, getConfiguration());
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd_HH.mm.ss.SSS");
String filenameSuffix = String.format("_%s_%s.dot", dateFormat.format(new Date()), fileName);
String encodedName = URLEncoder.encode(getName(), "UTF-8");
// We limit the pipeline name to the first 150 characters to keep the output dotfile length less
// than 200, as it's not clear what the exact limits are on the filesystem we're writing to (this
// might be HDFS or it might be a local filesystem)
final int maxPipeNameLength = 150;
String filenamePrefix = encodedName.substring(0, Math.min(maxPipeNameLength, encodedName.length()));
Path jobPlanPath = new Path(uri.getPath(), filenamePrefix + filenameSuffix);
LOG.info("Writing jobplan to {}", jobPlanPath);
outputStream = fs.create(jobPlanPath, true);
outputStream.write(dotFileContents.getBytes(Charsets.UTF_8));
} catch (URISyntaxException e) {
thrownException = e;
throw new CrunchRuntimeException("Invalid dot file dir URI, job plan will not be written: " + dotFileDir, e);
} catch (IOException e) {
thrownException = e;
throw new CrunchRuntimeException("Error writing dotfile contents to " + dotFileDir, e);
} catch (RuntimeException e) {
thrownException = e;
throw e;
} finally {
if (outputStream != null) {
try {
outputStream.close();
} catch (IOException e) {
if (thrownException == null)
throw new CrunchRuntimeException("Error closing dotfile", e);
}
}
}
}
}
}
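A minimal driver sketch (not part of the original source); the WordCount class, job name, and paths are placeholders.
Configuration conf = new Configuration();
MRPipeline pipeline = new MRPipeline(WordCount.class, "word-count", conf);
PCollection<String> lines = pipeline.readTextFile("/in/docs");
pipeline.writeTextFile(lines.count(), "/out/counts");
PipelineResult result = pipeline.done();  // runs the plan and cleans up temporary outputs
System.exit(result.succeeded() ? 0 : 1);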
| 2,658 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/MRJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr;
import org.apache.hadoop.mapreduce.Job;
import java.util.List;
/**
* A Hadoop MapReduce job managed by Crunch.
*/
public interface MRJob {
/** A job will be in one of the following states. */
enum State {
SUCCESS, WAITING, RUNNING, READY, FAILED, DEPENDENT_FAILED
}
/** @return the Job ID assigned by Crunch */
int getJobID();
/** @return the internal Hadoop MapReduce job */
Job getJob();
/** @return the depending jobs of this job */
List<MRJob> getDependentJobs();
/** @return the state of this job */
State getJobState();
}
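A minimal monitoring sketch (not part of the original source) that polls job states through the MRPipelineExecution handle returned by runAsync; note that waitUntilDone may throw InterruptedException.
MRPipelineExecution exec = pipeline.runAsync();
for (MRJob mrJob : exec.getJobs()) {
  // Inspect each Crunch-managed Hadoop job before blocking on completion.
  System.out.println("job " + mrJob.getJobID() + " state=" + mrJob.getJobState());
}
exec.waitUntilDone();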
| 2,659 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/package-info.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A Pipeline implementation that runs on Hadoop MapReduce.
*/
package org.apache.crunch.impl.mr;
| 2,660 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/collect/MRCollectionFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.collect;
import org.apache.crunch.CombineFn;
import org.apache.crunch.DoFn;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.PTable;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.Source;
import org.apache.crunch.TableSource;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.impl.dist.collect.BaseDoCollection;
import org.apache.crunch.impl.dist.collect.BaseDoTable;
import org.apache.crunch.impl.dist.collect.BaseGroupedTable;
import org.apache.crunch.impl.dist.collect.BaseInputCollection;
import org.apache.crunch.impl.dist.collect.BaseInputTable;
import org.apache.crunch.impl.dist.collect.BaseUnionCollection;
import org.apache.crunch.impl.dist.collect.PCollectionFactory;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.dist.collect.PTableBase;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.types.PTableType;
import org.apache.crunch.types.PType;
import java.util.List;
public class MRCollectionFactory implements PCollectionFactory {
@Override
public <S> BaseInputCollection<S> createInputCollection(
Source<S> source,
String name,
DistributedPipeline pipeline,
ParallelDoOptions doOpts) {
return new InputCollection<S>(source, name, (MRPipeline) pipeline, doOpts);
}
@Override
public <K, V> BaseInputTable<K, V> createInputTable(
TableSource<K, V> source,
String name,
DistributedPipeline pipeline,
ParallelDoOptions doOpts) {
return new InputTable<K, V>(source, name, (MRPipeline) pipeline, doOpts);
}
@Override
public <S> BaseUnionCollection<S> createUnionCollection(List<? extends PCollectionImpl<S>> internal) {
return new UnionCollection<S>(internal);
}
@Override
public <S, T> BaseDoCollection<T> createDoCollection(
String name,
PCollectionImpl<S> parent,
DoFn<S, T> fn,
PType<T> type,
ParallelDoOptions options) {
return new DoCollection<T>(name, parent, fn, type, options);
}
@Override
public <S, K, V> BaseDoTable<K, V> createDoTable(
String name,
PCollectionImpl<S> parent,
DoFn<S, Pair<K, V>> fn,
PTableType<K, V> type,
ParallelDoOptions options) {
return new DoTable<K, V>(name, parent, fn, type, options);
}
@Override
public <S, K, V> BaseDoTable<K, V> createDoTable(
String name,
PCollectionImpl<S> parent,
CombineFn<K, V> combineFn,
DoFn<S, Pair<K, V>> reduceFn,
PTableType<K, V> type) {
return new DoTable<K, V>(name, parent, combineFn, reduceFn, type);
}
@Override
public <K, V> BaseGroupedTable<K, V> createGroupedTable(PTableBase<K, V> parent, GroupingOptions groupingOptions) {
return new PGroupedTableImpl<K, V>(parent, groupingOptions);
}
@Override
public <K, V> PTable<K, V> createUnionTable(List<PTableBase<K, V>> internal) {
return new UnionTable<K, V>(internal);
}
}
| 2,661 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/collect/DoTable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.collect;
import org.apache.crunch.CombineFn;
import org.apache.crunch.DoFn;
import org.apache.crunch.Pair;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.impl.dist.collect.BaseDoTable;
import org.apache.crunch.impl.dist.collect.MRCollection;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.mr.plan.DoNode;
import org.apache.crunch.types.PTableType;
public class DoTable<K, V> extends BaseDoTable<K, V> implements MRCollection {
<S> DoTable(String name, PCollectionImpl<S> parent,
DoFn<S, Pair<K, V>> fn, PTableType<K, V> ntype, ParallelDoOptions options) {
super(name, parent, fn, ntype, options);
}
<S> DoTable(String name, PCollectionImpl<S> parent, CombineFn<K, V> combineFn,
DoFn<S, Pair<K, V>> fn, PTableType<K, V> ntype) {
super(name, parent, combineFn, fn, ntype);
}
@Override
public DoNode createDoNode() {
return DoNode.createFnNode(getName(), fn, type, doOptions);
}
public DoNode createCombineNode() {
return DoNode.createFnNode(getName(), combineFn, type, doOptions);
}
public boolean hasCombineFn() {
return combineFn != null;
}
}
| 2,662 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/collect/UnionCollection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.collect;
import org.apache.crunch.impl.dist.collect.BaseUnionCollection;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import java.util.List;
public class UnionCollection<S> extends BaseUnionCollection<S> {
UnionCollection(List<? extends PCollectionImpl<S>> collections) {
super(collections);
}
}
| 2,663 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/collect/InputCollection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.collect;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.ReadableData;
import org.apache.crunch.Source;
import org.apache.crunch.impl.dist.collect.BaseInputCollection;
import org.apache.crunch.impl.dist.collect.MRCollection;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.impl.mr.plan.DoNode;
import org.apache.crunch.io.ReadableSource;
public class InputCollection<S> extends BaseInputCollection<S> implements MRCollection {
public InputCollection(Source<S> source, String name, MRPipeline pipeline, ParallelDoOptions doOpts) {
super(source, name, pipeline, doOpts);
}
@Override
protected ReadableData<S> getReadableDataInternal() {
if (source instanceof ReadableSource) {
return ((ReadableSource<S>) source).asReadable();
} else {
return materializedData();
}
}
@Override
public DoNode createDoNode() {
return DoNode.createInputNode(source);
}
}
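// Illustrative sketch (not part of the original source): an InputCollection is
// what the pipeline returns when reading a Source; the path is hypothetical.
//
//   PCollection<String> lines = pipeline.read(From.textFile("/tmp/input"));
//   // When the source is a ReadableSource, asReadable() lets downstream
//   // consumers (e.g. map-side joins) read the data without a separate job.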
| 2,664 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/collect/PGroupedTableImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.collect;
import org.apache.crunch.GroupingOptions;
import org.apache.crunch.impl.dist.collect.BaseGroupedTable;
import org.apache.crunch.impl.dist.collect.MRCollection;
import org.apache.crunch.impl.dist.collect.PTableBase;
import org.apache.crunch.impl.mr.plan.DoNode;
import org.apache.crunch.util.PartitionUtils;
import org.apache.hadoop.mapreduce.Job;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class PGroupedTableImpl<K, V> extends BaseGroupedTable<K, V> implements MRCollection {
private static final Logger LOG = LoggerFactory.getLogger(PGroupedTableImpl.class);
PGroupedTableImpl(PTableBase<K, V> parent, GroupingOptions groupingOptions) {
super(parent, groupingOptions);
}
public void configureShuffle(Job job) {
ptype.configureShuffle(job, groupingOptions);
if (groupingOptions == null || groupingOptions.getNumReducers() <= 0) {
int numReduceTasks = PartitionUtils.getRecommendedPartitions(this, getPipeline().getConfiguration());
if (numReduceTasks > 0) {
job.setNumReduceTasks(numReduceTasks);
LOG.info("Setting num reduce tasks to {}", numReduceTasks);
} else {
LOG.warn("Attempted to set a negative number of reduce tasks");
}
}
}
@Override
public void accept(Visitor visitor) {
visitor.visitGroupedTable(this);
}
@Override
public DoNode createDoNode() {
return DoNode.createFnNode(getName(), ptype.getInputMapFn(), ptype, doOptions);
}
public DoNode getGroupingNode() {
return DoNode.createGroupingNode("", ptype);
}
public int getNumReduceTasks() {
int numReduceTasks;
if (groupingOptions == null || groupingOptions.getNumReducers() <= 0) {
numReduceTasks = PartitionUtils.getRecommendedPartitions(this, getPipeline().getConfiguration());
} else {
numReduceTasks = groupingOptions.getNumReducers();
}
return numReduceTasks;
}
public boolean isNumReduceTasksSetByUser() {
return (groupingOptions != null && groupingOptions.getNumReducers() > 0);
}
}
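// Illustrative sketch (not part of the original source): groupByKey yields a
// PGroupedTableImpl, and GroupingOptions can pin the reducer count that
// configureShuffle would otherwise derive via PartitionUtils; "ones" is a
// hypothetical PTable<String, Long>.
//
//   PGroupedTable<String, Long> grouped =
//       ones.groupByKey(GroupingOptions.builder().numReducers(10).build());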
| 2,665 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/collect/DoCollection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.collect;
import org.apache.crunch.DoFn;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.impl.dist.collect.BaseDoCollection;
import org.apache.crunch.impl.dist.collect.MRCollection;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.mr.plan.DoNode;
import org.apache.crunch.types.PType;
public class DoCollection<S> extends BaseDoCollection<S> implements MRCollection {
<T> DoCollection(
String name,
PCollectionImpl<T> parent,
DoFn<T, S> fn,
PType<S> ptype,
ParallelDoOptions options) {
super(name, parent, fn, ptype, options);
}
@Override
public DoNode createDoNode() {
return DoNode.createFnNode(getName(), fn, ptype, doOptions);
}
}
| 2,666 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/collect/InputTable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.collect;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.TableSource;
import org.apache.crunch.impl.dist.collect.BaseInputTable;
import org.apache.crunch.impl.dist.collect.MRCollection;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.impl.mr.plan.DoNode;
public class InputTable<K, V> extends BaseInputTable<K, V> implements MRCollection {
public InputTable(TableSource<K, V> source, String name, MRPipeline pipeline, ParallelDoOptions doOpts) {
super(source, name, pipeline, doOpts);
}
@Override
public DoNode createDoNode() {
return DoNode.createInputNode(source);
}
}
| 2,667 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/collect/UnionTable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.collect;
import org.apache.crunch.impl.dist.collect.BaseUnionTable;
import org.apache.crunch.impl.dist.collect.PTableBase;
import java.util.List;
public class UnionTable<K, V> extends BaseUnionTable<K, V> {
UnionTable(List<PTableBase<K, V>> internal) {
super(internal);
}
}
| 2,668 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/DotfileUtills.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import org.apache.hadoop.conf.Configuration;
/**
 * Helper class that manages the dotfile generation lifecycle and configures the dotfile debug context.
*
* @deprecated use {@link DotfileUtil} instead
*/
public class DotfileUtills {
public static boolean isDebugDotfilesEnabled(Configuration conf) {
return DotfileUtil.isDebugDotfilesEnabled(conf);
}
public static void enableDebugDotfiles(Configuration conf) {
DotfileUtil.enableDebugDotfiles(conf);
}
public static void disableDebugDotfilesEnabled(Configuration conf) {
DotfileUtil.disableDebugDotfiles(conf);
}
public static void setPipelineDotfileOutputDir(Configuration conf, String outputDir) {
DotfileUtil.setPipelineDotfileOutputDir(conf, outputDir);
}
public static String getPipelineDotfileOutputDir(Configuration conf) {
return DotfileUtil.getPipelineDotfileOutputDir(conf);
}
}
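// Illustrative sketch (not part of the original source): enabling debug
// dotfiles through this deprecated facade; prefer DotfileUtil in new code.
//
//   Configuration conf = new Configuration();
//   DotfileUtills.enableDebugDotfiles(conf);
//   DotfileUtills.setPipelineDotfileOutputDir(conf, "/tmp/dotfiles");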
| 2,669 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/NodePath.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import java.util.Iterator;
import java.util.LinkedList;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import com.google.common.collect.Lists;
class NodePath implements Iterable<PCollectionImpl<?>> {
private LinkedList<PCollectionImpl<?>> path;
public NodePath() {
this.path = Lists.newLinkedList();
}
public NodePath(PCollectionImpl<?> tail) {
this.path = Lists.newLinkedList();
this.path.add(tail);
}
public NodePath(NodePath other) {
this.path = Lists.newLinkedList(other.path);
}
public void push(PCollectionImpl<?> stage) {
this.path.push(stage);
}
public NodePath close(PCollectionImpl<?> head) {
this.path.push(head);
return this;
}
public Iterator<PCollectionImpl<?>> iterator() {
return path.iterator();
}
public Iterator<PCollectionImpl<?>> descendingIterator() {
return path.descendingIterator();
}
public PCollectionImpl<?> get(int index) {
return path.get(index);
}
public PCollectionImpl<?> head() {
return path.peekFirst();
}
public PCollectionImpl<?> tail() {
return path.peekLast();
}
@Override
public boolean equals(Object other) {
if (other == null || !(other instanceof NodePath)) {
return false;
}
NodePath nodePath = (NodePath) other;
return path.equals(nodePath.path);
}
@Override
public int hashCode() {
return 17 + 37 * path.hashCode();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (PCollectionImpl<?> collect : path) {
  sb.append(collect.getName()).append('|');
}
if (sb.length() > 0) {
  sb.deleteCharAt(sb.length() - 1); // drop the trailing separator; guards against an empty path
}
return sb.toString();
}
public NodePath splitAt(int splitIndex, PCollectionImpl<?> newHead) {
NodePath top = new NodePath();
for (int i = 0; i <= splitIndex; i++) {
top.path.add(path.get(i));
}
LinkedList<PCollectionImpl<?>> nextPath = Lists.newLinkedList();
nextPath.add(newHead);
nextPath.addAll(path.subList(splitIndex + 1, path.size()));
path = nextPath;
return top;
}
public NodePath splitAt(PCollectionImpl split, PCollectionImpl<?> newHead) {
NodePath top = new NodePath();
int splitIndex = 0;
for (PCollectionImpl p : path) {
top.path.add(p);
if (p == split) {
break;
}
splitIndex++;
}
LinkedList<PCollectionImpl<?>> nextPath = Lists.newLinkedList();
nextPath.add(newHead);
nextPath.addAll(path.subList(splitIndex + 1, path.size()));
path = nextPath;
return top;
}
}
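// Illustrative sketch (not part of the original source): how splitAt divides
// a path; a, b, c stand for arbitrary PCollectionImpls and newHead for the
// materialized collection that replaces the split point.
//
//   NodePath path = new NodePath(c);   // path: [c]
//   path.push(b);                      // path: [b, c]
//   path.push(a);                      // path: [a, b, c]
//   NodePath top = path.splitAt(b, newHead);
//   // top:  [a, b]        (up to and including the split point)
//   // path: [newHead, c]  (the remainder, re-rooted at newHead)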
| 2,670 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/CommonDotfileWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import static java.lang.String.format;
import static org.apache.commons.collections.CollectionUtils.isEmpty;
import java.util.ArrayList;
import org.apache.crunch.Source;
import org.apache.crunch.Target;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.types.PType;
import com.google.common.base.Joiner;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
/**
 * Common debug dotfile writer class. Provides the drawing abstractions shared by all debug dotfile writers.
*/
@SuppressWarnings({ "rawtypes" })
public abstract class CommonDotfileWriter {
protected static final String DEFAULT_FOLDER_COLOR = "darkGreen";
protected static final String[] COLORS = new String[] { "blue", "red", "green", "yellow", "cyan", "darkGray", "gray",
"magenta", "darkGreen", "black" };
protected StringBuilder edgeBuilder = null;
protected StringBuilder contentBuilder = null;
protected String label(String text) {
return text == null ? "-" : text;
}
protected String className(Object obj) {
if (obj == null) {
return "-";
}
Class clazz = null;
if (obj instanceof Class) {
clazz = (Class) obj;
} else {
clazz = obj.getClass();
}
String s = clazz.getName();
s = s.substring(s.lastIndexOf('.') + 1);
return s;
}
protected String getPCollectionID(PCollectionImpl<?> pcollectionImpl) {
return String.format("\"%s@%d\"", pcollectionImpl.getName(), pcollectionImpl.hashCode());
}
protected String formatPCollection(PCollectionImpl<?> pcollectionImpl) {
String withBreakpoint = pcollectionImpl.isBreakpoint() ? " [breakpoint]" : "";
return String.format("%s [label=\"{%s | %s | %s }\", shape=%s, color=%s];\n", getPCollectionID(pcollectionImpl),
pcollectionImpl.getName(), className(pcollectionImpl) + withBreakpoint,
formatPType(pcollectionImpl.getPType()), "record", "black");
}
protected String formatPType(PType ptype) {
StringBuilder sb = new StringBuilder();
sb.append(className(ptype.getTypeClass()));
if (!isEmpty(ptype.getSubTypes())) {
ArrayList<String> subtypes = Lists.newArrayList();
for (Object subType : ptype.getSubTypes()) {
if (subType instanceof PType) {
subtypes.add(formatPType((PType) subType));
} else {
subtypes.add(className(subType));
}
}
sb.append("[").append(Joiner.on(", ").join(subtypes)).append("]");
}
return sb.toString();
}
private String getSourceID(Source s) {
return "\"ST@" + s + "\"";
}
private String getTargetID(Target t) {
return "\"ST@" + t + "\"";
}
protected void formatTarget(Target target, String color) {
contentBuilder.append(String.format("%s [label=\"%s\", shape=folder, color=\"%s\"];\n", getTargetID(target),
target.toString(), color));
}
protected void formatSource(Source source, String color) {
contentBuilder.append(String.format("%s [label=\"%s\", shape=folder, color=\"%s\"];\n", getSourceID(source),
source.toString(), color));
}
protected void link(String from, String to, String color) {
edgeBuilder.append(String.format("%s -> %s [color=\"%s\"];\n", from, to, color));
}
protected void link(PCollectionImpl pc, Target target, String color) {
link(getPCollectionID(pc), getTargetID(target), color);
}
protected void link(PCollectionImpl parent, PCollectionImpl child, String color) {
link(getPCollectionID(parent), getPCollectionID(child), color);
}
protected void link(Source source, PCollectionImpl pc, String color) {
link(getSourceID(source), getPCollectionID(pc), color);
}
public String buildDiagram(String diagramName) {
edgeBuilder = new StringBuilder();
contentBuilder = new StringBuilder();
contentBuilder.append("digraph G {\n");
contentBuilder.append(format(" label=\"%s \\n\\n\"; fontsize=24; labelloc=\"t\"; \n", diagramName));
contentBuilder.append(getLegend());
try {
doBuildDiagram();
} catch (Throwable t) {
contentBuilder.append("\"" + Throwables.getRootCause(t) + "\"");
}
contentBuilder.append(edgeBuilder);
contentBuilder.append("}\n");
return contentBuilder.toString();
}
public String getLegend() {
StringBuilder lsb = new StringBuilder();
lsb.append("subgraph \"cluster-legend-rtnodes\" {\n").append(
"label=\"LEGEND\" ; fontsize=10; style=filled; color=lightblue;\n");
doGetLegend(lsb);
lsb.append("}\n");
return lsb.toString();
}
protected abstract void doBuildDiagram();
protected abstract void doGetLegend(StringBuilder lsb);
}
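// Illustrative sketch (not part of the original source): the minimal shape of
// a concrete writer; the node and edge content below is hypothetical.
//
//   public class MyDotfileWriter extends CommonDotfileWriter {
//     @Override
//     protected void doBuildDiagram() {
//       contentBuilder.append("\"nodeA\" [shape=box];\n");
//       contentBuilder.append("\"nodeB\" [shape=box];\n");
//       link("\"nodeA\"", "\"nodeB\"", COLORS[0]);
//     }
//     @Override
//     protected void doGetLegend(StringBuilder lsb) {
//       lsb.append("\"legend-entry\" [shape=box];\n");
//     }
//   }
//   String dot = new MyDotfileWriter().buildDiagram("My Pipeline");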
| 2,671 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/DoNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import java.util.List;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.crunch.DoFn;
import org.apache.crunch.ParallelDoOptions;
import org.apache.crunch.Source;
import org.apache.crunch.fn.IdentityFn;
import org.apache.crunch.impl.mr.run.NodeContext;
import org.apache.crunch.impl.mr.run.RTNode;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PGroupedTableType;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
public class DoNode {
private static final List<DoNode> NO_CHILDREN = ImmutableList.of();
private final DoFn fn;
private final String name;
private final PType<?> ptype;
private final List<DoNode> children;
private final Converter outputConverter;
private final Source<?> source;
private final ParallelDoOptions options;
private String outputName;
private DoNode(DoFn fn, String name, PType<?> ptype, List<DoNode> children, Converter outputConverter,
Source<?> source, ParallelDoOptions options) {
this.fn = fn;
this.name = name;
this.ptype = ptype;
this.children = children;
this.outputConverter = outputConverter;
this.source = source;
this.options = options;
}
private static List<DoNode> allowsChildren() {
return Lists.newArrayList();
}
public static <K, V> DoNode createGroupingNode(String name, PGroupedTableType<K, V> ptype) {
Converter groupingConverter = ptype.getGroupingConverter();
DoFn<?, ?> fn = groupingConverter.applyPTypeTransforms() ? ptype.getOutputMapFn() : IdentityFn.getInstance();
return new DoNode(fn, name, ptype, NO_CHILDREN, groupingConverter, null, null);
}
public static DoNode createOutputNode(String name, Converter outputConverter, PType<?> ptype) {
DoFn<?, ?> fn = outputConverter.applyPTypeTransforms() ? ptype.getOutputMapFn() : IdentityFn.getInstance();
return new DoNode(fn, name, ptype, NO_CHILDREN, outputConverter, null, null);
}
public static DoNode createFnNode(String name, DoFn<?, ?> function, PType<?> ptype, ParallelDoOptions options) {
return new DoNode(function, name, ptype, allowsChildren(), null, null, options);
}
public static <S> DoNode createInputNode(Source<S> source) {
Converter srcConverter = source.getConverter();
PType<?> ptype = source.getType();
DoFn<?, ?> fn = srcConverter.applyPTypeTransforms() ? ptype.getInputMapFn() : IdentityFn.getInstance();
return new DoNode(fn, source.toString(), ptype, allowsChildren(), null, source, null);
}
public boolean isOutputNode() {
return outputConverter != null;
}
public String getName() {
return name;
}
public List<DoNode> getChildren() {
return children;
}
public Source<?> getSource() {
return source;
}
public PType<?> getPType() {
return ptype;
}
public DoNode addChild(DoNode node) {
// TODO: This is sort of terrible, refactor the code to make this make more sense.
boolean exists = false;
for (DoNode child : children) {
if (node == child || (node.isOutputNode() && node.equals(child))) {
exists = true;
break;
}
}
if (!exists) {
children.add(node);
}
return this;
}
public void setOutputName(String outputName) {
if (outputConverter == null) {
throw new IllegalStateException("Cannot set output name w/o output converter: " + outputName);
}
this.outputName = outputName;
}
public RTNode toRTNode(boolean inputNode, Configuration conf, NodeContext nodeContext) {
List<RTNode> childRTNodes = Lists.newArrayList();
if (options != null) {
options.configure(conf);
}
fn.configure(conf);
for (DoNode child : children) {
childRTNodes.add(child.toRTNode(false, conf, nodeContext));
}
Converter inputConverter = null;
if (inputNode) {
if (nodeContext == NodeContext.MAP) {
inputConverter = source.getConverter();
} else {
inputConverter = ((PGroupedTableType<?, ?>) ptype).getGroupingConverter();
}
}
return new RTNode(fn, (PType<Object>) getPType(), name, childRTNodes, inputConverter, outputConverter, outputName);
}
@Override
public boolean equals(Object other) {
if (!(other instanceof DoNode)) {
return false;
}
if (this == other) {
return true;
}
DoNode o = (DoNode) other;
return name.equals(o.name) && fn.equals(o.fn) && source == o.source && outputConverter == o.outputConverter;
}
@Override
public int hashCode() {
HashCodeBuilder hcb = new HashCodeBuilder();
return hcb.append(name).append(fn).append(source).append(outputConverter).toHashCode();
}
}
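// Illustrative sketch (not part of the original source): assembling a small
// DoNode tree the way the planner does; "source", "fn", "ptype", and "conf"
// are hypothetical.
//
//   DoNode input = DoNode.createInputNode(source);
//   DoNode transform = DoNode.createFnNode("my-fn", fn, ptype,
//       ParallelDoOptions.builder().build());
//   input.addChild(transform);
//   // At job-construction time the tree is converted into runtime nodes:
//   RTNode rt = input.toRTNode(true, conf, NodeContext.MAP);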
| 2,672 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/Graph.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.crunch.Pair;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
/**
 * Graph of the pipeline plan: vertices wrap sources, GBK operations, and
 * outputs, while edges carry the NodePaths that connect them.
 */
class Graph implements Iterable<Vertex> {
private final Map<PCollectionImpl, Vertex> vertices;
private final Map<Pair<Vertex, Vertex>, Edge> edges;
private final Map<Vertex, List<Vertex>> dependencies;
public Graph() {
this.vertices = Maps.newHashMap();
this.edges = Maps.newHashMap();
this.dependencies = Maps.newHashMap();
}
public Vertex getVertexAt(PCollectionImpl impl) {
return vertices.get(impl);
}
public Vertex addVertex(PCollectionImpl impl, boolean output) {
if (vertices.containsKey(impl)) {
Vertex v = vertices.get(impl);
if (output) {
v.setOutput();
}
return v;
}
Vertex v = new Vertex(impl);
vertices.put(impl, v);
if (output) {
v.setOutput();
}
return v;
}
public Edge getEdge(Vertex head, Vertex tail) {
Pair<Vertex, Vertex> p = Pair.of(head, tail);
if (edges.containsKey(p)) {
return edges.get(p);
}
Edge e = new Edge(head, tail);
edges.put(p, e);
tail.addIncoming(e);
head.addOutgoing(e);
return e;
}
@Override
public Iterator<Vertex> iterator() {
return Sets.newHashSet(vertices.values()).iterator();
}
public Set<Edge> getAllEdges() {
return Sets.newHashSet(edges.values());
}
public void markDependency(Vertex child, Vertex parent) {
List<Vertex> parents = dependencies.get(child);
if (parents == null) {
parents = Lists.newArrayList();
dependencies.put(child, parents);
}
parents.add(parent);
}
public List<Vertex> getParents(Vertex child) {
if (dependencies.containsKey(child)) {
return dependencies.get(child);
}
return ImmutableList.of();
}
public List<List<Vertex>> connectedComponents() {
List<List<Vertex>> components = Lists.newArrayList();
Set<Vertex> unassigned = Sets.newHashSet(vertices.values());
while (!unassigned.isEmpty()) {
Vertex base = unassigned.iterator().next();
List<Vertex> component = Lists.newArrayList();
component.add(base);
unassigned.remove(base);
Set<Vertex> working = Sets.newHashSet(base.getAllNeighbors());
while (!working.isEmpty()) {
Vertex n = working.iterator().next();
working.remove(n);
if (unassigned.contains(n)) {
component.add(n);
unassigned.remove(n);
for (Vertex n2 : n.getAllNeighbors()) {
if (unassigned.contains(n2)) {
working.add(n2);
}
}
}
}
components.add(component);
}
return components;
}
}
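// Illustrative sketch (not part of the original source): the planner builds
// and partitions the graph roughly like this; v1 and v2 are hypothetical
// PCollectionImpls.
//
//   Graph graph = new Graph();
//   Vertex head = graph.addVertex(v1, false);
//   Vertex tail = graph.addVertex(v2, true);   // true marks an output vertex
//   Edge e = graph.getEdge(head, tail);        // created and wired on demand
//   List<List<Vertex>> components = graph.connectedComponents();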
| 2,673 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/MSCRPlanner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import java.io.IOException;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import org.apache.crunch.PipelineCallable;
import org.apache.crunch.Source;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.Target;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.impl.mr.collect.InputCollection;
import org.apache.crunch.impl.mr.collect.PGroupedTableImpl;
import org.apache.crunch.impl.mr.exec.MRExecutor;
import org.apache.crunch.materialize.MaterializableIterable;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;
public class MSCRPlanner {
private static final Logger LOG = LoggerFactory.getLogger(MSCRPlanner.class);
private final MRPipeline pipeline;
private final Map<PCollectionImpl<?>, Set<Target>> outputs;
private final Map<PCollectionImpl<?>, MaterializableIterable> toMaterialize;
private final Set<Target> appendedTargets;
private final Map<PipelineCallable<?>, Set<Target>> pipelineCallables;
private int lastJobID = 0;
public MSCRPlanner(MRPipeline pipeline, Map<PCollectionImpl<?>, Set<Target>> outputs,
Map<PCollectionImpl<?>, MaterializableIterable> toMaterialize,
Set<Target> appendedTargets,
Map<PipelineCallable<?>, Set<Target>> pipelineCallables) {
this.pipeline = pipeline;
this.outputs = new TreeMap<PCollectionImpl<?>, Set<Target>>(DEPTH_COMPARATOR);
this.outputs.putAll(outputs);
this.toMaterialize = toMaterialize;
this.appendedTargets = appendedTargets;
this.pipelineCallables = pipelineCallables;
}
// Used to ensure that we always build pipelines starting from the deepest
// outputs, which helps ensure that we handle intermediate outputs correctly.
static final Comparator<PCollectionImpl<?>> DEPTH_COMPARATOR = new Comparator<PCollectionImpl<?>>() {
@Override
public int compare(PCollectionImpl<?> left, PCollectionImpl<?> right) {
int cmp = right.getDepth() - left.getDepth();
if (cmp == 0) {
// Ensure we don't throw away two output collections at the same depth.
// Using the collection name would be nicer here, but names aren't
// necessarily unique.
cmp = Integer.compare(right.hashCode(), left.hashCode());
}
return cmp;
}
};
public MRExecutor plan(Class<?> jarClass, Configuration conf) throws IOException {
DotfileUtil dotfileUtil = new DotfileUtil(jarClass, conf);
// Generate the debug lineage dotfiles (if configuration is enabled)
dotfileUtil.buildLineageDotfile(outputs);
Map<PCollectionImpl<?>, Set<Target>> targetDeps = Maps.newTreeMap(DEPTH_COMPARATOR);
for (PCollectionImpl<?> pcollect : outputs.keySet()) {
targetDeps.put(pcollect, pcollect.getTargetDependencies());
}
Multimap<Target, JobPrototype> assignments = HashMultimap.create();
while (!targetDeps.isEmpty()) {
Set<Target> allTargets = Sets.newHashSet();
for (PCollectionImpl<?> pcollect : targetDeps.keySet()) {
allTargets.addAll(outputs.get(pcollect));
}
GraphBuilder graphBuilder = new GraphBuilder();
// Walk the current plan tree and build a graph in which the vertices are
// sources, targets, and GBK operations.
Set<PCollectionImpl<?>> currentStage = Sets.newHashSet();
for (PCollectionImpl<?> output : targetDeps.keySet()) {
Set<Target> deps = Sets.intersection(allTargets, targetDeps.get(output));
if (deps.isEmpty()) {
graphBuilder.visitOutput(output);
currentStage.add(output);
}
}
Graph baseGraph = graphBuilder.getGraph();
boolean hasInputs = false;
for (Vertex v : baseGraph) {
if (v.isInput()) {
hasInputs = true;
break;
}
}
if (!hasInputs) {
LOG.warn("No input sources for pipeline, nothing to do...");
return new MRExecutor(conf, jarClass, outputs, toMaterialize, appendedTargets, pipelineCallables);
}
// Create a new graph that splits up dependent GBK nodes.
Graph graph = prepareFinalGraph(baseGraph);
// Break the graph up into connected components.
List<List<Vertex>> components = graph.connectedComponents();
// Generate the debug graph dotfiles (if configuration is enabled)
dotfileUtil.buildBaseGraphDotfile(outputs, baseGraph);
dotfileUtil.buildSplitGraphDotfile(outputs, graph);
dotfileUtil.buildSplitGraphWithComponentsDotfile(outputs, graph, components);
// For each component, we will create one or more job prototypes,
// depending on its profile.
// For dependency handling, we only need to care about which
// job prototype a particular GBK is assigned to.
Multimap<Vertex, JobPrototype> newAssignments = HashMultimap.create();
for (List<Vertex> component : components) {
newAssignments.putAll(constructJobPrototypes(component));
}
// Add in the job dependency information here.
for (Map.Entry<Vertex, JobPrototype> e : newAssignments.entries()) {
JobPrototype current = e.getValue();
for (Vertex parent : graph.getParents(e.getKey())) {
for (JobPrototype parentJobProto : newAssignments.get(parent)) {
current.addDependency(parentJobProto);
}
}
}
ImmutableMultimap<Target, JobPrototype> previousStages = ImmutableMultimap.copyOf(assignments);
for (Map.Entry<Vertex, JobPrototype> e : newAssignments.entries()) {
if (e.getKey().isOutput()) {
PCollectionImpl<?> pcollect = e.getKey().getPCollection();
JobPrototype current = e.getValue();
// Add in implicit dependencies via SourceTargets that are read into memory
for (Target pt : pcollect.getTargetDependencies()) {
for (JobPrototype parentJobProto : assignments.get(pt)) {
current.addDependency(parentJobProto);
}
}
// Add this to the set of output assignments
for (Target t : outputs.get(pcollect)) {
assignments.put(t, e.getValue());
}
} else {
Source source = e.getKey().getSource();
if (source != null && source instanceof Target) {
JobPrototype current = e.getValue();
Collection<JobPrototype> parentJobPrototypes = previousStages.get((Target) source);
if (parentJobPrototypes != null) {
for (JobPrototype parentJobProto : parentJobPrototypes) {
current.addDependency(parentJobProto);
}
}
}
}
}
// Remove completed outputs and mark materialized output locations
// for subsequent job processing.
for (PCollectionImpl<?> output : currentStage) {
if (toMaterialize.containsKey(output)) {
MaterializableIterable mi = toMaterialize.get(output);
if (mi.isSourceTarget()) {
output.materializeAt((SourceTarget) mi.getSource());
}
}
targetDeps.remove(output);
}
}
// Finally, construct the jobs from the prototypes and return.
MRExecutor exec = new MRExecutor(conf, jarClass, outputs, toMaterialize, appendedTargets, pipelineCallables);
// Generate the debug Plan dotfiles
dotfileUtil.buildPlanDotfile(exec, assignments, pipeline, lastJobID);
for (JobPrototype proto : Sets.newHashSet(assignments.values())) {
exec.addJob(proto.getCrunchJob(jarClass, conf, pipeline, lastJobID));
}
// Generate the debug RTNode dotfiles (if configuration is enabled)
dotfileUtil.buildRTNodesDotfile(exec);
// Attach the dotfiles to the MRExecutor context
dotfileUtil.addDotfilesToContext(exec);
return exec;
}
private Graph prepareFinalGraph(Graph baseGraph) {
Graph graph = new Graph();
for (Vertex baseVertex : baseGraph) {
// Add all of the vertices in the base graph, but no edges (yet).
graph.addVertex(baseVertex.getPCollection(), baseVertex.isOutput());
}
for (Edge e : baseGraph.getAllEdges()) {
// Add back all of the edges where neither vertex is a GBK.
if (!e.getHead().isGBK() && !e.getTail().isGBK()) {
Vertex head = graph.getVertexAt(e.getHead().getPCollection());
Vertex tail = graph.getVertexAt(e.getTail().getPCollection());
graph.getEdge(head, tail).addAllNodePaths(e.getNodePaths());
}
}
for (Vertex baseVertex : baseGraph) {
if (baseVertex.isGBK()) {
Vertex vertex = graph.getVertexAt(baseVertex.getPCollection());
for (Edge e : baseVertex.getIncomingEdges()) {
if (e.getHead().isOutput()) {
// Execute an edge split.
Vertex splitTail = e.getHead();
PCollectionImpl<?> split = splitTail.getPCollection();
InputCollection<?> inputNode = handleSplitTarget(split);
Vertex splitHead = graph.addVertex(inputNode, false);
// Divide up the node paths in the edge between the two GBK nodes so that
// each path segment is owned by either GBK1 -> splitTail or splitHead -> GBK2.
for (NodePath path : e.getNodePaths()) {
NodePath headPath = path.splitAt(split, splitHead.getPCollection());
graph.getEdge(vertex, splitTail).addNodePath(headPath);
graph.getEdge(splitHead, vertex).addNodePath(path);
}
// Note the dependency between the vertices in the graph.
graph.markDependency(splitHead, splitTail);
} else if (!e.getHead().isGBK()) {
Vertex newHead = graph.getVertexAt(e.getHead().getPCollection());
Map<NodePath, PCollectionImpl> splitPoints = e.getSplitPoints(true /* breakpoints only */);
if (splitPoints.isEmpty()) {
graph.getEdge(newHead, vertex).addAllNodePaths(e.getNodePaths());
} else {
for (Map.Entry<NodePath, PCollectionImpl> s : splitPoints.entrySet()) {
NodePath path = s.getKey();
PCollectionImpl split = s.getValue();
InputCollection<?> inputNode = handleSplitTarget(split);
Vertex splitTail = graph.addVertex(split, true);
Vertex splitHead = graph.addVertex(inputNode, false);
NodePath headPath = path.splitAt(split, splitHead.getPCollection());
graph.getEdge(newHead, splitTail).addNodePath(headPath);
graph.getEdge(splitHead, vertex).addNodePath(path);
// Note the dependency between the vertices in the graph.
graph.markDependency(splitHead, splitTail);
}
}
}
}
for (Edge e : baseVertex.getOutgoingEdges()) {
if (!e.getTail().isGBK()) {
Vertex newTail = graph.getVertexAt(e.getTail().getPCollection());
graph.getEdge(vertex, newTail).addAllNodePaths(e.getNodePaths());
} else {
// Execute an Edge split
Vertex newGraphTail = graph.getVertexAt(e.getTail().getPCollection());
Map<NodePath, PCollectionImpl> splitPoints = e.getSplitPoints(false /* breakpoints only */);
for (Map.Entry<NodePath, PCollectionImpl> s : splitPoints.entrySet()) {
NodePath path = s.getKey();
PCollectionImpl split = s.getValue();
InputCollection<?> inputNode = handleSplitTarget(split);
Vertex splitTail = graph.addVertex(split, true);
Vertex splitHead = graph.addVertex(inputNode, false);
NodePath headPath = path.splitAt(split, splitHead.getPCollection());
graph.getEdge(vertex, splitTail).addNodePath(headPath);
graph.getEdge(splitHead, newGraphTail).addNodePath(path);
// Note the dependency between the vertices in the graph.
graph.markDependency(splitHead, splitTail);
}
}
}
}
}
return graph;
}
private Multimap<Vertex, JobPrototype> constructJobPrototypes(List<Vertex> component) {
Multimap<Vertex, JobPrototype> assignment = HashMultimap.create();
List<Vertex> gbks = Lists.newArrayList();
for (Vertex v : component) {
if (v.isGBK()) {
gbks.add(v);
}
}
if (gbks.isEmpty()) {
HashMultimap<Target, NodePath> outputPaths = HashMultimap.create();
for (Vertex v : component) {
if (v.isInput()) {
for (Edge e : v.getOutgoingEdges()) {
for (NodePath nodePath : e.getNodePaths()) {
PCollectionImpl target = nodePath.tail();
for (Target t : outputs.get(target)) {
outputPaths.put(t, nodePath);
}
}
}
}
}
if (outputPaths.isEmpty()) {
throw new IllegalStateException("No outputs?");
}
JobPrototype prototype = JobPrototype.createMapOnlyJob(
++lastJobID, outputPaths, pipeline.createTempPath());
for (Vertex v : component) {
assignment.put(v, prototype);
}
} else {
Set<Edge> usedEdges = Sets.newHashSet();
for (Vertex g : gbks) {
Set<NodePath> inputs = Sets.newHashSet();
HashMultimap<Target, NodePath> mapSideOutputPaths = HashMultimap.create();
for (Edge e : g.getIncomingEdges()) {
inputs.addAll(e.getNodePaths());
usedEdges.add(e);
if (e.getHead().isInput()) {
for (Edge ep : e.getHead().getOutgoingEdges()) {
if (ep.getTail().isOutput() && !usedEdges.contains(ep)) { // map-side output
for (Target t : outputs.get(ep.getTail().getPCollection())) {
mapSideOutputPaths.putAll(t, ep.getNodePaths());
}
usedEdges.add(ep);
}
}
}
}
JobPrototype prototype = JobPrototype.createMapReduceJob(
++lastJobID, (PGroupedTableImpl) g.getPCollection(), inputs, pipeline.createTempPath());
prototype.addMapSideOutputs(mapSideOutputPaths);
assignment.put(g, prototype);
for (Edge e : g.getIncomingEdges()) {
assignment.put(e.getHead(), prototype);
if (e.getHead().isInput()) {
for (Edge ep : e.getHead().getOutgoingEdges()) {
if (ep.getTail().isOutput() && !assignment.containsKey(ep.getTail())) { // map-side output
assignment.put(ep.getTail(), prototype);
}
}
}
}
HashMultimap<Target, NodePath> outputPaths = HashMultimap.create();
for (Edge e : g.getOutgoingEdges()) {
Vertex output = e.getTail();
for (Target t : outputs.get(output.getPCollection())) {
outputPaths.putAll(t, e.getNodePaths());
}
assignment.put(output, prototype);
usedEdges.add(e);
}
prototype.addReducePaths(outputPaths);
}
// Check for any un-assigned vertices, which should be map-side outputs
// that we will need to run in a map-only job.
HashMultimap<Target, NodePath> outputPaths = HashMultimap.create();
Set<Vertex> orphans = Sets.newHashSet();
for (Vertex v : component) {
// Check if this vertex has multiple inputs but only a subset of
// them have already been assigned
boolean vertexHasUnassignedIncomingEdges = false;
if (v.isOutput()) {
for (Edge e : v.getIncomingEdges()) {
if (!usedEdges.contains(e)) {
vertexHasUnassignedIncomingEdges = true;
}
}
}
if (v.isOutput() && (vertexHasUnassignedIncomingEdges || !assignment.containsKey(v))) {
orphans.add(v);
for (Edge e : v.getIncomingEdges()) {
if (vertexHasUnassignedIncomingEdges && usedEdges.contains(e)) {
// We've already dealt with this incoming edge
continue;
}
orphans.add(e.getHead());
for (NodePath nodePath : e.getNodePaths()) {
PCollectionImpl target = nodePath.tail();
for (Target t : outputs.get(target)) {
outputPaths.put(t, nodePath);
}
}
}
}
}
if (!outputPaths.isEmpty()) {
JobPrototype prototype = JobPrototype.createMapOnlyJob(
++lastJobID, outputPaths, pipeline.createTempPath());
for (Vertex orphan : orphans) {
assignment.put(orphan, prototype);
}
}
}
return assignment;
}
private InputCollection<?> handleSplitTarget(PCollectionImpl<?> splitTarget) {
if (!outputs.containsKey(splitTarget)) {
outputs.put(splitTarget, Sets.<Target> newHashSet());
}
SourceTarget srcTarget = null;
Target targetToReplace = null;
for (Target t : outputs.get(splitTarget)) {
if (t instanceof SourceTarget) {
srcTarget = (SourceTarget<?>) t;
break;
} else {
srcTarget = t.asSourceTarget(splitTarget.getPType());
if (srcTarget != null) {
targetToReplace = t;
break;
}
}
}
if (targetToReplace != null) {
outputs.get(splitTarget).remove(targetToReplace);
} else if (srcTarget == null) {
srcTarget = pipeline.createIntermediateOutput(splitTarget.getPType());
}
outputs.get(splitTarget).add(srcTarget);
splitTarget.materializeAt(srcTarget);
return (InputCollection<?>) pipeline.read(srcTarget);
}
}
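// Illustrative sketch (not part of the original source): MRPipeline drives the
// planner roughly like this when a run is requested; the output and
// materialization maps are accumulated by the pipeline beforehand.
//
//   MSCRPlanner planner = new MSCRPlanner(pipeline, outputs, toMaterialize,
//       appendedTargets, pipelineCallables);
//   MRExecutor executor = planner.plan(jarClass, conf);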
| 2,674 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/DotfileWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import com.google.common.base.Joiner;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.commons.lang.StringUtils;
import org.apache.crunch.Pair;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.Target;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.mr.collect.InputCollection;
import org.apache.crunch.impl.mr.collect.PGroupedTableImpl;
/**
* Writes <a href="http://www.graphviz.org">Graphviz</a> dot files to illustrate
* the topology of Crunch pipelines.
*/
public class DotfileWriter {
// Maximum length that a node name may have in the produced dot file
static final int MAX_NODE_NAME_LENGTH = 300;
/** The types of tasks within a MapReduce job. */
enum MRTaskType { MAP, REDUCE }
private Set<JobPrototype> jobPrototypes = Sets.newHashSet();
private HashMultimap<Pair<JobPrototype, MRTaskType>, String> jobNodeDeclarations = HashMultimap.create();
private Set<String> globalNodeDeclarations = Sets.newHashSet();
private Set<String> nodePathChains = Sets.newHashSet();
/**
* Format the declaration of a node based on a PCollection.
*
* @param pcollectionImpl PCollection for which a node will be declared
* @param jobPrototype The job containing the PCollection
* @return The node declaration
*/
String formatPCollectionNodeDeclaration(PCollectionImpl<?> pcollectionImpl, JobPrototype jobPrototype) {
String shape = "box";
if (pcollectionImpl instanceof InputCollection) {
shape = "folder";
}
String size = "";
try {
DecimalFormatSymbols formatSymbols = new DecimalFormatSymbols(Locale.ENGLISH);
DecimalFormat formatter = new DecimalFormat("#,###.##", formatSymbols);
size = " " + formatter.format(pcollectionImpl.getSize()/1024.0/1024.0) + " Mb";
} catch (Exception e) {
// Just skip those that don't have a size
}
if (pcollectionImpl instanceof PGroupedTableImpl) {
int numReduceTasks = ((PGroupedTableImpl) pcollectionImpl).getNumReduceTasks();
if (numReduceTasks > 0) {
PGroupedTableImpl pGroupedTable = (PGroupedTableImpl) pcollectionImpl;
String setByUser = pGroupedTable.isNumReduceTasksSetByUser() ? "Manual" : "Automatic";
size += " (" + pGroupedTable.getNumReduceTasks() + " " + setByUser + " reducers)";
}
}
return String.format("%s [label=\"%s%s\" shape=%s];",
formatPCollection(pcollectionImpl, jobPrototype),
limitNodeNameLength(pcollectionImpl.getName()),
size,
shape);
}
/**
* Format a Target as a node declaration.
*
* @param target A Target used within a MapReduce pipeline
* @return The global node declaration for the Target
*/
String formatTargetNodeDeclaration(Target target) {
String nodeName = limitNodeNameLength(target.toString());
return String.format("\"%s\" [label=\"%s\" shape=folder];", nodeName, nodeName);
}
/**
* Format a PCollectionImpl into a format to be used for dot files.
*
* @param pcollectionImpl The PCollectionImpl to be formatted
* @param jobPrototype The job containing the PCollection
* @return The dot file formatted representation of the PCollectionImpl
*/
String formatPCollection(PCollectionImpl<?> pcollectionImpl, JobPrototype jobPrototype) {
if (pcollectionImpl instanceof InputCollection) {
InputCollection<?> inputCollection = (InputCollection<?>) pcollectionImpl;
return String.format("\"%s\"", limitNodeNameLength(inputCollection.getSource().toString()));
}
return String.format("\"%s\"",
limitNodeNameLength(
String.format("%s@%d@%d", pcollectionImpl.getName(), pcollectionImpl.hashCode(), jobPrototype.hashCode())));
}
/**
* Format a collection of node strings into dot file syntax.
*
* @param nodeCollection Collection of chained node strings
* @return The dot-formatted chain of nodes
*/
String formatNodeCollection(List<String> nodeCollection) {
return formatNodeCollection(nodeCollection, ImmutableMap.<String, String>of());
}
/**
* Limit a node name length down to {@link #MAX_NODE_NAME_LENGTH}, to ensure valid (and readable) dot files. If the
* name is already less than or equal to the maximum length, it will be returned untouched.
*
* @param nodeName node name to be limited in length
* @return the abbreviated node name if it was longer than the given maximum allowable length
*/
static String limitNodeNameLength(String nodeName) {
if (nodeName.length() <= MAX_NODE_NAME_LENGTH) {
return nodeName;
}
String hashString = Integer.toString(nodeName.hashCode());
return String.format("%s@%s",
StringUtils.abbreviate(nodeName, MAX_NODE_NAME_LENGTH - (hashString.length() + 1)), hashString);
}
/**
* Format a collection of node strings into dot file syntax.
*
* @param nodeCollection Collection of chained node strings
* @param edgeAttributes map of attribute names and values to be applied to the edge
* @return The dot-formatted chain of nodes
*/
String formatNodeCollection(List<String> nodeCollection, Map<String,String> edgeAttributes) {
String edgeAttributeString = "";
if (!edgeAttributes.isEmpty()) {
edgeAttributeString = String.format(" [%s]",
Joiner.on(' ').withKeyValueSeparator("=").join(edgeAttributes));
}
return String.format("%s%s;", Joiner.on(" -> ").join(nodeCollection), edgeAttributeString);
}
/**
* Format a NodePath in dot file syntax.
*
* @param nodePath The node path to be formatted
* @param jobPrototype The job containing the NodePath
* @return The dot file representation of the node path
*/
List<String> formatNodePath(NodePath nodePath, JobPrototype jobPrototype) {
List<String> formattedNodePaths = Lists.newArrayList();
List<PCollectionImpl<?>> pcollections = ImmutableList.copyOf(nodePath);
for (int collectionIndex = 1; collectionIndex < pcollections.size(); collectionIndex++){
String fromNode = formatPCollection(pcollections.get(collectionIndex - 1), jobPrototype);
String toNode = formatPCollection(pcollections.get(collectionIndex), jobPrototype);
formattedNodePaths.add(formatNodeCollection(ImmutableList.of(fromNode, toNode)));
}
// Add SourceTarget dependencies, if any
for (PCollectionImpl<?> pcollection : pcollections) {
Set<SourceTarget<?>> targetDeps = pcollection.getParallelDoOptions().getSourceTargets();
if (!targetDeps.isEmpty()) {
String toNode = formatPCollection(pcollection, jobPrototype);
for(Target target : targetDeps) {
globalNodeDeclarations.add(formatTargetNodeDeclaration(target));
String fromNode = String.format("\"%s\"", limitNodeNameLength(target.toString()));
formattedNodePaths.add(
formatNodeCollection(
ImmutableList.of(fromNode, toNode),
ImmutableMap.of("style", "dashed")));
}
}
}
return formattedNodePaths;
}
/**
* Add a NodePath to be formatted as a list of node declarations within a
* single job.
*
* @param jobPrototype The job containing the node path
* @param nodePath The node path to be formatted
*/
void addNodePathDeclarations(JobPrototype jobPrototype, NodePath nodePath) {
boolean groupingEncountered = false;
for (PCollectionImpl<?> pcollectionImpl : nodePath) {
if (pcollectionImpl instanceof InputCollection) {
globalNodeDeclarations.add(formatPCollectionNodeDeclaration(pcollectionImpl, jobPrototype));
} else {
if (!groupingEncountered) {
groupingEncountered = (pcollectionImpl instanceof PGroupedTableImpl);
}
MRTaskType taskType = groupingEncountered ? MRTaskType.REDUCE : MRTaskType.MAP;
jobNodeDeclarations.put(Pair.of(jobPrototype, taskType),
formatPCollectionNodeDeclaration(pcollectionImpl, jobPrototype));
}
}
}
/**
* Add the chaining of a NodePath to the graph.
*
* @param nodePath The path to be formatted as a node chain in the dot file
* @param jobPrototype The job containing the NodePath
*/
void addNodePathChain(NodePath nodePath, JobPrototype jobPrototype) {
for (String nodePathChain : formatNodePath(nodePath, jobPrototype)){
this.nodePathChains.add(nodePathChain);
}
}
/**
* Get the graph attributes for a task-specific subgraph.
*
* @param taskType The type of task in the subgraph
* @return Graph attributes
*/
String getTaskGraphAttributes(MRTaskType taskType) {
if (taskType == MRTaskType.MAP) {
return "label = Map; color = blue;";
} else {
return "label = Reduce; color = red;";
}
}
private void processNodePaths(JobPrototype jobPrototype, HashMultimap<Target, NodePath> nodePaths) {
if (nodePaths != null) {
for (Target target : nodePaths.keySet()) {
globalNodeDeclarations.add(formatTargetNodeDeclaration(target));
for (NodePath nodePath : nodePaths.get(target)) {
addNodePathDeclarations(jobPrototype, nodePath);
addNodePathChain(nodePath, jobPrototype);
nodePathChains.add(formatNodeCollection(
Lists.newArrayList(formatPCollection(nodePath.descendingIterator().next(), jobPrototype),
String.format("\"%s\"", limitNodeNameLength(target.toString())))));
}
}
}
}
/**
* Add the contents of a {@link JobPrototype} to the graph describing a
* pipeline.
*
* @param jobPrototype A JobPrototype representing a portion of a MapReduce
* pipeline
*/
public void addJobPrototype(JobPrototype jobPrototype) {
jobPrototypes.add(jobPrototype);
if (!jobPrototype.isMapOnly()) {
for (NodePath nodePath : jobPrototype.getMapNodePaths()) {
addNodePathDeclarations(jobPrototype, nodePath);
addNodePathChain(nodePath, jobPrototype);
}
processNodePaths(jobPrototype, jobPrototype.getMapSideNodePaths());
}
processNodePaths(jobPrototype, jobPrototype.getTargetsToNodePaths());
}
/**
* Build up the full dot file containing the description of a MapReduce
* pipeline.
*
* @return Graphviz dot file contents
*/
public String buildDotfile() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("digraph G {\n");
for (String globalDeclaration : globalNodeDeclarations) {
stringBuilder.append(String.format(" %s\n", globalDeclaration));
}
for (JobPrototype jobPrototype : jobPrototypes){
// Must prefix subgraph name with "cluster": Graphviz only draws a border around subgraphs whose names start with that prefix.
StringBuilder jobProtoStringBuilder = new StringBuilder();
jobProtoStringBuilder.append(String.format(" subgraph \"cluster-job%d\" {\n", jobPrototype.getJobID()));
jobProtoStringBuilder.append(String.format(" label=\"Crunch Job %d\";\n", jobPrototype.getJobID()));
for (MRTaskType taskType : MRTaskType.values()){
Pair<JobPrototype,MRTaskType> jobTaskKey = Pair.of(jobPrototype, taskType);
if (jobNodeDeclarations.containsKey(jobTaskKey)){
jobProtoStringBuilder.append(String.format(
" subgraph \"cluster-job%d-%s\" {\n", jobPrototype.getJobID(), taskType.name().toLowerCase()));
jobProtoStringBuilder.append(String.format(" %s\n", getTaskGraphAttributes(taskType)));
for (String declarationEntry : jobNodeDeclarations.get(jobTaskKey)){
jobProtoStringBuilder.append(String.format(" %s\n", declarationEntry));
}
jobProtoStringBuilder.append(" }\n");
}
}
jobProtoStringBuilder.append(" }\n");
stringBuilder.append(jobProtoStringBuilder.toString());
}
for (String nodePathChain : nodePathChains) {
stringBuilder.append(String.format(" %s\n", nodePathChain));
}
stringBuilder.append("}\n");
return stringBuilder.toString();
}
}
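// Illustrative sketch (not part of the original source): rendering job
// prototypes to Graphviz; "protos" is a hypothetical set of JobPrototypes.
//
//   DotfileWriter writer = new DotfileWriter();
//   for (JobPrototype proto : protos) {
//     writer.addJobPrototype(proto);
//   }
//   String dot = writer.buildDotfile();   // render with: dot -Tpng plan.dot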
| 2,675 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/Vertex.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang.builder.ReflectionToStringBuilder;
import org.apache.commons.lang.builder.ToStringStyle;
import org.apache.crunch.Source;
import org.apache.crunch.impl.dist.collect.BaseGroupedTable;
import org.apache.crunch.impl.dist.collect.BaseInputCollection;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
/**
 * A vertex in the planner's {@link Graph}, wrapping a single {@link PCollectionImpl}.
 */
class Vertex {
private final PCollectionImpl impl;
private boolean output;
private Set<Edge> incoming;
private Set<Edge> outgoing;
public Vertex(PCollectionImpl impl) {
this.impl = impl;
this.incoming = Sets.newHashSet();
this.outgoing = Sets.newHashSet();
}
public PCollectionImpl getPCollection() {
return impl;
}
public boolean isInput() {
return impl instanceof BaseInputCollection;
}
public boolean isGBK() {
return impl instanceof BaseGroupedTable;
}
public void setOutput() {
this.output = true;
}
public boolean isOutput() {
return output;
}
public Source getSource() {
if (isInput()) {
return ((BaseInputCollection) impl).getSource();
}
return null;
}
public void addIncoming(Edge edge) {
this.incoming.add(edge);
}
public void addOutgoing(Edge edge) {
this.outgoing.add(edge);
}
public List<Vertex> getAllNeighbors() {
List<Vertex> n = Lists.newArrayList();
for (Edge e : incoming) {
n.add(e.getHead());
}
for (Edge e : outgoing) {
n.add(e.getTail());
}
return n;
}
public Set<Edge> getIncomingEdges() {
return incoming;
}
public Set<Edge> getOutgoingEdges() {
return outgoing;
}
@Override
public boolean equals(Object obj) {
    if (!(obj instanceof Vertex)) {
return false;
}
Vertex other = (Vertex) obj;
return impl.equals(other.impl);
}
@Override
public int hashCode() {
return 17 + 37 * impl.hashCode();
}
@Override
public String toString() {
return new ReflectionToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).setExcludeFieldNames(
new String[] { "outgoing", "incoming" }).toString();
}
}
| 2,676 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/MSCROutputHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import java.util.Map;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.Target;
import org.apache.crunch.io.MapReduceTarget;
import org.apache.crunch.io.OutputHandler;
import org.apache.crunch.io.PathTarget;
import org.apache.crunch.types.PType;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import com.google.common.collect.Maps;
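/**
 * {@link OutputHandler} that wires each {@link Target} of a MapReduce job into the job as a
 * named multi-output, recording the {@link PathTarget} outputs so they can be committed to
 * their final locations once the job completes.
 */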
public class MSCROutputHandler implements OutputHandler {
private final Job job;
private final Path path;
private final boolean mapOnlyJob;
private DoNode workingNode;
private Map<Integer, PathTarget> multiPaths;
private int jobCount;
public MSCROutputHandler(Job job, Path outputPath, boolean mapOnlyJob) {
this.job = job;
this.path = outputPath;
this.mapOnlyJob = mapOnlyJob;
this.multiPaths = Maps.newHashMap();
}
public void configureNode(DoNode node, Target target) {
workingNode = node;
if (!target.accept(this, node.getPType())) {
throw new CrunchRuntimeException("Target " + target + " cannot serialize PType of class: " +
node.getPType().getClass());
}
}
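  // Registers the target as the next named multi-output ("out0", "out1", ...) and lets the
  // MapReduceTarget configure itself on the job; returns false for targets this handler
  // cannot configure.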
public boolean configure(Target target, PType<?> ptype) {
if (target instanceof MapReduceTarget) {
if (target instanceof PathTarget) {
multiPaths.put(jobCount, (PathTarget) target);
}
String name = PlanningParameters.MULTI_OUTPUT_PREFIX + jobCount;
jobCount++;
workingNode.setOutputName(name);
((MapReduceTarget) target).configureForMapReduce(job, ptype, path, name);
return true;
}
return false;
}
public boolean isMapOnlyJob() {
return mapOnlyJob;
}
public Map<Integer, PathTarget> getMultiPaths() {
return multiPaths;
}
}
| 2,677 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/JobNameBuilder.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import java.util.List;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
/**
* Visitor that traverses the {@code DoNode} instances in a job and builds a
* String that identifies the stages of the pipeline that belong to this job.
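 * <p>
 * The name produced by {@link #build()} has the form
 * {@code "<pipeline>: <stack> ID=<jobID> (<sequence>/<total>)"}; for example (the stage names
 * here are illustrative): {@code MyPipeline: parse+filter ID=1 (1/3)}.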
*/
public class JobNameBuilder {
private static final Joiner JOINER = Joiner.on("+");
private static final Joiner CHILD_JOINER = Joiner.on("/");
private static final int DEFAULT_JOB_NAME_MAX_STACK_LENGTH = 60;
private final String pipelineName;
private final int jobID;
private int jobSequence;
private final int numOfJobs;
List<String> rootStack = Lists.newArrayList();
private final int maxStackNameLength;
public JobNameBuilder(Configuration conf, String pipelineName, int jobID, int numOfJobs) {
this.pipelineName = pipelineName;
this.jobID = jobID;
this.numOfJobs = numOfJobs;
this.maxStackNameLength = conf.getInt(
PlanningParameters.JOB_NAME_MAX_STACK_LENGTH, DEFAULT_JOB_NAME_MAX_STACK_LENGTH);
}
public JobNameBuilder jobSequence(int jobSequence) {
this.jobSequence = jobSequence;
return this;
}
public void visit(DoNode node) {
visit(node, rootStack);
}
public void visit(List<DoNode> nodes) {
visit(nodes, rootStack);
}
private void visit(List<DoNode> nodes, List<String> stack) {
if (nodes.size() == 1) {
visit(nodes.get(0), stack);
} else {
List<String> childStack = Lists.newArrayList();
for (int i = 0; i < nodes.size(); i++) {
DoNode node = nodes.get(i);
List<String> subStack = Lists.newArrayList();
visit(node, subStack);
if (!subStack.isEmpty()) {
childStack.add("[" + JOINER.join(subStack) + "]");
}
}
if (!childStack.isEmpty()) {
stack.add("[" + CHILD_JOINER.join(childStack) + "]");
}
}
}
private void visit(DoNode node, List<String> stack) {
String name = node.getName();
if (!name.isEmpty()) {
stack.add(node.getName());
}
visit(node.getChildren(), stack);
}
public String build() {
return String.format("%s: %s ID=%d (%d/%d)",
pipelineName,
shortenRootStackName(JOINER.join(rootStack), maxStackNameLength),
jobID,
jobSequence,
numOfJobs);
}
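  // Truncates the joined stack name to at most len characters, replacing the overflow with
  // "..."; a length budget of 3 or less leaves only the ellipsis.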
private static String shortenRootStackName(String s, int len) {
int n = s.length();
if (len <= 3) {
return "...";
}
if (n <= len) {
return s;
}
return s.substring(0, len - 3) + "...";
}
}
| 2,678 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/GraphBuilder.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import org.apache.crunch.impl.dist.collect.BaseDoCollection;
import org.apache.crunch.impl.dist.collect.BaseDoTable;
import org.apache.crunch.impl.dist.collect.BaseGroupedTable;
import org.apache.crunch.impl.dist.collect.BaseInputCollection;
import org.apache.crunch.impl.dist.collect.BaseUnionCollection;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
/**
 * A {@link PCollectionImpl.Visitor} that walks a pipeline from its outputs back to its inputs
 * and builds the planner's {@link Graph} of vertices and edges.
 */
class GraphBuilder implements PCollectionImpl.Visitor {
private Graph graph = new Graph();
private Vertex workingVertex;
private NodePath workingPath;
public Graph getGraph() {
return graph;
}
public void visitOutput(PCollectionImpl<?> output) {
workingVertex = graph.addVertex(output, true);
workingPath = new NodePath();
output.accept(this);
}
@Override
public void visitInputCollection(BaseInputCollection<?> collection) {
Vertex v = graph.addVertex(collection, false);
graph.getEdge(v, workingVertex).addNodePath(workingPath.close(collection));
}
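  // A union has multiple parents: each parent is re-walked from the same downstream vertex,
  // with its own copy of the node path accumulated so far.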
@Override
public void visitUnionCollection(BaseUnionCollection<?> collection) {
Vertex baseVertex = workingVertex;
NodePath basePath = workingPath;
for (PCollectionImpl<?> parent : collection.getParents()) {
workingPath = new NodePath(basePath);
workingVertex = baseVertex;
processParent(parent);
}
}
@Override
public void visitDoCollection(BaseDoCollection<?> collection) {
workingPath.push(collection);
processParent(collection.getOnlyParent());
}
@Override
public void visitDoTable(BaseDoTable<?, ?> collection) {
workingPath.push(collection);
processParent(collection.getOnlyParent());
}
@Override
public void visitGroupedTable(BaseGroupedTable<?, ?> collection) {
Vertex v = graph.addVertex(collection, false);
graph.getEdge(v, workingVertex).addNodePath(workingPath.close(collection));
workingVertex = v;
workingPath = new NodePath(collection);
processParent(collection.getOnlyParent());
}
private void processParent(PCollectionImpl<?> parent) {
Vertex v = graph.getVertexAt(parent);
if (v == null) {
parent.accept(this);
} else {
graph.getEdge(v, workingVertex).addNodePath(workingPath.close(parent));
}
}
}
| 2,679 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/PlanningParameters.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import org.apache.hadoop.conf.Configuration;
/**
* Collection of Configuration keys and various constants used when planning MapReduce jobs for a
* pipeline.
*/
public final class PlanningParameters {
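  /** Prefix used to generate the named multi-outputs of a job ("out0", "out1", ...). */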
public static final String MULTI_OUTPUT_PREFIX = "out";
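  /** Configuration key under which the pipeline's temporary working directory is stored. */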
public static final String CRUNCH_WORKING_DIRECTORY = "crunch.work.dir";
/**
* Configuration key under which a <a href="http://www.graphviz.org">DOT</a> file containing the
* pipeline job graph is stored by the planner.
*
* @deprecated use {@link DotfileUtil#setPipelineDotfileOutputDir(Configuration, String)} instead
*/
@Deprecated
public static final String PIPELINE_PLAN_DOTFILE = "crunch.planner.dotfile";
public static final String DEBUG_DOTFILES_ENABLED = "crunch.internals.dotfiles";
/**
* Configuration key under which a directory URI can be stored where MapReduce pipeline job plans in
* <a href="http://www.graphviz.org">DOT</a> format are stored. The dot files are only written if this configuration
* parameter is set.
*/
public static final String PIPELINE_DOTFILE_OUTPUT_DIR = "crunch.planner.dotfile.outputdir";
public static final String JOB_NAME_MAX_STACK_LENGTH = "crunch.job.name.max.stack.length";
private PlanningParameters() {
}
}
| 2,680 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/DotfileWriterGraph.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import static java.lang.String.format;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.commons.collections.CollectionUtils;
import org.apache.crunch.Source;
import org.apache.crunch.Target;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import com.google.common.base.Joiner;
import com.google.common.collect.Maps;
@SuppressWarnings("rawtypes")
public class DotfileWriterGraph extends CommonDotfileWriter {
private Graph graph;
private Map<PCollectionImpl<?>, Set<Target>> outputTargets;
private List<List<Vertex>> components;
public DotfileWriterGraph(Graph graph, Map<PCollectionImpl<?>, Set<Target>> outputTargets,
List<List<Vertex>> components) {
super();
this.graph = graph;
this.outputTargets = outputTargets;
this.components = components;
}
private String formatVertex(Vertex v) {
return formatPCollection(v.getPCollection());
}
private String formatNodePaths(Set<NodePath> nodePaths) {
ArrayList<String> path = new ArrayList<String>();
for (NodePath np : nodePaths) {
path.add(Joiner.on(", \\l").join(np) + " \\l");
}
return format("%s", Joiner.on(" | \\n").join(path));
}
private void link(Edge e) {
edgeBuilder.append(String.format("%s -> %s [label=\"%s\", labeljust=r, color=\"%s\"];\n", getPCollectionID(e.getHead()
.getPCollection()), getPCollectionID(e.getTail().getPCollection()), formatNodePaths(e.getNodePaths()), "black"));
}
private void link(Source source, Vertex v, String color) {
link(source, v.getPCollection(), color);
}
private void link(Vertex v, Target target, String color) {
link(v.getPCollection(), target, color);
}
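  // Routes each vertex's dot declaration into the builder for the connected component that
  // contains it, falling back to the top-level builder for vertices outside any component.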
private class ComponentContentBuilder {
private Map<List<Vertex>, StringBuilder> contentBuilderMap = Maps.newHashMap();
private StringBuilder topContentBuilder;
public ComponentContentBuilder(StringBuilder contentBuilder, List<List<Vertex>> components) {
this.topContentBuilder = contentBuilder;
if (!CollectionUtils.isEmpty(components)) {
for (List<Vertex> vl : components) {
contentBuilderMap.put(vl, new StringBuilder());
}
}
}
private StringBuilder getContentBuilder(Vertex v) {
for (Entry<List<Vertex>, StringBuilder> entry : contentBuilderMap.entrySet()) {
if (entry.getKey().contains(v)) {
return entry.getValue();
}
}
return topContentBuilder;
}
public void append(Vertex v) {
this.getContentBuilder(v).append(formatVertex(v));
}
public StringBuilder build() {
int index = 0;
for (Entry<List<Vertex>, StringBuilder> entry : contentBuilderMap.entrySet()) {
topContentBuilder.append("subgraph \"cluster-component" + index + "\" {\n");
topContentBuilder.append(format(
" label=\"Component%s\"; fontsize=14; graph[style=dotted]; fontcolor=red color=red; \n", index));
topContentBuilder.append(entry.getValue());
topContentBuilder.append("}\n");
index++;
}
return topContentBuilder;
}
}
@Override
protected void doGetLegend(StringBuilder lsb) {
lsb.append(" \"Folder\" [label=\"Folder Name\", fontsize=10, shape=folder, color=darkGreen]\n")
.append(" \"Vertex1\" [label=\"{Vertex Name | Vertex PCollection | PType }\", fontsize=10, shape=record]\n")
.append(" subgraph \"cluster-component-legend\" {\n")
.append(" label=\"Component1\" fontsize=14 graph[style=dotted] fontcolor=red color=red\n")
.append(
" \"Vertex2\" [label=\"{Vertex Name | Vertex PCollection | PType }\", fontsize=10, shape=record]\n")
.append(" }\n").append(" \"Vertex1\" -> \"Vertex2\" [label=\"Path List\", fontsize=10];\n");
}
@Override
public void doBuildDiagram() {
ComponentContentBuilder componentContentBuilder = new ComponentContentBuilder(contentBuilder, components);
for (Vertex v : graph) {
componentContentBuilder.append(v);
Source source = v.getSource();
if (source != null) {
formatSource(source, DEFAULT_FOLDER_COLOR);
link(source, v, DEFAULT_FOLDER_COLOR);
}
if (v.isOutput() && outputTargets != null) {
for (Target target2 : outputTargets.get(v.getPCollection())) {
formatTarget(target2, DEFAULT_FOLDER_COLOR);
link(v, target2, DEFAULT_FOLDER_COLOR);
}
}
}
contentBuilder = componentContentBuilder.build();
for (Edge e : graph.getAllEdges()) {
link(e);
}
}
}
| 2,681 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/DotfileWriterRTNodes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import static java.lang.String.format;
import static org.apache.commons.collections.CollectionUtils.isEmpty;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.commons.lang.StringUtils;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.impl.mr.MRJob;
import org.apache.crunch.impl.mr.run.NodeContext;
import org.apache.crunch.impl.mr.run.RTNode;
import org.apache.crunch.io.CrunchInputs;
import org.apache.crunch.io.CrunchOutputs;
import org.apache.crunch.io.CrunchOutputs.OutputConfig;
import org.apache.crunch.io.FormatBundle;
import org.apache.crunch.types.Converter;
import org.apache.crunch.util.DistCache;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
/**
* Writes <a href="http://www.graphviz.org">Graphviz</a> dot files to illustrate the topology of Crunch pipelines.
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public class DotfileWriterRTNodes extends CommonDotfileWriter {
private static final String GREEN = "green";
private static final String RED = "red";
private static final String CYAN = "cyan";
private static final String BLUE = "blue";
private static final String BLACK = "black";
private List<MRJob> mrJobs;
public DotfileWriterRTNodes(List<MRJob> mrJobs) {
super();
this.mrJobs = mrJobs;
}
private String getId(RTNode rtNode) {
return format("\"%s@%d\"", rtNode.getNodeName(), rtNode.hashCode());
}
private String getOutputNameId(String outputName, MRJob mrJob) {
return format("\"%s@%s\"", outputName, mrJob.getJobID());
}
private String getId(FormatBundle bundle, MRJob mrJob) {
String name = (bundle == null) ? "-" : bundle.getName();
return format("\"%s@%s\"", name, mrJob.getJobID());
}
private String formatConvertor(Converter converter) {
    StringBuilder sb = new StringBuilder();
sb.append(className(converter));
if (converter != null) {
if (!converter.applyPTypeTransforms()) {
sb.append(" (applyPTypeTransforms = ").append(converter.applyPTypeTransforms()).append(")");
}
sb.append("[").append(converter.getKeyClass().getSimpleName()).append(", ")
.append(converter.getValueClass().getSimpleName()).append("]");
}
return sb.toString();
}
private String formatRTNode(RTNode rtNode) {
return format("%s [label=\"{{%s | %s} | %s | %s | { %s | %s } }\" shape=record; color = black;];\n", getId(rtNode),
label(rtNode.getNodeName()), label(rtNode.getOutputName()), className(rtNode.getDoFn()),
formatPType(rtNode.getPType()), formatConvertor(rtNode.getInputConverter()),
formatConvertor(rtNode.getOutputConverter()));
}
private void formatRTNodeTree(RTNode parentRTNode) {
contentBuilder.append(formatRTNode(parentRTNode));
if (!isEmpty(parentRTNode.getChildren())) {
for (RTNode child : parentRTNode.getChildren()) {
// process child nodes
formatRTNodeTree(child);
// link parent to child node
link(getId(parentRTNode), getId(child), BLACK);
}
}
}
private List<RTNode> formatMRJobTask(Configuration configuration, int jobId, NodeContext nodeContext, String color) {
List<RTNode> rtNodes = getRTNodes(configuration, nodeContext);
    if (rtNodes == null) {
      return null;
    }
contentBuilder.append("subgraph \"cluster-job" + jobId + "_" + nodeContext + "\" {\n");
contentBuilder.append(" label=\"" + nodeContext + "\"; color=" + color + "; fontsize=14;\n");
for (RTNode rtn : rtNodes) {
formatRTNodeTree(rtn);
}
contentBuilder.append("}\n");
return rtNodes;
}
private void formatJobOutputs(Map<String, OutputConfig> namedOutputs, MRJob mrJob) {
contentBuilder.append("subgraph \"cluster-output_" + mrJob.getJobID() + "\" {\n");
contentBuilder.append(" label=\"OUTPUTS\"; fontsize=14; color= magenta;\n");
for (Entry<String, OutputConfig> entry : namedOutputs.entrySet()) {
String output = format("%s [label=\"{%s | %s | { %s | %s } }\" shape=record; color = %s];\n",
getOutputNameId(entry.getKey(), mrJob), entry.getKey(), entry.getValue().bundle.getName(),
entry.getValue().keyClass.getSimpleName(), entry.getValue().valueClass.getSimpleName(), BLACK);
contentBuilder.append(output);
}
contentBuilder.append("}\n");
}
private void formatJobInputs(Map<FormatBundle, Map<Integer, List<Path>>> inputFormatNodeMap, MRJob mrJob, String color) {
contentBuilder.append("subgraph \"cluster-inputs_" + mrJob.getJobID() + "\" {\n");
contentBuilder.append(" label=\"INPUTS\"; fontsize=14; color= " + color + ";\n");
for (Entry<FormatBundle, Map<Integer, List<Path>>> entry : inputFormatNodeMap.entrySet()) {
FormatBundle bundle = entry.getKey();
ArrayList<String> inList = new ArrayList<String>();
for (Entry<Integer, List<Path>> value : entry.getValue().entrySet()) {
inList.add(format("{ %s | %s}", value.getKey(), value.getValue()));
}
contentBuilder.append(format("%s [label=\"{ %s | %s}\" shape=record; color = %s];\n", getId(bundle, mrJob),
bundle.getName(), Joiner.on("|").join(inList), BLACK));
}
contentBuilder.append("}\n");
}
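  // Finds the FormatBundle feeding the map node at the given index. An index of -1 marks a
  // job whose single input was configured without an explicit node index (see JobPrototype),
  // so it resolves to node 0.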
private FormatBundle findFormatBundleByNodeIndex(Map<FormatBundle, Map<Integer, List<Path>>> inputFormatNodeMap,
int nodeIndex) {
for (Entry<FormatBundle, Map<Integer, List<Path>>> entry : inputFormatNodeMap.entrySet()) {
if (entry.getValue().containsKey(nodeIndex)) {
return entry.getKey();
}
if (nodeIndex == 0 && entry.getValue().containsKey(-1)) {
return entry.getKey();
}
}
return null;
}
private List<RTNode> leafs(List<RTNode> rtNodes) {
ArrayList<RTNode> tails = Lists.newArrayListWithExpectedSize(rtNodes.size());
for (RTNode node : rtNodes) {
tails.addAll(leafs(node));
}
return tails;
}
private List<RTNode> leafs(RTNode rtNode) {
List<RTNode> leafs = Lists.newArrayList();
if (rtNode.isLeafNode()) {
leafs.add(rtNode);
} else {
for (RTNode child : rtNode.getChildren()) {
leafs.addAll(leafs(child));
}
}
return leafs;
}
private static List<RTNode> getRTNodes(Configuration conf, NodeContext nodeContext) {
Path path = new Path(new Path(conf.get(PlanningParameters.CRUNCH_WORKING_DIRECTORY)), nodeContext.toString());
try {
return (List<RTNode>) DistCache.read(conf, path);
} catch (IOException e) {
throw new CrunchRuntimeException("Could not read runtime node information", e);
}
}
@Override
protected void doBuildDiagram() {
for (MRJob mrJob : mrJobs) {
      // TODO: find a way to handle job dependencies, e.g. mrJob.getDependentJobs()
Configuration configuration = mrJob.getJob().getConfiguration();
contentBuilder.append("subgraph \"cluster-job" + mrJob.getJobID() + "\" {\n");
contentBuilder.append(" label=\"Crunch Job " + mrJob.getJobID() + "\" ;\n");
List<RTNode> mapRTNodes = formatMRJobTask(configuration, mrJob.getJobID(), NodeContext.MAP, BLUE);
List<RTNode> combineRTNodes = formatMRJobTask(configuration, mrJob.getJobID(), NodeContext.COMBINE, CYAN);
List<RTNode> reduceRTNodes = formatMRJobTask(configuration, mrJob.getJobID(), NodeContext.REDUCE, RED);
// Deserialize Job's inputs from the CRUNCH_INPUTS Configuration property.
Map<FormatBundle, Map<Integer, List<Path>>> inputFormatNodeMap = CrunchInputs.getFormatNodeMap(mrJob.getJob());
formatJobInputs(inputFormatNodeMap, mrJob, GREEN);
// Link inputs to map RTNode tasks
for (int mapNodeIndex = 0; mapNodeIndex < mapRTNodes.size(); mapNodeIndex++) {
FormatBundle formatBundle = findFormatBundleByNodeIndex(inputFormatNodeMap, mapNodeIndex);
RTNode rtNode = mapRTNodes.get(mapNodeIndex);
link(getId(formatBundle, mrJob), getId(rtNode), BLACK);
}
// Deserialize Job's Outputs from the CRUNCH_OUTPUTS Configuration property.
Map<String, OutputConfig> namedOutputs = CrunchOutputs.getNamedOutputs(configuration);
formatJobOutputs(namedOutputs, mrJob);
List<RTNode> mapLeafs = leafs(mapRTNodes);
for (RTNode leafNode : mapLeafs) {
String outputName = leafNode.getOutputName();
if (StringUtils.isEmpty(outputName)) {
if (!isEmpty(combineRTNodes)) {
// If there is a combiner connect the map to the combiner and then the combiner to the reducer
link(getId(leafNode), getId(combineRTNodes.get(0)), BLACK);
link(getId(leafs(combineRTNodes).get(0)), getId(reduceRTNodes.get(0)), BLACK);
} else {
            // No combiner, so connect the map output directly to the reducer
link(getId(leafNode), getId(reduceRTNodes.get(0)), BLACK);
}
} else {
link(getId(leafNode), getOutputNameId(outputName, mrJob), BLACK);
}
}
if (!isEmpty(reduceRTNodes)) {
List<RTNode> reduceTails = leafs(reduceRTNodes);
for (RTNode tailNode : reduceTails) {
String outputName = tailNode.getOutputName();
if (StringUtils.isEmpty(outputName)) {
throw new RuntimeException("Recue output RTNode with no named output! :" + tailNode);
} else {
link(getId(tailNode), getOutputNameId(outputName, mrJob), BLACK);
}
}
}
contentBuilder.append("}\n");
}
}
@Override
protected void doGetLegend(StringBuilder lsb) {
lsb.append(
"\"RTNodes\" [label=\"{{RTNode Name | Output Name } | DoFn | PType | { Input Converter | Output Converter}}\"; shape=record;]\n")
.append("\"Inputs\" [label=\"{InputFormat Name | {Node Index | Path List}}\"; shape=record; color = green]\n")
.append(
"\"Outputs\" [label=\"{Output Name | OutputFormat Name |{Key Class | Value Class}}\"; shape=record; color = magenta]\n")
.append("\"Inputs\" -> \"RTNodes\" [style=invis];\n").append("\"RTNodes\" -> \"Outputs\" [style=invis];\n");
}
}
| 2,682 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/JobPrototype.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.google.common.annotations.VisibleForTesting;
import org.apache.crunch.Target;
import org.apache.crunch.hadoop.mapreduce.lib.jobcontrol.CrunchControlledJob;
import org.apache.crunch.impl.dist.DistributedPipeline;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.impl.mr.collect.DoTable;
import org.apache.crunch.impl.dist.collect.MRCollection;
import org.apache.crunch.impl.mr.collect.PGroupedTableImpl;
import org.apache.crunch.impl.mr.exec.CrunchJobHooks;
import org.apache.crunch.impl.mr.run.CrunchCombiner;
import org.apache.crunch.impl.mr.run.CrunchInputFormat;
import org.apache.crunch.impl.mr.run.CrunchMapper;
import org.apache.crunch.impl.mr.run.CrunchOutputFormat;
import org.apache.crunch.impl.mr.run.CrunchReducer;
import org.apache.crunch.impl.mr.run.NodeContext;
import org.apache.crunch.impl.mr.run.RTNode;
import org.apache.crunch.io.impl.FileTargetImpl;
import org.apache.crunch.types.PType;
import org.apache.crunch.util.DistCache;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class JobPrototype {
private static final String DFS_REPLICATION = "dfs.replication";
private static final String DFS_REPLICATION_INITIAL = "dfs.replication.initial";
private static final String CRUNCH_TMP_DIR_REPLICATION = "crunch.tmp.dir.replication";
public static JobPrototype createMapReduceJob(int jobID, PGroupedTableImpl<?, ?> group,
Set<NodePath> inputs, Path workingPath) {
return new JobPrototype(jobID, inputs, group, workingPath);
}
public static JobPrototype createMapOnlyJob(int jobID, HashMultimap<Target, NodePath> mapNodePaths, Path workingPath) {
return new JobPrototype(jobID, mapNodePaths, workingPath);
}
  private final int jobID; // TODO: maybe "stageID" would be a better name
private final Set<NodePath> mapNodePaths;
private final PGroupedTableImpl<?, ?> group;
private final Set<JobPrototype> dependencies = Sets.newHashSet();
private final Map<PCollectionImpl<?>, DoNode> nodes = Maps.newHashMap();
private final Path workingPath;
private HashMultimap<Target, NodePath> mapSideNodePaths;
private HashMultimap<Target, NodePath> targetsToNodePaths;
private DoTable<?, ?> combineFnTable;
private CrunchControlledJob job;
private JobPrototype(int jobID, Set<NodePath> inputs, PGroupedTableImpl<?, ?> group, Path workingPath) {
this.jobID = jobID;
this.mapNodePaths = ImmutableSet.copyOf(inputs);
this.group = group;
this.workingPath = workingPath;
this.targetsToNodePaths = null;
}
@VisibleForTesting
private JobPrototype(int jobID, HashMultimap<Target, NodePath> outputPaths, Path workingPath) {
this.jobID = jobID;
this.group = null;
this.mapNodePaths = null;
this.workingPath = workingPath;
this.targetsToNodePaths = outputPaths;
}
public int getJobID() {
return jobID;
}
public boolean isMapOnly() {
return this.group == null;
}
Set<NodePath> getMapNodePaths() {
return mapNodePaths;
}
HashMultimap<Target, NodePath> getMapSideNodePaths() {
return mapSideNodePaths;
}
HashMultimap<Target, NodePath> getTargetsToNodePaths() {
return targetsToNodePaths;
}
public void addMapSideOutputs(HashMultimap<Target, NodePath> mapSideNodePaths) {
if (group == null) {
throw new IllegalStateException("Cannot side-outputs to a map-only job");
}
this.mapSideNodePaths = mapSideNodePaths;
}
public void addReducePaths(HashMultimap<Target, NodePath> outputPaths) {
if (group == null) {
throw new IllegalStateException("Cannot add a reduce phase to a map-only job");
}
this.targetsToNodePaths = outputPaths;
}
public void addDependency(JobPrototype dependency) {
this.dependencies.add(dependency);
}
public CrunchControlledJob getCrunchJob(
Class<?> jarClass, Configuration conf, MRPipeline pipeline, int numOfJobs) throws IOException {
if (job == null) {
job = build(jarClass, conf, pipeline, numOfJobs);
for (JobPrototype proto : dependencies) {
job.addDependingJob(proto.getCrunchJob(jarClass, conf, pipeline, numOfJobs));
}
}
return job;
}
private static final Logger LOG = LoggerFactory.getLogger(JobPrototype.class);
private CrunchControlledJob build(
Class<?> jarClass, Configuration conf, MRPipeline pipeline, int numOfJobs) throws IOException {
Job job = new Job(conf);
LOG.debug(String.format("Replication factor: %s", job.getConfiguration().get(DFS_REPLICATION)));
conf = job.getConfiguration();
conf.set(PlanningParameters.CRUNCH_WORKING_DIRECTORY, workingPath.toString());
job.setJarByClass(jarClass);
Set<DoNode> outputNodes = Sets.newHashSet();
Set<Target> allTargets = Sets.newHashSet();
Path outputPath = new Path(workingPath, "output");
MSCROutputHandler outputHandler = new MSCROutputHandler(job, outputPath, group == null);
    boolean onlyHasTemporaryOutput = true;
for (Target target : targetsToNodePaths.keySet()) {
DoNode node = null;
LOG.debug("Target path: " + target);
for (NodePath nodePath : targetsToNodePaths.get(target)) {
if (node == null) {
PType<?> ptype = nodePath.tail().getPType();
node = DoNode.createOutputNode(target.toString(), target.getConverter(ptype), ptype);
outputHandler.configureNode(node, target);
onlyHasTemporaryOutput &= DistributedPipeline.isTempDir(job, target.toString());
}
outputNodes.add(walkPath(nodePath.descendingIterator(), node));
}
allTargets.add(target);
}
setJobReplication(job.getConfiguration(), onlyHasTemporaryOutput);
Set<DoNode> mapSideNodes = Sets.newHashSet();
if (mapSideNodePaths != null) {
for (Target target : mapSideNodePaths.keySet()) {
DoNode node = null;
for (NodePath nodePath : mapSideNodePaths.get(target)) {
if (node == null) {
PType<?> ptype = nodePath.tail().getPType();
node = DoNode.createOutputNode(target.toString(), target.getConverter(ptype), ptype);
outputHandler.configureNode(node, target);
}
mapSideNodes.add(walkPath(nodePath.descendingIterator(), node));
}
allTargets.add(target);
}
}
job.setMapperClass(CrunchMapper.class);
List<DoNode> inputNodes;
DoNode reduceNode = null;
if (group != null) {
job.setReducerClass(CrunchReducer.class);
List<DoNode> reduceNodes = Lists.newArrayList(outputNodes);
serialize(reduceNodes, conf, workingPath, NodeContext.REDUCE);
reduceNode = reduceNodes.get(0);
if (combineFnTable != null) {
job.setCombinerClass(CrunchCombiner.class);
DoNode combinerInputNode = group.createDoNode();
DoNode combineNode = combineFnTable.createCombineNode();
combineNode.addChild(group.getGroupingNode());
combinerInputNode.addChild(combineNode);
serialize(ImmutableList.of(combinerInputNode), conf, workingPath, NodeContext.COMBINE);
}
group.configureShuffle(job);
DoNode mapOutputNode = group.getGroupingNode();
Set<DoNode> mapNodes = Sets.newHashSet(mapSideNodes);
for (NodePath nodePath : mapNodePaths) {
// Advance these one step, since we've already configured
// the grouping node, and the BaseGroupedTable is the tail
// of the NodePath.
Iterator<PCollectionImpl<?>> iter = nodePath.descendingIterator();
iter.next();
mapNodes.add(walkPath(iter, mapOutputNode));
}
inputNodes = Lists.newArrayList(mapNodes);
} else { // No grouping
job.setNumReduceTasks(0);
inputNodes = Lists.newArrayList(outputNodes);
}
job.setOutputFormatClass(CrunchOutputFormat.class);
serialize(inputNodes, conf, workingPath, NodeContext.MAP);
if (inputNodes.size() == 1) {
DoNode inputNode = inputNodes.get(0);
inputNode.getSource().configureSource(job, -1);
} else {
for (int i = 0; i < inputNodes.size(); i++) {
inputNodes.get(i).getSource().configureSource(job, i);
}
job.setInputFormatClass(CrunchInputFormat.class);
}
JobNameBuilder jobNameBuilder = createJobNameBuilder(conf, pipeline.getName(), inputNodes, reduceNode, numOfJobs);
CrunchControlledJob.Hook prepareHook = getHook(new CrunchJobHooks.PrepareHook(), pipeline.getPrepareHooks());
CrunchControlledJob.Hook completionHook = getHook(
new CrunchJobHooks.CompletionHook(outputPath, outputHandler.getMultiPaths()),
pipeline.getCompletionHooks());
return new CrunchControlledJob(
jobID,
job,
jobNameBuilder,
allTargets,
prepareHook,
completionHook);
}
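  /**
   * Applies the user-supplied temporary-directory replication factor (crunch.tmp.dir.replication)
   * to jobs whose outputs are all temporary; for jobs with durable outputs, the initially
   * observed dfs.replication value is restored instead.
   */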
@VisibleForTesting
protected void setJobReplication(Configuration jobConfiguration, boolean onlyHasTemporaryOutput) {
String userSuppliedTmpDirReplication = jobConfiguration.get(CRUNCH_TMP_DIR_REPLICATION);
if (userSuppliedTmpDirReplication == null) {
return;
}
handleInitialReplication(jobConfiguration);
if (onlyHasTemporaryOutput) {
LOG.debug(String.format("Setting replication factor to: %s ", userSuppliedTmpDirReplication));
jobConfiguration.set(DFS_REPLICATION, userSuppliedTmpDirReplication);
    } else {
String originalReplication = jobConfiguration.get(DFS_REPLICATION_INITIAL);
LOG.debug(String.format("Using initial replication factor (%s)", originalReplication));
jobConfiguration.set(DFS_REPLICATION, originalReplication);
}
}
@VisibleForTesting
protected void handleInitialReplication(Configuration jobConfiguration) {
String origReplication = jobConfiguration.get(DFS_REPLICATION_INITIAL);
if (origReplication != null) {
LOG.debug(String.format("Initial replication has been already set (%s); nothing to do.", origReplication));
return;
}
String defaultReplication = jobConfiguration.get(DFS_REPLICATION);
if (defaultReplication != null) {
LOG.debug(String.format("Using dfs.replication (%s) set by user as initial replication.",
defaultReplication));
setInitialJobReplicationConfig(jobConfiguration, defaultReplication);
return;
}
Set<Target> targets = targetsToNodePaths.keySet();
Target t = targets.iterator().next();
if (t instanceof FileTargetImpl) {
Path path = ((FileTargetImpl) t).getPath();
defaultReplication = tryGetDefaultReplicationFromFileSystem(jobConfiguration, path, "3");
}
setInitialJobReplicationConfig(jobConfiguration, defaultReplication);
}
private String tryGetDefaultReplicationFromFileSystem(Configuration jobConf, Path path, String defaultReplication) {
String d;
try {
FileSystem fs = path.getFileSystem(jobConf);
d = fs.getConf().get(DFS_REPLICATION);
LOG.debug(
String.format("Using dfs.replication (%s) retrieved from remote filesystem as initial replication.", d));
} catch (IOException e) {
d = defaultReplication;
LOG.warn(String.format("Cannot read job's config. Setting initial replication to %s.", d));
}
return d;
}
private void setInitialJobReplicationConfig(Configuration jobConf, String defaultReplication) {
jobConf.set(DFS_REPLICATION_INITIAL, defaultReplication);
}
private static CrunchControlledJob.Hook getHook(
CrunchControlledJob.Hook base,
List<CrunchControlledJob.Hook> optional) {
if (optional.isEmpty()) {
return base;
}
List<CrunchControlledJob.Hook> hooks = Lists.newArrayList();
hooks.add(base);
hooks.addAll(optional);
return new CrunchJobHooks.CompositeHook(hooks);
}
private void serialize(List<DoNode> nodes, Configuration conf, Path workingPath, NodeContext context)
throws IOException {
List<RTNode> rtNodes = Lists.newArrayList();
for (DoNode node : nodes) {
rtNodes.add(node.toRTNode(true, conf, context));
}
Path path = new Path(workingPath, context.toString());
DistCache.write(conf, path, rtNodes);
}
private JobNameBuilder createJobNameBuilder(
Configuration conf,
String pipelineName,
List<DoNode> mapNodes,
DoNode reduceNode,
int numOfJobs) {
JobNameBuilder builder = new JobNameBuilder(conf, pipelineName, jobID, numOfJobs);
builder.visit(mapNodes);
if (reduceNode != null) {
builder.visit(reduceNode);
}
return builder;
}
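  // Walks a NodePath from the output back toward the input, creating (or reusing) a DoNode for
  // each PCollection and chaining it as the parent of the node built so far; along the way it
  // tracks the most recent DoTable with a combine function as the candidate combiner.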
private DoNode walkPath(Iterator<PCollectionImpl<?>> iter, DoNode working) {
while (iter.hasNext()) {
PCollectionImpl<?> collect = iter.next();
if (combineFnTable != null && !(collect instanceof PGroupedTableImpl)) {
combineFnTable = null;
} else if (collect instanceof DoTable && ((DoTable<?, ?>) collect).hasCombineFn()) {
combineFnTable = (DoTable<?, ?>) collect;
}
if (!nodes.containsKey(collect)) {
nodes.put(collect, ((MRCollection) collect).createDoNode());
}
DoNode parent = nodes.get(collect);
parent.addChild(working);
working = parent;
}
return working;
}
}
| 2,683 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/DotfileWriterPCollectionLineage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.collections.CollectionUtils;
import org.apache.crunch.Target;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.mr.collect.InputCollection;
/**
* Writes <a href="http://www.graphviz.org">Graphviz</a> dot files to illustrate the topology of Crunch pipelines.
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public class DotfileWriterPCollectionLineage extends CommonDotfileWriter {
private Map<PCollectionImpl<?>, Set<Target>> outputs;
public DotfileWriterPCollectionLineage(Map<PCollectionImpl<?>, Set<Target>> outputs) {
super();
this.outputs = outputs;
}
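  // Emits this PCollection's declaration, then recursively walks its parents, drawing an edge
  // for every parent/child link; input collections are additionally linked to their Source.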
private void formatPCollectionLineage(PCollectionImpl pcollection, String color) {
contentBuilder.append(formatPCollection(pcollection));
// for input pcollections add the related source and link it to the collection
if (pcollection instanceof InputCollection) {
InputCollection ic = (InputCollection) pcollection;
formatSource(ic.getSource(), DEFAULT_FOLDER_COLOR);
link(ic.getSource(), pcollection, color);
}
List<PCollectionImpl<?>> parents = pcollection.getParents();
if (!CollectionUtils.isEmpty(parents)) {
for (PCollectionImpl parentPCollection : parents) {
link(parentPCollection, pcollection, color);
formatPCollectionLineage(parentPCollection, color);
}
}
}
@Override
protected void doBuildDiagram() {
int outputIndex = 0;
for (PCollectionImpl<?> pcollection : outputs.keySet()) {
String pathColor = COLORS[outputIndex++];
formatPCollectionLineage(pcollection, pathColor);
for (Target target : outputs.get(pcollection)) {
formatTarget(target, DEFAULT_FOLDER_COLOR);
link(pcollection, target, pathColor);
}
}
}
@Override
protected void doGetLegend(StringBuilder lsb) {
lsb.append("\"Folder\" [label=\"Folder Name\" fontsize=10 shape=folder color=darkGreen]\n").append(
"\"PCollection\" [label=\"{PCollection Name | PCollection Class| PType }\" fontsize=10 shape=record]\n");
}
}
| 2,684 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/Edge.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.builder.ReflectionToStringBuilder;
import org.apache.commons.lang.builder.ToStringStyle;
import org.apache.crunch.Target;
import org.apache.crunch.impl.dist.collect.BaseGroupedTable;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
class Edge {
private final Vertex head;
private final Vertex tail;
private final Set<NodePath> paths;
Edge(Vertex head, Vertex tail) {
this.head = head;
this.tail = tail;
this.paths = Sets.newTreeSet(NODE_CMP);
}
public Vertex getHead() {
return head;
}
public Vertex getTail() {
return tail;
}
public void addNodePath(NodePath path) {
this.paths.add(path);
}
public void addAllNodePaths(Collection<NodePath> paths) {
this.paths.addAll(paths);
}
public Set<NodePath> getNodePaths() {
return paths;
}
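  // Chooses, for each NodePath along this edge, the PCollection at which the path should be
  // split. Breakpoints are preferred; for paths without one, either the smallest collection on
  // each such path or the single smallest collection shared by all of them is chosen, whichever
  // materializes less data. With breakpointsOnly set, no new splits are invented and an empty
  // map is returned if any path lacks a breakpoint.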
public Map<NodePath, PCollectionImpl> getSplitPoints(boolean breakpointsOnly) {
List<NodePath> np = Lists.newArrayList(paths);
List<PCollectionImpl<?>> smallestOverallPerPath = Lists.newArrayListWithExpectedSize(np.size());
Map<PCollectionImpl<?>, Set<Integer>> pathCounts = Maps.newTreeMap(PCOL_CMP);
Map<NodePath, PCollectionImpl> splitPoints = Maps.newHashMap();
for (int i = 0; i < np.size(); i++) {
long bestSize = Long.MAX_VALUE;
boolean breakpoint = false;
PCollectionImpl<?> best = null;
for (PCollectionImpl<?> pc : np.get(i)) {
if (!(pc instanceof BaseGroupedTable) && (!breakpointsOnly || pc.isBreakpoint())) {
if (pc.isBreakpoint()) {
if (!breakpoint || pc.getSize() < bestSize) {
best = pc;
bestSize = pc.getSize();
breakpoint = true;
}
} else if (!breakpoint && pc.getSize() < bestSize) {
best = pc;
bestSize = pc.getSize();
}
Set<Integer> cnts = pathCounts.get(pc);
if (cnts == null) {
cnts = Sets.newHashSet();
pathCounts.put(pc, cnts);
}
cnts.add(i);
}
}
smallestOverallPerPath.add(best);
if (breakpoint) {
splitPoints.put(np.get(i), best);
}
}
Set<Integer> missing = Sets.newHashSet();
for (int i = 0; i < np.size(); i++) {
if (!splitPoints.containsKey(np.get(i))) {
missing.add(i);
}
}
if (breakpointsOnly && missing.size() > 0) {
// We can't create new splits in this mode
return ImmutableMap.of();
} else if (missing.isEmpty()) {
return splitPoints;
} else {
// Need to either choose the smallest collection from each missing path,
// or the smallest single collection that is on all paths as the split target.
Set<PCollectionImpl<?>> smallest = Sets.newHashSet();
long smallestSize = 0;
for (Integer id : missing) {
PCollectionImpl<?> s = smallestOverallPerPath.get(id);
if (!smallest.contains(s)) {
smallest.add(s);
smallestSize += s.getSize();
}
}
PCollectionImpl<?> singleBest = null;
long singleSmallestSize = Long.MAX_VALUE;
for (Map.Entry<PCollectionImpl<?>, Set<Integer>> e : pathCounts.entrySet()) {
if (Sets.difference(missing, e.getValue()).isEmpty() && e.getKey().getSize() < singleSmallestSize) {
singleBest = e.getKey();
singleSmallestSize = singleBest.getSize();
}
}
if (smallestSize < singleSmallestSize) {
for (Integer id : missing) {
splitPoints.put(np.get(id), smallestOverallPerPath.get(id));
}
} else {
for (Integer id : missing) {
splitPoints.put(np.get(id), singleBest);
}
}
}
return splitPoints;
}
@Override
public boolean equals(Object other) {
if (!(other instanceof Edge)) {
return false;
}
Edge e = (Edge) other;
return head.equals(e.head) && tail.equals(e.tail) && paths.equals(e.paths);
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(head).append(tail).toHashCode();
}
@Override
public String toString() {
return ReflectionToStringBuilder.toString(this, ToStringStyle.SHORT_PREFIX_STYLE);
}
  private static final Comparator<NodePath> NODE_CMP = new Comparator<NodePath>() {
@Override
public int compare(NodePath left, NodePath right) {
if (left == right || left.equals(right)) {
return 0;
}
return left.toString().compareTo(right.toString());
}
};
  private static final Comparator<PCollectionImpl<?>> PCOL_CMP = new Comparator<PCollectionImpl<?>>() {
@Override
public int compare(PCollectionImpl<?> left, PCollectionImpl<?> right) {
if (left == right || left.equals(right)) {
return 0;
}
String leftName = left.getName();
String rightName = right.getName();
if (leftName == null || rightName == null || leftName.equals(rightName)) {
return left.hashCode() < right.hashCode() ? -1 : 1;
}
return leftName.compareTo(rightName);
}
};
}
| 2,685 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/plan/DotfileUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.plan;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.crunch.Target;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.impl.mr.exec.MRExecutor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;
/**
 * Helper class that manages the dotfile generation lifecycle and configures the dotfile debug context.
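 * <p>
 * A minimal configuration sketch (the output directory below is illustrative):
 * <pre>{@code
 * Configuration conf = pipeline.getConfiguration();
 * DotfileUtil.setPipelineDotfileOutputDir(conf, "file:///tmp/dotfiles");
 * DotfileUtil.enableDebugDotfiles(conf); // optionally dump the debug planner dotfiles too
 * }</pre>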
*/
public class DotfileUtil {
private static final Logger LOG = LoggerFactory.getLogger(DotfileUtil.class);
private final Class<?> jarClass;
private final Configuration conf;
private String rtNodesDotfile = "";
private String basePlanGraphDotfile = "";
private String splitGraphPlanDotfile = "";
private String splitGraphWithComponentsPlanDotfile = "";
private String pcollectionLineageDotfile = "";
private String planDotFile = "";
DotfileUtil(Class<?> jarClass, Configuration conf) {
this.jarClass = jarClass;
this.conf = conf;
}
/**
* Builds a lineage dotfile only if the dotfile-debug mode is enabled.
*/
void buildLineageDotfile(Map<PCollectionImpl<?>, Set<Target>> outputs) {
if (isDebugDotfilesEnabled(conf)) {
try {
pcollectionLineageDotfile = new DotfileWriterPCollectionLineage(outputs)
.buildDiagram("PCollection Lineage Plan (" + jarClass.getSimpleName() + ")");
} catch (Exception ex) {
LOG.error("Problem creating debug dotfile:", ex);
}
}
}
/**
* Builds the base graph dotfile only if the dotfile-debug mode is enabled.
*/
void buildBaseGraphDotfile(Map<PCollectionImpl<?>, Set<Target>> outputs, Graph graph) {
if (isDebugDotfilesEnabled(conf)) {
try {
basePlanGraphDotfile = new DotfileWriterGraph(graph, outputs, null)
.buildDiagram("Base Graph (" + jarClass.getSimpleName() + ")");
} catch (Exception ex) {
LOG.error("Problem creating debug dotfile:", ex);
}
}
}
/**
* Builds the split graph dotfile only if the dotfile-debug mode is enabled.
*/
void buildSplitGraphDotfile(Map<PCollectionImpl<?>, Set<Target>> outputs, Graph graph) {
if (isDebugDotfilesEnabled(conf)) {
try {
splitGraphPlanDotfile = new DotfileWriterGraph(graph, outputs, null)
.buildDiagram("Split Graph (" + jarClass.getSimpleName() + ")");
} catch (Exception ex) {
LOG.error("Problem creating debug dotfile:", ex);
}
}
}
/**
* Builds a split graph with components dotfile only if the dotfile-debug mode is enabled.
*/
void buildSplitGraphWithComponentsDotfile(
Map<PCollectionImpl<?>, Set<Target>> outputs, Graph graph, List<List<Vertex>> components
) {
if (isDebugDotfilesEnabled(conf)) {
try {
splitGraphWithComponentsPlanDotfile = new DotfileWriterGraph(graph, outputs, components)
.buildDiagram("Split Graph With Components (" + jarClass.getSimpleName() + ")");
} catch (Exception ex) {
LOG.error("Problem creating debug dotfile:", ex);
}
}
}
/**
* Builds a RT node dotfile only if the dotfile-debug mode is enabled.
*/
void buildRTNodesDotfile(MRExecutor exec) {
if (isDebugDotfilesEnabled(conf)) {
try {
rtNodesDotfile = new DotfileWriterRTNodes(exec.getJobs()).buildDiagram("Run Time Plan ("
+ jarClass.getSimpleName() + ")");
} catch (Exception ex) {
LOG.error("Problem creating debug dotfile:", ex);
}
}
}
/**
   * Builds the plan dotfile regardless of the dotfile-debug mode.
   */
void buildPlanDotfile(MRExecutor exec, Multimap<Target, JobPrototype> assignments, MRPipeline pipeline, int lastJobID) {
try {
DotfileWriter dotfileWriter = new DotfileWriter();
for (JobPrototype proto : Sets.newHashSet(assignments.values())) {
dotfileWriter.addJobPrototype(proto);
}
planDotFile = dotfileWriter.buildDotfile();
} catch (Exception ex) {
LOG.error("Problem creating debug dotfile:", ex);
}
}
/**
   * Attaches the generated dotfiles to the {@link MRExecutor} context. Note that the planDotFile is always added.
*/
void addDotfilesToContext(MRExecutor exec) {
try {
      // The job plan is always enabled and set in the Configuration.
conf.set(PlanningParameters.PIPELINE_PLAN_DOTFILE, planDotFile);
exec.addNamedDotFile("jobplan", planDotFile);
// Debug dotfiles are only stored if the configuration is set to enabled
if (isDebugDotfilesEnabled(conf)) {
exec.addNamedDotFile("rt_plan", rtNodesDotfile);
exec.addNamedDotFile("base_graph_plan", basePlanGraphDotfile);
exec.addNamedDotFile("split_graph_plan", splitGraphPlanDotfile);
exec.addNamedDotFile("split_graph_with_components_plan", splitGraphWithComponentsPlanDotfile);
exec.addNamedDotFile("lineage_plan", pcollectionLineageDotfile);
}
} catch (Exception ex) {
LOG.error("Problem creating debug dotfile:", ex);
}
}
/**
* Determine if the creation of debugging dotfiles (which explain various stages in the job planning process)
* is enabled.
* <p/>
* In order for this to be <tt>true</tt>, {@link #setPipelineDotfileOutputDir(Configuration, String)} needs to also
* have been called with the same configuration object.
* <p/>
* Note that regardless of whether or not debugging dotfile creation is enabled, the high-level job plan will always
* be dumped if {@link #setPipelineDotfileOutputDir(Configuration, String)} has been called.
*
* @param conf pipeline configuration
* @return <tt>true</tt> if the creation of debugging dotfiles is enabled, otherwise <tt>false</tt>
*/
public static boolean isDebugDotfilesEnabled(Configuration conf) {
return conf.getBoolean(PlanningParameters.DEBUG_DOTFILES_ENABLED, false)
&& conf.get(PlanningParameters.PIPELINE_DOTFILE_OUTPUT_DIR) != null;
}
/**
* Enable the creation of debugging dotfiles (which explain various stages in the job planning process).
*
* @param conf pipeline configuration
*/
public static void enableDebugDotfiles(Configuration conf) {
conf.setBoolean(PlanningParameters.DEBUG_DOTFILES_ENABLED, true);
}
/**
* Disable the creation of debugging dotfiles.
*
* @param conf pipeline configuration
*/
public static void disableDebugDotfiles(Configuration conf) {
conf.setBoolean(PlanningParameters.DEBUG_DOTFILES_ENABLED, false);
}
/**
* Set an output directory where job plan dotfiles will be written.
* <p/>
* If a directory has been set, a dotfile containing the job plan will be dumped to the given directory.
*
* @param conf the pipeline configuration
* @param outputPath the path to which dotfiles are to be written, in the form of a URI
*/
public static void setPipelineDotfileOutputDir(Configuration conf, String outputPath) {
conf.set(PlanningParameters.PIPELINE_DOTFILE_OUTPUT_DIR, outputPath);
}
/**
* Retrieve the path where job plan dotfiles are to be written.
*
* @param conf pipeline configuration
* @return the path where job plan dotfiles are to be written, or <tt>null</tt> if the output path hasn't been set
*/
public static String getPipelineDotfileOutputDir(Configuration conf) {
return conf.get(PlanningParameters.PIPELINE_DOTFILE_OUTPUT_DIR);
}
}
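// A minimal usage sketch for the dotfile configuration helpers above. The
// enclosing class name is not visible in this excerpt, so "DotfileUtil" below
// is an assumption, as are the Configuration instance and the output path:
//
//   Configuration conf = new Configuration();
//   DotfileUtil.setPipelineDotfileOutputDir(conf, "file:///tmp/crunch-dotfiles");
//   DotfileUtil.enableDebugDotfiles(conf);
//   // The job plan dotfile is always dumped once an output dir is set; the
//   // extra debug dotfiles are only written when both calls above were made.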
| 2,686 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/exec/CappedExponentialCounter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.exec;
/**
 * Generates an exponentially increasing series of values, capped at a fixed limit.
 *
 * It is used for computing retry/poll intervals. It is NOT thread-safe.
*/
public class CappedExponentialCounter {
private long current;
private final long limit;
public CappedExponentialCounter(long start, long limit) {
this.current = start;
this.limit = limit;
}
public long get() {
long result = current;
current = Math.min(current * 2, limit);
return result;
}
}
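// A short illustrative sketch (the start and cap values are assumed): starting
// at 500 with a cap of 10_000, successive get() calls return 500, 1000, 2000,
// 4000, 8000, 10000, 10000, ... -- the doubling stops once the cap is reached.
//
//   CappedExponentialCounter backoff = new CappedExponentialCounter(500L, 10_000L);
//   long firstWait = backoff.get();   // 500
//   long secondWait = backoff.get();  // 1000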
| 2,687 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/exec/CrunchJobHooks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.exec;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.crunch.hadoop.mapreduce.lib.jobcontrol.CrunchControlledJob;
import org.apache.crunch.impl.mr.MRJob;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.io.PathTarget;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
public final class CrunchJobHooks {
private CrunchJobHooks() {}
public static final class CompositeHook implements CrunchControlledJob.Hook {
private List<CrunchControlledJob.Hook> hooks;
public CompositeHook(List<CrunchControlledJob.Hook> hooks) {
this.hooks = hooks;
}
@Override
public void run(MRJob job) throws IOException {
for (CrunchControlledJob.Hook hook : hooks) {
hook.run(job);
}
}
}
/** Creates missing input directories before the job is submitted. */
public static final class PrepareHook implements CrunchControlledJob.Hook {
@Override
public void run(MRJob job) throws IOException {
Configuration conf = job.getJob().getConfiguration();
if (conf.getBoolean(RuntimeParameters.CREATE_DIR, false)) {
Path[] inputPaths = FileInputFormat.getInputPaths(job.getJob());
for (Path inputPath : inputPaths) {
FileSystem fs = inputPath.getFileSystem(conf);
if (!fs.exists(inputPath)) {
try {
fs.mkdirs(inputPath);
} catch (IOException e) {
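// Directory creation is best-effort here; a genuine problem with the
// input path will surface again when the job actually runs.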
}
}
}
}
}
}
/** Moves output files produced by the MapReduce job to their target directories. */
public static final class CompletionHook implements CrunchControlledJob.Hook {
private final Path workingPath;
private final Map<Integer, PathTarget> multiPaths;
public CompletionHook(Path workingPath, Map<Integer, PathTarget> multiPaths) {
this.workingPath = workingPath;
this.multiPaths = multiPaths;
}
@Override
public void run(MRJob job) throws IOException {
handleMultiPaths(job.getJob());
}
private synchronized void handleMultiPaths(Job job) throws IOException {
try {
if (job.isSuccessful()) {
if (!multiPaths.isEmpty()) {
for (Map.Entry<Integer, PathTarget> entry : multiPaths.entrySet()) {
entry.getValue().handleOutputs(job.getConfiguration(), workingPath, entry.getKey());
}
}
}
} catch (Exception ie) {
throw new IOException(ie);
}
}
}
}
| 2,688 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/exec/MRExecutor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.exec;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.AbstractFuture;
import org.apache.crunch.PipelineCallable;
import org.apache.crunch.PipelineResult;
import org.apache.crunch.SourceTarget;
import org.apache.crunch.Target;
import org.apache.crunch.hadoop.mapreduce.lib.jobcontrol.CrunchControlledJob;
import org.apache.crunch.hadoop.mapreduce.lib.jobcontrol.CrunchJobControl;
import org.apache.crunch.impl.dist.collect.PCollectionImpl;
import org.apache.crunch.impl.mr.MRJob;
import org.apache.crunch.impl.mr.MRPipelineExecution;
import org.apache.crunch.impl.mr.run.RuntimeParameters;
import org.apache.crunch.materialize.MaterializableIterable;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
/**
* Provides APIs for job control at runtime to clients.
*
* This class has a thread that submits jobs when they become ready, monitors
 * the states of the running jobs, and updates the states of jobs based on the
 * state changes of the jobs they depend on.
*
* It is thread-safe.
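 *
 * A minimal lifecycle sketch (the constructor arguments are elided; they are
 * supplied by the planner in real use):
 *
 * <pre>{@code
 *   MRPipelineExecution exec = mrExecutor.execute();
 *   exec.waitUntilDone();
 *   PipelineResult result = exec.getResult();
 * }</pre>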
*/
public class MRExecutor extends AbstractFuture<PipelineResult> implements MRPipelineExecution {
private static final Logger LOG = LoggerFactory.getLogger(MRExecutor.class);
private final CrunchJobControl control;
private final Map<PCollectionImpl<?>, Set<Target>> outputTargets;
private final Map<PCollectionImpl<?>, MaterializableIterable> toMaterialize;
private final Set<Target> appendedTargets;
private final CountDownLatch doneSignal = new CountDownLatch(1);
private final CountDownLatch killSignal = new CountDownLatch(1);
private final CappedExponentialCounter pollInterval;
private AtomicReference<Status> status = new AtomicReference<Status>(Status.READY);
private PipelineResult result;
private Thread monitorThread;
private boolean started;
private Map<String, String> namedDotFiles;
public MRExecutor(
Configuration conf,
Class<?> jarClass,
Map<PCollectionImpl<?>, Set<Target>> outputTargets,
Map<PCollectionImpl<?>, MaterializableIterable> toMaterialize,
Set<Target> appendedTargets,
Map<PipelineCallable<?>, Set<Target>> pipelineCallables) {
this.control = new CrunchJobControl(conf, jarClass.toString(), pipelineCallables);
this.outputTargets = outputTargets;
this.toMaterialize = toMaterialize;
this.appendedTargets = appendedTargets;
this.monitorThread = new Thread(new Runnable() {
@Override
public void run() {
monitorLoop();
}
});
this.pollInterval = getPollInterval(conf);
this.namedDotFiles = new ConcurrentHashMap<String, String>();
}
public void addJob(CrunchControlledJob job) {
this.control.addJob(job);
}
public void addNamedDotFile(String fileName, String planDotFile) {
this.namedDotFiles.put(fileName, planDotFile);
}
@Override
public String getPlanDotFile() {
return this.namedDotFiles.get("jobplan");
}
@Override
public Map<String, String> getNamedDotFiles() {
return ImmutableMap.copyOf(this.namedDotFiles);
}
public synchronized MRPipelineExecution execute() {
if (!started) {
monitorThread.start();
started = true;
}
return this;
}
/** Monitors running status. It runs on the monitor thread. */
private void monitorLoop() {
status.set(Status.RUNNING);
try {
while (killSignal.getCount() > 0 && !control.allFinished() && !control.anyFailures()) {
control.pollJobStatusAndStartNewOnes();
killSignal.await(pollInterval.get(), TimeUnit.MILLISECONDS);
}
control.killAllRunningJobs();
List<CrunchControlledJob> failures = control.getFailedJobList();
if (!failures.isEmpty()) {
System.err.println(failures.size() + " job failure(s) occurred:");
for (CrunchControlledJob job : failures) {
System.err.println(job.getJobName() + "(" + job.getJobID() + "): " + job.getMessage());
}
}
List<PipelineCallable<?>> failedCallables = control.getFailedCallables();
if (!failedCallables.isEmpty()) {
System.err.println(failedCallables.size() + " callable failure(s) occurred:");
for (PipelineCallable<?> c : failedCallables) {
System.err.println(c.getName() + ": " + c.getMessage());
}
}
boolean hasFailures = !failures.isEmpty() || !failedCallables.isEmpty();
List<PipelineResult.StageResult> stages = Lists.newArrayList();
for (CrunchControlledJob job : control.getSuccessfulJobList()) {
stages.add(new PipelineResult.StageResult(job.getJobName(), job.getMapredJobID().toString(), job.getCounters(),
job.getStartTimeMsec(), job.getJobStartTimeMsec(), job.getJobEndTimeMsec(), job.getEndTimeMsec()));
}
if (!hasFailures) {
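// On success, record where each output PCollection was materialized so
// that later reads can be served from the files the job just wrote.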
for (PCollectionImpl<?> c : outputTargets.keySet()) {
if (toMaterialize.containsKey(c)) {
MaterializableIterable iter = toMaterialize.get(c);
if (iter.isSourceTarget()) {
iter.materialize();
c.materializeAt((SourceTarget) iter.getSource());
}
} else {
boolean materialized = false;
for (Target t : outputTargets.get(c)) {
if (!materialized && !appendedTargets.contains(t)) {
if (t instanceof SourceTarget) {
c.materializeAt((SourceTarget) t);
materialized = true;
} else {
SourceTarget st = t.asSourceTarget(c.getPType());
if (st != null) {
c.materializeAt(st);
materialized = true;
}
}
}
}
}
}
}
synchronized (this) {
if (killSignal.getCount() == 0) {
status.set(Status.KILLED);
} else if (!failures.isEmpty() || !failedCallables.isEmpty()) {
status.set(Status.FAILED);
} else {
status.set(Status.SUCCEEDED);
}
result = new PipelineResult(stages, status.get());
set(result);
}
} catch (InterruptedException e) {
throw new AssertionError(e); // Nobody should interrupt us.
} catch (Exception e) {
LOG.error("Pipeline failed due to exception", e);
status.set(Status.FAILED);
setException(e);
} finally {
doneSignal.countDown();
}
}
@Override
public void waitFor(long timeout, TimeUnit timeUnit) throws InterruptedException {
doneSignal.await(timeout, timeUnit);
}
@Override
public void waitUntilDone() throws InterruptedException {
doneSignal.await();
}
@Override
public PipelineResult get() throws InterruptedException, ExecutionException {
if (getStatus() == Status.READY) {
execute();
}
return super.get();
}
@Override
public PipelineResult get(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException,
ExecutionException {
if (getStatus() == Status.READY) {
execute();
}
return super.get(timeout, unit);
}
@Override
public synchronized Status getStatus() {
return status.get();
}
@Override
public synchronized PipelineResult getResult() {
return result;
}
@Override
public void kill() throws InterruptedException {
killSignal.countDown();
}
@Override
protected void interruptTask() {
try {
kill();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
private CappedExponentialCounter getPollInterval(Configuration conf) {
long maxPollInterval = conf.getLong(RuntimeParameters.MAX_POLL_INTERVAL, -1);
if (maxPollInterval <= 0) {
if (isLocalMode()) {
maxPollInterval = 1_000;
} else {
maxPollInterval = 10_000;
}
}
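// Start polling at 1/20th of the cap and double on each poll, e.g. with a
// 10s cap: 500ms, 1s, 2s, 4s, 8s, 10s, 10s, ...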
long minPollInterval = Math.max(maxPollInterval / 20, 1);
return new CappedExponentialCounter(minPollInterval, maxPollInterval);
}
private static boolean isLocalMode() {
Configuration conf = new Configuration();
String frameworkName = conf.get("mapreduce.framework.name", "");
if (frameworkName.isEmpty()) {
// Fallback to older jobtracker-based checks
frameworkName = conf.get("mapreduce.jobtracker.address",
conf.get("mapred.job.tracker", "local"));
}
return "local".equals(frameworkName);
}
@Override
public List<MRJob> getJobs() {
return Lists.transform(control.getAllJobs(), new Function<CrunchControlledJob, MRJob>() {
@Override
public MRJob apply(CrunchControlledJob job) {
return job;
}
});
}
}
| 2,689 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/emit/OutputEmitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.emit;
import java.io.IOException;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.Emitter;
import org.apache.crunch.types.Converter;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
public class OutputEmitter<T, K, V> implements Emitter<T> {
private final Converter<K, V, Object, Object> converter;
private final TaskInputOutputContext<?, ?, K, V> context;
public OutputEmitter(Converter<K, V, Object, Object> converter, TaskInputOutputContext<?, ?, K, V> context) {
this.converter = converter;
this.context = context;
}
public void emit(T emitted) {
try {
K key = converter.outputKey(emitted);
V value = converter.outputValue(emitted);
this.context.write(key, value);
} catch (IOException e) {
throw new CrunchRuntimeException(e);
} catch (InterruptedException e) {
throw new CrunchRuntimeException(e);
}
}
public void flush() {
// No-op
}
}
| 2,690 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/emit/IntermediateEmitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.emit;
import java.util.List;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.impl.mr.run.RTNode;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import com.google.common.collect.ImmutableList;
/**
* An {@link Emitter} implementation that links the output of one {@link DoFn} to the input of
* another {@code DoFn}.
*
*/
public class IntermediateEmitter implements Emitter<Object> {
private final List<RTNode> children;
private final PType<Object> outputPType;
private final boolean needDetachedValues;
public IntermediateEmitter(PType<Object> outputPType, List<RTNode> children, Configuration conf,
boolean disableDeepCopy) {
this.outputPType = outputPType;
this.children = ImmutableList.copyOf(children);
outputPType.initialize(conf);
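// With more than one downstream node, hand each child a detached (deep)
// copy so that object reuse in one branch cannot corrupt another branch.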
needDetachedValues = !disableDeepCopy && this.children.size() > 1;
}
@Override
public void emit(Object emitted) {
for (RTNode child : children) {
Object value = emitted;
if (needDetachedValues) {
value = this.outputPType.getDetachedValue(emitted);
}
child.process(value);
}
}
@Override
public void flush() {
// No-op
}
}
| 2,691 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/emit/MultipleOutputEmitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.emit;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.Emitter;
import org.apache.crunch.io.CrunchOutputs;
import org.apache.crunch.types.Converter;
public class MultipleOutputEmitter<T, K, V> implements Emitter<T> {
private final Converter converter;
private final CrunchOutputs<K, V> outputs;
private final String outputName;
public MultipleOutputEmitter(Converter converter, CrunchOutputs<K, V> outputs,
String outputName) {
this.converter = converter;
this.outputs = outputs;
this.outputName = outputName;
}
@Override
public void emit(T emitted) {
try {
this.outputs.write(outputName,
(K) converter.outputKey(emitted),
(V) converter.outputValue(emitted));
} catch (Exception e) {
throw new CrunchRuntimeException(e);
}
}
@Override
public void flush() {
// No-op
}
}
| 2,692 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/CrunchCombiner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
public class CrunchCombiner extends CrunchReducer {
@Override
protected NodeContext getNodeContext() {
return NodeContext.COMBINE;
}
}
| 2,693 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/CrunchTaskContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
import java.io.IOException;
import java.util.List;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.impl.mr.plan.PlanningParameters;
import org.apache.crunch.io.CrunchOutputs;
import org.apache.crunch.util.DistCache;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
class CrunchTaskContext {
private final TaskInputOutputContext<Object, Object, Object, Object> taskContext;
private final NodeContext nodeContext;
private final List<RTNode> nodes;
private CrunchOutputs<Object, Object> multipleOutputs;
public CrunchTaskContext(TaskInputOutputContext<Object, Object, Object, Object> taskContext,
NodeContext nodeContext) {
this.taskContext = taskContext;
this.nodeContext = nodeContext;
Configuration conf = taskContext.getConfiguration();
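// The planner serializes the RTNode tree for each phase into the
// distributed cache under a file named after the node context
// (MAP, REDUCE, or COMBINE); deserialize it here.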
Path path = new Path(nodeContext.toString());
try {
this.nodes = (List<RTNode>) DistCache.read(conf, path);
} catch (IOException e) {
throw new CrunchRuntimeException("Could not read runtime node information", e);
}
}
public TaskInputOutputContext<Object, Object, Object, Object> getContext() {
return taskContext;
}
public NodeContext getNodeContext() {
return nodeContext;
}
public List<RTNode> getNodes() {
return nodes;
}
public boolean isDebugRun() {
Configuration conf = taskContext.getConfiguration();
return conf.getBoolean(RuntimeParameters.DEBUG, false);
}
public void cleanup() {
if (multipleOutputs != null) {
try {
multipleOutputs.close();
} catch (IOException e) {
throw new CrunchRuntimeException(e);
} catch (InterruptedException e) {
throw new CrunchRuntimeException(e);
}
}
}
public CrunchOutputs<Object, Object> getMultipleOutputs() {
if (multipleOutputs == null) {
multipleOutputs = new CrunchOutputs<Object, Object>(taskContext);
}
return multipleOutputs;
}
}
| 2,694 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/CrunchCombineFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import java.io.IOException;
public class CrunchCombineFileInputFormat<K, V> extends CombineFileInputFormat<K, V> {
public CrunchCombineFileInputFormat(JobContext jobContext) {
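// If no explicit max split size was configured, fall back to the Crunch
// combine-file block size setting, and failing that to the cluster's
// dfs.blocksize (defaulting to 128 MB).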
if (getMaxSplitSize(jobContext) == Long.MAX_VALUE) {
Configuration conf = jobContext.getConfiguration();
if (conf.get(RuntimeParameters.COMBINE_FILE_BLOCK_SIZE) != null) {
setMaxSplitSize(conf.getLong(RuntimeParameters.COMBINE_FILE_BLOCK_SIZE, 0));
} else {
setMaxSplitSize(jobContext.getConfiguration().getLongBytes("dfs.blocksize", 134217728L));
}
}
}
@Override
public RecordReader createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException {
throw new UnsupportedOperationException("CrunchCombineFileInputFormat.createRecordReader should never be called");
}
}
| 2,695 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/CrunchReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
import org.apache.crunch.impl.SingleUseIterable;
import org.apache.hadoop.mapreduce.Reducer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class CrunchReducer extends Reducer<Object, Object, Object, Object> {
private static final Logger LOG = LoggerFactory.getLogger(CrunchReducer.class);
private RTNode node;
private CrunchTaskContext ctxt;
private boolean debug;
protected NodeContext getNodeContext() {
return NodeContext.REDUCE;
}
@Override
protected void setup(Reducer<Object, Object, Object, Object>.Context context) {
if (ctxt == null) {
this.ctxt = new CrunchTaskContext(context, getNodeContext());
this.debug = ctxt.isDebugRun();
}
this.node = ctxt.getNodes().get(0);
this.node.initialize(ctxt);
}
@Override
protected void reduce(Object key, Iterable<Object> values, Reducer<Object, Object, Object, Object>.Context context) {
values = new SingleUseIterable<Object>(values);
if (debug) {
try {
node.processIterable(key, values);
} catch (Exception e) {
LOG.error("Reducer exception", e);
}
} else {
node.processIterable(key, values);
}
}
@Override
protected void cleanup(Reducer<Object, Object, Object, Object>.Context context) {
node.cleanup();
ctxt.cleanup();
}
}
| 2,696 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/CrunchOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
import org.apache.crunch.io.CrunchOutputs;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import java.io.IOException;
public class CrunchOutputFormat<K, V> extends OutputFormat<K, V> {
@Override
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext taskAttemptContext)
throws IOException, InterruptedException {
return new RecordWriter<K, V>() {
@Override
public void write(K k, V v) throws IOException, InterruptedException {
}
@Override
public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
}
};
}
@Override
public void checkOutputSpecs(JobContext jobContext) throws IOException, InterruptedException {
CrunchOutputs.checkOutputSpecs(jobContext);
}
@Override
public OutputCommitter getOutputCommitter(TaskAttemptContext taskAttemptContext)
throws IOException, InterruptedException {
return CrunchOutputs.getOutputCommitter(taskAttemptContext);
}
}
| 2,697 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/RTNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
import java.io.Serializable;
import java.util.List;
import org.apache.crunch.CrunchRuntimeException;
import org.apache.crunch.DoFn;
import org.apache.crunch.Emitter;
import org.apache.crunch.impl.mr.emit.IntermediateEmitter;
import org.apache.crunch.impl.mr.emit.MultipleOutputEmitter;
import org.apache.crunch.impl.mr.emit.OutputEmitter;
import org.apache.crunch.types.Converter;
import org.apache.crunch.types.PType;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class RTNode implements Serializable {
private static final Logger LOG = LoggerFactory.getLogger(RTNode.class);
private final String nodeName;
private DoFn<Object, Object> fn;
private PType<Object> outputPType;
private final List<RTNode> children;
private final Converter inputConverter;
private final Converter outputConverter;
private final String outputName;
private transient Emitter<Object> emitter;
public RTNode(DoFn<Object, Object> fn,
PType<Object> outputPType,
String name,
List<RTNode> children,
Converter inputConverter,
Converter outputConverter,
String outputName) {
this.fn = fn;
this.outputPType = outputPType;
this.nodeName = name;
this.children = children;
this.inputConverter = inputConverter;
this.outputConverter = outputConverter;
this.outputName = outputName;
}
public void initialize(CrunchTaskContext ctxt) {
if (emitter != null) {
// Already initialized
return;
}
fn.setContext(ctxt.getContext());
fn.initialize();
for (RTNode child : children) {
child.initialize(ctxt);
}
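// Pick the emitter for this node: a leaf with a named output writes via
// CrunchOutputs, a leaf without one writes straight to the task context,
// and an interior node pipes its output into its children.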
if (outputConverter != null) {
if (outputName != null) {
this.emitter = new MultipleOutputEmitter(outputConverter, ctxt.getMultipleOutputs(),
outputName);
} else {
this.emitter = new OutputEmitter(outputConverter, ctxt.getContext());
}
} else if (!children.isEmpty()) {
Configuration conf = ctxt.getContext().getConfiguration();
boolean disableDeepCopy = conf.getBoolean(RuntimeParameters.DISABLE_DEEP_COPY, false);
this.emitter = new IntermediateEmitter(outputPType, children, conf, disableDeepCopy || fn.disableDeepCopy());
} else {
throw new CrunchRuntimeException("Invalid RTNode config: no emitter for: " + nodeName);
}
}
public boolean isLeafNode() {
return outputConverter != null && children.isEmpty();
}
public void process(Object input) {
try {
fn.process(input, emitter);
} catch (CrunchRuntimeException e) {
if (!e.wasLogged()) {
LOG.info("Crunch exception in '{}' for input: {}", new Object[]{nodeName, input, e});
e.markLogged();
}
throw e;
}
}
public void process(Object key, Object value) {
process(inputConverter.convertInput(key, value));
}
public void processIterable(Object key, Iterable values) {
process(inputConverter.convertIterableInput(key, values));
}
public void cleanup() {
fn.cleanup(emitter);
emitter.flush();
for (RTNode child : children) {
child.cleanup();
}
}
@Override
public String toString() {
return "RTNode [nodeName=" + nodeName + ", fn=" + fn + ", children=" + children + ", inputConverter="
+ inputConverter + ", outputConverter=" + outputConverter + ", outputName=" + outputName + "]";
}
// Attributes needed to plot the dotfile diagrams
public String getNodeName() {
return this.nodeName;
}
public String getOutputName() {
return this.outputName;
}
public PType getPType() {
return outputPType;
}
public List<RTNode> getChildren() {
return children;
}
public DoFn<Object, Object> getDoFn() {
return fn;
}
public Converter getInputConverter() {
return inputConverter;
}
public Converter getOutputConverter() {
return outputConverter;
}
public Emitter<Object> getEmitter() {
return emitter;
}
}
| 2,698 |
0 |
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr
|
Create_ds/crunch/crunch-core/src/main/java/org/apache/crunch/impl/mr/run/CrunchRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.crunch.impl.mr.run;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.util.ReflectionUtils;
class CrunchRecordReader<K, V> extends RecordReader<K, V> {
private RecordReader<K, V> curReader;
private CrunchInputSplit crunchSplit;
private CombineFileSplit combineFileSplit;
private TaskAttemptContext context;
private int idx;
private long progress;
private Configuration rootConf;
public CrunchRecordReader(InputSplit inputSplit, final TaskAttemptContext context) throws IOException,
InterruptedException {
this.crunchSplit = (CrunchInputSplit) inputSplit;
if (crunchSplit.get() instanceof CombineFileSplit) {
combineFileSplit = (CombineFileSplit) crunchSplit.get();
}
rootConf = context.getConfiguration();
crunchSplit.setConf(context.getConfiguration());
this.context = new TaskAttemptContextImpl(crunchSplit.getConf(), context.getTaskAttemptID());
initNextRecordReader();
}
private boolean initNextRecordReader() throws IOException, InterruptedException {
if (combineFileSplit != null) {
if (curReader != null) {
curReader.close();
curReader = null;
if (idx > 0) {
progress += combineFileSplit.getLength(idx - 1);
}
}
// if all chunks have been processed, nothing more to do.
if (idx == combineFileSplit.getNumPaths()) {
return false;
}
} else if (idx > 0) {
return false;
}
idx++;
InputFormat<K, V> inputFormat = (InputFormat<K, V>) ReflectionUtils.newInstance(
crunchSplit.getInputFormatClass(),
crunchSplit.getConf());
InputSplit inputSplit = getDelegateSplit();
if (inputSplit instanceof FileSplit) {
rootConf.set("crunch.split.file", ((FileSplit) inputSplit).getPath().toString());
}
this.curReader = inputFormat.createRecordReader(inputSplit, context);
return true;
}
private InputSplit getDelegateSplit() throws IOException {
if (combineFileSplit != null) {
return new FileSplit(combineFileSplit.getPath(idx - 1),
combineFileSplit.getOffset(idx - 1),
combineFileSplit.getLength(idx - 1),
combineFileSplit.getLocations());
} else {
return crunchSplit.get();
}
}
@Override
public void close() throws IOException {
if (curReader != null) {
curReader.close();
curReader = null;
}
}
@Override
public K getCurrentKey() throws IOException, InterruptedException {
return curReader.getCurrentKey();
}
@Override
public V getCurrentValue() throws IOException, InterruptedException {
return curReader.getCurrentValue();
}
@Override
public float getProgress() throws IOException, InterruptedException {
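// "progress" accumulates the byte lengths of fully-consumed chunks; weight
// the current reader's fractional progress by its own chunk length so the
// combined value is a bytes-based fraction of the overall split.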
float curProgress = 0; // bytes processed in current split
if (null != curReader) {
curProgress = (float)(curReader.getProgress() * getCurLength());
}
return Math.min(1.0f, (progress + curProgress)/getOverallLength());
}
private long getCurLength() {
if (combineFileSplit == null) {
return 1L;
} else {
return combineFileSplit.getLength(idx - 1);
}
}
private float getOverallLength() {
if (combineFileSplit == null) {
return 1.0f;
} else {
return (float) combineFileSplit.getLength();
}
}
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext context) throws IOException, InterruptedException {
this.crunchSplit = (CrunchInputSplit) inputSplit;
crunchSplit.setConf(context.getConfiguration());
this.context = new TaskAttemptContextImpl(crunchSplit.getConf(), context.getTaskAttemptID());
if (crunchSplit.get() instanceof CombineFileSplit) {
combineFileSplit = (CombineFileSplit) crunchSplit.get();
}
if (curReader != null) {
curReader.initialize(getDelegateSplit(), this.context);
}
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
while ((curReader == null) || !curReader.nextKeyValue()) {
if (!initNextRecordReader()) {
return false;
}
if (curReader != null) {
curReader.initialize(getDelegateSplit(), context);
}
}
return true;
}
}
| 2,699 |